import argparse
import os
from pathlib import Path
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as T
from tqdm import tqdm
from torchvision.datasets import CIFAR10, CIFAR100, STL10, ImageNet, ImageFolder
import numpy as np
import pandas as pd
from models.neuralhash import NeuralHash
from utils.hashing import load_hash_matrix
from utils.transforms import Rotate, Translate, ChangeSaturation, ChangeHue, ChangeContrast, ChangeBrightness, \
JpegCompression, HorizontalFlipping, BlackBorder, CenterCrop, VerticalFlipping
def get_dataset(dataset_name: str, additional_transforms=None):
img_transform = get_transforms(additional_transforms=additional_transforms)
if dataset_name.lower() == 'stl10':
dataset = STL10(root='data', split='train', transform=img_transform, download=True)
elif dataset_name.lower() == 'cifar10':
dataset = CIFAR10(root='data', train=True, transform=img_transform, download=True)
elif dataset_name.lower() == 'cifar100':
dataset = CIFAR100(root='data', train=True, transform=img_transform, download=True)
elif dataset_name.lower() == 'imagenet_test':
dataset = ImageFolder(root='data/ILSVRC2012_test', transform=img_transform)
elif dataset_name.lower() == 'imagenet_train':
dataset = ImageNet(root='data/ILSVRC2012', split='train', transform=img_transform)
elif dataset_name.lower() == 'imagenet_val':
dataset = ImageNet(root='data/ILSVRC2012', split='val', transform=img_transform)
else:
raise RuntimeError(f'Dataset with name {dataset_name} was not found.')
return dataset
def get_transforms(additional_transforms=None):
transforms = [
T.Resize((360, 360)),
T.ToTensor()
]
    if isinstance(additional_transforms, list):
transforms.extend(additional_transforms)
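    # rescale ToTensor's [0, 1] output to the [-1, 1] range the network expects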
transforms.append(T.Lambda(lambda x: x * 2 - 1))
img_transform = T.Compose(transforms)
return img_transform
def get_translation_tuples(max_trans, trans_log_base, trans_steps):
translations = []
values = np.unique(
np.ceil(
np.logspace(0, np.log(max_trans) / np.log(trans_log_base), trans_steps, endpoint=True, base=trans_log_base)
).astype(int)
)
values = [0] + values.tolist()
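    # with the defaults (max_trans=64, trans_log_base=2, trans_steps=7) this
    # yields offsets [0, 1, 2, 4, 8, 16, 32, 64], i.e. 8 x 8 = 64 (hor, vert) pairs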
for hor_trans in values:
for vert_trans in values:
translations.append((hor_trans, vert_trans))
return translations
def get_rotation_angles(max_rot_angle, rot_log_base, rot_steps):
# create the list of angle and rotation values
angles = np.unique(
np.ceil(
np.logspace(0, np.log(max_rot_angle) / np.log(rot_log_base), rot_steps, endpoint=True, base=rot_log_base)
).astype(int)
)
angles = np.flip(-angles).tolist() + [0] + angles.tolist()
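    # with the defaults (max_rot_angle=64, rot_log_base=2, rot_steps=7) this
    # yields [-64, -32, -16, -8, -4, -2, -1, 0, 1, 2, 4, 8, 16, 32, 64]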
return angles
def get_hashes(dataset, model, seed, device, batch_size=128, num_workers=8):
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False)
binary_hashes = []
hex_hashes = []
with torch.no_grad():
for x, y in tqdm(dataloader, desc='Getting Neural Hashes', leave=False):
x = x.to(device)
hash = model(x).squeeze().unsqueeze(2)
hash = torch.matmul(seed.repeat(len(x), 1, 1), hash)
hash = torch.sign(hash).view(len(x), -1).cpu()
# convert the tensor from [-1, 1] to [0, 1]
hash = (hash > 0).type(torch.IntTensor)
hash_bin = [''.join(list(map(str, x.tolist()))) for x in hash]
hash_hex = ['{:0{}x}'.format(int(hash_bits, 2), len(hash_bits) // 4) for hash_bits in hash_bin]
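                # e.g. a 96-bit binary string becomes a 24-character hex digest,
                # since '{:0{}x}' packs 4 bits into each hex digit (len // 4)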
binary_hashes.extend(hash_bin)
hex_hashes.extend(hash_hex)
return binary_hashes, hex_hashes
def run_augmentation(dataset, model, seed, device, augmentation, augmentation_inputs, file_paths, batch_size=128,
num_workers=8):
for augm_input, file_path in tqdm(zip(augmentation_inputs, file_paths), desc=augmentation.__name__ if augmentation else 'Original', total=len(augmentation_inputs)):
        if os.path.exists(file_path):
            continue
        # create an empty placeholder file so that concurrent runs skip
        # augmentations already claimed by another process
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        Path(file_path).touch(exist_ok=False)
if augmentation is not None:
new_transforms = get_transforms(additional_transforms=[augmentation(augm_input) if augm_input is not None else augmentation()])
dataset.transform = new_transforms
binary_hashes, hex_hashes = get_hashes(dataset, model, seed, device, batch_size=batch_size,
num_workers=num_workers)
hash_df = pd.DataFrame(columns=['image', 'hash_bin', 'hash_hex'])
hash_df = hash_df.assign(hash_bin=binary_hashes, hash_hex=hex_hashes)
if hasattr(dataset, 'imgs'):
hash_df = hash_df.assign(image=list(np.array(dataset.imgs)[:, 0]))
        hash_df.to_csv(file_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='imagenet_train',
choices=['stl10', 'cifar10', 'cifar100', 'imagenet_test', 'imagenet_train', 'imagenet_val'], type=str,
help='The dataset that is used')
parser.add_argument('--batch_size', default=128, type=int, help='The batch size used for inference')
parser.add_argument('--max_rot_angle', default=64, type=int,
help='The angle (in degrees) by which the image is rotated clockwise and counterclockwise')
parser.add_argument('--rot_log_base', default=2, type=int, help='The logarithm base')
parser.add_argument('--rot_steps', default=7, type=int, help='The number of rotations steps')
parser.add_argument('--max_trans', default=64, type=int,
help='The max translation in pixels by which the image is going to be translated')
parser.add_argument('--trans_log_base', default=2, type=int, help='The logarithm base')
parser.add_argument('--trans_steps', default=7, type=int,
help='The number of translation steps in vertical and horizontal direction, respectively')
parser.add_argument('--device', default='cuda', type=str, help='The device used for inference')
parser.add_argument('--num_workers', default=8, type=int,
help='The number of workers that is used for loading the data')
parser.add_argument('--output_dir', default='logs', type=str,
help='The output directory where the results are going to be saved as CSV files')
args = parser.parse_args()
device = torch.device(args.device)
model = NeuralHash()
model.load_state_dict(torch.load('./models/model.pth'))
model = model.to(device)
seed = torch.tensor(load_hash_matrix())
seed = seed.to(device)
output_dir = os.path.join(args.output_dir, f'{args.dataset}')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
dataset = get_dataset(args.dataset)
# get the rotation angles and the translation tuples
angles = get_rotation_angles(args.max_rot_angle, args.rot_log_base, args.rot_steps)
translations = get_translation_tuples(args.max_trans, args.trans_log_base, args.trans_steps)
hue_values = list(range(-180, 180, 30))
saturation_values = list(np.linspace(0, 2, 9, endpoint=True))
brightness_values = list(np.linspace(0, 2, 9, endpoint=True))
contrast_values = list(np.linspace(0, 2, 9, endpoint=True))
compression_values = [100] + list(
(100 - np.ceil(np.logspace(0, np.log(100) / np.log(1.5), 10, endpoint=True, base=1.5))).clip(0, 100)
)
crop_values = list(
filter(
lambda x: x != 359,
[360] + list(360 - np.append(np.logspace(0, 7, 8, base=2, endpoint=True, dtype=int), [180]))
)
)
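    # with the defaults this evaluates to [360, 358, 356, 352, 344, 328, 296, 232, 180]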
    downsizing_values = list(crop_values)  # same size schedule as the center crop
iterations = len(angles) + len(translations) + len(hue_values) + len(saturation_values) + \
len(brightness_values) + len(contrast_values) + len(compression_values) + len(crop_values) + len(downsizing_values) + 1
# get the initial hashes
run_augmentation(
dataset,
model,
seed,
device,
None,
[None],
[os.path.join(output_dir, f'{args.dataset}_original.csv')],
batch_size=args.batch_size,
num_workers=args.num_workers
)
# test the robustness against rotations
run_augmentation(
dataset,
model,
seed,
device,
Rotate,
angles,
[os.path.join(output_dir, 'rotation', f'{args.dataset}_rotation_{angle}.csv') for angle in angles],
batch_size=args.batch_size,
num_workers=args.num_workers
)
# test the robustness against translations
run_augmentation(
dataset,
model,
seed,
device,
Translate,
translations,
[os.path.join(output_dir, 'translation', f'{args.dataset}_translation_{translation[0]}_{translation[1]}.csv') for translation in translations],
batch_size=args.batch_size,
num_workers=args.num_workers
)
# test the robustness against hue changes
run_augmentation(
dataset,
model,
seed,
device,
ChangeHue,
hue_values,
[os.path.join(output_dir, 'hue', f'{args.dataset}_hue_{hue}.csv') for hue in hue_values],
batch_size=args.batch_size,
num_workers=args.num_workers
)
# test the robustness against saturation changes
run_augmentation(
dataset,
model,
seed,
device,
ChangeSaturation,
saturation_values,
[os.path.join(output_dir, 'saturation', f'{args.dataset}_saturation_{saturation}.csv') for saturation in saturation_values],
batch_size=args.batch_size,
num_workers=args.num_workers
)
# test the robustness against brightness changes
run_augmentation(
dataset,
model,
seed,
device,
ChangeBrightness,
brightness_values,
[os.path.join(output_dir, 'brightness', f'{args.dataset}_brightness_{brightness}.csv') for brightness in brightness_values],
batch_size=args.batch_size,
num_workers=args.num_workers
)
# test the robustness against contrast changes
run_augmentation(
dataset,
model,
seed,
device,
ChangeContrast,
contrast_values,
[os.path.join(output_dir, 'contrast', f'{args.dataset}_contrast_{contrast}.csv') for contrast in contrast_values],
batch_size=args.batch_size,
num_workers=args.num_workers
)
# test the robustness against compression
run_augmentation(
dataset,
model,
seed,
device,
JpegCompression,
compression_values,
[os.path.join(output_dir, 'compression', f'{args.dataset}_compression_{compression}.csv') for compression in compression_values],
batch_size=args.batch_size,
num_workers=args.num_workers
)
run_augmentation(
dataset,
model,
seed,
device,
CenterCrop,
crop_values,
[os.path.join(output_dir, 'crop', f'{args.dataset}_crop_{crop}.csv') for crop in crop_values],
batch_size=args.batch_size,
num_workers=args.num_workers
)
run_augmentation(
dataset,
model,
seed,
device,
HorizontalFlipping,
[None],
[os.path.join(output_dir, 'hflip', f'{args.dataset}_hflip.csv')],
batch_size=args.batch_size,
num_workers=args.num_workers
)
run_augmentation(
dataset,
model,
seed,
device,
VerticalFlipping,
[None],
[os.path.join(output_dir, 'vflip', f'{args.dataset}_vflip.csv')],
batch_size=args.batch_size,
num_workers=args.num_workers
)
run_augmentation(
dataset,
model,
seed,
device,
BlackBorder,
downsizing_values,
[os.path.join(output_dir, 'downsizing', f'{args.dataset}_downsizing_{size}.csv') for size in downsizing_values],
batch_size=args.batch_size,
num_workers=args.num_workers
)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/availability-msgs.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1dproto/availability-msgs.proto\x12\x05proto\"#\n\x10SiteAvailableReq\x12\x0f\n\x07site_id\x18\x01 \x01(\x03\"-\n\x10SiteAvailableRes\x12\x19\n\x04site\x18\x01 \x01(\x0b\x32\x0b.proto.Site\"\x13\n\x11SitesAvailableReq\"?\n\x11SitesAvailableRes\x12*\n\tresponses\x18\x01 \x03(\x0b\x32\x17.proto.SiteAvailableRes\"*\n\x04Site\x12\x0f\n\x07site_id\x18\x01 \x01(\x03\x12\x11\n\tavailable\x18\x02 \x01(\x08\x42\tZ\x07.;protob\x06proto3')
_SITEAVAILABLEREQ = DESCRIPTOR.message_types_by_name['SiteAvailableReq']
_SITEAVAILABLERES = DESCRIPTOR.message_types_by_name['SiteAvailableRes']
_SITESAVAILABLEREQ = DESCRIPTOR.message_types_by_name['SitesAvailableReq']
_SITESAVAILABLERES = DESCRIPTOR.message_types_by_name['SitesAvailableRes']
_SITE = DESCRIPTOR.message_types_by_name['Site']
SiteAvailableReq = _reflection.GeneratedProtocolMessageType('SiteAvailableReq', (_message.Message,), {
'DESCRIPTOR' : _SITEAVAILABLEREQ,
'__module__' : 'proto.availability_msgs_pb2'
# @@protoc_insertion_point(class_scope:proto.SiteAvailableReq)
})
_sym_db.RegisterMessage(SiteAvailableReq)
SiteAvailableRes = _reflection.GeneratedProtocolMessageType('SiteAvailableRes', (_message.Message,), {
'DESCRIPTOR' : _SITEAVAILABLERES,
'__module__' : 'proto.availability_msgs_pb2'
# @@protoc_insertion_point(class_scope:proto.SiteAvailableRes)
})
_sym_db.RegisterMessage(SiteAvailableRes)
SitesAvailableReq = _reflection.GeneratedProtocolMessageType('SitesAvailableReq', (_message.Message,), {
'DESCRIPTOR' : _SITESAVAILABLEREQ,
'__module__' : 'proto.availability_msgs_pb2'
# @@protoc_insertion_point(class_scope:proto.SitesAvailableReq)
})
_sym_db.RegisterMessage(SitesAvailableReq)
SitesAvailableRes = _reflection.GeneratedProtocolMessageType('SitesAvailableRes', (_message.Message,), {
'DESCRIPTOR' : _SITESAVAILABLERES,
'__module__' : 'proto.availability_msgs_pb2'
# @@protoc_insertion_point(class_scope:proto.SitesAvailableRes)
})
_sym_db.RegisterMessage(SitesAvailableRes)
Site = _reflection.GeneratedProtocolMessageType('Site', (_message.Message,), {
'DESCRIPTOR' : _SITE,
'__module__' : 'proto.availability_msgs_pb2'
# @@protoc_insertion_point(class_scope:proto.Site)
})
_sym_db.RegisterMessage(Site)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'Z\007.;proto'
_SITEAVAILABLEREQ._serialized_start=40
_SITEAVAILABLEREQ._serialized_end=75
_SITEAVAILABLERES._serialized_start=77
_SITEAVAILABLERES._serialized_end=122
_SITESAVAILABLEREQ._serialized_start=124
_SITESAVAILABLEREQ._serialized_end=143
_SITESAVAILABLERES._serialized_start=145
_SITESAVAILABLERES._serialized_end=208
_SITE._serialized_start=210
_SITE._serialized_end=252
# @@protoc_insertion_point(module_scope)
|
import setuptools
with open('README.md', 'r') as f:
long_description = f.read()
setuptools.setup(
name = 'pkgname',
version = '0.1.0',
    author = 'Ryan J. Price',
    author_email = 'ryapric@gmail.com',
description = 'Short description',
long_description = long_description,
url = 'https://url_to_repo',
packages = setuptools.find_packages(),
    python_requires = '>=3.6',
install_requires = [
'pandas >= 0.23.4'
],
extras_require = {
'dev': [
'coverage',
'pytest',
'pytest-cov',
'mypy'
]
},
classifiers = [
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
],
entry_points = {
'console_scripts': [
'modulename-main = pkgname.modulename.modulename_main:main'
]
},
include_package_data = True
)
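# With the 'dev' extra declared above, a local editable install including the
# dev tooling would be e.g.: pip install -e '.[dev]'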
|
def remove_intersections(intervals):
result = []
s = set(range(intervals[0][0], intervals[0][1] + 1))
i = 1
while i < len(intervals):
new_interval = set(range(intervals[i][0], intervals[i][1] + 1))
if not s.isdisjoint(new_interval):
s = s.union(new_interval)
i += 1
else:
            # sets are unordered, so take min/max rather than relying on list order
            result.append((min(s), max(s)))
            s = set(range(intervals[i][0], intervals[i][1] + 1))
            i += 1
    if s:
        result.append((min(s), max(s)))
return result
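# The set-based merge above materializes every integer in each interval. A
# sketch of an equivalent endpoint-based merge (same closed-interval semantics,
# so touching intervals such as (7, 8) and (8, 10) are joined), assuming the
# input is a list of (lo, hi) pairs as in the tests below:
def remove_intersections_by_endpoints(intervals):
    intervals = sorted(intervals)
    merged = [list(intervals[0])]
    for lo, hi in intervals[1:]:
        if lo <= merged[-1][1]:
            # overlaps, or shares an endpoint with, the current interval
            merged[-1][1] = max(merged[-1][1], hi)
        else:
            merged.append([lo, hi])
    return [tuple(interval) for interval in merged]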
def test1():
intervals = [(1, 5), (2, 3), (4, 6), (7, 8), (8, 10), (12, 15)]
expected = [(1, 6), (7, 10), (12, 15)]
print(remove_intersections(intervals) == expected)
def test2():
intervals = [(9, 10), (10, 12), (11, 13), (14, 18), (15, 18)]
expected = [(9, 13), (14, 18)]
print(remove_intersections(intervals) == expected)
def main():
test1()
test2()
if __name__ == '__main__':
main()
|
# Copyright 2017, Wenjia Bai. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os, sys, time, math
import numpy as np
import nibabel as nib
import tensorflow.compat.v1 as tf
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from image_utils import rescale_intensity
""" Deployment parameters """
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('data_dir', '/vol/bitbucket/wbai/own_work/ukbb_cardiac_demo',
'Path to the data set directory, under which images '
'are organised in subdirectories for each subject.')
tf.app.flags.DEFINE_string('model_path',
'',
'Path to the saved trained model.')
tf.app.flags.DEFINE_boolean('process_seq', True,
'Process a time sequence of images.')
tf.app.flags.DEFINE_boolean('save_seg', True,
'Save segmentation.')
tf.app.flags.DEFINE_boolean('seg4', False,
'Segment all the 4 chambers in long-axis 4 chamber view. '
                            'This seg4 network is trained using 200 subjects from Application 18545. '
                            'By default, for all the other tasks (ventricular segmentation '
                            'on short-axis images and atrial segmentation on long-axis images), '
                            'the networks are trained using 3,975 subjects from Application 2964.')
if __name__ == '__main__':
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Import the computation graph and restore the variable values
saver = tf.train.import_meta_graph('{0}.meta'.format(FLAGS.model_path))
saver.restore(sess, '{0}'.format(FLAGS.model_path))
print('Start deployment on the data set ...')
start_time = time.time()
# Process each subject subdirectory
data_list = sorted(os.listdir(FLAGS.data_dir))
processed_list = []
table_time = []
for data in data_list:
print(data)
data_dir = os.path.join(FLAGS.data_dir, data)
if FLAGS.process_seq:
# Process the temporal sequence
image_name = '{0}/sa.nii'.format(data_dir)
if not os.path.exists(image_name):
print(' Directory {0} does not contain an image with file '
'name {1}. Skip.'.format(data_dir, os.path.basename(image_name)))
continue
# Read the image
print(' Reading {} ...'.format(image_name))
nim = nib.load(image_name)
                image = np.asanyarray(nim.dataobj)  # nim.get_data() is deprecated in recent nibabel
X, Y, Z, T = image.shape
orig_image = image
print(' Segmenting full sequence ...')
start_seg_time = time.time()
# Intensity rescaling
image = rescale_intensity(image, (1, 99))
# Prediction (segmentation)
pred = np.zeros(image.shape)
# Pad the image size to be a factor of 16 so that the
# downsample and upsample procedures in the network will
# result in the same image size at each resolution level.
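                # e.g. X = 190 pads to X2 = 192 with x_pre = 1 and x_post = 1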
X2, Y2 = int(math.ceil(X / 16.0)) * 16, int(math.ceil(Y / 16.0)) * 16
x_pre, y_pre = int((X2 - X) / 2), int((Y2 - Y) / 2)
x_post, y_post = (X2 - X) - x_pre, (Y2 - Y) - y_pre
image = np.pad(image, ((x_pre, x_post), (y_pre, y_post), (0, 0), (0, 0)), 'constant')
# Process each time frame
for t in range(T):
# Transpose the shape to NXYC
image_fr = image[:, :, :, t]
image_fr = np.transpose(image_fr, axes=(2, 0, 1)).astype(np.float32)
image_fr = np.expand_dims(image_fr, axis=-1)
# Evaluate the network
prob_fr, pred_fr = sess.run(['prob:0', 'pred:0'],
feed_dict={'image:0': image_fr, 'training:0': False})
# Transpose and crop segmentation to recover the original size
pred_fr = np.transpose(pred_fr, axes=(1, 2, 0))
pred_fr = pred_fr[x_pre:x_pre + X, y_pre:y_pre + Y]
pred[:, :, :, t] = pred_fr
seg_time = time.time() - start_seg_time
                print(' Segmentation time = {:.3f}s'.format(seg_time))
table_time += [seg_time]
processed_list += [data]
# ED frame defaults to be the first time frame.
# Determine ES frame according to the minimum LV volume.
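                # label 1 is taken here to be the LV cavity, so the frame with
                # the fewest label-1 voxels across the sequence is ES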
k = {}
k['ED'] = 0
k['ES'] = np.argmin(np.sum(pred == 1, axis=(0, 1, 2)))
print(' ED frame = {:d}, ES frame = {:d}'.format(k['ED'], k['ES']))
# Save the segmentation
if FLAGS.save_seg:
print(' Saving segmentation ...')
nim2 = nib.Nifti1Image(pred, nim.affine)
nim2.header['pixdim'] = nim.header['pixdim']
seg_name = '{0}/seg_sa.nii.gz'.format(data_dir)
nib.save(nim2, seg_name)
for fr in ['ED', 'ES']:
nib.save(nib.Nifti1Image(orig_image[:, :, :, k[fr]], nim.affine),
'{0}/sa_{1}.nii.gz'.format(data_dir, fr))
seg_name = '{0}/seg_sa_{1}.nii.gz'.format(data_dir, fr)
nib.save(nib.Nifti1Image(pred[:, :, :, k[fr]], nim.affine), seg_name)
print('Average segmentation time = {:.3f}s per sequence'.format(np.mean(table_time)))
process_time = time.time() - start_time
print('Including image I/O, CUDA resource allocation, '
'it took {:.3f}s for processing {:d} subjects ({:.3f}s per subjects).'.format(
process_time, len(processed_list), process_time / len(processed_list)))
|
from .signature_help import (
create_signature_help, SignatureHelp, get_documentation,
parse_signature_information, ScopeRenderer, render_signature_label
)
import unittest
signature = {
'label': 'foo_bar(value: int) -> None',
'documentation': {'value': 'The default function for foobaring'},
'parameters': [{
'label': 'value',
'documentation': {
'value': 'A number to foobar on'
}
}]
} # type: dict
signature_overload = {
'label': 'foo_bar(value: int, multiplier: int) -> None',
'documentation': {'value': 'Foobaring with a multiplier'},
'parameters': [{
'label': 'value',
'documentation': {
'value': 'A number to foobar on'
}
}, {
'label': 'multiplier',
'documentation': 'Change foobar to work on larger increments'
}]
} # type: dict
signature_missing_label = {
'documentation': '',
'parameters': [{
'documentation': None,
'label': 'verbose_name'
}, {
'documentation': None,
'label': 'name'
}, {
'documentation': None,
'label': 'primary_key'
}, {
'documentation': None,
'label': 'max_length'
}],
'label': ''
}
signature_information = parse_signature_information(signature)
signature_overload_information = parse_signature_information(signature_overload)
signature_missing_label_information = parse_signature_information(signature_missing_label)
SINGLE_SIGNATURE = """<div class="highlight"><pre>
<entity.name.function>foo_bar
<punctuation>(</punctuation>
<variable.parameter emphasize>value</variable.parameter>: int
<punctuation>)</punctuation> -> None</entity.name.function>
</pre></div>
<p>The default function for foobaring</p>
<p><b>value</b>: A number to foobar on</p>"""
MISSING_LABEL_SIGNATURE = """<div class="highlight"><pre>
<entity.name.function></entity.name.function>
</pre></div>"""
OVERLOADS_FIRST = """**1** of **2** overloads (use the ↑ ↓ keys to navigate):
<div class="highlight"><pre>
<entity.name.function>foo_bar
<punctuation>(</punctuation>
<variable.parameter emphasize>value</variable.parameter>: int
<punctuation>)</punctuation> -> None</entity.name.function>
</pre></div>
<p>The default function for foobaring</p>
<p><b>value</b>: A number to foobar on</p>"""
OVERLOADS_SECOND = """**2** of **2** overloads (use the ↑ ↓ keys to navigate):
<div class="highlight"><pre>
<entity.name.function>foo_bar
<punctuation>(</punctuation>
<variable.parameter emphasize>value</variable.parameter>: int,\
\n<variable.parameter>multiplier</variable.parameter>: int
<punctuation>)</punctuation> -> None</entity.name.function>
</pre></div>
<p>Foobaring with a multiplier</p>
<p><b>value</b>: A number to foobar on</p>"""
OVERLOADS_SECOND_SECOND_PARAMETER = """**2** of **2** overloads (use the ↑ ↓ keys to navigate):
<div class="highlight"><pre>
<entity.name.function>foo_bar
<punctuation>(</punctuation>
<variable.parameter>value</variable.parameter>: int,\
\n<variable.parameter emphasize>multiplier</variable.parameter>: int
<punctuation>)</punctuation> -> None</entity.name.function>
</pre></div>
<p>Foobaring with a multiplier</p>
<p><b>multiplier</b>: Change foobar to work on larger increments</p>"""
JSON_STRINGIFY = """"""
def create_signature(label: str, *param_labels, **kwargs) -> dict:
raw = dict(label=label, parameters=list(dict(label=param_label) for param_label in param_labels))
raw.update(kwargs)
return raw
class TestRenderer(ScopeRenderer):
def function(self, content: str, escape: bool = True) -> str:
return self._wrap_with_scope_style(content, "entity.name.function")
def punctuation(self, content: str) -> str:
return self._wrap_with_scope_style(content, "punctuation")
def parameter(self, content: str, emphasize: bool = False) -> str:
return self._wrap_with_scope_style(content, "variable.parameter", emphasize)
def _wrap_with_scope_style(self, content: str, scope: str, emphasize: bool = False) -> str:
return '\n<{}{}>{}</{}>'.format(scope, " emphasize" if emphasize else "", content, scope)
def markdown(self, content: str) -> str:
return content
renderer = TestRenderer()
class GetDocumentationTests(unittest.TestCase):
def test_absent(self):
self.assertIsNone(get_documentation({}))
def test_is_str(self):
self.assertEqual(get_documentation({'documentation': 'str'}), 'str')
def test_is_dict(self):
self.assertEqual(get_documentation({'documentation': {'value': 'value'}}), 'value')
class CreateSignatureHelpTests(unittest.TestCase):
def test_none(self):
self.assertIsNone(create_signature_help(None))
def test_empty(self):
self.assertIsNone(create_signature_help({}))
def test_default_indices(self):
help = create_signature_help({"signatures": [signature]})
self.assertIsNotNone(help)
if help:
self.assertEqual(help._active_signature_index, 0)
self.assertEqual(help._active_parameter_index, -1)
class RenderSignatureLabelTests(unittest.TestCase):
def test_no_parameters(self):
sig = create_signature("foobar()")
help = create_signature_help(dict(signatures=[sig]))
if help:
label = render_signature_label(renderer, help.active_signature(), 0)
self.assertEqual(label, "\n<entity.name.function>foobar()</entity.name.function>")
def test_params(self):
sig = create_signature("foobar(foo, foo)", "foo", "foo", activeParameter=1)
help = create_signature_help(dict(signatures=[sig]))
if help:
label = render_signature_label(renderer, help.active_signature(), 1)
self.assertEqual(label, """
<entity.name.function>foobar
<punctuation>(</punctuation>
<variable.parameter>foo</variable.parameter>,\
\n<variable.parameter emphasize>foo</variable.parameter>
<punctuation>)</punctuation></entity.name.function>""")
def test_params_are_substrings(self):
sig = create_signature("foobar(self, foo: str, foo: i32)", "foo", "foo", activeParameter=1)
help = create_signature_help(dict(signatures=[sig]))
if help:
label = render_signature_label(renderer, help.active_signature(), 1)
self.assertEqual(label, """
<entity.name.function>foobar
<punctuation>(</punctuation>self,\
\n<variable.parameter>foo</variable.parameter>: str,\
\n<variable.parameter emphasize>foo</variable.parameter>: i32
<punctuation>)</punctuation></entity.name.function>""")
def test_params_are_substrings_before_comma(self):
sig = create_signature("f(x: str, t)", "x", "t")
help = create_signature_help(dict(signatures=[sig]))
if help:
label = render_signature_label(renderer, help.active_signature(), 0)
self.assertEqual(label, """
<entity.name.function>f
<punctuation>(</punctuation>\
\n<variable.parameter emphasize>x</variable.parameter>: str,\
\n<variable.parameter>t</variable.parameter>
<punctuation>)</punctuation></entity.name.function>""")
def test_params_with_range(self):
sig = create_signature("foobar(foo, foo)", [7, 10], [12, 15], activeParameter=1)
help = create_signature_help(dict(signatures=[sig]))
if help:
label = render_signature_label(renderer, help.active_signature(), 1)
self.assertEqual(label, """
<entity.name.function>foobar
<punctuation>(</punctuation>
<variable.parameter>foo</variable.parameter>,\
\n<variable.parameter emphasize>foo</variable.parameter>
<punctuation>)</punctuation></entity.name.function>""")
def test_params_no_parens(self):
# note: will not work without ranges: first "foo" param will match "foobar"
sig = create_signature("foobar foo foo", [7, 10], [11, 14], activeParameter=1)
help = create_signature_help(dict(signatures=[sig]))
if help:
label = render_signature_label(renderer, help.active_signature(), 1)
self.assertEqual(label, """
<entity.name.function>foobar\
\n<variable.parameter>foo</variable.parameter>\
\n<variable.parameter emphasize>foo</variable.parameter></entity.name.function>""")
class SignatureHelpTests(unittest.TestCase):
def test_single_signature(self):
help = SignatureHelp([signature_information])
self.assertIsNotNone(help)
if help:
content = help.build_popup_content(renderer)
self.assertFalse(help.has_multiple_signatures())
self.assertEqual(content, SINGLE_SIGNATURE)
def test_signature_missing_label(self):
help = SignatureHelp([signature_missing_label_information])
self.assertIsNotNone(help)
if help:
content = help.build_popup_content(renderer)
self.assertFalse(help.has_multiple_signatures())
self.assertEqual(content, MISSING_LABEL_SIGNATURE)
def test_overload(self):
help = SignatureHelp([signature_information, signature_overload_information])
self.assertIsNotNone(help)
if help:
content = help.build_popup_content(renderer)
self.assertTrue(help.has_multiple_signatures())
self.assertEqual(content, OVERLOADS_FIRST)
help.select_signature(1)
            help.select_signature(1)  # verify we don't go out of bounds
content = help.build_popup_content(renderer)
self.assertEqual(content, OVERLOADS_SECOND)
def test_active_parameter(self):
help = SignatureHelp([signature_information, signature_overload_information], active_signature=1,
active_parameter=1)
self.assertIsNotNone(help)
if help:
content = help.build_popup_content(renderer)
self.assertTrue(help.has_multiple_signatures())
self.assertEqual(content, OVERLOADS_SECOND_SECOND_PARAMETER)
|
# -*- coding: utf-8 -*-
from PyQt4 import QtCore
DB_DEBUG = False
# DATE
CURRENT_DATE = QtCore.QDate.currentDate()
CURRENT_DATETIME = QtCore.QDateTime.currentDateTime()
DATE_LEFT_INF = QtCore.QDate(2000, 1, 1)
DATE_RIGHT_INF = QtCore.QDate(2200, 1, 1)
DATETIME_LEFT_INF = QtCore.QDateTime(DATE_LEFT_INF)
DATETIME_RIGHT_INF = QtCore.QDateTime(DATE_RIGHT_INF)
|
# Generated by Django 2.2 on 2020-10-19 07:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myapp', '0003_householdappliance'),
]
operations = [
migrations.RenameModel(
old_name='ComputersLaptopsAndSoftware',
new_name='Products',
),
migrations.DeleteModel(
name='HouseholdAppliance',
),
]
|
# Modifications copyright 2022 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Original copyright (c) 2017 TU Berlin, Communication Systems Group
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so.
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Data class to store information of a single tracked detection."""
import numpy as np
class Track: # pylint: disable=too-few-public-methods
"""Stores information for each tracked detection.
Args:
track_id (int): Tracking ID of the detection.
bbox (np.ndarray): Bounding box coordinates with (t, l, w, h) format
where (t, l) is the top-left corner, w is the width, and h is the
height.
Attributes:
bbox (np.ndarray): Bounding box coordinates with (t, l, w, h) format
where (t, l) is the top-left corner, w is the width, and h is the
height.
iou_score (float): The Intersection-over-Union value between the
current `bbox` and the immediate previous `bbox`.
lost (int): The number of consecutive frames where this detection is
not found in the video frame.
track_id (int): Tracking ID of the detection.
"""
def __init__(self, track_id: int, bbox: np.ndarray) -> None:
self.track_id = track_id
self.lost = 0
self.update(bbox)
def update(self, bbox: np.ndarray, iou_score: float = 0.0) -> None:
"""Updates the tracking result with information from the latest frame.
Args:
bbox (np.ndarray): Bounding box with format (t, l, w, h) where
(t, l) is the top-left corner, w is the width, and h is the
height.
iou_score (float): Intersection-over-Union between the current
detection bounding box and its last detected bounding box.
"""
self.bbox = bbox
self.iou_score = iou_score
self.lost = 0
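
if __name__ == "__main__":
    # Minimal usage sketch with hypothetical values: a track is created from a
    # first detection, then updated with the matched box from the next frame.
    track = Track(track_id=0, bbox=np.array([10, 20, 50, 80]))
    track.update(np.array([12, 22, 50, 80]), iou_score=0.87)
    print(track.track_id, track.bbox, track.iou_score, track.lost)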
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from setuptools import setup, find_packages, Extension
ext_modules = [
Extension(
'enaml.weakmethod',
['enaml/src/weakmethod.cpp'],
language='c++',
),
Extension(
'enaml.callableref',
['enaml/src/callableref.cpp'],
language='c++',
),
Extension(
'enaml.signaling',
['enaml/src/signaling.cpp'],
language='c++',
),
Extension(
'enaml.core.funchelper',
['enaml/src/funchelper.cpp'],
language='c++',
),
Extension(
'enaml.colorext',
['enaml/src/colorext.cpp'],
language='c++',
),
Extension(
'enaml.fontext',
['enaml/src/fontext.cpp'],
language='c++',
)
]
setup(
name='enaml',
version='0.7.0',
author='The Nucleic Development Team',
author_email='sccolbert@gmail.com',
url='https://github.com/nucleic/enaml',
description='Declarative DSL for building rich user interfaces in Python',
long_description=open('README.md').read(),
requires=['atom', 'PyQt', 'ply', 'casuarius'],
install_requires=['distribute'],
packages=find_packages(),
package_data={'enaml.stdlib': ['*.enaml']},
entry_points={'console_scripts': ['enaml-run = enaml.runner:main']},
ext_modules=ext_modules,
)
|
"""
Created By Jivansh Sharma
September 2020
@parzuko
"""
import discord
import os
from get_token import token as TOKEN
from discord.ext import commands
elvis = commands.Bot(command_prefix=".")
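# remove the built-in help command, presumably so the cogs.help extension can register its own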
elvis.remove_command("help")
@elvis.event
async def on_ready():
print(f'{elvis.user} has logged in.\nStarting loading')
elvis.load_extension("cogs.weather")
elvis.load_extension("cogs.basic")
elvis.load_extension("cogs.help")
elvis.load_extension("cogs.music")
elvis.run(TOKEN)
|
import yaml
from Utils.utils import Log
from Handlers.data_handler import DataHandler
from Crawlers.naver_news_crawler import NaverNewsCrawler
with open("config.yaml", "rt", encoding="utf-8") as stream:
    # pass an explicit Loader: a bare yaml.load() is deprecated and emits YAMLLoadWarning
    CONFIG = yaml.load(stream, Loader=yaml.FullLoader)['NewsCrawler']
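# The keys read from CONFIG below imply a config.yaml shaped roughly like this
# (values illustrative, not from the source):
# NewsCrawler:
#   is_input_keywords: true
#   keywords: ["keyword1", "keyword2"]
#   iterate: false
#   start_date: "20200101"
#   end_date: "20200131"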
if __name__ == '__main__':
log = Log(__name__)
data_handler = DataHandler()
naver_crawler = NaverNewsCrawler(data_handler)
if CONFIG['is_input_keywords']:
search_keywords = CONFIG['keywords']
else:
search_keywords = data_handler.get_search_keywords()
if CONFIG['iterate']:
s_date, e_date = data_handler.get_range_search_date()
else:
s_date = CONFIG['start_date']
e_date = CONFIG['end_date']
url = naver_crawler.get_target_url(s_date, e_date)
naver_crawler.execute_crawler(search_keywords, url)
|
#!/bin/env python
# -*- coding: utf-8 -*-
import sys
import logging
from pathlib import Path
from gphotos.LocalFilesMedia import LocalFilesMedia
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger()
try:
path = Path("../photostream/photos/2022-03/_dsc0624.jpg")
lf = LocalFilesMedia(path)
logger.info("indexed local file: %s %s %s %s, desc=%s %s",
lf.relative_folder,
lf.filename,
lf.camera_model,
lf.uid,
lf.description,
lf.exif,
)
except Exception:
logger.error("file %s could not be made into a media obj", path, exc_info=True)
# local_file = LocalFilesMedia("")
|
"""
TibLib: a package implementing models and algorithms from the MLPatternRecognition course (polito)
"""
|
# Copyright 2016 Leon Poon and Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test import res
class TestRes(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_hasattr(self):
self.assertTrue(hasattr(res, 'pydtsxplode'))
def test_readRes(self):
f = res.pydtsxplode.dtsx['Package.dtsx']('rb') # @UndefinedVariable
try:
self.assertTrue(f.read())
finally:
f.close()
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
import json
import logging
import os
import re
import traceback
from time import sleep
import hypothesis.strategies as hst
import numpy as np
import pytest
from hypothesis import HealthCheck, given, settings
from numpy.testing import assert_allclose, assert_array_equal
from unittest.mock import patch
import qcodes as qc
from qcodes.dataset.data_set import load_by_id
from qcodes.dataset.descriptions.param_spec import ParamSpecBase
from qcodes.dataset.experiment_container import new_experiment
from qcodes.dataset.legacy_import import import_dat_file
from qcodes.dataset.measurements import Measurement
from qcodes.dataset.sqlite.connection import atomic_transaction
from qcodes.instrument.parameter import (ArrayParameter, Parameter,
expand_setpoints_helper)
from qcodes.tests.common import retry_until_does_not_throw, reset_config_on_exit
# pylint: disable=unused-import
from qcodes.tests.test_station import set_default_station_to_none
from qcodes.utils.validators import Arrays
from qcodes.dataset.export_config import DataExportType
def test_log_messages(caplog, meas_with_registered_param):
caplog.set_level(logging.INFO)
with meas_with_registered_param.run():
pass
assert "Set the run_timestamp of run_id" in caplog.text
assert "Starting measurement with guid" in caplog.text
assert "Finished measurement with guid" in caplog.text
def test_log_includes_extra_info(caplog, meas_with_registered_param):
caplog.set_level(logging.INFO)
meas_with_registered_param._extra_log_info = "some extra info"
with meas_with_registered_param.run():
pass
assert "some extra info" in caplog.text
def test_register_parameter_numbers(DAC, DMM):
"""
Test the registration of scalar QCoDeS parameters
"""
parameters = [DAC.ch1, DAC.ch2, DMM.v1, DMM.v2]
not_parameters = ['', 'Parameter', 0, 1.1, Measurement]
meas = Measurement()
for not_a_parameter in not_parameters:
with pytest.raises(ValueError):
meas.register_parameter(not_a_parameter)
my_param = DAC.ch1
meas.register_parameter(my_param)
assert len(meas.parameters) == 1
paramspec = meas.parameters[str(my_param)]
assert paramspec.name == str(my_param)
assert paramspec.label == my_param.label
assert paramspec.unit == my_param.unit
assert paramspec.type == 'numeric'
# we allow the registration of the EXACT same parameter twice...
meas.register_parameter(my_param)
# ... but not a different parameter with a new name
attrs = ['label', 'unit']
vals = ['new label', 'new unit']
for attr, val in zip(attrs, vals):
old_val = getattr(my_param, attr)
setattr(my_param, attr, val)
match = re.escape("Parameter already registered in this Measurement.")
with pytest.raises(ValueError, match=match):
meas.register_parameter(my_param)
setattr(my_param, attr, old_val)
assert len(meas.parameters) == 1
paramspec = meas.parameters[str(my_param)]
assert paramspec.name == str(my_param)
assert paramspec.label == my_param.label
assert paramspec.unit == my_param.unit
assert paramspec.type == 'numeric'
for parameter in parameters:
with pytest.raises(ValueError):
meas.register_parameter(my_param, setpoints=(parameter,))
with pytest.raises(ValueError):
meas.register_parameter(my_param, basis=(parameter,))
meas.register_parameter(DAC.ch2)
meas.register_parameter(DMM.v1)
meas.register_parameter(DMM.v2)
meas.unregister_parameter(my_param)
meas.register_parameter(my_param, basis=(DAC.ch2,),
setpoints=(DMM.v1, DMM.v2))
assert set(meas.parameters.keys()) == {str(DAC.ch2),
str(DMM.v1), str(DMM.v2),
str(my_param)}
paramspec = meas.parameters[str(my_param)]
assert paramspec.name == str(my_param)
meas = Measurement()
meas.register_parameter(DAC.ch1)
meas.register_parameter(DAC.ch2, setpoints=(DAC.ch1,))
with pytest.raises(ValueError):
meas.register_parameter(DMM.v1, setpoints=(DAC.ch2,))
def test_register_custom_parameter(DAC):
"""
Test the registration of custom parameters
"""
meas = Measurement()
name = 'V_modified'
unit = 'V^2'
label = 'square of the voltage'
meas.register_custom_parameter(name, label, unit)
assert len(meas.parameters) == 1
assert isinstance(meas.parameters[name], ParamSpecBase)
assert meas.parameters[name].unit == unit
assert meas.parameters[name].label == label
assert meas.parameters[name].type == 'numeric'
newunit = 'V^3'
newlabel = 'cube of the voltage'
meas.unregister_parameter(name)
meas.register_custom_parameter(name, newlabel, newunit)
assert len(meas.parameters) == 1
assert isinstance(meas.parameters[name], ParamSpecBase)
assert meas.parameters[name].unit == newunit
assert meas.parameters[name].label == newlabel
with pytest.raises(ValueError):
meas.register_custom_parameter(name, label, unit,
setpoints=(DAC.ch1,))
with pytest.raises(ValueError):
meas.register_custom_parameter(name, label, unit,
basis=(DAC.ch2,))
meas.register_parameter(DAC.ch1)
meas.register_parameter(DAC.ch2)
meas.register_custom_parameter('strange_dac')
meas.unregister_parameter(name)
meas.register_custom_parameter(name, label, unit,
setpoints=(DAC.ch1, str(DAC.ch2)),
basis=('strange_dac',))
assert len(meas.parameters) == 4
parspec = meas.parameters[name]
with pytest.raises(ValueError):
meas.register_custom_parameter('double dependence',
'label', 'unit', setpoints=(name,))
def test_unregister_parameter(DAC, DMM):
"""
Test the unregistering of parameters.
"""
DAC.add_parameter('impedance',
get_cmd=lambda: 5)
meas = Measurement()
meas.register_parameter(DAC.ch2)
meas.register_parameter(DMM.v1)
meas.register_parameter(DMM.v2)
meas.register_parameter(DAC.ch1, basis=(DMM.v1, DMM.v2),
setpoints=(DAC.ch2,))
with pytest.raises(ValueError):
meas.unregister_parameter(DAC.ch2)
with pytest.raises(ValueError):
meas.unregister_parameter(str(DAC.ch2))
with pytest.raises(ValueError):
meas.unregister_parameter(DMM.v1)
with pytest.raises(ValueError):
meas.unregister_parameter(DMM.v2)
meas.unregister_parameter(DAC.ch1)
assert set(meas.parameters.keys()) == {str(DAC.ch2), str(DMM.v1),
str(DMM.v2)}
meas.unregister_parameter(DAC.ch2)
assert set(meas.parameters.keys()) == {str(DMM.v1), str(DMM.v2)}
not_parameters = [DAC, DMM, 0.0, 1]
for notparam in not_parameters:
with pytest.raises(ValueError):
meas.unregister_parameter(notparam)
# unregistering something not registered should silently "succeed"
meas.unregister_parameter('totes_not_registered')
meas.unregister_parameter(DAC.ch2)
meas.unregister_parameter(DAC.ch2)
@pytest.mark.usefixtures("experiment")
@pytest.mark.parametrize("bg_writing", [True, False])
def test_mixing_array_and_numeric(DAC, bg_writing):
"""
Test that mixing array and numeric types is okay
"""
meas = Measurement()
meas.register_parameter(DAC.ch1, paramtype='numeric')
meas.register_parameter(DAC.ch2, paramtype='array')
with meas.run(write_in_background=bg_writing) as datasaver:
datasaver.add_result((DAC.ch1, np.array([DAC.ch1(), DAC.ch1()])),
(DAC.ch2, np.array([DAC.ch2(), DAC.ch1()])))
def test_measurement_name_default(experiment, DAC, DMM):
fmt = experiment.format_string
exp_id = experiment.exp_id
default_name = 'results'
meas = Measurement()
assert meas.name == ''
meas.register_parameter(DAC.ch1)
meas.register_parameter(DMM.v1, setpoints=[DAC.ch1])
with meas.run() as datasaver:
run_id = datasaver.run_id
expected_name = fmt.format(default_name, exp_id, run_id)
assert datasaver.dataset.table_name == expected_name
assert datasaver.dataset.name == default_name
def test_measurement_name_changed_via_attribute(experiment, DAC, DMM):
fmt = experiment.format_string
exp_id = experiment.exp_id
name = 'yolo'
meas = Measurement()
meas.name = name
meas.register_parameter(DAC.ch1)
meas.register_parameter(DMM.v1, setpoints=[DAC.ch1])
with meas.run() as datasaver:
run_id = datasaver.run_id
expected_name = fmt.format('results', exp_id, run_id)
assert datasaver.dataset.table_name == expected_name
assert datasaver.dataset.name == name
def test_measurement_name_set_as_argument(experiment, DAC, DMM):
fmt = experiment.format_string
exp_id = experiment.exp_id
name = 'yolo'
meas = Measurement(name=name, exp=experiment)
meas.register_parameter(DAC.ch1)
meas.register_parameter(DMM.v1, setpoints=[DAC.ch1])
with meas.run() as datasaver:
run_id = datasaver.run_id
expected_name = fmt.format('results', exp_id, run_id)
assert datasaver.dataset.table_name == expected_name
assert datasaver.dataset.name == name
@settings(deadline=None)
@given(wp=hst.one_of(hst.integers(), hst.floats(allow_nan=False),
hst.text()))
@pytest.mark.usefixtures("empty_temp_db")
def test_setting_write_period(wp):
new_experiment('firstexp', sample_name='no sample')
meas = Measurement()
meas.register_custom_parameter(name='dummy')
if isinstance(wp, str):
with pytest.raises(ValueError):
meas.write_period = wp
elif wp < 1e-3:
with pytest.raises(ValueError):
meas.write_period = wp
else:
meas.write_period = wp
assert meas._write_period == float(wp)
with meas.run() as datasaver:
assert datasaver.write_period == float(wp)
@settings(deadline=None)
@given(wp=hst.one_of(hst.integers(), hst.floats(allow_nan=False),
hst.text()))
@pytest.mark.usefixtures("experiment")
def test_setting_write_period_from_config(wp):
with reset_config_on_exit():
qc.config.dataset.write_period = wp
if isinstance(wp, str):
with pytest.raises(ValueError):
Measurement()
elif wp < 1e-3:
with pytest.raises(ValueError):
Measurement()
else:
meas = Measurement()
assert meas.write_period == float(wp)
meas.register_custom_parameter(name='dummy')
with meas.run() as datasaver:
assert datasaver.write_period == float(wp)
@pytest.mark.parametrize("write_in_background", [True, False])
@pytest.mark.usefixtures("experiment")
def test_setting_write_in_background_from_config(write_in_background):
with reset_config_on_exit():
qc.config.dataset.write_in_background = write_in_background
meas = Measurement()
meas.register_custom_parameter(name='dummy')
with meas.run() as datasaver:
assert datasaver.dataset._writer_status.write_in_background is write_in_background
@pytest.mark.usefixtures("experiment")
def test_method_chaining(DAC):
meas = (
Measurement()
.register_parameter(DAC.ch1)
.register_custom_parameter(name='freqax',
label='Frequency axis',
unit='Hz')
.add_before_run((lambda: None), ())
.add_after_run((lambda: None), ())
.add_subscriber((lambda values, idx, state: None), state=[])
)
@pytest.mark.usefixtures("experiment")
@settings(deadline=None, suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(words=hst.lists(elements=hst.text(), min_size=4, max_size=10))
def test_enter_and_exit_actions(DAC, words):
# we use a list to check that the functions executed
# in the correct order
def action(lst, word):
lst.append(word)
meas = Measurement()
meas.register_parameter(DAC.ch1)
testlist = []
splitpoint = round(len(words) / 2)
for n in range(splitpoint):
meas.add_before_run(action, (testlist, words[n]))
for m in range(splitpoint, len(words)):
meas.add_after_run(action, (testlist, words[m]))
assert len(meas.enteractions) == splitpoint
assert len(meas.exitactions) == len(words) - splitpoint
with meas.run() as _:
assert testlist == words[:splitpoint]
assert testlist == words
meas = Measurement()
with pytest.raises(ValueError):
meas.add_before_run(action, 'no list!')
with pytest.raises(ValueError):
meas.add_after_run(action, testlist)
def test_subscriptions(experiment, DAC, DMM):
"""
Test that subscribers are called at the moment the data is flushed to
database
Note that for the purpose of this test, flush_data_to_database method is
called explicitly instead of waiting for the data to be flushed
automatically after the write_period passes after a add_result call.
"""
def collect_all_results(results, length, state):
"""
Updates the *state* to contain all the *results* acquired
during the experiment run
"""
# Due to the fact that by default subscribers only hold 1 data value
# in their internal queue, this assignment should work (i.e. not
# overwrite values in the "state" object) assuming that at the start
# of the experiment both the dataset and the *state* objects have
# the same length.
state[length] = results
def collect_values_larger_than_7(results, length, state):
"""
Appends to the *state* only the values from *results*
that are larger than 7
"""
for result_tuple in results:
state += [value for value in result_tuple if value > 7]
meas = Measurement(exp=experiment)
meas.register_parameter(DAC.ch1)
meas.register_parameter(DMM.v1, setpoints=(DAC.ch1,))
# key is the number of the result tuple,
# value is the result tuple itself
all_results_dict = {}
values_larger_than_7 = []
meas.add_subscriber(collect_all_results, state=all_results_dict)
assert len(meas.subscribers) == 1
meas.add_subscriber(collect_values_larger_than_7,
state=values_larger_than_7)
assert len(meas.subscribers) == 2
meas.write_period = 0.2
with meas.run() as datasaver:
# Assert that the measurement, runner, and datasaver
# have added subscribers to the dataset
assert len(datasaver._dataset.subscribers) == 2
assert all_results_dict == {}
assert values_larger_than_7 == []
dac_vals_and_dmm_vals = list(zip(range(5), range(3, 8)))
values_larger_than_7__expected = []
for num in range(5):
(dac_val, dmm_val) = dac_vals_and_dmm_vals[num]
values_larger_than_7__expected += \
[val for val in (dac_val, dmm_val) if val > 7]
datasaver.add_result((DAC.ch1, dac_val), (DMM.v1, dmm_val))
# Ensure that data is flushed to the database despite the write
# period, so that the database triggers are executed, which in turn
# add data to the queues within the subscribers
datasaver.flush_data_to_database()
# In order to make this test deterministic, we need to ensure that
# just enough time has passed between the moment the data is
# flushed to database and the "state" object (that is passed to
# subscriber constructor) has been updated by the corresponding
# subscriber's callback function. At the moment, there is no robust
# way to ensure this. The reason is that the subscribers have
# internal queue which is populated via a trigger call from the SQL
# database, hence from this "main" thread it is difficult to say
# whether the queue is empty because the subscriber callbacks have
# already been executed or because the triggers of the SQL database
# has not been executed yet.
#
# In order to overcome this problem, a special decorator is used to
# wrap the assertions. This is going to ensure that some time is
# given to the Subscriber threads to finish exhausting the queue.
@retry_until_does_not_throw(
exception_class_to_expect=AssertionError, delay=0.5, tries=20)
def assert_states_updated_from_callbacks():
assert values_larger_than_7 == values_larger_than_7__expected
assert list(all_results_dict.keys()) == \
[result_index for result_index in range(1, num + 1 + 1)]
assert_states_updated_from_callbacks()
# Ensure that after exiting the "run()" context,
# all subscribers get unsubscribed from the dataset
assert len(datasaver._dataset.subscribers) == 0
# Ensure that the triggers for each subscriber
# have been removed from the database
get_triggers_sql = "SELECT * FROM sqlite_master WHERE TYPE = 'trigger';"
triggers = atomic_transaction(
datasaver._dataset.conn, get_triggers_sql).fetchall()
assert len(triggers) == 0
def test_subscribers_called_at_exiting_context_if_queue_is_not_empty(experiment,
DAC):
"""
Upon quitting the "run()" context, verify that in case the queue is
not empty, the subscriber's callback is still called on that data.
This situation is created by setting the minimum length of the queue
to a number that is larger than the number of value written to the dataset.
"""
def collect_x_vals(results, length, state):
"""
Collects first elements of results tuples in *state*
"""
index_of_x = 0
state += [res[index_of_x] for res in results]
meas = Measurement(exp=experiment)
meas.register_parameter(DAC.ch1)
collected_x_vals = []
meas.add_subscriber(collect_x_vals, state=collected_x_vals)
given_x_vals = [0, 1, 2, 3]
with meas.run() as datasaver:
# Set the minimum queue size of the subscriber to more that
# the total number of values being added to the dataset;
# this way the subscriber callback is not called before
# we exit the "run()" context.
subscriber = list(datasaver.dataset.subscribers.values())[0]
subscriber.min_queue_length = int(len(given_x_vals) + 1)
for x in given_x_vals:
datasaver.add_result((DAC.ch1, x))
# Verify that the subscriber callback is not called yet
assert collected_x_vals == []
# Verify that the subscriber callback is finally called
assert collected_x_vals == given_x_vals
@pytest.mark.serial
@pytest.mark.flaky(reruns=5)
@settings(deadline=None, max_examples=25,
suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(N=hst.integers(min_value=2000, max_value=3000))
def test_subscribers_called_for_all_data_points(experiment, DAC, DMM, N):
def sub_get_x_vals(results, length, state):
"""
A list of all x values
"""
state += [res[0] for res in results]
def sub_get_y_vals(results, length, state):
"""
A list of all y values
"""
state += [res[1] for res in results]
meas = Measurement(exp=experiment)
meas.register_parameter(DAC.ch1)
meas.register_parameter(DMM.v1, setpoints=(DAC.ch1,))
xvals = []
yvals = []
meas.add_subscriber(sub_get_x_vals, state=xvals)
meas.add_subscriber(sub_get_y_vals, state=yvals)
given_xvals = range(N)
given_yvals = range(1, N + 1)
with meas.run() as datasaver:
for x, y in zip(given_xvals, given_yvals):
datasaver.add_result((DAC.ch1, x), (DMM.v1, y))
assert xvals == list(given_xvals)
assert yvals == list(given_yvals)
# There is no way around it: this test is slow. We test that write_period
# works and hence we must wait for some time to elapse. Sorry.
@settings(max_examples=5, deadline=None,
suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(breakpoint=hst.integers(min_value=1, max_value=19),
write_period=hst.floats(min_value=0.1, max_value=1.5),
set_values=hst.lists(elements=hst.floats(), min_size=20, max_size=20),
get_values=hst.lists(elements=hst.floats(), min_size=20, max_size=20))
@pytest.mark.usefixtures('set_default_station_to_none')
def test_datasaver_scalars(experiment, DAC, DMM, set_values, get_values,
breakpoint, write_period):
no_of_runs = len(experiment)
station = qc.Station(DAC, DMM)
meas = Measurement(station=station)
meas.write_period = write_period
assert meas.write_period == write_period
meas.register_parameter(DAC.ch1)
meas.register_parameter(DMM.v1, setpoints=(DAC.ch1,))
with meas.run() as datasaver:
for set_v, get_v in zip(set_values[:breakpoint],
get_values[:breakpoint]):
datasaver.add_result((DAC.ch1, set_v), (DMM.v1, get_v))
assert datasaver._dataset.number_of_results == 0
sleep(write_period * 1.1)
datasaver.add_result((DAC.ch1, set_values[breakpoint]),
(DMM.v1, get_values[breakpoint]))
assert datasaver.points_written == breakpoint + 1
assert datasaver.run_id == no_of_runs + 1
with meas.run() as datasaver:
with pytest.raises(ValueError):
datasaver.add_result((DAC.ch2, 1), (DAC.ch2, 2))
with pytest.raises(ValueError):
datasaver.add_result((DMM.v1, 0))
# More assertions of setpoints, labels and units in the DB!
@pytest.mark.usefixtures('set_default_station_to_none')
def test_datasaver_inst_metadata(experiment, DAC_with_metadata, DMM):
"""
Check that additional instrument metadata is captured into the dataset snapshot
"""
station = qc.Station(DAC_with_metadata, DMM)
meas = Measurement(station=station)
meas.register_parameter(DAC_with_metadata.ch1)
meas.register_parameter(DMM.v1, setpoints=(DAC_with_metadata.ch1,))
with meas.run() as datasaver:
for set_v in range(10):
DAC_with_metadata.ch1.set(set_v)
datasaver.add_result((DAC_with_metadata.ch1, set_v), (DMM.v1, DMM.v1.get()))
station_snapshot = datasaver.dataset.snapshot['station']
assert station_snapshot['instruments']['dummy_dac']['metadata'] == {"dac": "metadata"}
def test_exception_happened_during_measurement_is_stored_in_dataset_metadata(
experiment):
meas = Measurement()
meas.register_custom_parameter(name='nodata')
class SomeMeasurementException(Exception):
pass
# `pytest.raises` is used here instead of a custom try-except for convenience
with pytest.raises(SomeMeasurementException, match='foo') as e:
with meas.run() as datasaver:
dataset = datasaver.dataset
raise SomeMeasurementException('foo')
metadata = dataset.metadata
assert "measurement_exception" in metadata
expected_exception_string = "".join(
traceback.format_exception(e.type, e.value, e.tb))
exception_string = metadata["measurement_exception"]
assert exception_string == expected_exception_string
@pytest.mark.parametrize("bg_writing", [True, False])
@settings(max_examples=10, deadline=None)
@given(N=hst.integers(min_value=2, max_value=500))
@pytest.mark.usefixtures("empty_temp_db")
def test_datasaver_arrays_lists_tuples(bg_writing, N):
new_experiment('firstexp', sample_name='no sample')
meas = Measurement()
meas.register_custom_parameter(name='freqax',
label='Frequency axis',
unit='Hz')
meas.register_custom_parameter(name='signal',
label='qubit signal',
unit='Majorana number',
setpoints=('freqax',))
with meas.run(write_in_background=bg_writing) as datasaver:
freqax = np.linspace(1e6, 2e6, N)
signal = np.random.randn(N)
datasaver.add_result(('freqax', freqax), ('signal', signal))
assert datasaver.points_written == N
assert not datasaver.dataset.conn.atomic_in_progress
with meas.run(write_in_background=bg_writing) as datasaver:
freqax = np.linspace(1e6, 2e6, N)
signal = np.random.randn(N - 1)
with pytest.raises(ValueError):
datasaver.add_result(('freqax', freqax), ('signal', signal))
meas.register_custom_parameter(name='gate_voltage',
label='Gate tuning potential',
unit='V')
meas.unregister_parameter('signal')
meas.register_custom_parameter(name='signal',
label='qubit signal',
unit='Majorana flux',
setpoints=('freqax', 'gate_voltage'))
# save arrays
with meas.run(write_in_background=bg_writing) as datasaver:
freqax = np.linspace(1e6, 2e6, N)
signal = np.random.randn(N)
datasaver.add_result(('freqax', freqax),
('signal', signal),
('gate_voltage', 0))
assert datasaver.points_written == N
assert not datasaver.dataset.conn.atomic_in_progress
# save lists
with meas.run(write_in_background=bg_writing) as datasaver:
freqax = list(np.linspace(1e6, 2e6, N))
signal = list(np.random.randn(N))
datasaver.add_result(('freqax', freqax),
('signal', signal),
('gate_voltage', 0))
assert datasaver.points_written == N
# save tuples
with meas.run(write_in_background=bg_writing) as datasaver:
freqax = tuple(np.linspace(1e6, 2e6, N))
signal = tuple(np.random.randn(N))
datasaver.add_result(('freqax', freqax),
('signal', signal),
('gate_voltage', 0))
assert datasaver.points_written == N
@pytest.mark.parametrize("bg_writing", [True, False])
@settings(max_examples=10, deadline=None)
@given(N=hst.integers(min_value=2, max_value=500))
@pytest.mark.usefixtures("empty_temp_db")
def test_datasaver_numeric_and_array_paramtype(bg_writing, N):
"""
Test saving one parameter with 'numeric' paramtype and one parameter with
'array' paramtype
"""
new_experiment('firstexp', sample_name='no sample')
meas = Measurement()
meas.register_custom_parameter(name='numeric_1',
label='Magnetic field',
unit='T',
paramtype='numeric')
meas.register_custom_parameter(name='array_1',
label='Alazar signal',
unit='V',
paramtype='array',
setpoints=('numeric_1',))
signal = np.random.randn(113)
with meas.run(bg_writing) as datasaver:
datasaver.add_result(('numeric_1', 3.75), ('array_1', signal))
assert datasaver.points_written == 1
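# dataset.parameters is a comma-separated string of parameter names,
# so splitting on ',' expands it into individual arguments.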
data = datasaver.dataset.get_parameter_data(
*datasaver.dataset.parameters.split(','))
assert (data['numeric_1']['numeric_1'] == np.array([3.75])).all()
assert np.allclose(data['array_1']['array_1'], signal)
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("empty_temp_db")
def test_datasaver_numeric_after_array_paramtype(bg_writing):
"""
Test that passing values for 'array' parameter in `add_result` before
passing values for 'numeric' parameter works.
"""
new_experiment('firstexp', sample_name='no sample')
meas = Measurement()
meas.register_custom_parameter(name='numeric_1',
label='Magnetic field',
unit='T',
paramtype='numeric')
meas.register_custom_parameter(name='array_1',
label='Alazar signal',
unit='V',
paramtype='array',
setpoints=('numeric_1',))
signal = np.random.randn(113)
with meas.run(write_in_background=bg_writing) as datasaver:
# it is important that first comes the 'array' data and then 'numeric'
datasaver.add_result(('array_1', signal), ('numeric_1', 3.75))
assert datasaver.points_written == 1
data = datasaver.dataset.get_parameter_data(
*datasaver.dataset.parameters.split(','))
assert (data['numeric_1']['numeric_1'] == np.array([3.75])).all()
assert np.allclose(data['array_1']['array_1'], signal)
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("experiment")
def test_datasaver_foul_input(bg_writing):
meas = Measurement()
meas.register_custom_parameter('foul',
label='something unnatural',
unit='Fahrenheit')
foul_stuff = [qc.Parameter('foul'), {1, 2, 3}]
with meas.run(bg_writing) as datasaver:
for ft in foul_stuff:
with pytest.raises(ValueError):
datasaver.add_result(('foul', ft))
@settings(max_examples=10, deadline=None)
@given(N=hst.integers(min_value=2, max_value=500))
@pytest.mark.usefixtures("empty_temp_db")
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.parametrize("storage_type", ['numeric', 'array'])
def test_datasaver_unsized_arrays(N, storage_type, bg_writing):
new_experiment('firstexp', sample_name='no sample')
meas = Measurement()
meas.register_custom_parameter(name='freqax',
label='Frequency axis',
unit='Hz',
paramtype=storage_type)
meas.register_custom_parameter(name='signal',
label='qubit signal',
unit='Majorana number',
setpoints=('freqax',),
paramtype=storage_type)
# note that np.array(some_number) is not the same as the number:
# it is a zero-dimensional array with shape (). Check here that we
# handle it correctly
with meas.run(write_in_background=bg_writing) as datasaver:
freqax = np.linspace(1e6, 2e6, N)
np.random.seed(0)
signal = np.random.randn(N)
for i in range(N):
myfreq = np.array(freqax[i])
assert myfreq.shape == ()
mysignal = np.array(signal[i])
assert mysignal.shape == ()
datasaver.add_result(('freqax', myfreq), ('signal', mysignal))
assert datasaver.points_written == N
loaded_data = datasaver.dataset.get_parameter_data()['signal']
np.random.seed(0)
expected_signal = np.random.randn(N)
expected_freqax = np.linspace(1e6, 2e6, N)
if storage_type == 'array':
expected_freqax = expected_freqax.reshape((N, 1))
expected_signal = expected_signal.reshape((N, 1))
assert_allclose(loaded_data['freqax'], expected_freqax)
assert_allclose(loaded_data['signal'], expected_signal)
@settings(max_examples=5, deadline=None,
suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(N=hst.integers(min_value=5, max_value=6),
M=hst.integers(min_value=4, max_value=5),
seed=hst.integers(min_value=0, max_value=np.iinfo(np.uint32).max))
@pytest.mark.usefixtures("experiment")
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.parametrize("param_type", ['np_array', 'tuple', 'list'])
@pytest.mark.parametrize("storage_type", ['numeric', 'array'])
def test_datasaver_arrayparams(SpectrumAnalyzer, DAC, N, M,
param_type, storage_type,
seed, bg_writing):
"""
Test that data is stored correctly for array parameters that
return numpy arrays, lists and tuples, stored both with the 'array'
and 'numeric' paramtypes.
"""
if param_type == 'list':
spectrum = SpectrumAnalyzer.listspectrum
spectrum_name = 'dummy_SA_listspectrum'
elif param_type == 'tuple':
spectrum = SpectrumAnalyzer.tuplespectrum
spectrum_name = 'dummy_SA_tuplespectrum'
elif param_type == 'np_array':
spectrum = SpectrumAnalyzer.spectrum
spectrum_name = 'dummy_SA_spectrum'
else:
raise RuntimeError("Invalid param_type")
meas = Measurement()
meas.register_parameter(spectrum, paramtype=storage_type)
assert len(meas.parameters) == 2
setpoint_paramspec = meas.parameters['dummy_SA_Frequency']
spectrum_paramspec = meas.parameters[str(spectrum)]
assert setpoint_paramspec in meas._interdeps.dependencies[spectrum_paramspec]
assert meas.parameters[str(spectrum)].type == storage_type
assert meas.parameters['dummy_SA_Frequency'].type == storage_type
# Now for a real measurement
meas = Measurement()
meas.register_parameter(DAC.ch1)
meas.register_parameter(spectrum, setpoints=[DAC.ch1], paramtype=storage_type)
assert len(meas.parameters) == 3
spectrum.npts = M
np.random.seed(seed)
with meas.run(write_in_background=bg_writing) as datasaver:
for set_v in np.linspace(0, 0.01, N):
datasaver.add_result((DAC.ch1, set_v),
(spectrum, spectrum.get()))
if storage_type == 'numeric':
assert datasaver.points_written == N * M
elif storage_type == 'array':
assert datasaver.points_written == N
np.random.seed(seed)
expected_dac_data = np.repeat(np.linspace(0, 0.01, N), M)
expected_freq_axis = np.tile(spectrum.setpoints[0], N)
expected_output = np.array([spectrum.get() for _ in range(N)]).reshape(
N * M)
if storage_type == 'array':
expected_dac_data = expected_dac_data.reshape(N, M)
expected_freq_axis = expected_freq_axis.reshape(N, M)
expected_output = expected_output.reshape(N, M)
data = datasaver.dataset.get_parameter_data()[spectrum_name]
assert_allclose(data['dummy_dac_ch1'], expected_dac_data)
assert_allclose(data['dummy_SA_Frequency'], expected_freq_axis)
assert_allclose(data[spectrum_name], expected_output)
@settings(max_examples=5, deadline=None,
suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(N=hst.integers(min_value=5, max_value=500))
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.parametrize("storage_type", ['numeric', 'array'])
@pytest.mark.usefixtures("experiment")
def test_datasaver_array_parameters_channel(channel_array_instrument,
DAC, N, storage_type,
bg_writing):
meas = Measurement()
array_param = channel_array_instrument.A.dummy_array_parameter
meas.register_parameter(array_param, paramtype=storage_type)
assert len(meas.parameters) == 2
dependency_name = 'dummy_channel_inst_ChanA_array_setpoint_param_this_setpoint'
dep_paramspec = meas.parameters[dependency_name]
array_paramspec = meas.parameters[str(array_param)]
assert dep_paramspec in meas._interdeps.dependencies[array_paramspec]
assert meas.parameters[str(array_param)].type == storage_type
assert meas.parameters[dependency_name].type == storage_type
# Now for a real measurement
meas = Measurement()
meas.register_parameter(DAC.ch1)
meas.register_parameter(array_param, setpoints=[DAC.ch1], paramtype=storage_type)
assert len(meas.parameters) == 3
M = array_param.shape[0]
with meas.run(write_in_background=bg_writing) as datasaver:
for set_v in np.linspace(0, 0.01, N):
datasaver.add_result((DAC.ch1, set_v),
(array_param, array_param.get()))
if storage_type == 'numeric':
n_points_written_expected = N * M
elif storage_type == 'array':
n_points_written_expected = N
assert datasaver.points_written == n_points_written_expected
expected_params = ('dummy_dac_ch1',
dependency_name,
'dummy_channel_inst_ChanA_dummy_array_parameter')
ds = load_by_id(datasaver.run_id)
loaded_data = ds.get_parameter_data()['dummy_channel_inst_ChanA_dummy_array_parameter']
for param in expected_params:
if storage_type == 'array':
expected_shape = (N, M)
else:
expected_shape = (N*M, )
assert loaded_data[param].shape == expected_shape
@settings(max_examples=5, deadline=None,
suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(n=hst.integers(min_value=5, max_value=500))
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.parametrize("storage_type", ['numeric', 'array'])
@pytest.mark.usefixtures("experiment")
def test_datasaver_parameter_with_setpoints(channel_array_instrument,
DAC, n, storage_type,
bg_writing):
random_seed = 1
chan = channel_array_instrument.A
param = chan.dummy_parameter_with_setpoints
chan.dummy_n_points(n)
chan.dummy_start(0)
chan.dummy_stop(100)
meas = Measurement()
meas.register_parameter(param, paramtype=storage_type)
assert len(meas.parameters) == 2
dependency_name = 'dummy_channel_inst_ChanA_dummy_sp_axis'
dep_ps = meas.parameters[dependency_name]
param_ps = meas.parameters[str(param)]
assert dep_ps in meas._interdeps.dependencies[param_ps]
assert meas.parameters[str(param)].type == storage_type
assert meas.parameters[dependency_name].type == storage_type
# Now for a real measurement
with meas.run(write_in_background=bg_writing) as datasaver:
# we seed the random number generator
# so we can test that we get the expected numbers
np.random.seed(random_seed)
datasaver.add_result((param, param.get()))
if storage_type == 'numeric':
expected_points_written = n
elif storage_type == 'array':
expected_points_written = 1
assert datasaver.points_written == expected_points_written
expected_params = (dependency_name,
'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints')
ds = load_by_id(datasaver.run_id)
loaded_data = ds.get_parameter_data()
for param in expected_params:
data = loaded_data['dummy_channel_inst_ChanA_dummy_parameter_with_setpoints'][param]
if storage_type == 'array':
assert data.shape == (expected_points_written, n)
else:
assert data.shape == (expected_points_written,)
assert len(loaded_data) == 1
subdata = loaded_data[
'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints']
expected_dep_data = np.linspace(chan.dummy_start(),
chan.dummy_stop(),
chan.dummy_n_points())
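# Re-seeding reproduces the random sequence that the dummy parameter
# drew during the measurement, which gives us the expected data.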
np.random.seed(random_seed)
expected_data = np.random.rand(n)
if storage_type == 'array':
expected_dep_data = expected_dep_data.reshape((1,
chan.dummy_n_points()))
expected_data = expected_data.reshape((1, chan.dummy_n_points()))
assert_allclose(subdata[dependency_name], expected_dep_data)
assert_allclose(subdata['dummy_channel_inst_ChanA_'
'dummy_parameter_with_setpoints'],
expected_data)
@settings(max_examples=5, deadline=None,
suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(n=hst.integers(min_value=5, max_value=500))
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.parametrize("storage_type", ['numeric', 'array'])
@pytest.mark.usefixtures("experiment")
def test_datasaver_parameter_with_setpoints_explicitly_expanded(channel_array_instrument,
DAC, n, storage_type,
bg_writing):
random_seed = 1
chan = channel_array_instrument.A
param = chan.dummy_parameter_with_setpoints
chan.dummy_n_points(n)
chan.dummy_start(0)
chan.dummy_stop(100)
meas = Measurement()
meas.register_parameter(param, paramtype=storage_type)
assert len(meas.parameters) == 2
dependency_name = 'dummy_channel_inst_ChanA_dummy_sp_axis'
dep_ps = meas.parameters[dependency_name]
param_ps = meas.parameters[str(param)]
assert dep_ps in meas._interdeps.dependencies[param_ps]
assert meas.parameters[str(param)].type == storage_type
assert meas.parameters[dependency_name].type == storage_type
# Now for a real measurement
with meas.run(write_in_background=bg_writing) as datasaver:
# we seed the random number generator
# so we can test that we get the expected numbers
np.random.seed(random_seed)
datasaver.add_result(*expand_setpoints_helper(param))
if storage_type == 'numeric':
expected_points_written = n
elif storage_type == 'array':
expected_points_written = 1
assert datasaver.points_written == expected_points_written
expected_params = (dependency_name,
'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints')
ds = load_by_id(datasaver.run_id)
loaded_data = ds.get_parameter_data()
for param in expected_params:
data = loaded_data['dummy_channel_inst_ChanA_dummy_parameter_with_setpoints'][param]
if storage_type == 'array':
assert data.shape == (expected_points_written, n)
else:
assert data.shape == (expected_points_written,)
assert len(loaded_data) == 1
subdata = loaded_data[
'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints']
expected_dep_data = np.linspace(chan.dummy_start(),
chan.dummy_stop(),
chan.dummy_n_points())
np.random.seed(random_seed)
expected_data = np.random.rand(n)
if storage_type == 'array':
expected_dep_data = expected_dep_data.reshape((1,
chan.dummy_n_points()))
expected_data = expected_data.reshape((1, chan.dummy_n_points()))
assert_allclose(subdata[dependency_name], expected_dep_data)
assert_allclose(subdata['dummy_channel_inst_ChanA_'
'dummy_parameter_with_setpoints'],
expected_data)
@pytest.mark.usefixtures("experiment")
def test_datasaver_parameter_with_setpoints_partially_expanded_raises(channel_array_instrument, DAC):
random_seed = 1
chan = channel_array_instrument.A
param = chan.dummy_parameter_with_setpoints_2d
chan.dummy_n_points(10)
chan.dummy_n_points_2(20)
chan.dummy_start(0)
chan.dummy_stop(100)
chan.dummy_start_2(5)
chan.dummy_stop_2(7)
meas = Measurement()
meas.register_parameter(param)
sp_param_1 = chan.dummy_sp_axis
assert len(meas.parameters) == 3
dependency_name = 'dummy_channel_inst_ChanA_dummy_sp_axis'
dep_ps = meas.parameters[dependency_name]
param_ps = meas.parameters[str(param)]
assert dep_ps in meas._interdeps.dependencies[param_ps]
with meas.run() as datasaver:
# we seed the random number generator
# so we can test that we get the expected numbers
np.random.seed(random_seed)
with pytest.raises(ValueError, match="Some of the setpoints of"):
datasaver.add_result((param, param.get()),
(sp_param_1, sp_param_1.get()))
@pytest.mark.parametrize("bg_writing", [True, False])
@settings(max_examples=5, deadline=None,
suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(n=hst.integers(min_value=5, max_value=500))
@pytest.mark.usefixtures("experiment")
def test_datasaver_parameter_with_setpoints_complex(channel_array_instrument,
DAC, n,
bg_writing):
random_seed = 1
chan = channel_array_instrument.A
param = chan.dummy_parameter_with_setpoints_complex
chan.dummy_n_points(n)
chan.dummy_start(0)
chan.dummy_stop(100)
meas = Measurement()
meas.register_parameter(param, paramtype='array')
assert len(meas.parameters) == 2
dependency_name = 'dummy_channel_inst_ChanA_dummy_sp_axis'
dependent_parameter = meas.parameters[str(param)]
independent_parameter = meas.parameters[dependency_name]
assert meas._interdeps.dependencies[dependent_parameter] \
== (independent_parameter, )
assert dependent_parameter.type == 'array'
assert independent_parameter.type == 'array'
# Now for a real measurement
with meas.run(write_in_background=bg_writing) as datasaver:
# we seed the random number generator
# so we can test that we get the expected numbers
np.random.seed(random_seed)
datasaver.add_result((param, param.get()))
assert datasaver.points_written == 1
ds = load_by_id(datasaver.run_id)
datadict = ds.get_parameter_data()
assert len(datadict) == 1
subdata = datadict[
'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints_complex']
assert_allclose(subdata[dependency_name],
np.linspace(chan.dummy_start(),
chan.dummy_stop(),
chan.dummy_n_points()).reshape(1, chan.dummy_n_points()))
np.random.seed(random_seed)
assert_allclose(subdata['dummy_channel_inst_ChanA_'
'dummy_parameter_with_setpoints_complex'],
(np.random.rand(n) + 1j * np.random.rand(n)).reshape(1, chan.dummy_n_points()))
@pytest.mark.parametrize("bg_writing", [True, False])
@settings(max_examples=5, deadline=None,
suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(n=hst.integers(min_value=5, max_value=500))
@pytest.mark.usefixtures("experiment")
def test_datasaver_parameter_with_setpoints_complex_explicitly_expanded(channel_array_instrument,
DAC, n,
bg_writing):
random_seed = 1
chan = channel_array_instrument.A
param = chan.dummy_parameter_with_setpoints_complex
chan.dummy_n_points(n)
chan.dummy_start(0)
chan.dummy_stop(100)
meas = Measurement()
meas.register_parameter(param, paramtype='array')
assert len(meas.parameters) == 2
dependency_name = 'dummy_channel_inst_ChanA_dummy_sp_axis'
dependent_parameter = meas.parameters[str(param)]
independent_parameter = meas.parameters[dependency_name]
assert meas._interdeps.dependencies[dependent_parameter] \
== (independent_parameter, )
assert dependent_parameter.type == 'array'
assert independent_parameter.type == 'array'
# Now for a real measurement
with meas.run(write_in_background=bg_writing) as datasaver:
# we seed the random number generator
# so we can test that we get the expected numbers
np.random.seed(random_seed)
datasaver.add_result(*expand_setpoints_helper(param))
assert datasaver.points_written == 1
ds = load_by_id(datasaver.run_id)
datadict = ds.get_parameter_data()
assert len(datadict) == 1
subdata = datadict[
'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints_complex']
assert_allclose(subdata[dependency_name],
np.linspace(chan.dummy_start(),
chan.dummy_stop(),
chan.dummy_n_points()).reshape(1, chan.dummy_n_points()))
np.random.seed(random_seed)
assert_allclose(subdata['dummy_channel_inst_ChanA_'
'dummy_parameter_with_setpoints_complex'],
(np.random.rand(n) + 1j * np.random.rand(n)).reshape(1, chan.dummy_n_points()))
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.parametrize("storage_type", ['numeric', 'array'])
@pytest.mark.usefixtures("experiment")
def test_datasaver_parameter_with_setpoints_missing_reg_raises(
channel_array_instrument,
DAC, storage_type,
bg_writing):
"""
Test that if for whatever reason new setpoints are added after
registering but before adding results, this raises correctly
"""
chan = channel_array_instrument.A
param = chan.dummy_parameter_with_setpoints
chan.dummy_n_points(11)
chan.dummy_start(0)
chan.dummy_stop(10)
old_setpoints = param.setpoints
param.setpoints = ()
meas = Measurement()
meas.register_parameter(param, paramtype=storage_type)
param.setpoints = old_setpoints
with meas.run(write_in_background=bg_writing) as datasaver:
sp_param_name = 'dummy_channel_inst_ChanA_dummy_sp_axis'
match = re.escape('Can not add result for parameter '
f'{sp_param_name}, no such parameter registered '
'with this measurement.')
with pytest.raises(ValueError, match=match):
datasaver.add_result(*expand_setpoints_helper(param))
with meas.run(write_in_background=bg_writing) as datasaver:
sp_param_name = 'dummy_channel_inst_ChanA_dummy_sp_axis'
match = re.escape('Can not add result for parameter '
f'{sp_param_name}, no such parameter registered '
'with this measurement.')
with pytest.raises(ValueError, match=match):
datasaver.add_result((param, param.get()))
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.parametrize("storage_type", ['numeric', 'array'])
@pytest.mark.usefixtures("experiment")
def test_datasaver_parameter_with_setpoints_reg_but_missing_validator(
channel_array_instrument,
DAC, storage_type,
bg_writing):
"""
Test that if for whatever reason the setpoints are removed between
registering and adding, this raises correctly. This test checks that
the parameter validator correctly enforces this.
"""
chan = channel_array_instrument.A
param = chan.dummy_parameter_with_setpoints
chan.dummy_n_points(11)
chan.dummy_start(0)
chan.dummy_stop(10)
meas = Measurement()
meas.register_parameter(param, paramtype=storage_type)
param.setpoints = ()
with meas.run(write_in_background=bg_writing) as datasaver:
with pytest.raises(ValueError, match=r"Shape of output is not"
r" consistent with setpoints."
r" Output is shape "
r"\(<qcodes.instrument.parameter."
r"Parameter: dummy_n_points at "
r"[0-9]+>,\) and setpoints are "
r"shape \(\)', 'getting dummy_"
r"channel_inst_ChanA_dummy_"
r"parameter_with_setpoints"):
datasaver.add_result(*expand_setpoints_helper(param))
with meas.run(write_in_background=bg_writing) as datasaver:
with pytest.raises(ValueError, match=r"Shape of output is not"
r" consistent with setpoints."
r" Output is shape "
r"\(<qcodes.instrument.parameter."
r"Parameter: dummy_n_points at "
r"[0-9]+>,\) and setpoints are "
r"shape \(\)', 'getting dummy_"
r"channel_inst_ChanA_dummy_"
r"parameter_with_setpoints"):
datasaver.add_result((param, param.get()))
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.parametrize("storage_type", ['numeric', 'array'])
@pytest.mark.usefixtures("experiment")
def test_datasaver_parameter_with_setpoints_reg_but_missing(
channel_array_instrument,
DAC, storage_type,
bg_writing):
"""
Test that if for whatever reason the setpoints of a QCoDeS parameter are
removed between registering that parameter with the Measurement and adding
results for those setpoints, then the datasaver correctly raises.
"""
chan = channel_array_instrument.A
param = chan.dummy_parameter_with_setpoints
chan.dummy_n_points(11)
chan.dummy_start(0)
chan.dummy_stop(10)
someparam = Parameter('someparam', vals=Arrays(shape=(10,)))
old_setpoints = param.setpoints
param.setpoints = (old_setpoints[0], someparam)
meas = Measurement()
meas.register_parameter(param, paramtype=storage_type)
param.setpoints = old_setpoints
with meas.run(write_in_background=bg_writing) as datasaver:
match = re.escape('Can not add result, some required parameters '
'are missing.')
with pytest.raises(ValueError, match=match):
datasaver.add_result(*expand_setpoints_helper(param))
with meas.run(write_in_background=bg_writing) as datasaver:
match = re.escape('Can not add result, some required parameters '
'are missing.')
with pytest.raises(ValueError, match=match):
datasaver.add_result((param, param.get()))
@settings(max_examples=5, deadline=None,
suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(N=hst.integers(min_value=5, max_value=500))
@pytest.mark.usefixtures("experiment")
@pytest.mark.parametrize("storage_type", ['numeric', 'array'])
@pytest.mark.parametrize("bg_writing", [True, False])
def test_datasaver_array_parameters_array(channel_array_instrument, DAC, N,
storage_type,
bg_writing):
"""
Test that storing array parameters inside a loop works as expected
"""
meas = Measurement()
array_param = channel_array_instrument.A.dummy_array_parameter
meas.register_parameter(array_param, paramtype=storage_type)
assert len(meas.parameters) == 2
dependency_name = 'dummy_channel_inst_ChanA_array_setpoint_param_this_setpoint'
dependency_ps = meas.parameters[dependency_name]
array_param_ps = meas.parameters[str(array_param)]
assert dependency_ps in meas._interdeps.dependencies[array_param_ps]
assert meas.parameters[str(array_param)].type == storage_type
assert meas.parameters[dependency_name].type == storage_type
# Now for a real measurement
meas = Measurement()
meas.register_parameter(DAC.ch1, paramtype='numeric')
meas.register_parameter(array_param, setpoints=[DAC.ch1], paramtype=storage_type)
assert len(meas.parameters) == 3
M = array_param.shape[0]
dac_datapoints = np.linspace(0, 0.01, N)
with meas.run(write_in_background=bg_writing) as datasaver:
for set_v in dac_datapoints:
datasaver.add_result((DAC.ch1, set_v),
(array_param, array_param.get()))
if storage_type == 'numeric':
expected_npoints = N*M
elif storage_type == 'array':
expected_npoints = N
assert datasaver.points_written == expected_npoints
ds = load_by_id(datasaver.run_id)
loaded_data = ds.get_parameter_data()['dummy_channel_inst_ChanA_dummy_array_parameter']
data_num = loaded_data['dummy_dac_ch1']
assert len(data_num) == expected_npoints
setpoint_arrays = loaded_data[dependency_name]
data_arrays = loaded_data['dummy_channel_inst_ChanA_dummy_array_parameter']
assert len(setpoint_arrays) == expected_npoints
assert len(data_arrays) == expected_npoints
expected_dac_data = np.repeat(np.linspace(0, 0.01, N), M)
expected_sp_data = np.tile(array_param.setpoints[0], N)
expected_output = np.array([array_param.get() for _ in range(N)]).reshape(
N * M)
if storage_type == 'array':
expected_dac_data = expected_dac_data.reshape(N, M)
expected_sp_data = expected_sp_data.reshape(N, M)
expected_output = expected_output.reshape(N, M)
assert_allclose(loaded_data['dummy_dac_ch1'], expected_dac_data)
assert_allclose(loaded_data[dependency_name],
expected_sp_data)
assert_allclose(loaded_data['dummy_channel_inst_ChanA_dummy_array_parameter'],
expected_output)
if storage_type == 'array':
for data_array, setpoint_array in zip(data_arrays, setpoint_arrays):
assert_array_equal(setpoint_array, np.linspace(5, 9, 5))
assert_array_equal(data_array, np.array([2., 2., 2., 2., 2.]))
@pytest.mark.parametrize("bg_writing", [True, False])
@settings(max_examples=5, deadline=None,
suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(N=hst.integers(min_value=5, max_value=500))
@pytest.mark.usefixtures("experiment")
def test_datasaver_complex_array_parameters_array(channel_array_instrument,
DAC, N,
bg_writing):
"""
Test that storing complex array parameters inside a loop with the sqlite
Array type works as expected
"""
meas = Measurement()
array_param = channel_array_instrument.A.dummy_complex_array_parameter
meas.register_parameter(array_param, paramtype='array')
assert len(meas.parameters) == 2
dependency_name = 'dummy_channel_inst_ChanA_this_setpoint'
dependent_parameter = meas.parameters[str(array_param)]
independent_parameter = meas.parameters[dependency_name]
assert meas._interdeps.dependencies[dependent_parameter] \
== (independent_parameter, )
assert dependent_parameter.type == 'array'
assert independent_parameter.type == 'array'
# Now for a real measurement
meas = Measurement()
meas.register_parameter(DAC.ch1, paramtype='numeric')
meas.register_parameter(array_param, setpoints=[DAC.ch1], paramtype='array')
assert len(meas.parameters) == 3
M = array_param.shape[0]
dac_datapoints = np.linspace(0, 0.01, N)
with meas.run(write_in_background=bg_writing) as datasaver:
for set_v in dac_datapoints:
datasaver.add_result((DAC.ch1, set_v),
(array_param, array_param.get()))
assert datasaver.points_written == N
ds = load_by_id(datasaver.run_id)
loaded_data = ds.get_parameter_data()["dummy_channel_inst_ChanA_dummy_complex_array_parameter"]
data_num = loaded_data['dummy_dac_ch1']
assert data_num.shape == (N, M)
param_name = 'dummy_channel_inst_ChanA_dummy_complex_array_parameter'
setpoint_arrays = loaded_data['dummy_channel_inst_ChanA_this_setpoint']
data_arrays = loaded_data[param_name]
assert setpoint_arrays.shape == (N, M)
assert data_arrays.shape == (N, M)
for data_array, setpoint_array in zip(data_arrays, setpoint_arrays):
assert_array_equal(setpoint_array, np.linspace(5, 9, 5))
assert_array_equal(data_array, np.arange(5) - 1j*np.arange(5))
@pytest.mark.parametrize("bg_writing", [True, False])
def test_datasaver_multidim_array(experiment, bg_writing): # noqa: F811
"""
Test that inserting multidim parameters as arrays works as expected
"""
meas = Measurement(experiment)
size1 = 10
size2 = 15
data_mapping = {name: i for i, name in
zip(range(4), ['x1', 'x2', 'y1', 'y2'])}
x1 = qc.ManualParameter('x1')
x2 = qc.ManualParameter('x2')
y1 = qc.ManualParameter('y1')
y2 = qc.ManualParameter('y2')
meas.register_parameter(x1, paramtype='array')
meas.register_parameter(x2, paramtype='array')
meas.register_parameter(y1, setpoints=[x1, x2], paramtype='array')
meas.register_parameter(y2, setpoints=[x1, x2], paramtype='array')
data = np.random.rand(4, size1, size2)
expected = {'x1': data[0, :, :],
'x2': data[1, :, :],
'y1': data[2, :, :],
'y2': data[3, :, :]}
with meas.run(write_in_background=bg_writing) as datasaver:
datasaver.add_result((str(x1), expected['x1']),
(str(x2), expected['x2']),
(str(y1), expected['y1']),
(str(y2), expected['y2']))
# We expect one "point" i.e. row in the DB to be written per top-level
# parameter.
assert datasaver.points_written == 2
dataset = load_by_id(datasaver.run_id)
loaded_data = dataset.get_parameter_data()
for outerid in ('y1', 'y2'):
for innerid in ('x1', 'x2', outerid):
mydata = loaded_data[outerid][innerid]
assert mydata.shape == (1, size1, size2)
assert_array_equal(mydata[0], expected[innerid])
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.parametrize("export", [True, False])
def test_datasaver_export(experiment, bg_writing, tmp_path_factory,
export):
"""
Test export data to csv after measurement ends
"""
meas = Measurement(experiment)
size1 = 10
size2 = 15
x1 = qc.ManualParameter('x1')
x2 = qc.ManualParameter('x2')
y1 = qc.ManualParameter('y1')
y2 = qc.ManualParameter('y2')
meas.register_parameter(x1, paramtype='array')
meas.register_parameter(x2, paramtype='array')
meas.register_parameter(y1, setpoints=[x1, x2], paramtype='array')
meas.register_parameter(y2, setpoints=[x1, x2], paramtype='array')
data = np.random.rand(4, size1, size2)
expected = {'x1': data[0, :, :],
'x2': data[1, :, :],
'y1': data[2, :, :],
'y2': data[3, :, :]}
tmp_path = tmp_path_factory.mktemp("export_from_config")
path = str(tmp_path)
with patch("qcodes.dataset.data_set.get_data_export_type") as mock_type, \
patch("qcodes.dataset.data_set.get_data_export_path") as mock_path, \
patch("qcodes.dataset.measurements.get_data_export_automatic") as mock_automatic:
mock_type.return_value = DataExportType.CSV
mock_path.return_value = path
mock_automatic.return_value = export
with meas.run(write_in_background=bg_writing) as datasaver:
datasaver.add_result((str(x1), expected['x1']),
(str(x2), expected['x2']),
(str(y1), expected['y1']),
(str(y2), expected['y2']))
if export:
assert os.listdir(path) == [f"qcodes_{datasaver.dataset.run_id}.csv"]
else:
assert os.listdir(path) == []
@pytest.mark.parametrize("bg_writing", [True, False])
def test_datasaver_multidim_numeric(experiment, bg_writing):
"""
Test that inserting multidim parameters as numeric works as expected
"""
meas = Measurement(experiment)
size1 = 10
size2 = 15
x1 = qc.ManualParameter('x1')
x2 = qc.ManualParameter('x2')
y1 = qc.ManualParameter('y1')
y2 = qc.ManualParameter('y2')
data_mapping = {name: i for i, name in
zip(range(4), ['x1', 'x2', 'y1', 'y2'])}
meas.register_parameter(x1, paramtype='numeric')
meas.register_parameter(x2, paramtype='numeric')
meas.register_parameter(y1, setpoints=[x1, x2], paramtype='numeric')
meas.register_parameter(y2, setpoints=[x1, x2], paramtype='numeric')
data = np.random.rand(4, size1, size2)
with meas.run(write_in_background=bg_writing) as datasaver:
datasaver.add_result((str(x1), data[0, :, :]),
(str(x2), data[1, :, :]),
(str(y1), data[2, :, :]),
(str(y2), data[3, :, :]))
# The factor of 2 is due to there being 2 top-level params
assert datasaver.points_written == 2 * (size1 * size2)
dataset = load_by_id(datasaver.run_id)
all_data = dataset.get_parameter_data()
for outer in ('y1', 'y2'):
for inner in ('x1', 'x2', outer):
mydata = all_data[outer][inner]
assert mydata.shape == (size1 * size2, )
assert mydata.dtype == np.float64
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("experiment")
def test_datasaver_multidimarrayparameter_as_array(SpectrumAnalyzer,
bg_writing):
"""
Test that inserting multidim ArrayParameters as arrays works as expected
"""
array_param = SpectrumAnalyzer.multidimspectrum
meas = Measurement()
meas.register_parameter(array_param, paramtype='array')
assert len(meas.parameters) == 4
inserted_data = array_param.get()
with meas.run(write_in_background=bg_writing) as datasaver:
datasaver.add_result((array_param, inserted_data))
assert datasaver.points_written == 1
ds = load_by_id(datasaver.run_id)
expected_shape = (1, 100, 50, 20)
loaded_data = ds.get_parameter_data()
for i in range(3):
data = loaded_data['dummy_SA_multidimspectrum'][f'dummy_SA_Frequency{i}']
aux_shape = list(expected_shape)
aux_shape.pop(i+1)
assert data.shape == expected_shape
for j in range(aux_shape[1]):
for k in range(aux_shape[2]):
# todo There should be a simpler way of doing this
if i == 0:
mydata = data[0, :, j, k]
if i == 1:
mydata = data[0, j, :, k]
if i == 2:
mydata = data[0, j, k, :]
assert_array_equal(mydata,
np.linspace(array_param.start,
array_param.stop,
array_param.npts[i]))
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("experiment")
def test_datasaver_multidimarrayparameter_as_numeric(SpectrumAnalyzer,
bg_writing):
"""
Test that storing a multidim Array parameter as numeric unravels the
parameter as expected.
"""
array_param = SpectrumAnalyzer.multidimspectrum
meas = Measurement()
meas.register_parameter(array_param, paramtype='numeric')
expected_shape = array_param.shape
dims = len(array_param.shape)
assert len(meas.parameters) == dims + 1
points_expected = np.prod(array_param.npts)
inserted_data = array_param.get()
with meas.run(write_in_background=bg_writing) as datasaver:
datasaver.add_result((array_param, inserted_data))
assert datasaver.points_written == points_expected
ds = load_by_id(datasaver.run_id)
# check setpoints
expected_setpoints_vectors = (np.linspace(array_param.start,
array_param.stop,
array_param.npts[i]) for i in
range(dims))
expected_setpoints_matrix = np.meshgrid(*expected_setpoints_vectors,
indexing='ij')
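# With indexing='ij' the meshgrid axes follow the parameter's own
# dimension order, so ravel() below flattens the setpoints in the same
# order in which the datasaver unravels the inserted array.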
expected_setpoints = tuple(
setpoint_array.ravel() for setpoint_array in expected_setpoints_matrix)
loaded_data = ds.get_parameter_data()
for i in range(dims):
data = loaded_data['dummy_SA_multidimspectrum'][f'dummy_SA_Frequency{i}']
assert len(data) == points_expected
assert_allclose(data.squeeze(),
expected_setpoints[i])
data = loaded_data['dummy_SA_multidimspectrum']['dummy_SA_multidimspectrum'].squeeze()
assert_allclose(data, inserted_data.ravel())
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("experiment")
def test_datasaver_multi_parameters_scalar(channel_array_instrument,
bg_writing):
"""
Test that we can register multiparameters that are scalar.
"""
meas = Measurement()
param = channel_array_instrument.A.dummy_scalar_multi_parameter
meas.register_parameter(param)
assert len(meas.parameters) == len(param.shapes)
assert set(meas.parameters.keys()) == set(param.full_names)
with meas.run(write_in_background=bg_writing) as datasaver:
datasaver.add_result((param, param()))
assert datasaver.points_written == 2
ds = load_by_id(datasaver.run_id)
assert ds.get_parameter_data()['dummy_channel_inst_ChanA_thisparam']['dummy_channel_inst_ChanA_thisparam'] == np.array([[0]])
assert ds.get_parameter_data()['dummy_channel_inst_ChanA_thatparam']['dummy_channel_inst_ChanA_thatparam'] == np.array([[1]])
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("experiment")
def test_datasaver_multi_parameters_array(channel_array_instrument,
bg_writing):
"""
Test that we can register multiparameters that are array like.
"""
meas = Measurement()
param = channel_array_instrument.A.dummy_multi_parameter
meas.register_parameter(param)
assert len(meas.parameters) == 3 # two params + 1D identical setpoints
param_names = ('dummy_channel_inst_ChanA_multi_setpoint_param_this_setpoint',
'dummy_channel_inst_ChanA_multi_setpoint_param_this',
'dummy_channel_inst_ChanA_multi_setpoint_param_that')
assert set(meas.parameters.keys()) == set(param_names)
this_ps = meas.parameters[param_names[1]]
that_ps = meas.parameters[param_names[2]]
sp_ps = meas.parameters[param_names[0]]
assert sp_ps in meas._interdeps.dependencies[this_ps]
assert sp_ps in meas._interdeps.dependencies[that_ps]
with meas.run(write_in_background=bg_writing) as datasaver:
datasaver.add_result((param, param()))
assert datasaver.points_written == 2 * 5
ds = load_by_id(datasaver.run_id)
setpts = np.arange(5, 10)
np.testing.assert_array_equal(ds.get_parameter_data()[param_names[1]][param_names[0]], setpts)
np.testing.assert_array_equal(ds.get_parameter_data()[param_names[2]][param_names[0]], setpts)
this_read_data = ds.get_parameter_data()[param_names[1]][param_names[1]]
that_read_data = ds.get_parameter_data()[param_names[2]][param_names[2]]
np.testing.assert_array_equal(this_read_data, np.zeros(5))
np.testing.assert_array_equal(that_read_data, np.ones(5))
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("experiment")
def test_datasaver_2d_multi_parameters_array(channel_array_instrument,
bg_writing):
"""
Test that we can register multiparameters that are array like and 2D.
"""
sp_name_1 = "dummy_channel_inst_ChanA_multi_2d_setpoint_param_this_setpoint"
sp_name_2 = 'dummy_channel_inst_ChanA_multi_2d_setpoint_param_that_setpoint'
p_name_1 = 'dummy_channel_inst_ChanA_this'
p_name_2 = 'dummy_channel_inst_ChanA_that'
meas = Measurement()
param = channel_array_instrument.A.dummy_2d_multi_parameter
meas.register_parameter(param)
assert len(meas.parameters) == 4 # two params + 2D identical setpoints
param_names = (sp_name_1,
sp_name_2,
p_name_1,
p_name_2)
assert set(meas.parameters.keys()) == set(param_names)
this_ps = meas.parameters[p_name_1]
that_ps = meas.parameters[p_name_2]
this_sp_ps = meas.parameters[sp_name_1]
that_sp_ps = meas.parameters[sp_name_2]
assert that_sp_ps in meas._interdeps.dependencies[this_ps]
assert that_sp_ps in meas._interdeps.dependencies[that_ps]
assert this_sp_ps in meas._interdeps.dependencies[this_ps]
assert this_sp_ps in meas._interdeps.dependencies[that_ps]
with meas.run(write_in_background=bg_writing) as datasaver:
datasaver.add_result((param, param()))
assert datasaver.points_written == 2 * 15
ds = load_by_id(datasaver.run_id)
# 15 points in each setpoint value list: a 5 x 3 grid, flattened
this_sp_val = np.repeat(np.arange(5, 10), 3)
that_sp_val = np.tile(np.arange(9, 12), 5)
np.testing.assert_array_equal(
ds.get_parameter_data()[p_name_1][sp_name_1],
this_sp_val
)
np.testing.assert_array_equal(
ds.get_parameter_data()[p_name_1][sp_name_2],
that_sp_val
)
np.testing.assert_array_equal(
ds.get_parameter_data()[p_name_2][sp_name_1],
this_sp_val
)
np.testing.assert_array_equal(
ds.get_parameter_data()[p_name_2][sp_name_2],
that_sp_val
)
this_read_data = ds.get_parameter_data()[p_name_1][p_name_1]
that_read_data = ds.get_parameter_data()[p_name_2][p_name_2]
assert len(this_read_data) == 15
assert len(that_read_data) == 15
np.testing.assert_array_equal(this_read_data, np.zeros(15))
np.testing.assert_array_equal(that_read_data, np.ones(15))
@pytest.mark.usefixtures("experiment")
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.parametrize("storage_type", ['numeric', 'array'])
@settings(deadline=None)
@given(Ns=hst.lists(hst.integers(2, 10), min_size=2, max_size=5))
def test_datasaver_arrays_of_different_length(storage_type, Ns, bg_writing):
"""
Test that we can save arrays of different length in a single call to
datasaver.add_result
"""
no_of_signals = len(Ns)
meas = Measurement()
meas.register_custom_parameter('temperature',
paramtype='numeric',
label='Temperature',
unit='K')
for n in range(no_of_signals):
meas.register_custom_parameter(f'freqs{n}', paramtype=storage_type)
meas.register_custom_parameter(f'signal{n}',
paramtype=storage_type,
setpoints=(f'freqs{n}', 'temperature'))
with meas.run(write_in_background=bg_writing) as datasaver:
result_t = ('temperature', 70)
result_freqs = list((f'freqs{n}', np.linspace(0, 1, Ns[n]))
for n in range(no_of_signals))
result_sigs = list((f'signal{n}', np.random.randn(Ns[n]))
for n in range(no_of_signals))
full_result = tuple(result_freqs + result_sigs + [result_t])
datasaver.add_result(*full_result)
ds = load_by_id(datasaver.run_id)
data = ds.get_parameter_data()
assert list(data.keys()) == [f'signal{n}' for n in range(no_of_signals)]
for n in range(no_of_signals):
assert (data[f'signal{n}']['temperature'] == np.array([70]*(Ns[n]))).all()
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("experiment")
def test_save_complex_num(complex_num_instrument, bg_writing):
"""
Test that we can save various parameters mixed with complex parameters
"""
# scalar complex parameter
setparam = complex_num_instrument.setpoint
param = complex_num_instrument.complex_num
# array parameter
arrayparam = complex_num_instrument.some_array
# complex array parameter
complexarrayparam = complex_num_instrument.some_complex_array
some_complex_array_setpoints = complex_num_instrument.some_complex_array_setpoints
meas = Measurement()
meas.register_parameter(setparam, paramtype='numeric')
meas.register_parameter(param, paramtype='complex', setpoints=(setparam,))
meas.register_parameter(arrayparam, paramtype='array',
setpoints=(setparam,))
meas.register_parameter(some_complex_array_setpoints, paramtype='numeric')
meas.register_parameter(complexarrayparam, paramtype='complex',
setpoints=(setparam, some_complex_array_setpoints))
with meas.run(write_in_background=bg_writing) as datasaver:
for i in range(10):
setparam.set(i)
datasaver.add_result((setparam, setparam()),
(param, param()),
*expand_setpoints_helper(arrayparam),
(some_complex_array_setpoints, some_complex_array_setpoints.get()),
(complexarrayparam, complexarrayparam.get()))
data = datasaver.dataset.get_parameter_data()
# scalar complex parameter
setpoints_num = data['dummy_channel_inst_complex_num'][
'dummy_channel_inst_setpoint']
data_num = data['dummy_channel_inst_complex_num'][
'dummy_channel_inst_complex_num']
assert_allclose(setpoints_num, np.arange(10))
assert_allclose(data_num, np.arange(10) + 1j*np.arange(10))
# array parameter
setpoints1_array = data['dummy_channel_inst_some_array'][
'dummy_channel_inst_setpoint']
assert_allclose(setpoints1_array, np.repeat(np.arange(10), 5).reshape(10, 5))
setpoints2_array = data['dummy_channel_inst_some_array'][
'dummy_channel_inst_some_array_setpoints']
assert_allclose(setpoints2_array, np.tile(np.arange(5), 10).reshape(10, 5))
array_data = data['dummy_channel_inst_some_array'][
'dummy_channel_inst_some_array']
assert_allclose(array_data, np.ones((10, 5)))
# complex array parameter
setpoints1_array = data['dummy_channel_inst_some_complex_array'][
'dummy_channel_inst_setpoint']
assert_allclose(setpoints1_array, np.repeat(np.arange(10), 5))
setpoints2_array = data['dummy_channel_inst_some_complex_array'][
'dummy_channel_inst_some_complex_array_setpoints']
assert_allclose(setpoints2_array, np.tile(np.arange(5), 10))
array_data = data['dummy_channel_inst_some_complex_array'][
'dummy_channel_inst_some_complex_array']
assert_allclose(array_data, np.ones(50)+1j*np.ones(50))
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("experiment")
def test_save_and_reload_complex_standalone(complex_num_instrument,
bg_writing):
param = complex_num_instrument.complex_num
complex_num_instrument.setpoint(1)
p = qc.instrument.parameter.Parameter(
'test',
set_cmd=None,
get_cmd=lambda: 1+1j,
vals=qc.utils.validators.ComplexNumbers())
meas = qc.dataset.measurements.Measurement()
meas.register_parameter(param)
pval = param.get()
with meas.run(write_in_background=bg_writing) as datasaver:
datasaver.add_result((param, pval))
data = datasaver.dataset.get_parameter_data()
data_num = data['dummy_channel_inst_complex_num'][
'dummy_channel_inst_complex_num']
assert_allclose(data_num, 1 + 1j)
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("experiment")
def test_save_complex_num_setpoints(complex_num_instrument, bg_writing):
"""
Test that we can save a parameter with complex setpoints
"""
setparam = complex_num_instrument.complex_setpoint
param = complex_num_instrument.real_part
meas = Measurement()
meas.register_parameter(setparam, paramtype='complex')
meas.register_parameter(param, paramtype='numeric', setpoints=(setparam,))
with meas.run(write_in_background=bg_writing) as datasaver:
for i in range(10):
setparam.set(i+1j*i)
datasaver.add_result((setparam, setparam()),
(param, param()))
data = datasaver.dataset.get_parameter_data()
setpoints_num = data['dummy_channel_inst_real_part'][
'dummy_channel_inst_complex_setpoint']
data_num = data['dummy_channel_inst_real_part'][
'dummy_channel_inst_real_part']
assert_allclose(setpoints_num, np.arange(10) + 1j*np.arange(10))
assert_allclose(data_num, np.arange(10))
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("experiment")
def test_save_complex_num_setpoints_array(complex_num_instrument, bg_writing):
"""
Test that we can save an array parameter with complex setpoints
"""
setparam = complex_num_instrument.complex_setpoint
param = complex_num_instrument.some_array
meas = Measurement()
meas.register_parameter(setparam, paramtype='complex')
meas.register_parameter(param, paramtype='array', setpoints=(setparam,))
with meas.run(write_in_background=bg_writing) as datasaver:
for i in range(10):
setparam.set(i+1j*i)
datasaver.add_result((setparam, setparam()),
*expand_setpoints_helper(param))
data = datasaver.dataset.get_parameter_data()
setpoints1 = data['dummy_channel_inst_some_array'][
'dummy_channel_inst_complex_setpoint']
setpoints2 = data['dummy_channel_inst_some_array'][
'dummy_channel_inst_some_array_setpoints']
data_num = data['dummy_channel_inst_some_array'][
'dummy_channel_inst_some_array']
assert_allclose(setpoints1, np.repeat(np.arange(10) +
1j*np.arange(10), 5).reshape((10, 5)))
assert_allclose(setpoints2, np.tile(np.arange(5), 10).reshape((10, 5)))
assert_allclose(data_num, np.ones((10, 5)))
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("experiment")
def test_save_complex_as_num_raises(complex_num_instrument, bg_writing):
setparam = complex_num_instrument.setpoint
param = complex_num_instrument.complex_num
meas = Measurement()
meas.register_parameter(setparam, paramtype='numeric')
meas.register_parameter(param, paramtype='numeric', setpoints=(setparam,))
expected_msg = ('Parameter dummy_channel_inst_complex_num is of '
'type "numeric", but got a result of '
'type complex128')
with meas.run(write_in_background=bg_writing) as datasaver:
setparam.set(0)
with pytest.raises(ValueError, match=expected_msg):
datasaver.add_result((setparam, setparam()),
(param, param()))
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("experiment")
def test_save_numeric_as_complex_raises(complex_num_instrument, bg_writing):
setparam = complex_num_instrument.setpoint
param = complex_num_instrument.complex_num
meas = Measurement()
meas.register_parameter(setparam, paramtype='numeric')
meas.register_parameter(param, paramtype='complex', setpoints=(setparam,))
expected_msg = ('Parameter dummy_channel_inst_complex_num is of '
'type "complex", but got a result of type int')
with meas.run(write_in_background=bg_writing) as datasaver:
setparam.set(0)
with pytest.raises(ValueError, match=expected_msg):
datasaver.add_result((setparam, setparam()),
(param, setparam()))
def test_parameter_inference(channel_array_instrument):
chan = channel_array_instrument.channels[0]
# default values
assert Measurement._infer_paramtype(chan.temperature, None) is None
assert Measurement._infer_paramtype(chan.dummy_array_parameter,
None) == 'array'
assert Measurement._infer_paramtype(chan.dummy_parameter_with_setpoints,
None) == 'array'
assert Measurement._infer_paramtype(chan.dummy_multi_parameter,
None) is None
assert Measurement._infer_paramtype(chan.dummy_scalar_multi_parameter,
None) is None
assert Measurement._infer_paramtype(chan.dummy_2d_multi_parameter,
None) is None
assert Measurement._infer_paramtype(chan.dummy_text,
None) == 'text'
assert Measurement._infer_paramtype(chan.dummy_complex,
None) == 'complex'
# overwrite the default with sensible alternatives
assert Measurement._infer_paramtype(chan.dummy_array_parameter,
'numeric') == 'numeric'
assert Measurement._infer_paramtype(chan.dummy_parameter_with_setpoints,
'numeric') == 'numeric'
assert Measurement._infer_paramtype(chan.dummy_multi_parameter,
'array') == 'array'
assert Measurement._infer_paramtype(chan.dummy_2d_multi_parameter,
'array') == 'array'
@pytest.mark.usefixtures("experiment")
def test_load_legacy_files_2D():
location = '../fixtures/2018-01-17/#002_2D_test_15-43-14'
fixtures_dir = os.path.dirname(__file__)
full_location = os.path.join(fixtures_dir, location)
run_ids = import_dat_file(full_location)
run_id = run_ids[0]
data = load_by_id(run_id)
assert data.parameters == 'dac_ch1_set,dac_ch2_set,dmm_voltage'
assert data.number_of_results == 36
expected_names = ['dac_ch1_set', 'dac_ch2_set', 'dmm_voltage']
expected_labels = ['Gate ch1', 'Gate ch2', 'Gate voltage']
expected_units = ['V', 'V', 'V']
expected_depends_on = ['', '', 'dac_ch1_set, dac_ch2_set']
for i, parameter in enumerate(data.get_parameters()):
assert parameter.name == expected_names[i]
assert parameter.label == expected_labels[i]
assert parameter.unit == expected_units[i]
assert parameter.depends_on == expected_depends_on[i]
assert parameter.type == 'numeric'
snapshot = json.loads(data.get_metadata('snapshot'))
assert sorted(list(snapshot.keys())) == ['__class__', 'arrays',
'formatter', 'io', 'location',
'loop', 'station']
@pytest.mark.usefixtures("experiment")
def test_load_legacy_files_1D():
location = '../fixtures/2018-01-17/#001_testsweep_15-42-57'
fixtures_dir = os.path.dirname(__file__)
full_location = os.path.join(fixtures_dir, location)
run_ids = import_dat_file(full_location)
run_id = run_ids[0]
data = load_by_id(run_id)
assert data.parameters == 'dac_ch1_set,dmm_voltage'
assert data.number_of_results == 201
expected_names = ['dac_ch1_set', 'dmm_voltage']
expected_labels = ['Gate ch1', 'Gate voltage']
expected_units = ['V', 'V']
expected_depends_on = ['', 'dac_ch1_set']
for i, parameter in enumerate(data.get_parameters()):
assert parameter.name == expected_names[i]
assert parameter.label == expected_labels[i]
assert parameter.unit == expected_units[i]
assert parameter.depends_on == expected_depends_on[i]
assert parameter.type == 'numeric'
snapshot = json.loads(data.get_metadata('snapshot'))
assert sorted(list(snapshot.keys())) == ['__class__', 'arrays',
'formatter', 'io', 'location',
'loop', 'station']
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("experiment")
def test_adding_parents(bg_writing, DAC):
"""
Test that we can register a DataSet as the parent of another DataSet
as created by the Measurement
"""
# The narrative of the test is that we do a measurement once, then learn
# from the result of that where to measure next. We want to annotate the
# second run as having the first run as predecessor
meas = (Measurement()
.register_parameter(DAC.ch1)
.register_parameter(DAC.ch2, setpoints=[DAC.ch1]))
with meas.run(write_in_background=bg_writing) as datasaver:
datasaver.add_result((DAC.ch1, 0), (DAC.ch2, 1))
parent_ds = datasaver.dataset
meas = (Measurement()
.register_parameter(DAC.ch1)
.register_parameter(DAC.ch2, setpoints=[DAC.ch1])
.register_parent(parent=parent_ds, link_type="predecessor"))
with meas.run(write_in_background=bg_writing) as datasaver:
datasaver.add_result((DAC.ch1, 1), (DAC.ch2, 2))
child_ds = datasaver.dataset
ds_links = child_ds.parent_dataset_links
assert len(ds_links) == 1
assert ds_links[0].tail == parent_ds.guid
assert ds_links[0].head == child_ds.guid
assert ds_links[0].edge_type == "predecessor"
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module that allows running existing pandas doctests with Beam dataframes.
This module hooks into the doctesting framework by providing a custom
runner and, in particular, an OutputChecker, as well as providing a fake
object for mocking out the pandas module.
The (novel) sequence of events when running a doctest is as follows.
1. The test invokes `pd.DataFrame(...)` (or similar) and an actual dataframe
is computed and stashed but a Beam deferred dataframe is returned
in its place.
2. Computations are done on these "dataframes," resulting in new objects,
but as these are actually deferred, only expression trees are built.
In the background, a mapping of id -> deferred dataframe is stored for
each newly created dataframe.
3. When any dataframe is printed out, the repr has been overwritten to
print `DeferredFrame[id]`. The aforementioned mapping is used to map this
back to the actual dataframe object, which is then computed via Beam, and
its (stringified) result is plugged into the actual output for comparison.
4. The comparison is then done on the sorted lines of the expected and actual
values.
"""
from __future__ import absolute_import
import collections
import contextlib
import doctest
import re
from typing import Any
from typing import Dict
from typing import List
import numpy as np
import pandas as pd
import apache_beam as beam
from apache_beam.dataframe import expressions
from apache_beam.dataframe import frames # pylint: disable=unused-import
from apache_beam.dataframe import transforms
from apache_beam.dataframe.frame_base import DeferredFrame
class TestEnvironment(object):
"""A class managing the patching (of methods, inputs, and outputs) needed
to run and validate tests.
    The DeferredFrame types are patched so that created inputs and computed
    results can be recognized and retrieved; they are stored in
    `self._inputs` and `self._all_frames` respectively.
"""
def __init__(self):
self._inputs = {}
self._all_frames = {}
def fake_pandas_module(self):
class FakePandas(object):
"""A stand-in for the pandas top-level module.
"""
# For now, only populated with the frame types (below).
# TODO(BEAM-9561): We may want to put more here.
pass
fake_pd = FakePandas()
for pandas_type, deferred_type in DeferredFrame._pandas_type_map.items():
setattr(
fake_pd,
pandas_type.__name__,
self._deferred_frame(pandas_type, deferred_type))
return fake_pd
def _deferred_frame(self, pandas_type, deferred_type):
"""Creates a "constructor" that record the actual value as an input and
returns a placeholder frame in its place."""
def wrapper(*args, **kwargs):
df = pandas_type(*args, **kwargs)
placeholder = expressions.PlaceholderExpression(df[0:0])
self._inputs[placeholder] = df
return deferred_type(placeholder)
return wrapper
@contextlib.contextmanager
def _monkey_patch_type(self, deferred_type):
"""Monkey-patch __init__ to record a pointer to all created frames, and
__repr__ to be able to recognize them in the doctest output.
"""
try:
old_init, old_repr = deferred_type.__init__, deferred_type.__repr__
def new_init(df, *args, **kwargs):
old_init(df, *args, **kwargs)
self._all_frames[id(df)] = df
deferred_type.__init__ = new_init
deferred_type.__repr__ = lambda self: 'DeferredFrame[%s]' % id(self)
self._recorded_results = collections.defaultdict(list)
yield
finally:
deferred_type.__init__, deferred_type.__repr__ = old_init, old_repr
@contextlib.contextmanager
def context(self):
"""Creates a context within which DeferredFrame types are monkey patched
to record ids."""
with contextlib.ExitStack() as stack:
for deferred_type in DeferredFrame._pandas_type_map.values():
stack.enter_context(self._monkey_patch_type(deferred_type))
yield
class _InMemoryResultRecorder(object):
"""Helper for extracting computed results from a Beam pipeline.
Used as follows::
with _InMemoryResultRecorder() as recorder:
with beam.Pipeline() as p:
...
pcoll | beam.Map(recorder.record_fn(name))
seen = recorder.get_recorded(name)
"""
# Class-level value to survive pickling.
_ALL_RESULTS = {} # type: Dict[str, List[Any]]
def __init__(self):
self._id = id(self)
def __enter__(self):
self._ALL_RESULTS[self._id] = collections.defaultdict(list)
return self
def __exit__(self, *unused_args):
del self._ALL_RESULTS[self._id]
def record_fn(self, name):
def record(value):
self._ALL_RESULTS[self._id][name].append(value)
return record
def get_recorded(self, name):
return self._ALL_RESULTS[self._id][name]
class _DeferredDataframeOutputChecker(doctest.OutputChecker):
"""Validates output by replacing DeferredFrame[...] with computed values.
"""
def __init__(self, env, use_beam):
self._env = env
if use_beam:
self.compute = self.compute_using_beam
else:
self.compute = self.compute_using_session
def compute_using_session(self, to_compute):
session = expressions.Session(self._env._inputs)
return {
name: frame._expr.evaluate_at(session)
for name,
frame in to_compute.items()
}
def compute_using_beam(self, to_compute):
with _InMemoryResultRecorder() as recorder:
with beam.Pipeline() as p:
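                # Feed each input as two interleaved shards so tests cannot
                # rely on element order being preserved through the pipeline.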
input_pcolls = {
placeholder: p
| 'Create%s' % placeholder >> beam.Create([input[::2], input[1::2]])
for placeholder,
input in self._env._inputs.items()
}
output_pcolls = (
input_pcolls | transforms._DataframeExpressionsTransform(
{name: frame._expr
for name, frame in to_compute.items()}))
for name, output_pcoll in output_pcolls.items():
_ = output_pcoll | 'Record%s' % name >> beam.FlatMap(
recorder.record_fn(name))
            # The pipeline has now run; record_fn captured the results as a
            # side effect, so read them back from the recorder.
return {
name: pd.concat(recorder.get_recorded(name))
for name in to_compute.keys()
}
def fix(self, want, got):
if 'DeferredFrame' in got:
to_compute = {
m.group(0): self._env._all_frames[int(m.group(1))]
for m in re.finditer(r'DeferredFrame\[(\d+)\]', got)
}
computed = self.compute(to_compute)
for name, frame in computed.items():
got = got.replace(name, repr(frame))
got = '\n'.join(sorted(line.rstrip() for line in got.split('\n')))
want = '\n'.join(sorted(line.rstrip() for line in want.split('\n')))
return want, got
def check_output(self, want, got, optionflags):
want, got = self.fix(want, got)
        return super(_DeferredDataframeOutputChecker,
                     self).check_output(want, got, optionflags)
    def output_difference(self, example, got, optionflags):
        want, got = self.fix(example.want, got)
        if want != example.want:
            example = doctest.Example(
                example.source,
                want,
                example.exc_msg,
                example.lineno,
                example.indent,
                example.options)
        return super(_DeferredDataframeOutputChecker,
                     self).output_difference(example, got, optionflags)
class BeamDataframeDoctestRunner(doctest.DocTestRunner):
"""A Doctest runner suitable for replacing the `pd` module with one backed
by beam.
"""
def __init__(self, env, use_beam=True, **kwargs):
self._test_env = env
super(BeamDataframeDoctestRunner, self).__init__(
            checker=_DeferredDataframeOutputChecker(self._test_env, use_beam),
**kwargs)
def run(self, test, **kwargs):
with self._test_env.context():
return super(BeamDataframeDoctestRunner, self).run(test, **kwargs)
def fake_pandas_module(self):
return self._test_env.fake_pandas_module()
def teststring(text, report=True, **runner_kwargs):
parser = doctest.DocTestParser()
runner = BeamDataframeDoctestRunner(TestEnvironment(), **runner_kwargs)
test = parser.get_doctest(
text, {
'pd': runner.fake_pandas_module(), 'np': np
},
'<string>',
'<string>',
0)
result = runner.run(test)
if report:
runner.summarize()
return result
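# Illustrative usage of teststring (a minimal sketch; whether a particular
# pandas operation is supported by the deferred frames is an assumption):
#
#   result = teststring(
#       ">>> df = pd.DataFrame({'a': [1, 2, 3]})\n"
#       ">>> df['a'] + 1\n"
#       "0    2\n"
#       "1    3\n"
#       "2    4\n"
#       "Name: a, dtype: int64")
#   print(result.failed, result.attempted)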
def testfile(*args, **kwargs):
return _run_patched(doctest.testfile, *args, **kwargs)
def testmod(*args, **kwargs):
return _run_patched(doctest.testmod, *args, **kwargs)
def _run_patched(func, *args, **kwargs):
    # Capture the original runner up front so the finally block can always
    # restore it, even if patching fails partway through.
    original_doc_test_runner = doctest.DocTestRunner
    try:
        env = TestEnvironment()
        use_beam = kwargs.pop('use_beam', True)
        extraglobs = dict(kwargs.pop('extraglobs', {}))
        extraglobs['pd'] = env.fake_pandas_module()
        # Unfortunately the runner class is not injectable, so patch it
        # globally for the duration of the call.
doctest.DocTestRunner = lambda **kwargs: BeamDataframeDoctestRunner(
env, use_beam=use_beam, **kwargs)
return func(*args, extraglobs=extraglobs, **kwargs)
finally:
doctest.DocTestRunner = original_doc_test_runner
|
#
# PySNMP MIB module NORTEL-WLAN-AP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NORTEL-WLAN-AP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:14:26 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion")
ifIndex, InterfaceIndex, ifPhysAddress = mibBuilder.importSymbols("IF-MIB", "ifIndex", "InterfaceIndex", "ifPhysAddress")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Integer32, Gauge32, Unsigned32, ObjectIdentity, iso, IpAddress, NotificationType, TimeTicks, MibIdentifier, Bits, Counter64, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "Gauge32", "Unsigned32", "ObjectIdentity", "iso", "IpAddress", "NotificationType", "TimeTicks", "MibIdentifier", "Bits", "Counter64", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32")
TruthValue, MacAddress, PhysAddress, TextualConvention, RowStatus, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "MacAddress", "PhysAddress", "TextualConvention", "RowStatus", "DisplayString")
wlan2200, = mibBuilder.importSymbols("SYNOPTICS-ROOT-MIB", "wlan2200")
nortelWlanApMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 45, 1, 11, 1))
nortelWlanApMib.setRevisions(('2003-07-16 00:00', '2003-09-11 00:00', '2004-04-12 00:00',))
if mibBuilder.loadTexts: nortelWlanApMib.setLastUpdated('200404120000Z')
if mibBuilder.loadTexts: nortelWlanApMib.setOrganization('Nortel Networks')
ntWlanApSys = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 1))
ntWlanApLineMgnt = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 2))
ntWlanApPortMgnt = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 3))
ntWlanApFileTransferMgt = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 4))
ntWlanApResetMgt = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 5))
ntWlanApIpMgt = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 6))
ntWlanApDot11 = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7))
ntWlanApTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 8))
ntWlanApLID = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 9))
ntWlanApRateSupport = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 10))
ntWlanApSecurity = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11))
ntWlanApQoS = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 12))
ntWlanApVlan = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 13))
ntWlanApStats = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 14))
class NtWlanApDataRate(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(2, 127)
class NtWlanApWEPKey(TextualConvention, OctetString):
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(5, 5), ValueSizeConstraint(13, 13), ValueSizeConstraint(16, 16), )
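# Note: the permitted key sizes above (0, 5, 13, or 16 octets) correspond to
# "no key" and to the 64-, 128- and 152-bit WEP modes named by
# ntWlanApSecurityWEPKeyLength below (the secret key excludes the 24-bit IV).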
ntWlanSwHardwareVer = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanSwHardwareVer.setStatus('current')
ntWlanSwBootRomVer = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanSwBootRomVer.setStatus('current')
ntWlanSwOpCodeVer = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanSwOpCodeVer.setStatus('current')
ntWlanSwCountryCode = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanSwCountryCode.setStatus('current')
ntWlanSwNNDataFileVer = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanSwNNDataFileVer.setStatus('current')
ntWlanLineTable = MibTable((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 2, 1), )
if mibBuilder.loadTexts: ntWlanLineTable.setStatus('current')
ntWlanLineEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 2, 1, 1), ).setIndexNames((0, "NORTEL-WLAN-AP-MIB", "ntWlanLineIndex"))
if mibBuilder.loadTexts: ntWlanLineEntry.setStatus('current')
ntWlanLineIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: ntWlanLineIndex.setStatus('current')
ntWlanLineDataBits = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 2, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanLineDataBits.setStatus('current')
ntWlanLineParity = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(99, 1, 2))).clone(namedValues=NamedValues(("none", 99), ("odd", 1), ("even", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanLineParity.setStatus('current')
ntWlanLineSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 2, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanLineSpeed.setStatus('current')
ntWlanLineStopBits = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 2, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanLineStopBits.setStatus('current')
ntWlanPortTable = MibTable((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 3, 1), )
if mibBuilder.loadTexts: ntWlanPortTable.setStatus('current')
ntWlanPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 3, 1, 1), ).setIndexNames((0, "NORTEL-WLAN-AP-MIB", "ntWlanPortIndex"))
if mibBuilder.loadTexts: ntWlanPortEntry.setStatus('current')
ntWlanPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 3, 1, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: ntWlanPortIndex.setStatus('current')
ntWlanPortName = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 3, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanPortName.setStatus('current')
ntWlanPortType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("other", 1), ("hundredBaseTX", 2), ("hundredBaseFX", 3), ("thousandBaseSX", 4), ("thousandBaseLX", 5), ("thousandBaseT", 6), ("thousandBaseGBIC", 7), ("thousandBaseMiniGBIC", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanPortType.setStatus('current')
ntWlanPortSpeedDpxCfg = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("reserved", 1), ("halfDuplex10", 2), ("fullDuplex10", 3), ("halfDuplex100", 4), ("fullDuplex100", 5), ("halfDuplex1000", 6), ("fullDuplex1000", 7))).clone('halfDuplex10')).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanPortSpeedDpxCfg.setStatus('current')
ntWlanPortFlowCtrlCfg = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("backPressure", 3), ("dot3xFlowControl", 4))).clone('enabled')).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanPortFlowCtrlCfg.setStatus('current')
ntWlanPortCapabilities = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(99, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15))).clone(namedValues=NamedValues(("portCap10half", 99), ("portCap10full", 1), ("portCap100half", 2), ("portCap100full", 3), ("portCap1000half", 4), ("portCap1000full", 5), ("reserved6", 6), ("reserved7", 7), ("reserved8", 8), ("reserved9", 9), ("reserved10", 10), ("reserved11", 11), ("reserved12", 12), ("reserved13", 13), ("portCapSym", 14), ("portCapFlowCtrl", 15)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanPortCapabilities.setStatus('current')
ntWlanPortAutonegotiation = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 3, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanPortAutonegotiation.setStatus('current')
ntWlanPortSpeedDpxStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 3, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("error", 1), ("halfDuplex10", 2), ("fullDuplex10", 3), ("halfDuplex100", 4), ("fullDuplex100", 5), ("halfDuplex1000", 6), ("fullDuplex1000", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanPortSpeedDpxStatus.setStatus('current')
ntWlanPortFlowCtrlStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 3, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("error", 1), ("backPressure", 2), ("dot3xFlowControl", 3), ("none", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanPortFlowCtrlStatus.setStatus('current')
ntWlanTransferStart = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 4, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("go", 1), ("nogo", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanTransferStart.setStatus('current')
ntWlanTransferType = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 4, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("ftpDownload", 1), ("tftpDownload", 2), ("ftpUpload", 3), ("tftpUpload", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanTransferType.setStatus('current')
ntWlanFileType = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 4, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("firmware", 1), ("config", 2), ("nortelConfig", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanFileType.setStatus('current')
ntWlanSrcFile = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 4, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanSrcFile.setStatus('current')
ntWlanDestFile = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 4, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDestFile.setStatus('current')
ntWlanFileServer = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 4, 6), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanFileServer.setStatus('current')
ntWlanUserName = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 4, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanUserName.setStatus('current')
ntWlanPassword = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 4, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanPassword.setStatus('current')
ntWlanFileTransferStatus = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 4, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("none", 1), ("inProgress", 2), ("success", 3), ("fail", 4), ("invalidSource", 5), ("invalidDestination", 6), ("outOfMemory", 7), ("outOfSpace", 8), ("fileNotFound", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanFileTransferStatus.setStatus('current')
ntWlanRestartOpCodeFile = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 5, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanRestartOpCodeFile.setStatus('current')
ntWlanRestartControl = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 5, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("running", 1), ("warmBoot", 2), ("coldBoot", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanRestartControl.setStatus('current')
ntWlanNetConfigIPAddress = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 6, 1), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanNetConfigIPAddress.setStatus('current')
ntWlanNetConfigSubnetMask = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 6, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanNetConfigSubnetMask.setStatus('current')
ntWlanNetDefaultGateway = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 6, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanNetDefaultGateway.setStatus('current')
ntWlanIpHttpState = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 6, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanIpHttpState.setStatus('current')
ntWlanIpHttpPort = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 6, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanIpHttpPort.setStatus('current')
ntWlanDot11Phy = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 4))
ntWlanDot11PhyOperationTable = MibTable((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 4, 1), )
if mibBuilder.loadTexts: ntWlanDot11PhyOperationTable.setStatus('current')
ntWlanDot11PhyOperationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 4, 1, 1), ).setIndexNames((0, "NORTEL-WLAN-AP-MIB", "ntWlanDot11Index"))
if mibBuilder.loadTexts: ntWlanDot11PhyOperationEntry.setStatus('current')
ntWlanDot11Index = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: ntWlanDot11Index.setStatus('current')
ntWlanDot11TurboModeEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(99, 1, 2))).clone(namedValues=NamedValues(("none", 99), ("on", 1), ("off", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanDot11TurboModeEnabled.setStatus('current')
ntWlanDot11PreambleLength = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(99, 1, 2))).clone(namedValues=NamedValues(("twelveSymbols", 99), ("short", 1), ("long", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot11PreambleLength.setStatus('current')
ntWlanDot11dWorldModeEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 4, 1, 1, 5), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot11dWorldModeEnabled.setStatus('current')
ntWlanDot11ClosedSystem = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 4, 1, 1, 6), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot11ClosedSystem.setStatus('current')
ntWlanDot11AuthenticationEntry = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 5))
ntWlanDot118021xSupport = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 5, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot118021xSupport.setStatus('current')
ntWlanDot118021xRequired = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 5, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot118021xRequired.setStatus('current')
ntWlanDot118021xBcastKeyRefresh = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 5, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1440))).setUnits('Minutes').setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot118021xBcastKeyRefresh.setStatus('current')
ntWlanDot118021xSessKeyRefresh = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 5, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1440))).setUnits('Minutes').setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot118021xSessKeyRefresh.setStatus('current')
ntWlanDot118021xReAuthRefresh = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 5, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setUnits('Seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot118021xReAuthRefresh.setStatus('current')
ntWlanDot11AuthenticationServerTable = MibTable((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 6), )
if mibBuilder.loadTexts: ntWlanDot11AuthenticationServerTable.setStatus('current')
ntWlanDot11AuthenticationServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 6, 1), ).setIndexNames((0, "NORTEL-WLAN-AP-MIB", "ntWlanDot11ServerIndex"))
if mibBuilder.loadTexts: ntWlanDot11AuthenticationServerEntry.setStatus('current')
ntWlanDot11ServerIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: ntWlanDot11ServerIndex.setStatus('current')
ntWlanDot11AuthenticationServer = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 6, 1, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot11AuthenticationServer.setStatus('current')
ntWlanDot11AuthenticationPort = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 6, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1024, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot11AuthenticationPort.setStatus('current')
ntWlanDot11AuthenticationKey = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 6, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot11AuthenticationKey.setStatus('current')
ntWlanDot11AuthenticationRetransmit = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 6, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 30))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot11AuthenticationRetransmit.setStatus('current')
ntWlanDot11AuthenticationTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 6, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 60))).setUnits('Seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot11AuthenticationTimeout.setStatus('current')
ntWlanDot11SecondaryAuthServer = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 6, 1, 7), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot11SecondaryAuthServer.setStatus('current')
ntWlanDot11SecondaryAuthPort = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 6, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1024, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot11SecondaryAuthPort.setStatus('current')
ntWlanDot11SecondaryAuthKey = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 6, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot11SecondaryAuthKey.setStatus('current')
ntWlanDot11SecondaryAuthRetransmit = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 6, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 30))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot11SecondaryAuthRetransmit.setStatus('current')
ntWlanDot11SecondaryAuthTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 6, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 60))).setUnits('Seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot11SecondaryAuthTimeout.setStatus('current')
ntWlanDot11FilterTable = MibTable((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 7), )
if mibBuilder.loadTexts: ntWlanDot11FilterTable.setStatus('current')
ntWlanDot11FilterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 7, 1), ).setIndexNames((0, "NORTEL-WLAN-AP-MIB", "ntWlanDot11FilterIndex"))
if mibBuilder.loadTexts: ntWlanDot11FilterEntry.setStatus('current')
ntWlanDot11FilterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 7, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: ntWlanDot11FilterIndex.setStatus('current')
ntWlanDot11FilterAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 7, 1, 2), PhysAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot11FilterAddress.setStatus('current')
ntWlanDot11FilterStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 7, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(30, 31))).clone(namedValues=NamedValues(("allowed", 30), ("denied", 31)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanDot11FilterStatus.setStatus('current')
ntWlanDot11TrapTable = MibTable((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 9), )
if mibBuilder.loadTexts: ntWlanDot11TrapTable.setStatus('current')
ntWlanDot11TrapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 9, 1), ).setIndexNames((0, "NORTEL-WLAN-AP-MIB", "ntWlanDot11InterfaceIndex"))
if mibBuilder.loadTexts: ntWlanDot11TrapEntry.setStatus('current')
ntWlanDot11InterfaceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: ntWlanDot11InterfaceIndex.setStatus('current')
ntWlanDot11AssociationStationAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 9, 1, 2), PhysAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanDot11AssociationStationAddress.setStatus('current')
ntWlanDot11DisassociationStationAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 9, 1, 3), PhysAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanDot11DisassociationStationAddress.setStatus('current')
ntWlanDot11AssociationMU = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 7, 9, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanDot11AssociationMU.setStatus('current')
ntWlanApTraps0 = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 8, 0))
ntWlanApDot1xAuthenticationFail = NotificationType((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 8, 0, 101)).setObjects(("IF-MIB", "ifPhysAddress"), ("NORTEL-WLAN-AP-MIB", "ntWlanDot11AssociationStationAddress"))
if mibBuilder.loadTexts: ntWlanApDot1xAuthenticationFail.setStatus('current')
ntWlanApMuAssocTrap = NotificationType((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 8, 0, 111)).setObjects(("IF-MIB", "ifPhysAddress"), ("NORTEL-WLAN-AP-MIB", "ntWlanDot11AssociationStationAddress"), ("NORTEL-WLAN-AP-MIB", "ntWlanDot11AssociationMU"))
if mibBuilder.loadTexts: ntWlanApMuAssocTrap.setStatus('current')
ntWlanApMuDisAssocTrap = NotificationType((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 8, 0, 112)).setObjects(("IF-MIB", "ifPhysAddress"), ("NORTEL-WLAN-AP-MIB", "ntWlanDot11DisassociationStationAddress"), ("NORTEL-WLAN-AP-MIB", "ntWlanDot11AssociationMU"))
if mibBuilder.loadTexts: ntWlanApMuDisAssocTrap.setStatus('current')
ntWlanApMuWEPAuthFail = NotificationType((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 8, 0, 113)).setObjects(("IF-MIB", "ifIndex"), ("NORTEL-WLAN-AP-MIB", "ntWlanDot11AssociationStationAddress"))
if mibBuilder.loadTexts: ntWlanApMuWEPAuthFail.setStatus('current')
ntWlanApMuWPAAuthFail = NotificationType((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 8, 0, 114)).setObjects(("IF-MIB", "ifIndex"), ("NORTEL-WLAN-AP-MIB", "ntWlanDot11AssociationStationAddress"))
if mibBuilder.loadTexts: ntWlanApMuWPAAuthFail.setStatus('current')
ntWlanApMuAssocTrapEnabled = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 8, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApMuAssocTrapEnabled.setStatus('current')
ntWlanApMuDisAssocTrapEnabled = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 8, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApMuDisAssocTrapEnabled.setStatus('current')
ntWlanApLIDCheckEtherLinkEnabled = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 9, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApLIDCheckEtherLinkEnabled.setStatus('current')
ntWlanApLIDCheckIPLinkEnabled = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 9, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApLIDCheckIPLinkEnabled.setStatus('current')
ntWlanApLIDCheckIpLinkAddress = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 9, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApLIDCheckIpLinkAddress.setStatus('current')
ntWlanApRateSupportTable = MibTable((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 10, 1), )
if mibBuilder.loadTexts: ntWlanApRateSupportTable.setStatus('current')
ntWlanApRateSupportEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 10, 1, 1), ).setIndexNames((0, "NORTEL-WLAN-AP-MIB", "ntWlanApRateSupportIfIndex"), (0, "NORTEL-WLAN-AP-MIB", "ntWlanApRateSupportSpeed"))
if mibBuilder.loadTexts: ntWlanApRateSupportEntry.setStatus('current')
ntWlanApRateSupportIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 10, 1, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: ntWlanApRateSupportIfIndex.setStatus('current')
ntWlanApRateSupportSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 10, 1, 1, 2), NtWlanApDataRate())
if mibBuilder.loadTexts: ntWlanApRateSupportSpeed.setStatus('current')
ntWlanApRateSupportLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 10, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("off", 0), ("supported", 1), ("supportedBasic", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApRateSupportLevel.setStatus('current')
ntWlanApSecurityTable = MibTable((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11, 1), )
if mibBuilder.loadTexts: ntWlanApSecurityTable.setStatus('current')
ntWlanApSecurityEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11, 1, 1), ).setIndexNames((0, "NORTEL-WLAN-AP-MIB", "ntWlanApSecurityIfIndex"))
if mibBuilder.loadTexts: ntWlanApSecurityEntry.setStatus('current')
ntWlanApSecurityIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11, 1, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: ntWlanApSecurityIfIndex.setStatus('current')
ntWlanApSecurityEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11, 1, 1, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApSecurityEnabled.setStatus('current')
ntWlanApSecurityWEPAuthType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("openSystem", 0), ("sharedKey", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApSecurityWEPAuthType.setStatus('current')
ntWlanApSecurityWEPKeyLength = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("wepKey64", 0), ("wepKey128", 1), ("wepKey152", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApSecurityWEPKeyLength.setStatus('current')
ntWlanApSecurityWEPActiveKey = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApSecurityWEPActiveKey.setStatus('current')
ntWlanApSecurityWEPKey1 = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11, 1, 1, 6), NtWlanApWEPKey()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApSecurityWEPKey1.setStatus('current')
ntWlanApSecurityWEPKey2 = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11, 1, 1, 7), NtWlanApWEPKey()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApSecurityWEPKey2.setStatus('current')
ntWlanApSecurityWEPKey3 = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11, 1, 1, 8), NtWlanApWEPKey()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApSecurityWEPKey3.setStatus('current')
ntWlanApSecurityWEPKey4 = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11, 1, 1, 9), NtWlanApWEPKey()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApSecurityWEPKey4.setStatus('current')
ntWlanApSecurityWPASupport = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("supported", 1), ("required", 2), ("wepOnly", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApSecurityWPASupport.setStatus('current')
ntWlanApSecurityWPAMode = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("dynamic", 0), ("preSharedKey", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApSecurityWPAMode.setStatus('current')
ntWlanApSecurityWPAPreSharedKey = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11, 1, 1, 12), OctetString().subtype(subtypeSpec=ValueSizeConstraint(63, 63)).setFixedLength(63)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApSecurityWPAPreSharedKey.setStatus('current')
ntWlanApSecurityWPAMcastCypherMode = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 11, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("wep", 0), ("tkip", 1), ("aes", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApSecurityWPAMcastCypherMode.setStatus('current')
ntWlanApQoSMode = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 12, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("off", 0), ("etherSrc", 1), ("etherDst", 2), ("ethertype", 3), ("directPriorityMap", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApQoSMode.setStatus('current')
ntWlanApQoSEtherTypeToQueueTable = MibTable((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 12, 2), )
if mibBuilder.loadTexts: ntWlanApQoSEtherTypeToQueueTable.setStatus('current')
ntWlanApQoSEtherTypeToQueueEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 12, 2, 1), ).setIndexNames((0, "NORTEL-WLAN-AP-MIB", "ntWlanApQoSEtherTypeToQueueIndex"))
if mibBuilder.loadTexts: ntWlanApQoSEtherTypeToQueueEntry.setStatus('current')
ntWlanApQoSEtherTypeToQueueIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 12, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)))
if mibBuilder.loadTexts: ntWlanApQoSEtherTypeToQueueIndex.setStatus('current')
ntWlanApQoSEtherTypeToQueueNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 12, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApQoSEtherTypeToQueueNumber.setStatus('current')
ntWlanApQoSEtherTypeToQueueRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 12, 2, 1, 3), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApQoSEtherTypeToQueueRowStatus.setStatus('current')
ntWlanApQoSMACToQueueTable = MibTable((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 12, 3), )
if mibBuilder.loadTexts: ntWlanApQoSMACToQueueTable.setStatus('current')
ntWlanApQoSMACToQueueEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 12, 3, 1), ).setIndexNames((0, "NORTEL-WLAN-AP-MIB", "ntWlanApQoSMACToQueueAddress"))
if mibBuilder.loadTexts: ntWlanApQoSMACToQueueEntry.setStatus('current')
ntWlanApQoSMACToQueueAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 12, 3, 1, 1), MacAddress())
if mibBuilder.loadTexts: ntWlanApQoSMACToQueueAddress.setStatus('current')
ntWlanApQoSMACToQueueNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 12, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApQoSMACToQueueNumber.setStatus('current')
ntWlanApQoSMACToQueueRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 12, 3, 1, 3), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApQoSMACToQueueRowStatus.setStatus('current')
ntWlanApVlanEnabled = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 13, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApVlanEnabled.setStatus('current')
ntWlanApVlanTable = MibTable((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 13, 2), )
if mibBuilder.loadTexts: ntWlanApVlanTable.setStatus('current')
ntWlanApVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 13, 2, 1), ).setIndexNames((0, "NORTEL-WLAN-AP-MIB", "ntWlanApVlanIfIndex"))
if mibBuilder.loadTexts: ntWlanApVlanEntry.setStatus('current')
ntWlanApVlanIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 13, 2, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: ntWlanApVlanIfIndex.setStatus('current')
ntWlanApVlanDefaultVid = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 13, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4094))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApVlanDefaultVid.setStatus('current')
ntWlanApVlanMUMACToVidTable = MibTable((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 13, 3), )
if mibBuilder.loadTexts: ntWlanApVlanMUMACToVidTable.setStatus('current')
ntWlanApVlanMUMACToVidEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 13, 3, 1), ).setIndexNames((0, "NORTEL-WLAN-AP-MIB", "ntWlanApVlanMUMACToVidAddress"))
if mibBuilder.loadTexts: ntWlanApVlanMUMACToVidEntry.setStatus('current')
ntWlanApVlanMUMACToVidAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 13, 3, 1, 1), MacAddress())
if mibBuilder.loadTexts: ntWlanApVlanMUMACToVidAddress.setStatus('current')
ntWlanApVlanMUMACToVidNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 13, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntWlanApVlanMUMACToVidNumber.setStatus('current')
ntWlanApMUStatsTable = MibTable((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 14, 1), )
if mibBuilder.loadTexts: ntWlanApMUStatsTable.setStatus('current')
ntWlanApMUStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 14, 1, 1), ).setIndexNames((0, "NORTEL-WLAN-AP-MIB", "ntWlanApMUStatsMUAddress"))
if mibBuilder.loadTexts: ntWlanApMUStatsEntry.setStatus('current')
ntWlanApMUStatsMUAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 14, 1, 1, 1), MacAddress())
if mibBuilder.loadTexts: ntWlanApMUStatsMUAddress.setStatus('current')
ntWlanApMUStatsPacketsIn = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 14, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanApMUStatsPacketsIn.setStatus('current')
ntWlanApMUStatsPacketsOut = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 14, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanApMUStatsPacketsOut.setStatus('current')
ntWlanApMUStatsOctetsIn = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 14, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanApMUStatsOctetsIn.setStatus('current')
ntWlanApMUStatsOctetsOut = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 11, 1, 14, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntWlanApMUStatsOctetsOut.setStatus('current')
mibBuilder.exportSymbols("NORTEL-WLAN-AP-MIB", ntWlanDot11PreambleLength=ntWlanDot11PreambleLength, ntWlanNetDefaultGateway=ntWlanNetDefaultGateway, ntWlanDot118021xRequired=ntWlanDot118021xRequired, ntWlanApLIDCheckEtherLinkEnabled=ntWlanApLIDCheckEtherLinkEnabled, ntWlanApQoSMACToQueueEntry=ntWlanApQoSMACToQueueEntry, ntWlanApQoSMACToQueueRowStatus=ntWlanApQoSMACToQueueRowStatus, ntWlanDot11AuthenticationEntry=ntWlanDot11AuthenticationEntry, ntWlanApMuDisAssocTrapEnabled=ntWlanApMuDisAssocTrapEnabled, ntWlanPortIndex=ntWlanPortIndex, ntWlanDot11DisassociationStationAddress=ntWlanDot11DisassociationStationAddress, ntWlanLineEntry=ntWlanLineEntry, ntWlanPortSpeedDpxCfg=ntWlanPortSpeedDpxCfg, ntWlanApQoS=ntWlanApQoS, ntWlanDot11ServerIndex=ntWlanDot11ServerIndex, ntWlanRestartControl=ntWlanRestartControl, ntWlanApLIDCheckIPLinkEnabled=ntWlanApLIDCheckIPLinkEnabled, ntWlanSwCountryCode=ntWlanSwCountryCode, ntWlanDot11Phy=ntWlanDot11Phy, ntWlanDot11AuthenticationPort=ntWlanDot11AuthenticationPort, ntWlanApSys=ntWlanApSys, ntWlanApVlanIfIndex=ntWlanApVlanIfIndex, ntWlanApRateSupport=ntWlanApRateSupport, ntWlanDot11AuthenticationServerTable=ntWlanDot11AuthenticationServerTable, ntWlanApMUStatsPacketsOut=ntWlanApMUStatsPacketsOut, ntWlanTransferStart=ntWlanTransferStart, ntWlanDot11TurboModeEnabled=ntWlanDot11TurboModeEnabled, NtWlanApWEPKey=NtWlanApWEPKey, ntWlanDot11AuthenticationServerEntry=ntWlanDot11AuthenticationServerEntry, ntWlanDot11AuthenticationRetransmit=ntWlanDot11AuthenticationRetransmit, ntWlanApMuWEPAuthFail=ntWlanApMuWEPAuthFail, ntWlanApSecurityWEPKey3=ntWlanApSecurityWEPKey3, ntWlanApVlanEntry=ntWlanApVlanEntry, ntWlanApMUStatsPacketsIn=ntWlanApMUStatsPacketsIn, ntWlanNetConfigSubnetMask=ntWlanNetConfigSubnetMask, ntWlanLineParity=ntWlanLineParity, ntWlanDot11FilterStatus=ntWlanDot11FilterStatus, ntWlanApMUStatsOctetsOut=ntWlanApMUStatsOctetsOut, ntWlanApSecurityEnabled=ntWlanApSecurityEnabled, ntWlanPortFlowCtrlCfg=ntWlanPortFlowCtrlCfg, ntWlanDot11AuthenticationKey=ntWlanDot11AuthenticationKey, ntWlanApQoSEtherTypeToQueueRowStatus=ntWlanApQoSEtherTypeToQueueRowStatus, ntWlanApSecurity=ntWlanApSecurity, ntWlanApRateSupportIfIndex=ntWlanApRateSupportIfIndex, ntWlanApSecurityIfIndex=ntWlanApSecurityIfIndex, ntWlanApVlanMUMACToVidEntry=ntWlanApVlanMUMACToVidEntry, nortelWlanApMib=nortelWlanApMib, ntWlanApVlanDefaultVid=ntWlanApVlanDefaultVid, ntWlanPortCapabilities=ntWlanPortCapabilities, ntWlanFileTransferStatus=ntWlanFileTransferStatus, ntWlanApVlanMUMACToVidAddress=ntWlanApVlanMUMACToVidAddress, ntWlanApMUStatsTable=ntWlanApMUStatsTable, ntWlanApDot11=ntWlanApDot11, ntWlanApSecurityWEPActiveKey=ntWlanApSecurityWEPActiveKey, ntWlanApMuWPAAuthFail=ntWlanApMuWPAAuthFail, ntWlanIpHttpState=ntWlanIpHttpState, ntWlanApQoSEtherTypeToQueueIndex=ntWlanApQoSEtherTypeToQueueIndex, ntWlanLineDataBits=ntWlanLineDataBits, ntWlanApSecurityWEPKey4=ntWlanApSecurityWEPKey4, ntWlanSwNNDataFileVer=ntWlanSwNNDataFileVer, ntWlanDot11FilterTable=ntWlanDot11FilterTable, ntWlanRestartOpCodeFile=ntWlanRestartOpCodeFile, ntWlanDot11SecondaryAuthTimeout=ntWlanDot11SecondaryAuthTimeout, ntWlanSwOpCodeVer=ntWlanSwOpCodeVer, ntWlanSwHardwareVer=ntWlanSwHardwareVer, ntWlanApLIDCheckIpLinkAddress=ntWlanApLIDCheckIpLinkAddress, ntWlanFileType=ntWlanFileType, ntWlanApSecurityWPAMode=ntWlanApSecurityWPAMode, ntWlanDot11ClosedSystem=ntWlanDot11ClosedSystem, ntWlanDot118021xSupport=ntWlanDot118021xSupport, ntWlanLineTable=ntWlanLineTable, ntWlanApVlanMUMACToVidTable=ntWlanApVlanMUMACToVidTable, 
ntWlanPortName=ntWlanPortName, ntWlanApSecurityTable=ntWlanApSecurityTable, ntWlanApIpMgt=ntWlanApIpMgt, ntWlanApVlanEnabled=ntWlanApVlanEnabled, ntWlanApFileTransferMgt=ntWlanApFileTransferMgt, ntWlanApVlanMUMACToVidNumber=ntWlanApVlanMUMACToVidNumber, ntWlanApRateSupportLevel=ntWlanApRateSupportLevel, ntWlanPortSpeedDpxStatus=ntWlanPortSpeedDpxStatus, ntWlanApTraps=ntWlanApTraps, ntWlanApVlanTable=ntWlanApVlanTable, NtWlanApDataRate=NtWlanApDataRate, ntWlanApSecurityWPAPreSharedKey=ntWlanApSecurityWPAPreSharedKey, ntWlanDot11FilterEntry=ntWlanDot11FilterEntry, ntWlanApQoSEtherTypeToQueueNumber=ntWlanApQoSEtherTypeToQueueNumber, ntWlanDot11SecondaryAuthRetransmit=ntWlanDot11SecondaryAuthRetransmit, ntWlanApSecurityWPAMcastCypherMode=ntWlanApSecurityWPAMcastCypherMode, ntWlanApTraps0=ntWlanApTraps0, ntWlanLineSpeed=ntWlanLineSpeed, ntWlanPortAutonegotiation=ntWlanPortAutonegotiation, ntWlanDot11Index=ntWlanDot11Index, ntWlanApRateSupportEntry=ntWlanApRateSupportEntry, ntWlanTransferType=ntWlanTransferType, ntWlanApSecurityWEPKeyLength=ntWlanApSecurityWEPKeyLength, ntWlanApMuDisAssocTrap=ntWlanApMuDisAssocTrap, ntWlanApQoSEtherTypeToQueueEntry=ntWlanApQoSEtherTypeToQueueEntry, ntWlanDot11InterfaceIndex=ntWlanDot11InterfaceIndex, ntWlanApQoSMACToQueueNumber=ntWlanApQoSMACToQueueNumber, ntWlanFileServer=ntWlanFileServer, ntWlanPortType=ntWlanPortType, ntWlanDot11SecondaryAuthKey=ntWlanDot11SecondaryAuthKey, PYSNMP_MODULE_ID=nortelWlanApMib, ntWlanDot11PhyOperationTable=ntWlanDot11PhyOperationTable, ntWlanApSecurityEntry=ntWlanApSecurityEntry, ntWlanApMuAssocTrapEnabled=ntWlanApMuAssocTrapEnabled, ntWlanApMUStatsOctetsIn=ntWlanApMUStatsOctetsIn, ntWlanLineIndex=ntWlanLineIndex, ntWlanApDot1xAuthenticationFail=ntWlanApDot1xAuthenticationFail, ntWlanPortFlowCtrlStatus=ntWlanPortFlowCtrlStatus, ntWlanNetConfigIPAddress=ntWlanNetConfigIPAddress, ntWlanApSecurityWEPKey2=ntWlanApSecurityWEPKey2, ntWlanDot11TrapTable=ntWlanDot11TrapTable, ntWlanApSecurityWEPKey1=ntWlanApSecurityWEPKey1, ntWlanDot118021xSessKeyRefresh=ntWlanDot118021xSessKeyRefresh, ntWlanApQoSEtherTypeToQueueTable=ntWlanApQoSEtherTypeToQueueTable, ntWlanApMUStatsMUAddress=ntWlanApMUStatsMUAddress, ntWlanDot11AuthenticationServer=ntWlanDot11AuthenticationServer, ntWlanApStats=ntWlanApStats, ntWlanUserName=ntWlanUserName, ntWlanDot11SecondaryAuthServer=ntWlanDot11SecondaryAuthServer, ntWlanApLineMgnt=ntWlanApLineMgnt, ntWlanApLID=ntWlanApLID, ntWlanApQoSMACToQueueTable=ntWlanApQoSMACToQueueTable, ntWlanDot11TrapEntry=ntWlanDot11TrapEntry, ntWlanApQoSMode=ntWlanApQoSMode, ntWlanApVlan=ntWlanApVlan, ntWlanApRateSupportTable=ntWlanApRateSupportTable, ntWlanDot118021xBcastKeyRefresh=ntWlanDot118021xBcastKeyRefresh, ntWlanSrcFile=ntWlanSrcFile, ntWlanSwBootRomVer=ntWlanSwBootRomVer, ntWlanIpHttpPort=ntWlanIpHttpPort, ntWlanApSecurityWEPAuthType=ntWlanApSecurityWEPAuthType, ntWlanDot11FilterAddress=ntWlanDot11FilterAddress, ntWlanDot11AssociationStationAddress=ntWlanDot11AssociationStationAddress, ntWlanDot11SecondaryAuthPort=ntWlanDot11SecondaryAuthPort, ntWlanLineStopBits=ntWlanLineStopBits, ntWlanDot11AuthenticationTimeout=ntWlanDot11AuthenticationTimeout, ntWlanApMuAssocTrap=ntWlanApMuAssocTrap, ntWlanApQoSMACToQueueAddress=ntWlanApQoSMACToQueueAddress, ntWlanPortTable=ntWlanPortTable, ntWlanPortEntry=ntWlanPortEntry, ntWlanApPortMgnt=ntWlanApPortMgnt, ntWlanDestFile=ntWlanDestFile, ntWlanPassword=ntWlanPassword, ntWlanDot11FilterIndex=ntWlanDot11FilterIndex, ntWlanDot118021xReAuthRefresh=ntWlanDot118021xReAuthRefresh, 
ntWlanDot11PhyOperationEntry=ntWlanDot11PhyOperationEntry, ntWlanDot11dWorldModeEnabled=ntWlanDot11dWorldModeEnabled, ntWlanApResetMgt=ntWlanApResetMgt, ntWlanDot11AssociationMU=ntWlanDot11AssociationMU, ntWlanApMUStatsEntry=ntWlanApMUStatsEntry, ntWlanApRateSupportSpeed=ntWlanApRateSupportSpeed, ntWlanApSecurityWPASupport=ntWlanApSecurityWPASupport)
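# Illustrative usage (a sketch, assuming pysnmp is installed and this compiled
# module is on the builder's MIB search path; note that `mibBuilder` above is
# injected by pysnmp when it exec's this file):
#
#   from pysnmp.smi import builder
#   mib_builder = builder.MibBuilder()
#   mib_builder.loadModules('NORTEL-WLAN-AP-MIB')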
|
#!/usr/bin/env python
#
# Copyright 2017, Data61
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# ABN 41 687 119 230.
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(DATA61_BSD)
#
#
# seL4 System Call Stub Generator
# ===============================
#
# 2009 David Greenaway
#
# This script generates system call stubs based on an XML specification of the
# objects that the kernel exports (and the methods those objects export).
#
# Previously, Magpie (an IDL compiler) was used to generate these stubs. As
# Magpie development progressed, support for a fixed ABI (i.e., the ABI
# implemented by the seL4 kernel) was lost, and support for generating
# alignment-safe code (required by platforms such as ARM) was also removed.
#
# This script is a stop-gap until these features can be restored in Magpie
# once again.
#
# The script has certain limitations:
#
# * It must be told the size of all types. This includes complex types
# such as structures.
#
#   We generate code that will cause compilation to fail if we get any
#   object's size wrong, which should help reduce the number of bugs caused
#   by this script becoming out of date compared to the source files.
#
# * The script has only been tested on the actual seL4 API XML description.
#
# No stress testing has taken place; there may be bugs if new and wonderful
# XML method descriptions are added.
#
import operator
import itertools
import xml.dom.minidom
from argparse import ArgumentParser
import sys
from functools import reduce
# Number of bits in a standard word
WORD_SIZE_BITS_ARCH = {
"aarch32": 32,
"ia32": 32,
"aarch64": 64,
"ia64": 64,
"x86_64": 64,
"arm_hyp": 32,
"riscv32": 32,
"riscv64": 64,
}
MESSAGE_REGISTERS_FOR_ARCH = {
"aarch32": 4,
"aarch64": 4,
"ia32": 2,
"x86_64": 4,
"arm_hyp": 4,
"riscv32": 4,
"riscv64": 4,
}
WORD_CONST_SUFFIX_BITS = {
32: "ul",
64: "ull",
}
# Maximum number of words that will be in a message.
MAX_MESSAGE_LENGTH = 64
# Headers to include
INCLUDES = [
'autoconf.h', 'sel4/types.h'
]
TYPES = {
8: "seL4_Uint8",
16: "seL4_Uint16",
32: "seL4_Uint32",
64: "seL4_Uint64"
}
class Type(object):
"""
    This class represents a C type (such as an 'int', a structure, or a
    pointer).
"""
def __init__(self, name, size_bits, wordsize, double_word=False, native_size_bits=None):
"""
        Define a new type named 'name' that is 'size_bits' bits long.
"""
self.name = name
self.size_bits = size_bits
self.wordsize = wordsize
self.double_word = double_word
#
# Store the number of bits C will use for this type
# in its native unpacked form.
#
# Required for 'bool', for example, which only uses 1
# bit when packed, but 32 bits when unpacked.
#
if native_size_bits:
self.native_size_bits = native_size_bits
else:
self.native_size_bits = size_bits
def pass_by_reference(self):
return self.size_bits > self.wordsize and not self.double_word
def render_parameter_name(self, name):
"""
Return a string of C code that would be used in a function
parameter declaration.
"""
return "%s %s" % (self.name, name)
def pointer(self):
"""
Return a new Type class representing a pointer to this
object.
"""
return PointerType(self, self.wordsize)
def c_expression(self, var_name, word_num=0):
"""
Return code for a C expression that gets word 'word_num'
of this type.
"""
assert word_num == 0
return "%s" % var_name
def double_word_expression(self, var_name, word_num, word_size):
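        """
        Return a C expression for word 'word_num' of this double-word type.
        Illustrative (the variable name is hypothetical): on a 32-bit target,
        a 64-bit value 'badge' yields '(seL4_Uint64) badge' for word 0 and
        '(seL4_Uint64) (badge >> 32)' for word 1.
        """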
assert word_num == 0 or word_num == 1
if word_num == 0:
return "({0}) {1}".format(TYPES[self.size_bits], var_name)
elif word_num == 1:
return "({0}) ({1} >> {2})".format(TYPES[self.size_bits], var_name,
word_size)
class PointerType(Type):
"""
A pointer to a standard type.
"""
def __init__(self, base_type, wordsize):
Type.__init__(self, base_type.name, wordsize, wordsize)
self.base_type = base_type
def render_parameter_name(self, name):
return "%s *%s" % (self.name, name)
def c_expression(self, var_name, word_num=0):
assert word_num == 0
return "*%s" % var_name
def pointer(self):
raise NotImplementedError()
class CapType(Type):
"""
A type that is just a typedef of seL4_CPtr.
"""
def __init__(self, name, wordsize):
Type.__init__(self, name, wordsize, wordsize)
class StructType(Type):
"""
A C 'struct' definition.
"""
def __init__(self, name, size_bits, wordsize):
Type.__init__(self, name, size_bits, wordsize)
def c_expression(self, var_name, word_num, member_name):
        assert word_num < self.size_bits // self.wordsize
# Multiword structure.
assert self.pass_by_reference()
return "%s->%s" % (var_name, member_name[word_num])
class BitFieldType(Type):
"""
A special C 'struct' generated by the bitfield generator
"""
def __init__(self, name, size_bits, wordsize):
Type.__init__(self, name, size_bits, wordsize)
def c_expression(self, var_name, word_num=0):
return "%s.words[%d]" % (var_name, word_num)
class Parameter(object):
def __init__(self, name, type):
self.name = name
self.type = type
class Api(object):
def __init__(self, node):
self.name = node.getAttribute("name")
self.label_prefix = node.getAttribute("label_prefix") or ""
#
# Types
#
def init_data_types(wordsize):
types = [
# Simple Types
Type("int", 32, wordsize),
Type("long", wordsize, wordsize),
Type("seL4_Uint8", 8, wordsize),
Type("seL4_Uint16", 16, wordsize),
Type("seL4_Uint32", 32, wordsize),
Type("seL4_Uint64", 64, wordsize, double_word=(wordsize == 32)),
Type("seL4_Word", wordsize, wordsize),
Type("seL4_Bool", 1, wordsize, native_size_bits=8),
# seL4 Structures
BitFieldType("seL4_CapRights_t", wordsize, wordsize),
# Object types
CapType("seL4_CPtr", wordsize),
CapType("seL4_CNode", wordsize),
CapType("seL4_IRQHandler", wordsize),
CapType("seL4_IRQControl", wordsize),
CapType("seL4_TCB", wordsize),
CapType("seL4_Untyped", wordsize),
CapType("seL4_DomainSet", wordsize),
]
return types
def init_arch_types(wordsize):
arch_types = {
"aarch32" : [
Type("seL4_ARM_VMAttributes", wordsize, wordsize),
CapType("seL4_ARM_Page", wordsize),
CapType("seL4_ARM_PageTable", wordsize),
CapType("seL4_ARM_PageDirectory", wordsize),
CapType("seL4_ARM_ASIDControl", wordsize),
CapType("seL4_ARM_ASIDPool", wordsize),
CapType("seL4_ARM_VCPU", wordsize),
CapType("seL4_ARM_IOSpace", wordsize),
CapType("seL4_ARM_IOPageTable", wordsize),
StructType("seL4_UserContext", wordsize * 17, wordsize),
],
"aarch64" : [
Type("seL4_ARM_VMAttributes", wordsize, wordsize),
CapType("seL4_ARM_Page", wordsize),
CapType("seL4_ARM_PageTable", wordsize),
CapType("seL4_ARM_PageDirectory", wordsize),
CapType("seL4_ARM_PageUpperDirectory", wordsize),
CapType("seL4_ARM_PageGlobalDirectory", wordsize),
CapType("seL4_ARM_ASIDControl", wordsize),
CapType("seL4_ARM_ASIDPool", wordsize),
CapType("seL4_ARM_VCPU", wordsize),
CapType("seL4_ARM_IOSpace", wordsize),
CapType("seL4_ARM_IOPageTable", wordsize),
StructType("seL4_UserContext", wordsize * 34, wordsize),
],
"arm_hyp" : [
Type("seL4_ARM_VMAttributes", wordsize, wordsize),
CapType("seL4_ARM_Page", wordsize),
CapType("seL4_ARM_PageTable", wordsize),
CapType("seL4_ARM_PageDirectory", wordsize),
CapType("seL4_ARM_ASIDControl", wordsize),
CapType("seL4_ARM_ASIDPool", wordsize),
CapType("seL4_ARM_VCPU", wordsize),
CapType("seL4_ARM_IOSpace", wordsize),
CapType("seL4_ARM_IOPageTable", wordsize),
StructType("seL4_UserContext", wordsize * 17, wordsize),
],
"ia32" : [
Type("seL4_X86_VMAttributes", wordsize, wordsize),
CapType("seL4_X86_IOPort", wordsize),
CapType("seL4_X86_IOPortControl", wordsize),
CapType("seL4_X86_ASIDControl", wordsize),
CapType("seL4_X86_ASIDPool", wordsize),
CapType("seL4_X86_IOSpace", wordsize),
CapType("seL4_X86_Page", wordsize),
CapType("seL4_X86_PageDirectory", wordsize),
CapType("seL4_X86_PageTable", wordsize),
CapType("seL4_X86_IOPageTable", wordsize),
CapType("seL4_X86_VCPU", wordsize),
CapType("seL4_X86_EPTPML4", wordsize),
CapType("seL4_X86_EPTPDPT", wordsize),
CapType("seL4_X86_EPTPD", wordsize),
CapType("seL4_X86_EPTPT", wordsize),
            StructType("seL4_VCPUContext", wordsize * 7, wordsize),
StructType("seL4_UserContext", wordsize * 13, wordsize),
],
"x86_64" : [
Type("seL4_X86_VMAttributes", wordsize, wordsize),
CapType("seL4_X86_IOPort", wordsize),
CapType("seL4_X86_IOPortControl", wordsize),
CapType("seL4_X86_ASIDControl", wordsize),
CapType("seL4_X86_ASIDPool", wordsize),
CapType("seL4_X86_IOSpace", wordsize),
CapType("seL4_X86_Page", wordsize),
CapType("seL4_X64_PML4", wordsize),
CapType("seL4_X86_PDPT", wordsize),
CapType("seL4_X86_PageDirectory", wordsize),
CapType("seL4_X86_PageTable", wordsize),
CapType("seL4_X86_IOPageTable", wordsize),
CapType("seL4_X86_VCPU", wordsize),
CapType("seL4_X86_EPTPML4", wordsize),
CapType("seL4_X86_EPTPDPT", wordsize),
CapType("seL4_X86_EPTPD", wordsize),
CapType("seL4_X86_EPTPT", wordsize),
            StructType("seL4_VCPUContext", wordsize * 7, wordsize),
StructType("seL4_UserContext", wordsize * 19, wordsize),
],
"riscv32" : [
Type("seL4_RISCV_VMAttributes", wordsize, wordsize),
CapType("seL4_RISCV_Page", wordsize),
CapType("seL4_RISCV_PageTable", wordsize),
CapType("seL4_RISCV_ASIDControl", wordsize),
CapType("seL4_RISCV_ASIDPool", wordsize),
StructType("seL4_UserContext", wordsize * 32, wordsize),
],
"riscv64" : [
Type("seL4_RISCV_VMAttributes", wordsize, wordsize),
CapType("seL4_RISCV_Page", wordsize),
CapType("seL4_RISCV_PageTable", wordsize),
CapType("seL4_RISCV_ASIDControl", wordsize),
CapType("seL4_RISCV_ASIDPool", wordsize),
StructType("seL4_UserContext", wordsize * 32, wordsize),
]
}
return arch_types
# Retrieve a member list for a given struct type
def struct_members(typ, structs):
members = [member for struct_name, member in structs if struct_name == typ.name]
assert len(members) == 1
return members[0]
# Keep increasing the given number 'x' until 'x % a == 0'.
def align_up(x, a):
if x % a == 0:
return x
return x + a - (x % a)
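# A quick worked example of the rounding behaviour:
#   align_up(13, 8) == 16
#   align_up(16, 8) == 16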
def get_parameter_positions(parameters, wordsize):
"""
Determine where each parameter should be packed in the generated message.
We generate a list of:
(param_name, param_type, first_bit, num_bits)
tuples.
We guarantee that either (num_words == 1) or (bit_offset == 0).
"""
bits_used = 0
results = []
for param in parameters:
# How big are we?
type_size = param.type.size_bits
# We need everything to be a power of two, or word sized.
assert ((type_size & (type_size - 1)) == 0) or (type_size % wordsize == 0)
# Align up to our own size, or the next word. (Whichever is smaller)
bits_used = align_up(bits_used, min(type_size, wordsize))
# Place ourself.
results.append((param, bits_used, type_size))
bits_used += type_size
return results
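# Worked example (hypothetical parameters) on a 32-bit platform: packing
# (seL4_Uint8 a, seL4_Uint8 b, seL4_Word c) yields the positions
#   (a, first_bit=0,  num_bits=8)
#   (b, first_bit=8,  num_bits=8)
#   (c, first_bit=32, num_bits=32)
# 'a' and 'b' share word 0, while the word-sized 'c' is aligned up to the
# start of word 1.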
def generate_param_list(input_params, output_params):
# Generate parameters
params = []
for param in input_params:
if not param.type.pass_by_reference():
params.append(param.type.render_parameter_name(param.name))
else:
params.append(param.type.pointer().render_parameter_name(param.name))
for param in output_params:
if param.type.pass_by_reference():
params.append(param.type.pointer().render_parameter_name(param.name))
return ", ".join(params)
def generate_marshal_expressions(params, num_mrs, structs, wordsize):
"""
Generate marshalling expressions for the given set of inputs.
We return a list of expressions; one expression per word required
to marshal all the inputs.
"""
def generate_param_code(param, first_bit, num_bits, word_array, wordsize):
"""
Generate code to marshal the given parameter into the correct
location in the message.
'word_array' is an array of the final contents of the message.
word_array[k] contains what should be placed in the k'th message
register, and is an array of expressions that will (eventually)
be bitwise-or'ed into it.
"""
target_word = first_bit // wordsize
target_offset = first_bit % wordsize
# double word type
if param.type.double_word:
word_array[target_word].append(param.type.double_word_expression(param.name, 0, wordsize))
word_array[target_word + 1].append(param.type.double_word_expression(param.name, 1, wordsize))
return
# Single full word?
if num_bits == wordsize:
assert target_offset == 0
expr = param.type.c_expression(param.name)
word_array[target_word].append(expr)
return
# Part of a word?
if num_bits < wordsize:
expr = param.type.c_expression(param.name)
expr = "(%s & %#x%s)" % (expr, (1 << num_bits) - 1,
WORD_CONST_SUFFIX_BITS[wordsize])
if target_offset:
expr = "(%s << %d)" % (expr, target_offset)
word_array[target_word].append(expr)
return
# Multiword array
assert target_offset == 0
num_words = num_bits // wordsize
for i in range(num_words):
expr = param.type.c_expression(param.name, i, struct_members(param.type, structs))
word_array[target_word + i].append(expr)
# Get their marshalling positions
positions = get_parameter_positions(params, wordsize)
# Generate marshal code.
words = [[] for _ in range(num_mrs, MAX_MESSAGE_LENGTH)]
for (param, first_bit, num_bits) in positions:
generate_param_code(param, first_bit, num_bits, words, wordsize)
# Return list of expressions.
return [" | ".join(x) for x in words if len(x) > 0]
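# Continuing the example above (the 'ul' constant suffix is illustrative for a
# 32-bit platform), the returned per-word expressions would be:
#   ["(a & 0xfful) | ((b & 0xfful) << 8)",   # word 0: a and b packed together
#    "c"]                                    # word 1: c occupies a full word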
def generate_unmarshal_expressions(params, wordsize):
"""
Generate unmarshalling expressions for the given set of outputs.
We return a list of list of expressions; one list per variable, containing
expressions for the words in it that must be unmarshalled. The expressions
will have tokens of the form:
"%(w0)s"
in them, indicating a read from a word in the message.
"""
def unmarshal_single_param(first_bit, num_bits, wordsize):
"""
Unmarshal a single parameter.
"""
first_word = first_bit // wordsize
bit_offset = first_bit % wordsize
# Multiword type?
if num_bits > wordsize:
result = []
for x in range(num_bits // wordsize):
result.append("%%(w%d)s" % (x + first_word))
return result
# Otherwise, bit packed.
if num_bits == wordsize:
return ["%%(w%d)s" % first_word]
elif bit_offset == 0:
return ["(%%(w%d)s & %#x)" % (
first_word, (1 << num_bits) - 1)]
else:
return ["(%%(w%d)s >> %d) & %#x" % (
first_word, bit_offset, (1 << num_bits) - 1)]
# Get their marshalling positions
positions = get_parameter_positions(params, wordsize)
# Generate the unmarshal code.
results = []
for (param, first_bit, num_bits) in positions:
results.append((param, unmarshal_single_param(first_bit, num_bits, wordsize)))
return results
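# For the same hypothetical layout, the returned (param, expressions) pairs
# would contain templated reads such as:
#   a -> ["(%(w0)s & 0xff)"]
#   b -> ["(%(w0)s >> 8) & 0xff"]
#   c -> ["%(w1)s"]
# where each "%(wN)s" token is later substituted with either a register
# variable ("mrN") or an IPC-buffer read ("seL4_GetMR(N)").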
def is_result_struct_required(output_params):
return len([x for x in output_params if not x.type.pass_by_reference()]) != 0
def generate_result_struct(interface_name, method_name, output_params):
"""
Generate a structure definition to be returned by the system call stubs to
the user.
We have a few constraints:
* We always need an 'error' output parameter, even though it won't
appear in the list 'output_params' given to us.
* Output parameters may be marked as 'pass_by_reference', indicating
that we only ever see pointers to the item.
If no structure is needed (i.e., we just return an error code), we return
'None'.
"""
# Do we actually need a structure?
if not is_result_struct_required(output_params):
return None
#
# Generate the structure:
#
# struct seL4_CNode_Copy {
# int error;
# seL4_Word foo;
# };
# typedef struct seL4_CNode_Copy seL4_CNode_Copy_t;
#
result = []
result.append("struct %s_%s {" % (interface_name, method_name))
result.append("\tint error;")
for i in output_params:
if not i.type.pass_by_reference():
result.append("\t%s;" % i.type.render_parameter_name(i.name))
result.append("};")
result.append("typedef struct %s_%s %s_%s_t;" % (
(interface_name, method_name, interface_name, method_name)))
result.append("")
return "\n".join(result)
def generate_stub(arch, wordsize, interface_name, method_name, method_id, input_params, output_params, structs, use_only_ipc_buffer, comment):
result = []
if use_only_ipc_buffer:
num_mrs = 0
else:
num_mrs = MESSAGE_REGISTERS_FOR_ARCH[arch]
# Split out cap parameters and standard parameters
standard_params = []
cap_params = []
for x in input_params:
if isinstance(x.type, CapType):
cap_params.append(x)
else:
standard_params.append(x)
# Determine if we are returning a structure, or just the error code.
returning_struct = False
results_structure = generate_result_struct(interface_name, method_name, output_params)
if results_structure:
return_type = "%s_%s_t" % (interface_name, method_name)
returning_struct = True
else:
return_type = "seL4_Error"
#
# Print doxygen comment.
#
result.append(comment)
#
# Print function header.
#
# static inline int
# seL4_Untyped_Retype(...)
# {
#
result.append("LIBSEL4_INLINE %s" % return_type)
result.append("%s_%s(%s)" % (interface_name, method_name,
generate_param_list(input_params, output_params)))
result.append("{")
#
# Get a list of expressions for our caps and inputs.
#
input_expressions = generate_marshal_expressions(standard_params, num_mrs,
structs, wordsize)
cap_expressions = [x.name for x in cap_params]
service_cap = cap_expressions[0]
cap_expressions = cap_expressions[1:]
#
# Compute how many words the inputs and output will require.
#
input_param_words = len(input_expressions)
    output_param_words = sum([p.type.size_bits for p in output_params]) // wordsize
#
# Setup variables we will need.
#
result.append("\t%s result;" % return_type)
result.append("\tseL4_MessageInfo_t tag = seL4_MessageInfo_new(%s, 0, %d, %d);" % (method_id, len(cap_expressions), len(input_expressions)))
result.append("\tseL4_MessageInfo_t output_tag;")
for i in range(num_mrs):
result.append("\tseL4_Word mr%d;" % i)
result.append("")
#
# Copy capabilities.
#
# /* Setup input capabilities. */
# seL4_SetCap(i, cap);
#
if len(cap_expressions) > 0:
result.append("\t/* Setup input capabilities. */")
for i in range(len(cap_expressions)):
result.append("\tseL4_SetCap(%d, %s);" % (i, cap_expressions[i]))
result.append("")
#
# Copy in the inputs.
#
# /* Marshal input parameters. */
# seL4_SetMR(i, v);
# ...
#
if max(num_mrs, len(input_expressions)) > 0:
result.append("\t/* Marshal and initialise parameters. */")
# Initialise in-register parameters
for i in range(num_mrs):
if i < len(input_expressions):
result.append("\tmr%d = %s;" % (i, input_expressions[i]))
else:
result.append("\tmr%d = 0;" % i)
# Initialise buffered parameters
for i in range(num_mrs, len(input_expressions)):
result.append("\tseL4_SetMR(%d, %s);" % (i, input_expressions[i]))
result.append("")
#
# Generate the call.
#
if use_only_ipc_buffer:
result.append("\t/* Perform the call. */")
result.append("\toutput_tag = seL4_Call(%s, tag);" % service_cap)
else:
result.append("\t/* Perform the call, passing in-register arguments directly. */")
result.append("\toutput_tag = seL4_CallWithMRs(%s, tag," % (service_cap))
result.append("\t\t%s);" % ', '.join(
("&mr%d" % i) for i in range(num_mrs)))
#
# Prepare the result.
#
label = "result.error" if returning_struct else "result"
cast = " (%s)" % return_type if not returning_struct else ""
result.append("\t%s =%s seL4_MessageInfo_get_label(output_tag);" % (label, cast))
result.append("")
if not use_only_ipc_buffer:
result.append("\t/* Unmarshal registers into IPC buffer on error. */")
result.append("\tif (%s != seL4_NoError) {" % label)
for i in range(num_mrs):
result.append("\t\tseL4_SetMR(%d, mr%d);" % (i, i))
if returning_struct:
result.append("\t\treturn result;")
result.append("\t}")
result.append("")
#
# Generate unmarshalling code.
#
if len(output_params) > 0:
result.append("\t/* Unmarshal result. */")
source_words = {}
for i in range(MAX_MESSAGE_LENGTH):
if i < num_mrs:
source_words["w%d" % i] = "mr%d" % i
else:
source_words["w%d" % i] = "seL4_GetMR(%d)" % i
        unmarshalled_params = generate_unmarshal_expressions(output_params, wordsize)
        for (param, words) in unmarshalled_params:
if param.type.pass_by_reference():
members = struct_members(param.type, structs)
for i in range(len(words)):
result.append("\t%s->%s = %s;" %
(param.name, members[i], words[i] % source_words))
else:
if param.type.double_word:
result.append("\tresult.%s = ((%s)%s + ((%s)%s << 32));" %
(param.name, TYPES[64], words[0] % source_words,
TYPES[64], words[1] % source_words))
else:
for word in words:
result.append("\tresult.%s = %s;" % (param.name, word % source_words))
#
# }
#
result.append("\treturn result;")
result.append("}")
return "\n".join(result) + "\n"
def get_xml_element_contents(element):
"""
Converts the contents of an xml element into a string, with all
child xml nodes unchanged.
"""
return "".join([c.toxml() for c in element.childNodes])
def get_xml_element_content_with_xmlonly(element):
"""
Converts the contents of an xml element into a string, wrapping
all child xml nodes in doxygen @xmlonly/@endxmlonly keywords.
"""
result = []
prev_element = False
for node in element.childNodes:
if node.nodeType == xml.dom.Node.TEXT_NODE:
if prev_element:
# text node following element node
result.append(" @endxmlonly ")
prev_element = False
else:
if not prev_element:
# element node following text node
result.append(" @xmlonly ")
prev_element = True
result.append(node.toxml())
return "".join(result)
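# For example, an element whose contents are
#   Resets the <tt>TCB</tt> state.
# is rendered as
#   "Resets the  @xmlonly <tt>TCB</tt> @endxmlonly  state."
# so doxygen passes the embedded XML through unmodified.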
def normalise_text(text):
"""
Removes leading and trailing whitespace from each line of text.
Removes leading and trailing blank lines from text.
"""
stripped = text.strip()
stripped_lines = [line.strip() for line in text.split("\n")]
# remove leading and trailing empty lines
stripped_head = list(itertools.dropwhile(lambda s: not s, stripped_lines))
stripped_tail = itertools.dropwhile(lambda s: not s, reversed(stripped_head))
return "\n".join(reversed(list(stripped_tail)))
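# For example, normalise_text("\n  foo\n   bar  \n\n") returns "foo\nbar":
# each line is stripped and the surrounding blank lines are dropped.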
def parse_xml_file(input_file, valid_types):
"""
Parse an XML file containing method definitions.
"""
# Create a dictionary of type name to type.
type_names = {}
for i in valid_types:
type_names[i.name] = i
# Parse the XML to generate method structures.
methods = []
structs = []
doc = xml.dom.minidom.parse(input_file)
api = Api(doc.getElementsByTagName("api")[0])
for struct in doc.getElementsByTagName("struct"):
_struct_members = []
struct_name = struct.getAttribute("name")
for members in struct.getElementsByTagName("member"):
member_name = members.getAttribute("name")
_struct_members.append(member_name)
structs.append((struct_name, _struct_members))
for interface in doc.getElementsByTagName("interface"):
interface_name = interface.getAttribute("name")
interface_manual_name = interface.getAttribute("manual_name") or interface_name
interface_cap_description = interface.getAttribute("cap_description")
for method in interface.getElementsByTagName("method"):
method_name = method.getAttribute("name")
method_id = method.getAttribute("id")
method_condition = method.getAttribute("condition")
method_manual_name = method.getAttribute("manual_name") or method_name
method_manual_label = method.getAttribute("manual_label")
if not method_manual_label:
# If no manual label is specified, infer one from the interface and method
# names by combining the interface name and method name.
method_manual_label = ("%s_%s" % (interface_manual_name, method_manual_name)) \
.lower() \
.replace(" ", "_") \
.replace("/", "")
# Prefix the label with an api-wide label prefix
method_manual_label = "%s%s" % (api.label_prefix, method_manual_label)
comment_lines = ["@xmlonly <manual name=\"%s\" label=\"%s\"/> @endxmlonly" %
(method_manual_name, method_manual_label)]
method_brief = method.getElementsByTagName("brief")
if method_brief:
method_brief_text = get_xml_element_contents(method_brief[0])
normalised_method_brief_text = normalise_text(method_brief_text)
comment_lines.append("@brief @xmlonly %s @endxmlonly" % normalised_method_brief_text)
method_description = method.getElementsByTagName("description")
if method_description:
method_description_text = get_xml_element_contents(method_description[0])
normalised_method_description_text = normalise_text(method_description_text)
comment_lines.append("\n@xmlonly\n%s\n@endxmlonly\n" % normalised_method_description_text)
#
# Get parameters.
#
# We always have an implicit cap parameter.
#
input_params = [Parameter("_service", type_names[interface_name])]
cap_description = interface_cap_description
cap_param = method.getElementsByTagName("cap_param")
if cap_param:
append_description = cap_param[0].getAttribute("append_description")
if append_description:
cap_description += append_description
comment_lines.append("@param[in] _service %s" % cap_description)
output_params = []
for param in method.getElementsByTagName("param"):
param_name = param.getAttribute("name")
param_type = type_names.get(param.getAttribute("type"))
if not param_type:
raise Exception("Unknown type '%s'." % (param.getAttribute("type")))
param_dir = param.getAttribute("dir")
assert (param_dir == "in") or (param_dir == "out")
if param_dir == "in":
input_params.append(Parameter(param_name, param_type))
else:
output_params.append(Parameter(param_name, param_type))
if param_dir == "in" or param_type.pass_by_reference():
param_description = param.getAttribute("description")
if not param_description:
param_description_element = param.getElementsByTagName("description")
if param_description_element:
param_description_text = get_xml_element_content_with_xmlonly(param_description_element[0])
param_description = normalise_text(param_description_text)
comment_lines.append("@param[%s] %s %s " % (param_dir, param_name, param_description))
method_return_description = method.getElementsByTagName("return")
if method_return_description:
comment_lines.append("@return @xmlonly %s @endxmlonly" % get_xml_element_contents(method_return_description[0]))
else:
# no return documentation given - default to something sane
if is_result_struct_required(output_params):
comment_lines.append("@return @xmlonly @endxmlonly")
else:
comment_lines.append("@return @xmlonly <errorenumdesc/> @endxmlonly")
# split each line on newlines
comment_lines = reduce(operator.add, [l.split("\n") for l in comment_lines], [])
# place the comment text in a c comment
comment = "\n".join(["/**"] + [" * %s" % l for l in comment_lines] + [" */"])
methods.append((interface_name, method_name, method_id, input_params, output_params, method_condition, comment))
return (methods, structs, api)
def generate_stub_file(arch, wordsize, input_files, output_file, use_only_ipc_buffer):
"""
Generate a header file containing system call stubs for seL4.
"""
result = []
# Ensure architecture looks sane.
if arch not in WORD_SIZE_BITS_ARCH.keys():
raise Exception("Invalid architecture.")
data_types = init_data_types(wordsize)
arch_types = init_arch_types(wordsize)
# Parse XML
methods = []
structs = []
for infile in input_files:
method, struct, _ = parse_xml_file(infile, data_types + arch_types[arch])
methods += method
structs += struct
# Print header.
result.append("""
/*
* Automatically generated system call stubs.
*/
#ifndef __LIBSEL4_SEL4_CLIENT_H
#define __LIBSEL4_SEL4_CLIENT_H
""")
# Emit the includes
result.append('\n'.join(['#include <%s>' % include for include in INCLUDES]))
#
# Emit code to ensure that all of our type sizes are consistent with
# the compiler's.
#
result.append("""
/*
* The following code generates a compile-time error if the system call
* stub generator has an incorrect understanding of how large a type is.
*
* If you receive a compile-time error here, you will need to adjust
* the type information in the stub generator.
*/
#define assert_size_correct(type, expected_bytes) \\
typedef unsigned long __type_##type##_size_incorrect[ \\
(sizeof(type) == expected_bytes) ? 1 : -1]
""")
for x in data_types + arch_types[arch]:
        result.append("assert_size_correct(%s, %d);" % (x.name, x.native_size_bits // 8))
result.append("")
#
# Generate structures needed to return results back to the user.
#
# We can not use pass-by-reference (except for really large objects), as
# the verification framework does not support them.
#
result.append("/*")
result.append(" * Return types for generated methods.")
result.append(" */")
for (interface_name, method_name, _, _, output_params, _, _) in methods:
results_structure = generate_result_struct(interface_name, method_name, output_params)
if results_structure:
result.append(results_structure)
#
# Generate the actual stub code.
#
result.append("/*")
result.append(" * Generated stubs.")
result.append(" */")
for (interface_name, method_name, method_id, inputs, outputs, condition, comment) in methods:
if condition != "":
result.append("#if %s" % condition)
result.append(generate_stub(arch, wordsize, interface_name, method_name,
method_id, inputs, outputs, structs, use_only_ipc_buffer, comment))
if condition != "":
result.append("#endif")
# Print footer.
result.append("#endif /* __LIBSEL4_SEL4_CLIENT_H */")
result.append("")
# Write the output
    with open(output_file, "w") as output:
        output.write("\n".join(result))
def process_args():
usage_str = """
%(prog)s [OPTIONS] [FILES] """
epilog_str = """
"""
parser = ArgumentParser(description='seL4 System Call Stub Generator.',
usage=usage_str,
epilog=epilog_str)
parser.add_argument("-o", "--output", dest="output", default="/dev/stdout",
help="Output file to write stub to. (default: %(default)s).")
parser.add_argument("-b", "--buffer", dest="buffer", action="store_true", default=False,
help="Use IPC buffer exclusively, i.e. do not pass syscall arguments by registers. (default: %(default)s)")
parser.add_argument("-a", "--arch", dest="arch", required=True, choices=WORD_SIZE_BITS_ARCH,
help="Architecture to generate stubs for.")
wsizegroup = parser.add_mutually_exclusive_group()
wsizegroup.add_argument("-w", "--word-size", dest="wsize",
                            help="Word size (in bits) for the platform.")
wsizegroup.add_argument("-c", "--cfile", dest="cfile",
help="Config file for Kbuild, used to get Word size.")
parser.add_argument("files", metavar="FILES", nargs="+",
help="Input XML files.")
return parser
def main():
parser = process_args()
args = parser.parse_args()
if not (args.wsize or args.cfile):
parser.error("Require either -w/--word-size or -c/--cfile argument.")
sys.exit(2)
# Get word size
wordsize = -1
if args.cfile:
try:
with open(args.cfile) as conffile:
for line in conffile:
if line.startswith('CONFIG_WORD_SIZE'):
wordsize = int(line.split('=')[1].strip())
        except (IndexError, ValueError):
print("Invalid word size in configuration file.")
sys.exit(2)
else:
wordsize = int(args.wsize)
    if wordsize == -1:
print("Invalid word size.")
sys.exit(2)
# Generate the stubs.
generate_stub_file(args.arch, wordsize, args.files, args.output, args.buffer)
if __name__ == "__main__":
sys.exit(main())
|
from selenium import webdriver
import os
import time
from bs4 import BeautifulSoup
import lxml
import json
import re
# open up the browser and navigate to the homepage of careerbuilder.com
career_builder_base_url = 'http://www.careerbuilder.com/?cbRecursionCnt=1'
phantomjs_path = '../phantomjs-2.0.0-windows/bin/phantomjs.exe'
# browser = webdriver.Firefox()
# browser = webdriver.Chrome('../chromedriver_win32/chromedriver.exe')
browser = webdriver.PhantomJS(executable_path=phantomjs_path, service_log_path=os.path.devnull)
browser.implicitly_wait(10)
browser.get(career_builder_base_url)
# locate the input fields and search buttons
keywords_input = browser.find_element_by_id('search-key')
location_input = browser.find_element_by_id('search-loc')
search_button = browser.find_element_by_id('search-button')
keywords = 'java'
location = '22304'
# start the search
keywords_input.send_keys(keywords)
location_input.send_keys(location)
search_button.click()
print('search button clicked')
# since we do not know which element loads last,
# we explicitly wait 5 seconds after the search button was hit
time.sleep(5)
# click the appropriate 'sort by' button
sort_by_job_title_button = browser.find_element_by_id('SortBox1_JobTitleSort')
sort_by_location_button = browser.find_element_by_id('SortBox1_cbhlDist')
sort_by_relevance_button = browser.find_element_by_id('SortBox1_cbhlKey')
sort_by_date_button = browser.find_element_by_id('SortBox1_cbhlDate')
sort_by_date_button.click()
# print 'sort button clicked'
# we should have now reached the first page in the results
# we should now grab the html in all result pages
# first we take the page source and append it to the htmls list
# then try to find the next button (anchor tag actually)
# if next button found, we click on it and repeat the process above
# if next button not found, we have reached the last page of possible results
# we therefore break out of the while loop
htmls = []
while True:
    for i in range(100):
browser.execute_script("window.scrollBy(0,100);")
time.sleep(0.03)
# # time.sleep(3)
# # give the page 3 seconds to fully load
htmls.append(browser.page_source)
    try:
        print('trying to click a next button')
        next_button = browser.find_element_by_class_name('JL_MXDLPagination2_next')
        next_button.click()
        print('next button clicked')
    except Exception:
        print('failed to click next button')
        break
# we are now done with the selenium and the webdrivers
# do not forget to close the webdriver
browser.close()
jobs = []
for html in htmls:
    print('new html source parsed')
soup = BeautifulSoup(html, 'lxml')
    table = soup.find('table', id='NJL_ND')
    # recursive=False because there are nested tables;
    # if recursive=True, find_all will drill down to more than one level,
    # which is not what we want
    rows = table.find_all('table', recursive=False)
jobs_on_page = []
for row in rows:
        print('new job added')
        job_on_page = {}
try:
title = row.find('h2', itemprop='title').text.strip()
except:
title = 'failed'
job_on_page['title'] = title
try:
summary = row.find('span',id=re.compile('NJL_ND__ctl[0-9]+_[a-z]+Teaser')).text.strip()
except:
summary = 'failed'
job_on_page['summary'] = summary
try:
if row.find(id=re.compile('NJL_ND__ctl[0-9]+_[a-z]+Company')):
employer = row.find(id=re.compile('NJL_ND__ctl[0-9]+_[a-z]+Company')).text.strip()
else:
employer = None
except:
employer = 'failed'
job_on_page['employer'] = employer
try:
location = row.find(id=re.compile('NJL_ND__ctl[0-9]+_[a-z]+Location')).text.strip()
except:
location = 'failed'
job_on_page['location'] = location
try:
date = row.find(id=re.compile('NJL_ND__ctl[0-9]+_[a-z]+Posted')).text.strip()
except:
date = 'failed'
job_on_page['date'] = date
jobs_on_page.append(job_on_page)
jobs += jobs_on_page
with open('careerbuilder.JSON', 'w') as outfile:
json.dump(jobs, outfile, indent=4)
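# The resulting careerbuilder.JSON is a list of records shaped like the sketch
# below (field values illustrative; 'failed' marks fields that could not be parsed):
# [
#     {
#         "title": "Java Developer",
#         "summary": "...",
#         "employer": "Acme Corp",
#         "location": "Alexandria, VA",
#         "date": "Today"
#     }
# ]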
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class MonitorManagementClientConfiguration(Configuration):
"""Configuration for MonitorManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(MonitorManagementClientConfiguration, self).__init__(**kwargs)
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2021-04-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-monitor/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
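# Usage sketch (not part of the generated code): any azure-core TokenCredential
# works; DefaultAzureCredential below is just one common choice and requires
# the azure-identity package.
#
#   from azure.identity import DefaultAzureCredential
#
#   config = MonitorManagementClientConfiguration(
#       credential=DefaultAzureCredential(),
#       subscription_id="<subscription-id>",
#   )
#   # config.retry_policy, config.authentication_policy, etc. are now populated.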
|
"""
Managers of ``blog`` application.
"""
from django.db import models
class CategoryOnlineManager(models.Manager):
"""
Manager that manages online ``Category`` objects.
"""
def get_queryset(self):
from blog.models import Entry
entry_status = Entry.STATUS_ONLINE
return super(CategoryOnlineManager, self).get_queryset().filter(
entry__status=entry_status).distinct()
class EntryOnlineManager(models.Manager):
"""
Manager that manages online ``Entry`` objects.
"""
def get_queryset(self):
return super(EntryOnlineManager, self).get_queryset().filter(
status=self.model.STATUS_ONLINE)
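# Usage sketch (hypothetical model wiring): attach these managers alongside the
# default manager, e.g.
#
#   class Entry(models.Model):
#       STATUS_ONLINE = 1
#       status = models.IntegerField(default=STATUS_ONLINE)
#
#       objects = models.Manager()
#       online = EntryOnlineManager()
#
#   Entry.online.all()      # only entries with status == STATUS_ONLINE
#   Category.online.all()   # only categories with at least one online entry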
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListCustomerOnDemandResourcesRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'body': 'QueryCustomerOnDemandResourcesReq'
}
attribute_map = {
'x_language': 'X-Language',
'body': 'body'
}
def __init__(self, x_language=None, body=None):
"""ListCustomerOnDemandResourcesRequest - a model defined in huaweicloud sdk"""
self._x_language = None
self._body = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
if body is not None:
self.body = body
@property
def x_language(self):
"""Gets the x_language of this ListCustomerOnDemandResourcesRequest.
        Language. Chinese: zh_CN; English: en_US. Defaults to zh_CN.
:return: The x_language of this ListCustomerOnDemandResourcesRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this ListCustomerOnDemandResourcesRequest.
        Language. Chinese: zh_CN; English: en_US. Defaults to zh_CN.
:param x_language: The x_language of this ListCustomerOnDemandResourcesRequest.
:type: str
"""
self._x_language = x_language
@property
def body(self):
"""Gets the body of this ListCustomerOnDemandResourcesRequest.
:return: The body of this ListCustomerOnDemandResourcesRequest.
:rtype: QueryCustomerOnDemandResourcesReq
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this ListCustomerOnDemandResourcesRequest.
:param body: The body of this ListCustomerOnDemandResourcesRequest.
:type: QueryCustomerOnDemandResourcesReq
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListCustomerOnDemandResourcesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
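# Usage sketch (the body construction is illustrative; see
# QueryCustomerOnDemandResourcesReq for its actual fields):
#
#   request = ListCustomerOnDemandResourcesRequest(
#       x_language='en_US',
#       body=QueryCustomerOnDemandResourcesReq(),
#   )
#   print(request)       # __repr__ serialises the model to JSON
#   request.to_dict()    # plain-dict form, sensitive fields masked as "****"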
|
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.data import (
DataLoader,
DatasetReader,
Instance,
Vocabulary,
TextFieldTensors,
)
from jel.common_config import CACHE_ROOT
from allennlp.modules.token_embedders.embedding import _read_embeddings_from_text_file
def bert_emb_returner():
return BasicTextFieldEmbedder(
{'tokens': PretrainedTransformerEmbedder(model_name='cl-tohoku/bert-base-japanese')})
def chive_emb_returner(vocab: Vocabulary) -> BasicTextFieldEmbedder:
# embed_matrix = _read_embeddings_from_text_file(
# file_uri="./resources/chive-1.1-mc30.txt",
# embedding_dim=300,
# vocab=vocab
# )
token_embedding = Embedding(num_embeddings=vocab.get_vocab_size('tokens'),
embedding_dim=300,
pretrained_file=str(CACHE_ROOT)+"/resources/chive-1.1-mc30.txt",
vocab=vocab)
return BasicTextFieldEmbedder({'tokens': token_embedding})
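# Usage sketch (assumes a Vocabulary with a 'tokens' namespace built from the
# training instances; the chive pretrained file is resolved under CACHE_ROOT):
#
#   vocab = Vocabulary.from_instances(train_instances)
#   embedder = chive_emb_returner(vocab)
#   # embedder(text_field_tensors) -> FloatTensor of shape (batch, seq_len, 300)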
|
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
from django.shortcuts import render, redirect
urlpatterns = [
    # This is the home page url pattern
    path('', views.index, name='index'),
    path('explore', views.explore, name='explore'),
    path('notification', views.notification, name='notification'),
    path('profile', views.profile, name='profile'),
    path('login', views.login, name='login'),
    path('logout', views.index, {'next_page': 'accounts:login'}, name='logout'),
    path('upload', views.upload, name='upload'),
]
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from qf_lib.data_providers.bloomberg import BloombergDataProvider
from qf_lib_tests.manual_tests import futures_strategy, spx_with_stop_loss
from qf_lib_tests.manual_tests import simple_ma_strategy
from qf_lib_tests.unit_tests.config.test_settings import get_test_settings
class TestStrategies(unittest.TestCase):
def setUp(self) -> None:
settings = get_test_settings()
self.data_provider = BloombergDataProvider(settings)
self.data_provider.connect()
if not self.data_provider.connected:
raise self.skipTest("No Bloomberg connection")
def test_futures_strategy(self):
expected_value = 10317477.750000006
expected_data_checksum = "88eb90dc28c7375204507ed2a112543955757f04"
actual_end_value, actual_data_checksum = futures_strategy.run_strategy(self.data_provider)
self.assertAlmostEqual(expected_value, actual_end_value, places=2)
self.assertEqual(expected_data_checksum, actual_data_checksum)
def test_simple_ma_strategy(self):
expected_value = 898294.64
expected_data_checksum = "0a29492c3a276286375f08c51a9733a37e60f81e"
actual_end_value, actual_data_checksum = simple_ma_strategy.run_strategy(self.data_provider)
self.assertAlmostEqual(expected_value, actual_end_value, places=2)
self.assertEqual(expected_data_checksum, actual_data_checksum)
def test_spx_with_stop_loss(self):
expected_value = 1137843
expected_data_checksum = "76bb3cb970068333d0d46b017871c7063dcd6fe2"
actual_end_value, actual_data_checksum = spx_with_stop_loss.run_strategy(self.data_provider)
self.assertAlmostEqual(expected_value, actual_end_value, delta=10)
self.assertEqual(expected_data_checksum, actual_data_checksum)
|
"""Provides 'odometry', which loads and parses odometry benchmark data."""
import datetime as dt
import glob
import os
from collections import namedtuple
import numpy as np
import pykitti.utils as utils
__author__ = "Lee Clement"
__email__ = "lee.clement@robotics.utias.utoronto.ca"
class odometry:
"""Load and parse odometry benchmark data into a usable format."""
def __init__(self, base_path, sequence, **kwargs):
"""Set the path."""
self.sequence = sequence
self.sequence_path = os.path.join(base_path, 'sequences', sequence)
self.pose_path = os.path.join(base_path, 'poses')
self.frames = kwargs.get('frames', None)
# Default image file extension is 'png'
self.imtype = kwargs.get('imtype', 'png')
# Find all the data files
self._get_file_lists()
# Pre-load data that isn't returned as a generator
self._load_calib()
self._load_timestamps()
self._load_poses()
def __len__(self):
"""Return the number of frames loaded."""
return len(self.timestamps)
@property
def cam0(self):
"""Generator to read image files for cam0 (monochrome left)."""
return utils.yield_images(self.cam0_files, mode='L')
def get_cam0(self, idx):
"""Read image file for cam0 (monochrome left) at the specified index."""
return utils.load_image(self.cam0_files[idx], mode='L')
@property
def cam1(self):
"""Generator to read image files for cam1 (monochrome right)."""
return utils.yield_images(self.cam1_files, mode='L')
def get_cam1(self, idx):
"""Read image file for cam1 (monochrome right) at the specified index."""
return utils.load_image(self.cam1_files[idx], mode='L')
@property
def cam2(self):
"""Generator to read image files for cam2 (RGB left)."""
return utils.yield_images(self.cam2_files, mode='RGB')
def get_cam2(self, idx):
"""Read image file for cam2 (RGB left) at the specified index."""
return utils.load_image(self.cam2_files[idx], mode='RGB')
@property
    def cam3(self):
        """Generator to read image files for cam3 (RGB right)."""
return utils.yield_images(self.cam3_files, mode='RGB')
def get_cam3(self, idx):
"""Read image file for cam3 (RGB right) at the specified index."""
return utils.load_image(self.cam3_files[idx], mode='RGB')
@property
def gray(self):
"""Generator to read monochrome stereo pairs from file.
"""
return zip(self.cam0, self.cam1)
def get_gray(self, idx):
"""Read monochrome stereo pair at the specified index."""
return (self.get_cam0(idx), self.get_cam1(idx))
@property
def rgb(self):
"""Generator to read RGB stereo pairs from file.
"""
return zip(self.cam2, self.cam3)
def get_rgb(self, idx):
"""Read RGB stereo pair at the specified index."""
return (self.get_cam2(idx), self.get_cam3(idx))
@property
def velo(self):
"""Generator to read velodyne [x,y,z,reflectance] scan data from binary files."""
# Return a generator yielding Velodyne scans.
# Each scan is a Nx4 array of [x,y,z,reflectance]
return utils.yield_velo_scans(self.velo_files)
def get_velo(self, idx):
"""Read velodyne [x,y,z,reflectance] scan at the specified index."""
return utils.load_velo_scan(self.velo_files[idx])
def _get_file_lists(self):
"""Find and list data files for each sensor."""
self.cam0_files = sorted(glob.glob(
os.path.join(self.sequence_path, 'image_0',
'*.{}'.format(self.imtype))))
self.cam1_files = sorted(glob.glob(
os.path.join(self.sequence_path, 'image_1',
'*.{}'.format(self.imtype))))
self.cam2_files = sorted(glob.glob(
os.path.join(self.sequence_path, 'image_2',
'*.{}'.format(self.imtype))))
self.cam3_files = sorted(glob.glob(
os.path.join(self.sequence_path, 'image_3',
'*.{}'.format(self.imtype))))
self.velo_files = sorted(glob.glob(
os.path.join(self.sequence_path, 'velodyne',
'*.bin')))
# Subselect the chosen range of frames, if any
if self.frames is not None:
self.cam0_files = utils.subselect_files(
self.cam0_files, self.frames)
self.cam1_files = utils.subselect_files(
self.cam1_files, self.frames)
self.cam2_files = utils.subselect_files(
self.cam2_files, self.frames)
self.cam3_files = utils.subselect_files(
self.cam3_files, self.frames)
self.velo_files = utils.subselect_files(
self.velo_files, self.frames)
def _load_calib(self):
"""Load and compute intrinsic and extrinsic calibration parameters."""
# We'll build the calibration parameters as a dictionary, then
# convert it to a namedtuple to prevent it from being modified later
data = {}
# Load the calibration file
calib_filepath = os.path.join(self.sequence_path, 'calib.txt')
filedata = utils.read_calib_file(calib_filepath)
# Create 3x4 projection matrices
P_rect_00 = np.reshape(filedata['P0'], (3, 4))
P_rect_10 = np.reshape(filedata['P1'], (3, 4))
P_rect_20 = np.reshape(filedata['P2'], (3, 4))
P_rect_30 = np.reshape(filedata['P3'], (3, 4))
data['P_rect_00'] = P_rect_00
data['P_rect_10'] = P_rect_10
data['P_rect_20'] = P_rect_20
data['P_rect_30'] = P_rect_30
# Compute the rectified extrinsics from cam0 to camN
T1 = np.eye(4)
T1[0, 3] = P_rect_10[0, 3] / P_rect_10[0, 0]
T2 = np.eye(4)
T2[0, 3] = P_rect_20[0, 3] / P_rect_20[0, 0]
T3 = np.eye(4)
T3[0, 3] = P_rect_30[0, 3] / P_rect_30[0, 0]
# Compute the velodyne to rectified camera coordinate transforms
data['T_cam0_velo'] = np.reshape(filedata['Tr'], (3, 4))
data['T_cam0_velo'] = np.vstack([data['T_cam0_velo'], [0, 0, 0, 1]])
data['T_cam1_velo'] = T1.dot(data['T_cam0_velo'])
data['T_cam2_velo'] = T2.dot(data['T_cam0_velo'])
data['T_cam3_velo'] = T3.dot(data['T_cam0_velo'])
# Compute the camera intrinsics
data['K_cam0'] = P_rect_00[0:3, 0:3]
data['K_cam1'] = P_rect_10[0:3, 0:3]
data['K_cam2'] = P_rect_20[0:3, 0:3]
data['K_cam3'] = P_rect_30[0:3, 0:3]
# Compute the stereo baselines in meters by projecting the origin of
# each camera frame into the velodyne frame and computing the distances
# between them
p_cam = np.array([0, 0, 0, 1])
p_velo0 = np.linalg.inv(data['T_cam0_velo']).dot(p_cam)
p_velo1 = np.linalg.inv(data['T_cam1_velo']).dot(p_cam)
p_velo2 = np.linalg.inv(data['T_cam2_velo']).dot(p_cam)
p_velo3 = np.linalg.inv(data['T_cam3_velo']).dot(p_cam)
data['b_gray'] = np.linalg.norm(p_velo1 - p_velo0) # gray baseline
data['b_rgb'] = np.linalg.norm(p_velo3 - p_velo2) # rgb baseline
self.calib = namedtuple('CalibData', data.keys())(*data.values())
def _load_timestamps(self):
"""Load timestamps from file."""
timestamp_file = os.path.join(self.sequence_path, 'times.txt')
# Read and parse the timestamps
self.timestamps = []
with open(timestamp_file, 'r') as f:
for line in f.readlines():
t = dt.timedelta(seconds=float(line))
self.timestamps.append(t)
# Subselect the chosen range of frames, if any
if self.frames is not None:
self.timestamps = [self.timestamps[i] for i in self.frames]
def _load_poses(self):
"""Load ground truth poses (T_w_cam0) from file."""
pose_file = os.path.join(self.pose_path, self.sequence + '.txt')
# Read and parse the poses
poses = []
try:
with open(pose_file, 'r') as f:
lines = f.readlines()
if self.frames is not None:
lines = [lines[i] for i in self.frames]
for line in lines:
T_w_cam0 = np.fromstring(line, dtype=float, sep=' ')
T_w_cam0 = T_w_cam0.reshape(3, 4)
T_w_cam0 = np.vstack((T_w_cam0, [0, 0, 0, 1]))
poses.append(T_w_cam0)
except FileNotFoundError:
            print('Ground truth poses are not available for sequence ' +
                  self.sequence + '.')
self.poses = poses
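# Usage sketch (paths and sequence are illustrative):
#
#   dataset = odometry('/data/kitti/odometry', '04', frames=range(0, 100, 5))
#   left, right = dataset.get_gray(0)    # first monochrome stereo pair
#   scan = dataset.get_velo(0)           # Nx4 array of [x, y, z, reflectance]
#   T_w_cam0 = dataset.poses[0]          # 4x4 ground-truth pose, if available
#   K_cam2 = dataset.calib.K_cam2        # 3x3 intrinsics of the left RGB camera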
|
### generate plots of luciferase data:
### Import dependencies
import matplotlib
matplotlib.use('Agg') ### set backend
import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42 # this keeps most text as actual text in PDFs, not outlines
plt.tight_layout()
import sys
import math
import matplotlib.patches as mpatches
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
from pylab import *
import argparse
### import libsettings file and add to global namespace
parser = argparse.ArgumentParser()
parser.add_argument('--rootDir', help= 'the root directory containing data and scripts')
parser.add_argument('--threadNumb', help= 'number of threads')
args = parser.parse_args()
sys.path.append("%s/figures" % args.rootDir)
sys.path.append("%s/figures/figscripts" % args.rootDir)
rootDir = args.rootDir
threadNumb = str(args.threadNumb)
#### functions:
def input_luc_data():
df = pd.read_csv("%s/Data/Luc/Figure1_LucData.csv" % rootDir)
exclusion_list = ['Untrans', 'H2O', 'Tris']
df = df.loc[~df['Treatment'].isin(exclusion_list)]
df['Ratio'] = df['Nluc']/df['Fluc']
for i in df.index:
df.loc[i,'Test'] = str(df.loc[i,'Treatment']) + "_" + str(df.loc[i,'Conc'])
df.reset_index(drop=True, inplace=True)
return df
def calc_luc_ratios(df):
### for data collected in triplicate:
index_all = range(len(df))
# repN = 6
tr_index = [0, 3, 9, 15, 21, 27, 33, 39, 45, 51, 57, 63, 69, 75, 81]
# tr_index = index_all[::repN]
treatments = []
means = []
for i in tr_index:
tr = str(df.loc[i,'Treatment'])
conc = str(df.loc[i,'Conc'])
trOut = tr+"_"+conc
treatments.append(trOut)
    print(treatments)
fM = []
fSD = []
fSEM = []
nM = []
nSD = []
nSEM = []
    rM = []
    rSD = []
    rSEM = []
for i in tr_index:
if i == 0:
repN = 3
else:
repN = 6
fluc_vals = df.loc[i:i+repN-1,'Fluc']
nluc_vals = df.loc[i:i+repN-1,'Nluc']
ratio_vals = df.loc[i:i+repN-1, 'Ratio']
### Fluc
fluc_mean = np.mean(fluc_vals)
fluc_sd = np.std(fluc_vals, ddof=1)
fluc_sem = stats.sem(fluc_vals)
fM.append(fluc_mean)
fSD.append(fluc_sd)
fSEM.append(fluc_sem)
### Nluc
nluc_mean = np.mean(nluc_vals)
nluc_sd = np.std(nluc_vals, ddof=1)
nluc_sem = stats.sem(nluc_vals)
nM.append(nluc_mean)
nSD.append(nluc_sd)
nSEM.append(nluc_sem)
### ratio
ratio_mean = np.mean(ratio_vals)
ratio_sd = np.std(ratio_vals, ddof=1)
ratio_sem = stats.sem(ratio_vals)
rM.append(ratio_mean)
rSD.append(ratio_sd)
rSEM.append(ratio_sem)
    new_ind = range(len(df) // repN)
df_dict = {'Test':treatments,
'fluc_mean':fM,
'fluc_sd':fSD,
'fluc_sem':fSEM,
'nluc_mean':nM,
'nluc_sd':nSD,
'nluc_sem':nSEM,
'ratio_mean':rM,
'ratio_sd':rSD,
'ratio_sem':rSEM
}
dfout = pd.DataFrame.from_dict(df_dict)
fluc_ctrl_mean = dfout.loc[0, 'fluc_mean']
nluc_ctrl_mean = dfout.loc[0, 'nluc_mean']
ctrl_mean = dfout.loc[0,'ratio_mean']
# ctrl_sd = dfout.loc[1,'ratio_sd']
# ctrl_sem = dfout.loc[1,'ratio_sem']
dfout['rM_FC'] = dfout['ratio_mean']/ctrl_mean
dfout['rSD_FC'] = dfout['ratio_sd']/ctrl_mean
dfout['rSEM_FC'] = dfout['ratio_sem']/ctrl_mean
return dfout, fluc_ctrl_mean, ctrl_mean
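# Worked example of the fold-change columns (numbers illustrative): if the
# control row's ratio_mean is 0.02 and a treatment's ratio_mean is 0.50, then
# its rM_FC is 0.50 / 0.02 = 25, i.e. a 25-fold increase over the control;
# rSD_FC and rSEM_FC scale the spread by the same control mean.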
def plot_1B(df, dfout, fluc_ctrl_mean):
### plot Fluc
fig, ax = plt.subplots(figsize=(6,2.5))
sns.swarmplot(data=df, x='Test', y='Fluc', size=3)
sns.boxplot(data=dfout, x='Test', y='fluc_mean', showbox=False , width = 0.5,
showcaps=False, color = "black")
plt.errorbar(dfout['Test'], dfout['fluc_mean'], yerr=dfout['fluc_sd'], capsize = 3, ls='none', color = "black")
ax=plt.gca()
for item in ax.get_xticklabels():
item.set_rotation(90)
axhline(y=fluc_ctrl_mean/2, linestyle="--", color="#999999")
ax.set_ylim(0, 1000000)
plt.savefig("%s/figures/Fig1B.pdf" % rootDir, format="pdf", bbox_inches = "tight")
def plot_1C(df, dfout):
### plot Nluc
# sns.catplot(x='Test', y='Nluc', data=df, kind='swarm', height = 6, aspect=12/6)
fig, ax = plt.subplots(figsize=(6,2.5))
sns.swarmplot(data=df, x='Test', y='Nluc', size=3)
sns.boxplot(data=dfout, x='Test', y='nluc_mean', showbox=False , width = 0.5,
showcaps=False, color = "black")
plt.errorbar(dfout['Test'], dfout['nluc_mean'], yerr=dfout['nluc_sd'], capsize = 3, ls='none', color = "black")
ax=plt.gca()
for item in ax.get_xticklabels():
item.set_rotation(90)
ax.set_ylim(0, 1000000)
plt.savefig("%s/figures/Fig1C.pdf" % rootDir, format="pdf", bbox_inches = "tight")
def plot_1D_low(df, dfout, ctrl_mean):
dfp1 = df.copy()
dfp2 = dfout.copy()
dfp1['RatioScaled'] = dfp1['Ratio']/ctrl_mean
fig, ax = plt.subplots(figsize=(6,1.25))
sns.swarmplot(data=dfp1, x='Test', y='RatioScaled', size=3)
sns.boxplot(data=dfp2, x='Test', y='rM_FC', showbox=False , width = 0.5,
showcaps=False, color = "black")
plt.errorbar(dfp2['Test'], dfout['rM_FC'], yerr=dfout['rSD_FC'], capsize = 3, ls='none', color = "black")
ax=plt.gca()
for item in ax.get_xticklabels():
item.set_rotation(90)
ax.set_ylim(0, 40)
ax.set_yticks([0, 10, 20, 30, 40])
plt.savefig("%s/figures/Fig1D_lower.pdf" % rootDir, format="pdf", bbox_inches = "tight")
def plot_1D_high(df, dfout, ctrl_mean):
### plot Ratio
dfp1 = df.copy()
dfp2 = dfout.copy()
dfp1['RatioScaled'] = dfp1['Ratio']/ctrl_mean
fig, ax = plt.subplots(figsize=(6,1.25))
sns.swarmplot(data=dfp1, x='Test', y='RatioScaled', size=3)
sns.boxplot(data=dfp2, x='Test', y='rM_FC', showbox=False , width = 0.5,
showcaps=False, color = "black")
plt.errorbar(dfp2['Test'], dfout['rM_FC'], yerr=dfout['rSD_FC'], capsize = 3, ls='none', color = "black")
ax=plt.gca()
for item in ax.get_xticklabels():
item.set_rotation(90)
ax.set_ylim(50, 1000)
ax.set_yticks([50, 250, 500, 750, 1000])
plt.savefig("%s/figures/Fig1D_high.pdf" % rootDir, format="pdf", bbox_inches = "tight")
def main():
df = input_luc_data()
dfout, fluc_ctrl_mean, ctrl_mean = calc_luc_ratios(df)
plot_1B(df, dfout, fluc_ctrl_mean)
plot_1C(df, dfout)
plot_1D_low(df, dfout, ctrl_mean)
plot_1D_high(df, dfout, ctrl_mean)
if __name__ == '__main__':
main()
|
#
# Base class for thermal effects
#
import pybamm
class BaseThermal(pybamm.BaseSubModel):
"""Base class for thermal effects
Parameters
----------
param : parameter class
The parameters to use for this submodel
**Extends:** :class:`pybamm.BaseSubModel`
"""
def __init__(self, param):
super().__init__(param)
def _get_standard_fundamental_variables(self, T, T_cn, T_cp):
param = self.param
T_n, T_s, T_p = T.orphans
# Compute the X-average over the current collectors by default.
# Note: the method 'self._x_average' is overwritten by models which do
# not include current collector effects, so that the average is just taken
# over the negative electrode, separator and positive electrode.
T_x_av = self._x_average(T, T_cn, T_cp)
T_vol_av = self._yz_average(T_x_av)
q = self._flux_law(T)
variables = {
"Negative current collector temperature": T_cn,
"Negative current collector temperature [K]": param.Delta_T * T_cn,
"X-averaged negative electrode temperature": pybamm.x_average(T_n),
"X-averaged negative electrode temperature [K]": param.Delta_T
* pybamm.x_average(T_n)
+ param.T_ref,
"Negative electrode temperature": T_n,
"Negative electrode temperature [K]": param.Delta_T * T_n + param.T_ref,
"X-averaged separator temperature": pybamm.x_average(T_s),
"X-averaged separator temperature [K]": param.Delta_T
* pybamm.x_average(T_s)
+ param.T_ref,
"Separator temperature": T_s,
"Separator temperature [K]": param.Delta_T * T_s + param.T_ref,
"X-averaged positive electrode temperature": pybamm.x_average(T_p),
"X-averaged positive electrode temperature [K]": param.Delta_T
* pybamm.x_average(T_p)
+ param.T_ref,
"Positive electrode temperature": T_p,
"Positive electrode temperature [K]": param.Delta_T * T_p + param.T_ref,
"Positive current collector temperature": T_cp,
"Positive current collector temperature [K]": param.Delta_T * T_cp,
"Cell temperature": T,
"Cell temperature [K]": param.Delta_T * T + param.T_ref,
"X-averaged cell temperature": T_x_av,
"X-averaged cell temperature [K]": param.Delta_T * T_x_av + param.T_ref,
"Volume-averaged cell temperature": T_vol_av,
"Volume-averaged cell temperature [K]": param.Delta_T * T_vol_av
+ param.T_ref,
"Heat flux": q,
"Heat flux [W.m-2]": q,
}
return variables
def _get_standard_coupled_variables(self, variables):
param = self.param
T = variables["Cell temperature"]
T_n, _, T_p = T.orphans
j_n = variables["Negative electrode interfacial current density"]
j_p = variables["Positive electrode interfacial current density"]
eta_r_n = variables["Negative electrode reaction overpotential"]
eta_r_p = variables["Positive electrode reaction overpotential"]
dUdT_n = variables["Negative electrode entropic change"]
dUdT_p = variables["Positive electrode entropic change"]
i_e = variables["Electrolyte current density"]
phi_e = variables["Electrolyte potential"]
i_s_n = variables["Negative electrode current density"]
i_s_p = variables["Positive electrode current density"]
phi_s_n = variables["Negative electrode potential"]
phi_s_p = variables["Positive electrode potential"]
# Ohmic heating in solid
Q_ohm_s_cn, Q_ohm_s_cp = self._current_collector_heating(variables)
Q_ohm_s_n = -pybamm.inner(i_s_n, pybamm.grad(phi_s_n))
Q_ohm_s_s = pybamm.FullBroadcast(0, ["separator"], "current collector")
Q_ohm_s_p = -pybamm.inner(i_s_p, pybamm.grad(phi_s_p))
Q_ohm_s = pybamm.Concatenation(Q_ohm_s_n, Q_ohm_s_s, Q_ohm_s_p)
# Ohmic heating in electrolyte
# TODO: change full stefan-maxwell conductivity so that i_e is always
# a Concatenation
if isinstance(i_e, pybamm.Concatenation):
# compute by domain if possible
i_e_n, i_e_s, i_e_p = i_e.orphans
phi_e_n, phi_e_s, phi_e_p = phi_e.orphans
Q_ohm_e_n = -pybamm.inner(i_e_n, pybamm.grad(phi_e_n))
Q_ohm_e_s = -pybamm.inner(i_e_s, pybamm.grad(phi_e_s))
Q_ohm_e_p = -pybamm.inner(i_e_p, pybamm.grad(phi_e_p))
Q_ohm_e = pybamm.Concatenation(Q_ohm_e_n, Q_ohm_e_s, Q_ohm_e_p)
else:
Q_ohm_e = -pybamm.inner(i_e, pybamm.grad(phi_e))
# Total Ohmic heating
Q_ohm = Q_ohm_s + Q_ohm_e
# Irreversible electrochemical heating
Q_rxn_n = j_n * eta_r_n
Q_rxn_p = j_p * eta_r_p
Q_rxn = pybamm.Concatenation(
*[
Q_rxn_n,
pybamm.FullBroadcast(0, ["separator"], "current collector"),
Q_rxn_p,
]
)
# Reversible electrochemical heating
Q_rev_n = j_n * (param.Theta ** (-1) + T_n) * dUdT_n
Q_rev_p = j_p * (param.Theta ** (-1) + T_p) * dUdT_p
Q_rev = pybamm.Concatenation(
*[
Q_rev_n,
pybamm.FullBroadcast(0, ["separator"], "current collector"),
Q_rev_p,
]
)
# Total heating
Q = Q_ohm + Q_rxn + Q_rev
# Compute the X-average over the current collectors by default.
# Note: the method 'self._x_average' is overwritten by models which do
# not include current collector effects, so that the average is just taken
# over the negative electrode, separator and positive electrode.
Q_ohm_av = self._x_average(Q_ohm, Q_ohm_s_cn, Q_ohm_s_cp)
Q_rxn_av = self._x_average(Q_rxn, 0, 0)
Q_rev_av = self._x_average(Q_rev, 0, 0)
Q_av = self._x_average(Q, Q_ohm_s_cn, Q_ohm_s_cp)
# Compute volume-averaged heat source terms
Q_ohm_vol_av = self._yz_average(Q_ohm_av)
Q_rxn_vol_av = self._yz_average(Q_rxn_av)
Q_rev_vol_av = self._yz_average(Q_rev_av)
Q_vol_av = self._yz_average(Q_av)
# Dimensional scaling for heat source terms
Q_scale = param.i_typ * param.potential_scale / param.L_x
variables.update(
{
"Ohmic heating": Q_ohm,
"Ohmic heating [W.m-3]": Q_ohm * Q_scale,
"X-averaged Ohmic heating": Q_ohm_av,
"X-averaged Ohmic heating [W.m-3]": Q_ohm_av * Q_scale,
"Volume-averaged Ohmic heating": Q_ohm_vol_av,
"Volume-averaged Ohmic heating [W.m-3]": Q_ohm_vol_av * Q_scale,
"Irreversible electrochemical heating": Q_rxn,
"Irreversible electrochemical heating [W.m-3]": Q_rxn * Q_scale,
"X-averaged irreversible electrochemical heating": Q_rxn_av,
"X-averaged irreversible electrochemical heating [W.m-3]": Q_rxn_av
* Q_scale,
"Volume-averaged irreversible electrochemical heating": Q_rxn_vol_av,
"Volume-averaged irreversible electrochemical heating "
+ "[W.m-3]": Q_rxn_vol_av * Q_scale,
"Reversible heating": Q_rev,
"Reversible heating [W.m-3]": Q_rev * Q_scale,
"X-averaged reversible heating": Q_rev_av,
"X-averaged reversible heating [W.m-3]": Q_rev_av * Q_scale,
"Volume-averaged reversible heating": Q_rev_vol_av,
"Volume-averaged reversible heating [W.m-3]": Q_rev_vol_av * Q_scale,
"Total heating": Q,
"Total heating [W.m-3]": Q * Q_scale,
"X-averaged total heating": Q_av,
"X-averaged total heating [W.m-3]": Q_av * Q_scale,
"Volume-averaged total heating": Q_vol_av,
"Volume-averaged total heating [W.m-3]": Q_vol_av * Q_scale,
}
)
return variables
def _flux_law(self, T):
raise NotImplementedError
def _unpack(self, variables):
raise NotImplementedError
def _current_collector_heating(self, variables):
raise NotImplementedError
def _yz_average(self, var):
raise NotImplementedError
def _x_average(self, var, var_cn, var_cp):
"""
Computes the X-average over the whole cell (including current collectors)
from the variable in the cell (negative electrode, separator,
positive electrode), negative current collector, and positive current
collector. This method is overwritten by models which do not include
current collector effects, so that the average is just taken over the
negative electrode, separator and positive electrode.
Note: we do this as we cannot create a single variable which is
the concatenation [var_cn, var, var_cp] since var_cn and var_cp share the
        same domain. (In the N+1D formulation the current collector variables are
assumed independent of x, so we do not make the distinction between negative
and positive current collectors in the geometry).
"""
        # When averaging the temperature for x-lumped or xyz-lumped models, var
# is a concatenation of broadcasts of the X- or Volume- averaged temperature.
# In this instance we return the (unmodified) variable corresponding to
# the correct average to avoid a ModelError (the unmodified variables must
# be the key in model.rhs)
if isinstance(var, pybamm.Concatenation) and all(
isinstance(child, pybamm.Broadcast) for child in var.children
):
# Create list of var.ids
var_ids = [child.children[0].id for child in var.children]
var_ids.extend([var_cn.id, var_cp.id])
            # If all var.ids are the same, then the variable is uniform in x, so
            # we can just return one of the values (arbitrarily var_cn here)
if len(set(var_ids)) == 1:
out = var_cn
else:
out = (
self.param.l_cn * var_cn
+ pybamm.x_average(var)
+ self.param.l_cp * var_cp
) / self.param.l
return out
def _effective_properties(self):
"""
        Computes the effective product of density and specific heat, and the
        effective thermal conductivity. These are computed differently
        depending upon whether current collectors are included or not. Default
behaviour is to assume the presence of current collectors. Due to the choice
of non-dimensionalisation, the dimensionless effective properties are equal
to 1 in the case where current collectors are accounted for.
"""
rho_eff = pybamm.Scalar(1)
lambda_eff = pybamm.Scalar(1)
return rho_eff, lambda_eff
|
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
if opt.lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
return scheduler
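# Usage sketch (the network, optimizer and option values below are
# placeholders, not defined in this file):
#   opt = argparse.Namespace(lr_policy='lambda', epoch_count=1, niter=100, niter_decay=100)
#   optimizer = torch.optim.Adam(net.parameters(), lr=2e-4)
#   scheduler = get_scheduler(optimizer, opt)
#   # after each training epoch: scheduler.step()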
def init_weights(net, init_type='xavier', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func)
def init_net(net, init_type='xavier', gpu_ids=[]):
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids)
init_weights(net, init_type)
return net
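# For example, init_net(netG, init_type='kaiming', gpu_ids=[0]) moves the
# network to GPU 0, wraps it in DataParallel and applies Kaiming
# initialization to its Conv and Linear layers.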
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='xavier', gpu_ids=[], use_tanh=True, classification=True):
netG = None
norm_layer = get_norm_layer(norm_type=norm)
if which_model_netG == 'resnet_9blocks':
netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif which_model_netG == 'resnet_6blocks':
netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif which_model_netG == 'unet_128':
netG = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif which_model_netG == 'unet_256':
netG = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif which_model_netG == 'siggraph':
netG = SIGGRAPHGenerator(input_nc, output_nc, norm_layer=norm_layer, use_tanh=use_tanh, classification=classification)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
return init_net(netG, init_type, gpu_ids)
def define_D(input_nc, ndf, which_model_netD,
n_layers_D=3, norm='batch', use_sigmoid=False, init_type='xavier', gpu_ids=[]):
netD = None
norm_layer = get_norm_layer(norm_type=norm)
if which_model_netD == 'basic':
netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
elif which_model_netD == 'n_layers':
netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
elif which_model_netD == 'pixel':
netD = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' %
which_model_netD)
return init_net(netD, init_type, gpu_ids)
##############################################################################
# Classes
##############################################################################
class HuberLoss(nn.Module):
def __init__(self, delta=.01):
super(HuberLoss, self).__init__()
self.delta = delta
def __call__(self, in0, in1):
mask = torch.zeros_like(in0)
mann = torch.abs(in0 - in1)
eucl = .5 * (mann**2)
mask[...] = mann < self.delta
# loss = eucl*mask + self.delta*(mann-.5*self.delta)*(1-mask)
loss = eucl * mask / self.delta + (mann - .5 * self.delta) * (1 - mask)
return torch.sum(loss, dim=1, keepdim=True)
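# Per-element behaviour of the loss above: for |in0 - in1| < delta it is
# quadratic, 0.5 * x**2 / delta, otherwise linear, |x| - 0.5 * delta; both
# branches evaluate to 0.5 * delta at the crossover point |x| = delta.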
class L1Loss(nn.Module):
def __init__(self):
super(L1Loss, self).__init__()
def __call__(self, in0, in1):
return torch.sum(torch.abs(in0 - in1), dim=1, keepdim=True)
class L2Loss(nn.Module):
def __init__(self):
super(L2Loss, self).__init__()
def __call__(self, in0, in1):
return torch.sum((in0 - in1)**2, dim=1, keepdim=True)
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0):
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(input)
def __call__(self, input, target_is_real):
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor)
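# Minimal usage sketch (netD and fake are placeholders, not defined here):
#   criterion = GANLoss(use_lsgan=True)
#   pred_fake = netD(fake)
#   loss_G = criterion(pred_fake, True)    # generator: fakes should score as real
#   loss_D_fake = criterion(pred_fake.detach(), False)    # discriminator: fakes are fake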
class SIGGRAPHGenerator(nn.Module):
def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, use_tanh=True, classification=True):
super(SIGGRAPHGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.classification = classification
use_bias = True
# Conv1
# model1=[nn.ReflectionPad2d(1),]
model1 = [nn.Conv2d(input_nc, 64, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model1+=[norm_layer(64),]
model1 += [nn.ReLU(True), ]
# model1+=[nn.ReflectionPad2d(1),]
model1 += [nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
model1 += [nn.ReLU(True), ]
model1 += [norm_layer(64), ]
# add a subsampling operation
# Conv2
# model2=[nn.ReflectionPad2d(1),]
model2 = [nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model2+=[norm_layer(128),]
model2 += [nn.ReLU(True), ]
# model2+=[nn.ReflectionPad2d(1),]
model2 += [nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
model2 += [nn.ReLU(True), ]
model2 += [norm_layer(128), ]
# add a subsampling layer operation
# Conv3
# model3=[nn.ReflectionPad2d(1),]
model3 = [nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model3+=[norm_layer(256),]
model3 += [nn.ReLU(True), ]
# model3+=[nn.ReflectionPad2d(1),]
model3 += [nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model3+=[norm_layer(256),]
model3 += [nn.ReLU(True), ]
# model3+=[nn.ReflectionPad2d(1),]
model3 += [nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
model3 += [nn.ReLU(True), ]
model3 += [norm_layer(256), ]
# add a subsampling layer operation
# Conv4
# model47=[nn.ReflectionPad2d(1),]
model4 = [nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model4+=[norm_layer(512),]
model4 += [nn.ReLU(True), ]
# model4+=[nn.ReflectionPad2d(1),]
model4 += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model4+=[norm_layer(512),]
model4 += [nn.ReLU(True), ]
# model4+=[nn.ReflectionPad2d(1),]
model4 += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
model4 += [nn.ReLU(True), ]
model4 += [norm_layer(512), ]
# Conv5
# model47+=[nn.ReflectionPad2d(2),]
model5 = [nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias), ]
# model5+=[norm_layer(512),]
model5 += [nn.ReLU(True), ]
# model5+=[nn.ReflectionPad2d(2),]
model5 += [nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias), ]
# model5+=[norm_layer(512),]
model5 += [nn.ReLU(True), ]
# model5+=[nn.ReflectionPad2d(2),]
model5 += [nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias), ]
model5 += [nn.ReLU(True), ]
model5 += [norm_layer(512), ]
# Conv6
# model6+=[nn.ReflectionPad2d(2),]
model6 = [nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias), ]
# model6+=[norm_layer(512),]
model6 += [nn.ReLU(True), ]
# model6+=[nn.ReflectionPad2d(2),]
model6 += [nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias), ]
# model6+=[norm_layer(512),]
model6 += [nn.ReLU(True), ]
# model6+=[nn.ReflectionPad2d(2),]
model6 += [nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias), ]
model6 += [nn.ReLU(True), ]
model6 += [norm_layer(512), ]
# Conv7
# model47+=[nn.ReflectionPad2d(1),]
model7 = [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model7+=[norm_layer(512),]
model7 += [nn.ReLU(True), ]
# model7+=[nn.ReflectionPad2d(1),]
model7 += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model7+=[norm_layer(512),]
model7 += [nn.ReLU(True), ]
# model7+=[nn.ReflectionPad2d(1),]
model7 += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
model7 += [nn.ReLU(True), ]
model7 += [norm_layer(512), ]
        # Conv8
model8up = [nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=use_bias)]
# model3short8=[nn.ReflectionPad2d(1),]
model3short8 = [nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model47+=[norm_layer(256),]
model8 = [nn.ReLU(True), ]
# model8+=[nn.ReflectionPad2d(1),]
model8 += [nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model8+=[norm_layer(256),]
model8 += [nn.ReLU(True), ]
# model8+=[nn.ReflectionPad2d(1),]
model8 += [nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
model8 += [nn.ReLU(True), ]
model8 += [norm_layer(256), ]
# Conv9
model9up = [nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=use_bias), ]
# model2short9=[nn.ReflectionPad2d(1),]
model2short9 = [nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# add the two feature maps above
# model9=[norm_layer(128),]
model9 = [nn.ReLU(True), ]
# model9+=[nn.ReflectionPad2d(1),]
model9 += [nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
model9 += [nn.ReLU(True), ]
model9 += [norm_layer(128), ]
# Conv10
model10up = [nn.ConvTranspose2d(128, 128, kernel_size=4, stride=2, padding=1, bias=use_bias), ]
# model1short10=[nn.ReflectionPad2d(1),]
model1short10 = [nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# add the two feature maps above
# model10=[norm_layer(128),]
model10 = [nn.ReLU(True), ]
# model10+=[nn.ReflectionPad2d(1),]
model10 += [nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1, padding=1, bias=use_bias), ]
model10 += [nn.LeakyReLU(negative_slope=.2), ]
# classification output
model_class = [nn.Conv2d(256, 529, kernel_size=1, padding=0, dilation=1, stride=1, bias=use_bias), ]
# regression output
model_out = [nn.Conv2d(128, 2, kernel_size=1, padding=0, dilation=1, stride=1, bias=use_bias), ]
if(use_tanh):
model_out += [nn.Tanh()]
self.model1 = nn.Sequential(*model1)
self.model2 = nn.Sequential(*model2)
self.model3 = nn.Sequential(*model3)
self.model4 = nn.Sequential(*model4)
self.model5 = nn.Sequential(*model5)
self.model6 = nn.Sequential(*model6)
self.model7 = nn.Sequential(*model7)
self.model8up = nn.Sequential(*model8up)
self.model8 = nn.Sequential(*model8)
self.model9up = nn.Sequential(*model9up)
self.model9 = nn.Sequential(*model9)
self.model10up = nn.Sequential(*model10up)
self.model10 = nn.Sequential(*model10)
self.model3short8 = nn.Sequential(*model3short8)
self.model2short9 = nn.Sequential(*model2short9)
self.model1short10 = nn.Sequential(*model1short10)
self.model_class = nn.Sequential(*model_class)
self.model_out = nn.Sequential(*model_out)
self.upsample4 = nn.Sequential(*[nn.Upsample(scale_factor=4, mode='nearest'), ])
self.softmax = nn.Sequential(*[nn.Softmax(dim=1), ])
def forward(self, input_A, input_B, mask_B):
conv1_2 = self.model1(torch.cat((input_A, input_B, mask_B), dim=1))
conv2_2 = self.model2(conv1_2[:, :, ::2, ::2])
conv3_3 = self.model3(conv2_2[:, :, ::2, ::2])
conv4_3 = self.model4(conv3_3[:, :, ::2, ::2])
conv5_3 = self.model5(conv4_3)
conv6_3 = self.model6(conv5_3)
conv7_3 = self.model7(conv6_3)
conv8_up = self.model8up(conv7_3) + self.model3short8(conv3_3)
conv8_3 = self.model8(conv8_up)
if(self.classification):
out_class = self.model_class(conv8_3)
conv9_up = self.model9up(conv8_3.detach()) + self.model2short9(conv2_2.detach())
conv9_3 = self.model9(conv9_up)
conv10_up = self.model10up(conv9_3) + self.model1short10(conv1_2.detach())
conv10_2 = self.model10(conv10_up)
out_reg = self.model_out(conv10_2)
else:
out_class = self.model_class(conv8_3.detach())
conv9_up = self.model9up(conv8_3) + self.model2short9(conv2_2)
conv9_3 = self.model9(conv9_up)
conv10_up = self.model10up(conv9_3) + self.model1short10(conv1_2)
conv10_2 = self.model10(conv10_up)
out_reg = self.model_out(conv10_2)
return (out_class, out_reg)
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
# Code and idea originally from Justin Johnson's architecture.
# https://github.com/jcjohnson/fast-neural-style/
class ResnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,
bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2**n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
return self.model(input)
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf=64,
norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
self.model = unet_block
def forward(self, input_A, input_B, mask_B):
# embed()
return self.model(torch.cat((input_A, input_B, mask_B), dim=1))
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False):
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def forward(self, input):
return self.model(input)
class PixelDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False):
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
if use_sigmoid:
self.net.append(nn.Sigmoid())
self.net = nn.Sequential(*self.net)
def forward(self, input):
return self.net(input)
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest.mock import Mock
import pytest
import torch
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch.utils.data import DataLoader
from flash import Task, Trainer
from flash.core.classification import Labels, LabelsState
from flash.core.data.data_module import DataModule
from flash.core.data.data_pipeline import DataPipeline, DataPipelineState, DefaultPreprocess
from flash.core.data.data_source import DefaultDataSources
from flash.core.data.process import Serializer, SerializerMapping
from flash.core.data.properties import ProcessState, Properties
def test_properties_data_pipeline_state():
"""Tests that ``get_state`` and ``set_state`` work for properties and that ``DataPipelineState`` is attached
correctly."""
class MyProcessState1(ProcessState):
pass
class MyProcessState2(ProcessState):
pass
class OtherProcessState(ProcessState):
pass
my_properties = Properties()
my_properties.set_state(MyProcessState1())
assert my_properties._state == {MyProcessState1: MyProcessState1()}
assert my_properties.get_state(OtherProcessState) is None
data_pipeline_state = DataPipelineState()
data_pipeline_state.set_state(OtherProcessState())
my_properties.attach_data_pipeline_state(data_pipeline_state)
assert my_properties.get_state(OtherProcessState) == OtherProcessState()
my_properties.set_state(MyProcessState2())
assert data_pipeline_state.get_state(MyProcessState2) == MyProcessState2()
def test_serializer():
"""Tests that ``Serializer`` can be enabled and disabled correctly."""
my_serializer = Serializer()
assert my_serializer.serialize('test') == 'test'
my_serializer.serialize = Mock()
my_serializer.disable()
assert my_serializer('test') == 'test'
my_serializer.serialize.assert_not_called()
my_serializer.enable()
my_serializer('test')
my_serializer.serialize.assert_called_once()
def test_serializer_mapping():
"""Tests that ``SerializerMapping`` correctly passes its inputs to the underlying serializers. Also checks that
state is retrieved / loaded correctly."""
serializer1 = Serializer()
serializer1.serialize = Mock(return_value='test1')
class Serializer1State(ProcessState):
pass
serializer2 = Serializer()
serializer2.serialize = Mock(return_value='test2')
class Serializer2State(ProcessState):
pass
serializer_mapping = SerializerMapping({'key1': serializer1, 'key2': serializer2})
assert serializer_mapping({'key1': 'serializer1', 'key2': 'serializer2'}) == {'key1': 'test1', 'key2': 'test2'}
serializer1.serialize.assert_called_once_with('serializer1')
serializer2.serialize.assert_called_once_with('serializer2')
with pytest.raises(ValueError, match='output must be a mapping'):
serializer_mapping('not a mapping')
serializer1_state = Serializer1State()
serializer2_state = Serializer2State()
serializer1.set_state(serializer1_state)
serializer2.set_state(serializer2_state)
data_pipeline_state = DataPipelineState()
serializer_mapping.attach_data_pipeline_state(data_pipeline_state)
assert serializer1._data_pipeline_state is data_pipeline_state
assert serializer2._data_pipeline_state is data_pipeline_state
assert data_pipeline_state.get_state(Serializer1State) is serializer1_state
assert data_pipeline_state.get_state(Serializer2State) is serializer2_state
def test_saving_with_serializers(tmpdir):
checkpoint_file = os.path.join(tmpdir, 'tmp.ckpt')
class CustomModel(Task):
def __init__(self):
super().__init__(model=torch.nn.Linear(1, 1), loss_fn=torch.nn.MSELoss())
serializer = Labels(["a", "b"])
model = CustomModel()
trainer = Trainer(fast_dev_run=True)
data_pipeline = DataPipeline(preprocess=DefaultPreprocess(), serializer=serializer)
data_pipeline.initialize()
model.data_pipeline = data_pipeline
assert isinstance(model.preprocess, DefaultPreprocess)
dummy_data = DataLoader(list(zip(torch.arange(10, dtype=torch.float), torch.arange(10, dtype=torch.float))))
trainer.fit(model, train_dataloader=dummy_data)
trainer.save_checkpoint(checkpoint_file)
model = CustomModel.load_from_checkpoint(checkpoint_file)
assert isinstance(model._data_pipeline_state, DataPipelineState)
assert model._data_pipeline_state._state[LabelsState] == LabelsState(["a", "b"])
class CustomPreprocess(DefaultPreprocess):
def __init__(self):
super().__init__(
data_sources={
"test": Mock(return_value="test"),
DefaultDataSources.TENSORS: Mock(return_value="tensors"),
},
default_data_source="test",
)
def test_data_source_of_name():
preprocess = CustomPreprocess()
assert preprocess.data_source_of_name("test")() == "test"
assert preprocess.data_source_of_name(DefaultDataSources.TENSORS)() == "tensors"
assert preprocess.data_source_of_name("tensors")() == "tensors"
assert preprocess.data_source_of_name("default")() == "test"
with pytest.raises(MisconfigurationException, match="available data sources are: test, tensor"):
preprocess.data_source_of_name("not available")
def test_available_data_sources():
preprocess = CustomPreprocess()
assert DefaultDataSources.TENSORS in preprocess.available_data_sources()
assert "test" in preprocess.available_data_sources()
assert len(preprocess.available_data_sources()) == 3
data_module = DataModule(preprocess=preprocess)
assert DefaultDataSources.TENSORS in data_module.available_data_sources()
assert "test" in data_module.available_data_sources()
assert len(data_module.available_data_sources()) == 3
|
from __future__ import annotations
import configparser
import os
import unittest
from unittest import TestCase
from unittest.mock import patch
from abiquo.client import Abiquo
from requests import Response
from abiquo_inventory import InventoryGenerator, InventoryGeneratorParameters, ConfigProvider
class ApiResponseCallable(object):
def load_file_content(self, path):
with open(path, encoding='utf-8-sig') as json_file:
return json_file.read()
def __call__(self, method: str, url: str, **kwargs):
response = Response()
response.status_code = 200
data = None
if url.endswith('/api/cloud/virtualdatacenters/8/action/virtualmachines') or url.endswith(
'/api/cloud/virtualmachines'):
data = self.load_file_content("fixtures/test_full_response/virtualmachines.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/233/network/nics'):
data = self.load_file_content("fixtures/test_full_response/233/nics.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/233/storage/disks'):
data = self.load_file_content("fixtures/test_full_response/233/disks.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/233/storage/volumes'):
data = self.load_file_content("fixtures/test_full_response/233/volumes.json")
if url.endswith('/api/admin/enterprises/1/datacenterrepositories/2/virtualmachinetemplates/106'):
data = self.load_file_content("fixtures/test_full_response/233/virtualmachinetemplate.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/233/tags'):
data = self.load_file_content("fixtures/test_full_response/233/tags.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/233/metadata'):
data = self.load_file_content("fixtures/test_full_response/233/metadata.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/234/network/nics'):
data = self.load_file_content("fixtures/test_full_response/234/nics.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/234/storage/disks'):
data = self.load_file_content("fixtures/test_full_response/234/disks.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/234/storage/volumes'):
data = self.load_file_content("fixtures/test_full_response/234/volumes.json")
if url.endswith('/api/admin/enterprises/1/datacenterrepositories/2/virtualmachinetemplates/106'):
data = self.load_file_content("fixtures/test_full_response/234/virtualmachinetemplate.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/234/tags'):
data = self.load_file_content("fixtures/test_full_response/234/tags.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/234/metadata'):
data = self.load_file_content("fixtures/test_full_response/234/metadata.json")
if data is None:
raise Exception('Invalid path: ' + url)
        response._content = data.encode()
return response
class InventoryGeneratorCase(unittest.TestCase):
def setUp(self) -> None:
self.mock_get_patcher = patch('requests.sessions.Session.request', new_callable=ApiResponseCallable)
self.mock_get = self.mock_get_patcher.start()
def tearDown(self) -> None:
self.mock_get_patcher.stop()
def test_it_returns_two_vms_full_response(self):
url = 'https://localhost/api'
api_user = ''
api_pass = ''
api = Abiquo(
url=url,
auth=(api_user, api_pass),
verify=False
)
parameters = InventoryGeneratorParameters(
default_net_iface='nic0',
vdc_id="8",
deployed_only=True,
public_ip_only=False,
get_metadata=True
)
generator = InventoryGenerator(parameters, api)
generated_inventory = generator.generate()
expectedInventory = {
'_meta': {
'hostvars': {
'abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain': {
'ansible_host': '10.60.13.203',
'ansible_user': ''
},
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain': {
'ansible_host': '10.60.13.202',
'ansible_user': ''}
}
},
'ABQ_6feae9be-0c2b-48c6-9501-462ef8941b12': [
'abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain'],
'template_ubuntu1804': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain',
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'vapp_wordpress': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain',
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'vdc_AWX-XAAS': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain',
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'vdc_AWX-XAAS_vapp_wordpress': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain',
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'hwprof_test': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain',
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'network_External_support': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain',
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'dstier_Default_Tier': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain',
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'tag_type_wordpressdb': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain'],
'ABQ_b93e3155-4e4a-445e-8523-e19834cf57dc': [
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'tag_type_wordpressvm': ['abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain']}
self.maxDiff = None
self.assertEqual(expectedInventory, generated_inventory)
class TestConfigProvider(TestCase):
def setUp(self) -> None:
config_parser = configparser.ConfigParser()
self.default_api_user_value = "user"
self.default_ssl_verify_value = False
config_parser.read_dict({
"auth": {
"apiuser": self.default_api_user_value,
"apipass": "pass"
},
"api": {
"ssl_verify": self.default_ssl_verify_value
}
})
self.config_provider = ConfigProvider(config_parser)
os.environ.clear()
def test_find_value_returns_env_value_if_present(self):
os.environ['APIUSER'] = 'admin'
value = self.config_provider.find_value('APIUSER', 'auth', 'apiuser')
self.assertEqual('admin', value)
def test_find_value_returns_none_if_value_is_missing(self):
value = self.config_provider.find_value('APIUSER', 'auth', 'fake')
self.assertEqual(None, value)
def test_find_value_returns_config_value_if_value_is_present(self):
value = self.config_provider.find_value('APIUSER', 'auth', 'apiuser')
self.assertEqual(self.default_api_user_value, value)
def test_find_boolean_value_returns_env_value_if_present(self):
os.environ['SSL_VERIFY'] = "true"
value = self.config_provider.find_boolean_value('SSL_VERIFY', 'api', 'ssl_verify')
self.assertEqual(True, value)
def test_find_boolean_value_returns_none_if_value_is_missing(self):
value = self.config_provider.find_boolean_value('SSL_VERIFY', 'api', 'ssl_verify')
self.assertEqual(self.default_ssl_verify_value, value)
def test_find_boolean_value_returns_config_value_if_value_is_present(self):
value = self.config_provider.find_boolean_value('SSL_VERIFY', 'api', 'ssl_verify')
self.assertEqual(False, value)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
__version__ = '1.8.1'
|
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@contact: sinotradition@gmail.com
@copyright: License according to the project license.
'''
NAME='chen2'
SPELL='chén'
CN='辰'
SEQ='5'
if __name__=='__main__':
pass
|
# Challenge 022
print("Challenge 022")
nome = str(input("Enter your name: "))
print("Your name in uppercase: {}\n Your name in lowercase: {}".format(nome.upper(), nome.lower()))
print("Your name has {} letters in total.".format(len(nome) - nome.count(' ')))
nome1 = nome.split()
print("Your first name: {}.".format(nome1[0]))
print("Your first name has {} letters.".format(len(nome1[0])))
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetLedgerResult:
"""
A collection of values returned by getLedger.
"""
def __init__(__self__, arn=None, deletion_protection=None, id=None, name=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
__self__.arn = arn
"""
Amazon Resource Name (ARN) of the ledger.
"""
if deletion_protection and not isinstance(deletion_protection, bool):
raise TypeError("Expected argument 'deletion_protection' to be a bool")
__self__.deletion_protection = deletion_protection
"""
Deletion protection on the QLDB Ledger instance. Set to `true` by default.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
The provider-assigned unique ID for this managed resource.
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
class AwaitableGetLedgerResult(GetLedgerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetLedgerResult(
arn=self.arn,
deletion_protection=self.deletion_protection,
id=self.id,
name=self.name)
def get_ledger(name=None, opts=None):
"""
Use this data source to fetch information about a Quantum Ledger Database.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.qldb.get_ledger(name="an_example_ledger")
```
:param str name: The friendly name of the ledger to match.
"""
__args__ = dict()
__args__['name'] = name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:qldb/getLedger:getLedger', __args__, opts=opts).value
return AwaitableGetLedgerResult(
arn=__ret__.get('arn'),
deletion_protection=__ret__.get('deletionProtection'),
id=__ret__.get('id'),
name=__ret__.get('name'))
|
from django.contrib import admin
from . import models
class CampaignAdmin(admin.ModelAdmin):
list_display = ('__str__', 'created', 'is_active', )
list_filter = ('is_active', 'created', )
class UserReferrerAdmin(admin.ModelAdmin):
list_display = ('__str__', 'reward', )
raw_id_fields = ('user', 'campaign', )
class ReferrerAdmin(admin.ModelAdmin):
list_display = ('__str__', 'user_referrer', )
raw_id_fields = ('registered_user', 'user_referrer', )
class UserReferrerStatsAdmin(admin.ModelAdmin):
raw_id_fields = ('user_referrer', )
admin.site.register(models.Campaign, CampaignAdmin)
admin.site.register(models.UserReferrer, UserReferrerAdmin)
admin.site.register(models.Referrer, ReferrerAdmin)
admin.site.register(models.UserReferrerStats, UserReferrerStatsAdmin)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function
import os
import requests
import pytest
import astropy.units as u
from astropy.table import Table
from ... import nrao
from ...utils.testing_tools import MockResponse
from ...utils import commons
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
DATA_FILES = {'votable': 'votable.xml',
'archive': 'archive_html.html',
}
@pytest.fixture
def patch_parse_coordinates(request):
def parse_coordinates_mock_return(c):
return c
try:
mp = request.getfixturevalue("monkeypatch")
except AttributeError: # pytest < 3
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(commons, 'parse_coordinates', parse_coordinates_mock_return)
return mp
@pytest.fixture
def patch_post(request):
try:
mp = request.getfixturevalue("monkeypatch")
except AttributeError: # pytest < 3
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(requests.Session, 'request', post_mockreturn)
return mp
def post_mockreturn(self, method, url, data=None, timeout=10, files=None,
params=None, headers=None, **kwargs):
if method != 'POST':
raise ValueError("A 'post request' was made with method != POST")
if params['PROTOCOL'] == "VOTable-XML":
filename = data_path(DATA_FILES['votable'])
elif params['PROTOCOL'] == "HTML" and params['QUERYTYPE'] == 'ARCHIVE':
filename = data_path(DATA_FILES['archive'])
else:
raise NotImplementedError("Test type not implemented")
    with open(filename, "rb") as fh:
        content = fh.read()
return MockResponse(content, **kwargs)
def test_query_region_async(patch_post, patch_parse_coordinates):
response = nrao.core.Nrao.query_region_async(
commons.ICRSCoordGenerator("04h33m11.1s 05d21m15.5s"),
radius='5d0m0s', equinox='B1950',
freq_low=1000 * u.kHz, freq_up=2000 * u.kHz,
get_query_payload=True)
assert response['SRAD'].startswith('5') and response['SRAD'].endswith('d')
assert response['EQUINOX'] == 'B1950'
assert response['OBSFREQ1'] == '1.0-2.0'
response = nrao.core.Nrao.query_region_async(
commons.ICRSCoordGenerator("04h33m11.1s 05d21m15.5s"))
assert response is not None
def test_query_region(patch_post, patch_parse_coordinates):
result = nrao.core.Nrao.query_region(
commons.ICRSCoordGenerator("04h33m11.1s 05d21m15.5s"))
assert isinstance(result, Table)
assert len(result) > 0
if 'Start Time' in result.colnames:
truth = b'83-Sep-27 09:19:30' if commons.ASTROPY_LT_4_1 else '83-Sep-27 09:19:30'
assert result['Start Time'][0] == truth
truth = b'04h33m11.096s' if commons.ASTROPY_LT_4_1 else '04h33m11.096s'
assert result['RA'][0] == truth
def test_query_region_archive(patch_post, patch_parse_coordinates):
result = nrao.core.Nrao.query_region(
commons.ICRSCoordGenerator("05h35.8m 35d43m"), querytype='ARCHIVE')
assert isinstance(result, Table)
assert len(result) == 230
assert result['Obs. Data Starts'][0] == '78-Jun-18 14:17:49'
def test_query_region_multiconfig(patch_post, patch_parse_coordinates):
# regression test for issue 1020
# All we're testing for is that the list-form telescope_config is parsed
# properly and doesn't crash; this does NOT test for correctness (see
# remote tests for that)
result = nrao.core.Nrao.query_region(
commons.ICRSCoordGenerator("05h35.8m 35d43m"), querytype='ARCHIVE',
telescope_config=['A', 'AB', 'B', 'BC', 'C', 'CD', 'D'],
)
assert isinstance(result, Table)
def test_query_region_lowercase(patch_post, patch_parse_coordinates):
# regression test for issue 1282
# test that the checker allows BnA, etc.
result = nrao.core.Nrao.query_region(
commons.ICRSCoordGenerator("05h35.8m 35d43m"), querytype='ARCHIVE',
telescope_config=['A', 'BnA', 'AB', 'B', 'BC', 'C', 'CD', 'D'],
)
assert isinstance(result, Table)
result = nrao.core.Nrao.query_region(
commons.ICRSCoordGenerator("05h35.8m 35d43m"), querytype='ARCHIVE',
obs_band=['K', 'Ka'],
)
assert isinstance(result, Table)
|
from django.core import mail
from django.urls import reverse
from lib.tests.utils import ClientTest
class BaseAccountsTest(ClientTest):
@classmethod
def setUpTestData(cls):
# Call the parent's setup (while still using this class as cls)
super().setUpTestData()
cls.user = cls.create_user()
def register(self, username='alice', email='alice123@example.com',
password1='GreatBarrier', password2='GreatBarrier',
first_name="Alice", last_name="Baker",
affiliation="Testing Society",
reason_for_registering="Trying labeling tools",
project_description="Labeling corals",
how_did_you_hear_about_us="Colleagues",
agree_to_data_policy=True,
username2=''):
data = dict(
username=username, email=email,
password1=password1, password2=password2,
first_name=first_name, last_name=last_name,
affiliation=affiliation,
reason_for_registering=reason_for_registering,
project_description=project_description,
how_did_you_hear_about_us=how_did_you_hear_about_us,
agree_to_data_policy=agree_to_data_policy,
username2=username2)
response = self.client.post(
reverse('django_registration_register'), data, follow=True)
return response
def register_and_get_activation_link(self):
"""Shortcut function for tests focusing on the activation step."""
self.register()
activation_email = mail.outbox[-1]
# Activation link: should be the first link (first "word" with '://')
# in the activation email.
activation_link = None
for word in activation_email.body.split():
if '://' in word:
activation_link = word
break
self.assertIsNotNone(activation_link)
return activation_link
def assert_sign_in_success(self, response, user):
# We should be past the sign-in page now.
self.assertTemplateNotUsed(response, 'registration/login.html')
# Check that we're signed in as the expected user.
# From http://stackoverflow.com/a/6013115
self.assertIn('_auth_user_id', self.client.session)
self.assertEqual(
int(self.client.session['_auth_user_id']), user.pk)
|
def max_profit(prices):
    """get the maximum profit from buying and selling stock"""
    max_profit = None
    lowest_price = None
    highest_price = None
    for price in prices:
        print("checking", price)
        # if we have a new lowest price, grab it and reset our highest
        if lowest_price is None or price < lowest_price:
            lowest_price = price
            highest_price = None
            print("\tnew lowest_price", price)
        # if we have a new highest, grab it and calculate the profit
        elif highest_price is None or price > highest_price:
            highest_price = price
            profit = highest_price - lowest_price
            print("\tnew highest_price", price)
            print("\tpossible profit", profit)
            # check for a new max_profit
            if max_profit is None or max_profit < profit:
                max_profit = profit
                print("\tnew max_profit", profit)
    return max_profit or 0
prices = [10, 5, 3, 7, 11, 1, 4]
bad_prices = [5, 4, 3, 2, 1]
profit = max_profit(prices)
print("maximum profit:", profit)
|
# flake8: noqa
"""
Autobahn App API
    What is happening on Germany's federal highways? An API for up-to-date administrative data on roadworks, traffic jams and charging stations, plus access to traffic surveillance cameras and many other datasets. # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
__version__ = "1.0.0"
# import ApiClient
from deutschland.autobahn.api_client import ApiClient
# import Configuration
from deutschland.autobahn.configuration import Configuration
# import exceptions
from deutschland.autobahn.exceptions import OpenApiException
from deutschland.autobahn.exceptions import ApiAttributeError
from deutschland.autobahn.exceptions import ApiTypeError
from deutschland.autobahn.exceptions import ApiValueError
from deutschland.autobahn.exceptions import ApiKeyError
from deutschland.autobahn.exceptions import ApiException
|
import urllib.request, json
import csv
import codecs
import sys
if(len(sys.argv) != 2):
print("No paramaters to run.")
exit()
header = {"Authorization": sys.argv[1]}
AllDATA = {"US": {}}
#['Province_State', 'Country_Region', 'Last_Update', 'Lat', 'Long_', 'Confirmed', 'Deaths', 'Recovered', 'Active', 'FIPS', 'Incident_Rate', 'People_Tested', 'People_Hospitalized', 'Mortality_Rate', 'UID', 'ISO3', 'Testing_Rate', 'Hospitalization_Rate']
def do_stuff(DATA):
if(DATA[1] != 'US'):
return
if(DATA[0] != 'Recovered'):
if(DATA[0] not in AllDATA[DATA[1]]):
AllDATA[DATA[1]][DATA[0]] = {}
AllDATA[DATA[1]][DATA[0]][DATA[2]] = {
'Confirmed': DATA[5],
'Deaths' : DATA[6],
'Recovered': DATA[7],
'Active': DATA[8],
'FIPS': DATA[9],
'Incident_Rate': DATA[10],
'People_Tested': DATA[11],
'People_Hospitalized': DATA[12],
'Mortality_Rate': DATA[13],
'UID': DATA[14],
'ISO3': DATA[15],
'Testing_Rate': DATA[16],
'Hospitalization_Rate': DATA[17]}
data = [] #temp data
## get list of files.
request = urllib.request.Request("https://api.github.com/repos/CSSEGISandData/COVID-19/contents/csse_covid_19_data/csse_covid_19_daily_reports_us/", headers=header)
with urllib.request.urlopen(request) as response:
data = json.loads(response.read().decode())
# drop the last entry of the directory listing (it is not a daily-report CSV)
data.pop()
print("Starting Proccess...")
##Convert data in each file to json
#count = 0#for debuging
for i in data:
#for debuging
#count+=1
#print(str(round((count/len(data))*100))+ "%")
ftpstream = urllib.request.urlopen(i["download_url"]+"?u=ocampossoto")
csvfile = csv.reader(codecs.iterdecode(ftpstream, 'utf-8'))
for line in csvfile:
do_stuff(line)
print("Complete writing file...")
with open('CovidDataUS.json', 'w') as fp:
json.dump(AllDATA, fp)
|
import math
import random
import re
from pathlib import Path
from .path import wordlist_path
from .normalize import slugify
ASCII_a = 97
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
LOWERCASE_VOWELS = 'aeiou'
LOWERCASE_CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
# An estimate of the frequencies of letters in English, as a length-26
# vector of proportions
LETTER_FREQS = [
0.08331452, 0.01920814, 0.04155464, 0.03997236, 0.11332581,
0.01456622, 0.02694035, 0.02517641, 0.08116646, 0.00305369,
0.00930784, 0.05399477, 0.02984008, 0.06982714, 0.06273243,
0.0287359 , 0.00204801, 0.07181286, 0.07714659, 0.06561591,
0.03393991, 0.01232891, 0.01022719, 0.0037979 , 0.01733258,
0.00303336
]
# Loaded only if needed.
# Use 'get_bigram_freqs' and 'get_trigram_freqs' to access these.
BIGRAM_FREQS = []
TRIGRAM_FREQS = []
def letters_to_vec(letters):
"""
Convert an iterator of lowercase letters, such as a 'slug' form, into
a length-26 vector indicating how often those letters occur.
"""
vec = [0] * 26
for letter in letters:
index = ord(letter) - ASCII_a
vec[index] += 1
return vec
def to_proportion(vec):
"""
Convert a vector that counts occurrences to a vector of proportions
(whose sum is 1).
"""
vecsum = sum(vec)
return [value / vecsum for value in vec]
def alphagram(slug):
"""
Given text in 'slug' form, return its alphagram, which is the string of
its letters sorted in alphabetical order.
"""
return ''.join(sorted(slug))
def alphabytes(slug):
"""
This representation is used internally to Solvertools. It's like an
alphagram, but represents up to 7 occurrences of a letter as unique bytes
that can be searched for in a specially-prepared word list.
This allows simple regexes to match "a word with at most two e's and at
most three t's", a search which would be very complex and inefficient in
the usual string representation.
"""
alpha = alphagram(slug)
current_letter = None
rank = 0
bytenums = []
for letter in alpha:
if letter == current_letter:
rank += 1
else:
rank = 0
if rank < 6:
num = ord(letter) - 96 + (rank + 2) * 32
else:
num = ord(letter) - 96
bytenums.append(num)
current_letter = letter
return bytes(bytenums)
def alphabytes_to_alphagram(abytes):
"""
Convert the specialized 'alphabytes' form described above to an ordinary,
printable alphagram.
"""
letters = [chr(96 + byte % 32) for byte in abytes]
return ''.join(letters)
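# Round-trip sketch of the encoding above:
#   alphagram('letter') == 'eelrtt'
#   alphabytes_to_alphagram(alphabytes('letter')) == 'eelrtt'
# Each repetition of a letter maps to a distinct byte, which is what lets a
# simple regex bound how many times a letter may occur.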
def anagram_diff(a1, a2):
"""
Find the difference between two multisets of letters, in a way specialized
for anagramming.
Returns a pair containing:
- the alphagram of letters that remain
- the number of letters in a2 that are not found in a1, which is the number
of "wildcards" to consume
"""
adiff = ''
wildcards_used = 0
for letter in set(a2) - set(a1):
diff = a2.count(letter) - a1.count(letter)
wildcards_used += diff
for letter in sorted(set(a1)):
diff = (a1.count(letter) - a2.count(letter))
if diff < 0:
wildcards_used -= diff
else:
adiff += letter * diff
return adiff, wildcards_used
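# Example: anagram_diff('aabbc', 'abd') == ('abc', 1) -- one 'a', one 'b' and
# the 'c' remain, while the 'd' in the second multiset consumes one wildcard.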
def diff_both(a1, a2):
"""
Compare two multisets of letters, returning:
- The alphagram of letters in a1 but not in a2
- The alphagram of letters in a2 but not in a1
"""
diff1 = ''
diff2 = ''
for letter in set(a2) - set(a1):
diff = a2.count(letter) - a1.count(letter)
diff2 += letter * diff
for letter in sorted(set(a1)):
diff = (a1.count(letter) - a2.count(letter))
if diff < 0:
            diff2 += letter * -diff
else:
diff1 += letter * diff
return diff1, diff2
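# Example: diff_both('aabc', 'abbd') == ('ac', 'db') -- the spare 'a' and the
# 'c' appear only in the first multiset, the 'd' and the spare 'b' only in
# the second.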
def diff_exact(full, part):
"""
Find the difference between two multisets of letters, returning the
alphagram of letters that are in `full` but not in `part`. If any letters
are in `part` but not in `full`, raises an error.
"""
diff1, diff2 = diff_both(full, part)
if diff2:
raise ValueError("Letters were left over: %s" % diff2)
return diff1
def anahash(slug):
    """
    Return the letters that occur in `slug` more often than their average
    English proportion, in alphabetical order. Anagrams of each other always
    share the same anahash, so it serves as a coarse anagram signature.
    """
if slug == '':
return ''
vec = to_proportion(letters_to_vec(slug))
anomalies = []
for i in range(26):
if vec[i] > LETTER_FREQS[i]:
anomalies.append(i + ASCII_a)
return bytes(list(anomalies)).decode('ascii')
def anagram_cost(letters):
"""
Return a value that's probably larger for sets of letters that are
harder to anagram.
I came up with this formula in the original version of anagram.js. Much
like most of anagram.js, I can't entirely explain why it is the way it
is.
The 'discrepancy' of a set of letters is a vector indicating how far
it is from the average proportions of a set of letters. These values
are raised to the fourth power and summed to form one factor of this
cost formula. The other factor is the number of letters.
"""
if letters == '':
return 0
n_letters = len(letters)
vec = to_proportion(letters_to_vec(letters))
sq_cost = 0.0
for i in range(26):
discrepancy = (vec[i] / LETTER_FREQS[i] - 1) ** 2
sq_cost += discrepancy
return sq_cost ** 0.5 * n_letters
VOWELS_RE = re.compile('[aeiouy]')
def consonantcy(slug):
"""
Given a word in 'slug' form, return just the consonants. 'y' is always
considered a vowel and 'w' is always considered a consonant, regardless
of context.
"""
return VOWELS_RE.sub('', slug)
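# For example, consonantcy('rhythm') == 'rhthm': the 'y' is stripped as a
# vowel while both 'h's are kept.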
def random_letters(num):
"""
Get `num` random letters that are distributed like English. Useful for
testing against a null hypothesis.
"""
letters = []
for i in range(num):
rand = random.random()
choice = '#'
for j in range(26):
if rand < LETTER_FREQS[j]:
choice = chr(j + ord('a'))
break
else:
rand -= LETTER_FREQS[j]
letters.append(choice)
return ''.join(letters)
def only_vowels(word, preserve_spaces=False):
return ''.join([k for k in word if k in LOWERCASE_VOWELS \
or (preserve_spaces and k == ' ')])
def only_consonants(word, preserve_spaces=False):
return ''.join([k for k in word if k not in LOWERCASE_VOWELS \
or (preserve_spaces and k == ' ')])
def get_bigram_freqs():
global BIGRAM_FREQS
if not BIGRAM_FREQS:
BIGRAM_FREQS = [[-1000 for _ in range(26)] for _ in range(26)]
total_bigrams = 0
with open(wordlist_path('letter_bigrams.txt'), 'r') as fd:
lines = [l.split(' ') for l in fd.readlines()]
for line in lines:
bigram, freq = line
total_bigrams += int(freq)
for line in lines:
line[1] = math.log(int(line[1]) / total_bigrams)
ch1 = ord(line[0][0].lower()) - ord('a')
ch2 = ord(line[0][1].lower()) - ord('a')
BIGRAM_FREQS[ch1][ch2] = line[1]
return BIGRAM_FREQS
def get_trigram_freqs():
global TRIGRAM_FREQS
if not TRIGRAM_FREQS:
TRIGRAM_FREQS = [[[-1000 for _ in range(26)] for _ in range(26)] for _ in range(26)]
total_trigrams = 0
with open(wordlist_path('letter_trigrams.txt'), 'r') as fd:
lines = [l.split(' ') for l in fd.readlines()]
for line in lines:
trigram, freq = line
total_trigrams += int(freq)
for line in lines:
line[1] = math.log(int(line[1]) / total_trigrams)
ch1 = ord(line[0][0].lower()) - ord('a')
ch2 = ord(line[0][1].lower()) - ord('a')
ch3 = ord(line[0][2].lower()) - ord('a')
TRIGRAM_FREQS[ch1][ch2][ch3] = line[1]
return TRIGRAM_FREQS
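# Usage sketch: look up the log-probability of the bigram 'th' (indices are
# 0-based letter positions; unseen pairs keep the -1000 floor):
#   freqs = get_bigram_freqs()
#   log_p = freqs[ord('t') - ord('a')][ord('h') - ord('a')]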
|
# Copyright (C) 2018
# This notice is to be included in all relevant source files.
# "Brandon Goldbeck" <bpg@pdx.edu>
# “Anthony Namba” <anamba@pdx.edu>
# “Brandon Le” <lebran@pdx.edu>
# “Ann Peake” <peakean@pdx.edu>
# “Sohan Tamang” <sohan@pdx.edu>
# “An Huynh” <an35@pdx.edu>
# “Theron Anderson” <atheron@pdx.edu>
# This software is licensed under the MIT License.
# See LICENSE file for the full text.
import numpy
from OpenGL.GL import *
from src.rendering.basic_material import BasicMaterial
from src.rendering.scene_object import SceneObject
from src.rendering.rendering_engine import RenderingEngine
class BasicMeshObject(SceneObject):
"""This class serves as a derived class for a SceneObject that contains mesh data
for OpenGL rendering context.
"""
def __init__(self, tag, mesh):
"""Constructor for a BasicMeshObject.
:param tag: The tag str to recognize this object.
:param mesh: The Mesh object to use for OpenGL rendering.
"""
SceneObject.__init__(self, tag)
self.mesh_data = mesh
        # Interleave the vertex buffer: each vertex is packed as position
        # (x, y, z), texture coordinates (u, v), then the face normal
        # (nx, ny, nz); the three vertices of a triangle share one normal.
        triangle_data = []
        uv_coords = ((0.000059, 1.0 - 0.000059),
                     (0.000103, 1.0 - 0.336048),
                     (0.335973, 1.0 - 0.335903))
        for i in range(len(self.mesh_data.normals)):
            normal = self.mesh_data.normals[i]
            vertices = (self.mesh_data.v2[i], self.mesh_data.v1[i], self.mesh_data.v0[i])
            for vertex, (u, v) in zip(vertices, uv_coords):
                triangle_data.extend([vertex[0], vertex[1], vertex[2],
                                      u, v,
                                      normal[0], normal[1], normal[2]])
if glInitGl42VERSION():
RenderingEngine.opengl_success = True
self.vao = glGenVertexArrays(1)
self.bind()
self.material = BasicMaterial(numpy.array(triangle_data, dtype=numpy.float32))
self.unbind()
else:
RenderingEngine.opengl_success = False
def bind(self):
"""Bind the vertex array object.
:return: None
"""
if RenderingEngine.opengl_success:
glBindVertexArray(self.vao)
def unbind(self):
"""Unbind the vertex array object.
:return:
"""
if RenderingEngine.opengl_success:
glBindVertexArray(0)
def draw(self):
"""Draw the vertex buffer.
:return: None
"""
if RenderingEngine.opengl_success:
self.bind()
glDrawArrays(GL_TRIANGLES, 0, len(self.mesh_data.normals) * 3)
self.unbind()
def update(self):
"""Update the material values for the vertex buffers.
:return: None
"""
if RenderingEngine.opengl_success:
self.material.set_view_matrix(RenderingEngine.camera.get_view_matrix())
self.material.set_model_matrix(self.transform.get_trs_matrix())
def get_mesh_data(self):
"""Retrieve the stored mesh data.
:return: The Mesh data stored in this object.
"""
return self.mesh_data
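# Minimal usage sketch (illustrative; assumes a Mesh-like object exposing
# v0/v1/v2 vertex arrays plus per-triangle normals, and an active OpenGL
# 4.2 context with a camera set on RenderingEngine):
#
#     obj = BasicMeshObject('part', mesh)
#     obj.update()  # push camera/model matrices to the material
#     obj.draw()    # binds the VAO and issues glDrawArrays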
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribeElasticityAssurancesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeElasticityAssurances')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Platform(self):
return self.get_query_params().get('Platform')
def set_Platform(self,Platform):
self.add_query_param('Platform',Platform)
def get_NextToken(self):
return self.get_query_params().get('NextToken')
def set_NextToken(self,NextToken):
self.add_query_param('NextToken',NextToken)
def get_InstanceType(self):
return self.get_query_params().get('InstanceType')
def set_InstanceType(self,InstanceType):
self.add_query_param('InstanceType',InstanceType)
def get_InstanceChargeType(self):
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self,InstanceChargeType):
self.add_query_param('InstanceChargeType',InstanceChargeType)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_PrivatePoolOptionsIds(self):
return self.get_query_params().get('PrivatePoolOptions.Ids')
def set_PrivatePoolOptionsIds(self,PrivatePoolOptionsIds):
self.add_query_param('PrivatePoolOptions.Ids',PrivatePoolOptionsIds)
def get_MaxResults(self):
return self.get_query_params().get('MaxResults')
def set_MaxResults(self,MaxResults):
self.add_query_param('MaxResults',MaxResults)
def get_ZoneId(self):
return self.get_query_params().get('ZoneId')
def set_ZoneId(self,ZoneId):
self.add_query_param('ZoneId',ZoneId)
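# Minimal usage sketch (assumes valid credentials and region; AcsClient and
# do_action_with_exception are the standard aliyunsdkcore entry points):
#
#     from aliyunsdkcore.client import AcsClient
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = DescribeElasticityAssurancesRequest()
#     request.set_MaxResults(10)
#     response = client.do_action_with_exception(request)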
|
# Copyright 2017 The Wallaroo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
This is an example application that takes "votes" for different letters of
the alphabet and keeps a running total of the votes received for each
letter. For each incoming message, it sends out a message with the total
votes for that letter. The total number of votes for each letter is stored
together in a single state object.
"""
import struct
import wallaroo
def application_setup(args):
in_host, in_port = wallaroo.tcp_parse_input_addrs(args)[0]
out_host, out_port = wallaroo.tcp_parse_output_addrs(args)[0]
votes = wallaroo.source("alphabet",
wallaroo.TCPSourceConfig(in_host, in_port, decoder))
pipeline = (votes
.key_by(extract_letter)
.to(add_votes)
.to_sink(wallaroo.TCPSinkConfig(out_host, out_port, encoder)))
return wallaroo.build_application("alphabet", pipeline)
class Votes(object):
def __init__(self, letter, votes):
self.letter = letter
self.votes = votes
class AllVotes(object):
def __init__(self):
self.votes_by_letter = {}
def update(self, votes):
letter = votes.letter
vote_count = votes.votes
votes_for_letter = self.votes_by_letter.get(letter, Votes(letter, 0))
votes_for_letter.votes += vote_count
self.votes_by_letter[letter] = votes_for_letter
def get_votes(self, letter):
vbl = self.votes_by_letter[letter]
# Return a new Votes instance here!
return Votes(letter, vbl.votes)
@wallaroo.key_extractor
def extract_letter(data):
return data.letter
@wallaroo.decoder(header_length=4, length_fmt=">I")
def decoder(bs):
(letter, vote_count) = struct.unpack(">sI", bs)
    letter = letter.decode("utf-8")  # for Python3 compatibility
return Votes(letter, vote_count)
@wallaroo.state_computation(name="add votes", state=AllVotes)
def add_votes(data, state):
state.update(data)
return state.get_votes(data.letter)
@wallaroo.encoder
def encoder(data):
# data is a Votes
return ("%s => %d\n" % (data.letter, data.votes)).encode()
|
# flake8: ignore=E501
from logger import RoboLogger
import threading
from ev3dev2.motor import SpeedDPS # , SpeedPercent
from mymotor import MyMotor
from gyrosensor import GyroSensor
import traceback
# import asyncio
import time
from exceptions import ControlerRunningTooLongException, MotorRunningFastException, MotorUrgentStopException, OutOfBoundAngle
from constant import JOINT_MAX_SPEED_PERCENT, MOVEMENT_TIME_SCALING_METHOD_DEFAULT, STOP_ACTION_HOLD # noqa : F401
from numpy import degrees, radians
from constant import LOGGER_LINK_MAIN, LOGGER_LINK_MTA, LOGGER_LINK_RESET
log = RoboLogger.getLogger()
class ArmLink(object):
"""
    Implementation of an arm joint (mainly the motor and methods around it being an arm joint vs a wheel for example)
"""
__slots__ = [
'linkID',
'motor',
'urgentstop',
'gyroAngleVerticalOffsetX',
'_odoOffset',
'_urgentStop',
'virtual',
'initOrder',
'jointScrewB',
'_gyroSensor',
'gearRatio',
'angleLimit',
'depLink',
'stopAction',
'_polarity',
'_motorMinSpeedDPS',
'_motorMaxSpeedDPS',
'maxDelta',
'_killThread',
'lock',
'wiggleDeg'
]
def __init__(self,
jointScrewB,
angleLimit,
linkID,
motorMinSpeedDPS=None,
motorMaxSpeedDPS=None,
maxDelta=None,
gearRatio=None,
initOrder=None,
gyroAngleVerticalOffsetX=None,
gyroLinkDep=None,
gyroSensor=None,
motorAddress=None,
stopAction=STOP_ACTION_HOLD,
polarity=None,
virtualLink=False,
wiggleDeg=5.0
):
'''
Description:constructor for the robot arm link
param:jointScrewB:desc:part of the B list for the joint in question
param:jointScrewB:type:[float, float, float, float, float, float]
        param:angleLimit:desc:angle limits of the joint in radians, min and max
        param:angleLimit:type: [float, float]
        param:linkID:desc:id of the link
        param:linkID:type:int
param:motorMaxSpeedDPS:desc:max speed in DPS for motor operation
param:motorMaxSpeedDPS:type:int
param:motorMinSpeedDPS:desc:min speed in DPS for motor operation
param:motorMinSpeedDPS:type:int
param:maxDelta:desc:when at this angle delta for motion, motor
will be start at max speed.
param:gearRatio:desc:gear ratio - used to estimate ETA for motion
param:gearRatio:type:float
param:initOrder:desc:order in which the links were initialized
param:initOrder:type:int
param:gyroAngleVerticalOffsetX:desc:degrees offset from vertical line to the zero for the robot (or None if virtual joint)
param:gyroAngleVerticalOffsetX:type:float
        param:gyroLinkDep:desc:value of the link that this depends on.
        param:gyroLinkDep:type:int
        param:gyroSensor:desc:associated gyrosensor class
        param:gyroSensor:type:class `GyroSensor`
        param:motorAddress:desc:address of the motor, passed to MyMotor
        param:motorAddress:type:str
        param:stopAction:desc:STOP_ACTION_HOLD, STOP_ACTION_COAST, STOP_ACTION_BRAKE, says what happens
            when we're stopping the motor.
        param:stopAction:type:str
        param:polarity:desc:RoboMotor.POLARITY_INVERSED or RoboMotor.POLARITY_NORMAL so that + is in
            one direction and - is in the other direction
        param:polarity:type:str
        param:virtualLink:desc:if the link has a motor, it's not virtual; if not, it is (like simulated links for the robot base)
        param:virtualLink:type:bool
        param:wiggleDeg:desc:used to determine precision of motion, i.e. at what point a set point counts as reached.
            If the target is 40 degrees and wiggleDeg=2, anything between 38 and 42 is ok.
        param:wiggleDeg:type:float
'''
LOGGER_LINK_MAIN_INSTANCE = LOGGER_LINK_MAIN.format(linkID)
self.linkID = linkID
self.angleLimit = angleLimit
if not virtualLink:
log.debug(LOGGER_LINK_MAIN_INSTANCE, 'Initializing physical link...')
self.motor = MyMotor(address=motorAddress)
if self.motor: # don't run if motor is set to None (testing mode)
self.polarity = MyMotor.POLARITY_INVERSED if polarity is False else MyMotor.POLARITY_NORMAL
self.motorMaxSpeedDPS = motorMaxSpeedDPS
self.motorMinSpeedDPS = motorMinSpeedDPS
self.maxDelta = maxDelta
self._gyroSensor = GyroSensor(sensorPort=gyroSensor['input'],
linkID=self.linkID,
i2cBus=int(gyroSensor['i2cbus']),
i2cAddress=int(gyroSensor['address'], 16),
gyroFilterValue=1,
gyroSensitivity='2G')
self.stopAction = stopAction
self.motor.stop(stop_action=self.stopAction)
self.urgentStop = False
self.virtual = False
self.gearRatio = gearRatio
self.gyroAngleVerticalOffsetX = gyroAngleVerticalOffsetX
self.depLink = gyroLinkDep
self.wiggleDeg = wiggleDeg
if self.depLink is None:
self.odoOffset = self.gyroAngleVerticalOffsetX
else:
log.debug(LOGGER_LINK_MAIN_INSTANCE, 'Initializing virtual link...')
self.virtual = True
self.initOrder = initOrder
self.jointScrewB = jointScrewB
self.lock = threading.Lock()
log.info(LOGGER_LINK_MAIN_INSTANCE, 'Done link constructor')
# region properties, setters and getters
@property
def motorMinSpeedDPS(self):
return self._motorMinSpeedDPS
@motorMinSpeedDPS.setter
def motorMinSpeedDPS(self, value):
        if value is not None and value > 0:
            self._motorMinSpeedDPS = int(value)
@property
def motorMaxSpeedDPS(self):
return self._motorMaxSpeedDPS
@motorMaxSpeedDPS.setter
def motorMaxSpeedDPS(self, value):
        if value is not None and value > 0:
            self._motorMaxSpeedDPS = int(value)
@property
def killThread(self):
return self._killThread
@killThread.setter
    def killThread(self, value):
        self._killThread = value
        self.gyroSensor.killThread = value
@property
def gyroSensor(self):
return self._gyroSensor
@gyroSensor.setter
def gyroSensor(self, value):
if isinstance(value, GyroSensor):
self._gyroSensor = value
else:
raise("Wrong class for the angle sensor.")
@property
def armAngleDegX(self):
if self.depLink:
            # add readings from dependents
self.odoOffset = self.depLink.odoOffset + self.depLink.armAngleDegX
return (90 - self.odoOffset + self.gyroSensor.currentAngleXDeg)
@property
def armAngleRadX(self):
return radians(self.armAngleDegX)
@property
def armAngleDegY(self):
return self._gyroSensor.currentAngleYDeg
@property
def armAngleDegZ(self):
return self._gyroSensor.currentAngleZDeg
@property
def odoOffset(self):
return self._odoOffset
@odoOffset.setter
def odoOffset(self, value):
        try:
            self._odoOffset = float(value)
        except (TypeError, ValueError):
            raise TypeError("wrong type, has to be float")
@property
def polarity(self):
return self._polarity
@polarity.setter
def polarity(self, value):
self._polarity = value
self.motor.polarity = self._polarity
@property
def position(self):
        return self.motor.position
@property
def urgentStop(self):
return self._urgentStop
@urgentStop.setter
def urgentStop(self, value):
if value:
self.motor.off()
self._urgentStop = value
# endregion setters and getters
def resetLink(self):
'''
Description:resets the motors.
params:None
'''
LOGGER_LINK_RESET_INSTANCE = LOGGER_LINK_RESET.format(self.linkID)
log.debug(LOGGER_LINK_RESET_INSTANCE, 'checking if link is virtual...')
if not self.virtual:
log.debug(LOGGER_LINK_RESET_INSTANCE, 'link non virtual, resetting.')
self.motor.reset()
log.debug(LOGGER_LINK_RESET_INSTANCE, 'Link motor reset.')
# Looks useless... but the setter resets the motor's polarity
self.motor.stop(stop_action=self.stopAction)
self.polarity = self.polarity
def moveToAngle(self,
targetAngleRadians,
dryrun=False):
'''
Description:meant to move link to position (in radians) at a certain fixed speed
param:targetAngleRadians:desc:target angle in radians
param:targetAngleRadians:type:float
# param:stop_action:desc:what to do when finished ('coast', 'brake', 'hold'
# choices of ev3dev2.motor.LargeMotor.STOP_ACTION_COAST
# ev3dev2.motor.LargeMotor.STOP_ACTION_BRAKE
# ev3dev2.motor.LargeMotor.STOP_ACTION_HOLD
param:stop_action:type:str
param:dryrun:desc:if true, won't run, if false, will do.
param:dryrun:type:bool
'''
LOGGER_LINK_MTA_INSTANCE = LOGGER_LINK_MTA.format(self.linkID)
try:
if targetAngleRadians < self.angleLimit[0] or targetAngleRadians > self.angleLimit[1]:
raise OutOfBoundAngle('Setpoint does not fit within acceptable bounds', targetAngle=targetAngleRadians)
with self.lock:
# 0. Check for urgent stop
if self.urgentStop:
raise MotorUrgentStopException(self.motor.description)
# 1. Estimate time to target:
currentAngleDegrees = self.armAngleDegX
targetAngleDegrees = degrees(targetAngleRadians)
eta = abs(targetAngleDegrees - currentAngleDegrees) / abs(self.motorMinSpeedDPS) * self.gearRatio
# 2. Initialize variables
idx = 0
startTime = time.time()
initialMotorPosition = self.motor.position
maxLoopTimeFactor = 4
logEvery = 1
avgStepDuration = 0
if dryrun:
log.warning(LOGGER_LINK_MTA_INSTANCE, "Step {0} - Not moving motor {1} : dry run".format(idx, self.motor.kwargs['address']))
return
angleError = abs(targetAngleDegrees - self.armAngleDegX)
thetaDotPrev = 0
while angleError > self.wiggleDeg:
loopStartTime = time.time()
# Check for silly and urgent conditions
if self.urgentStop:
raise MotorUrgentStopException(self.motor.description)
if self.armAngleRadX < self.angleLimit[0]:
log.critical(LOGGER_LINK_MTA_INSTANCE, f'Current angle {self.armAngleDegX} lower than angleLimit[0] {degrees(self.angleLimit[0])}')
raise OutOfBoundAngle('While moving arm link, we reached the lower angle limit.', self.armAngleDegX)
if self.armAngleRadX > self.angleLimit[1]:
                        log.critical(LOGGER_LINK_MTA_INSTANCE, f'Current angle {self.armAngleDegX} higher than angleLimit[1] {degrees(self.angleLimit[1])}')
raise OutOfBoundAngle('While moving arm link, we reached the upper angle limit.', self.armAngleDegX)
if loopStartTime - startTime > maxLoopTimeFactor * eta:
raise ControlerRunningTooLongException(f'Took over {maxLoopTimeFactor} times original anticipated ETA (of {eta:.2f}s ==> {maxLoopTimeFactor} * {eta:.2f}s = {maxLoopTimeFactor*eta:.2f}s).')
if loopStartTime - startTime > 3 and self.motor.position == initialMotorPosition:
raise ControlerRunningTooLongException('After 3 seconds, the motor is still in the same initial position')
if abs(self.motor.speed - self.motor.max_dps) < 50:
raise MotorRunningFastException(self.motor.description)
# Calculate motor speed for step
delta = angleError
thetaDot = min(((self.motorMaxSpeedDPS - self.motorMinSpeedDPS) / self.maxDelta) * delta + self.motorMinSpeedDPS, self.motorMaxSpeedDPS)
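                    # e.g. with motorMinSpeedDPS=100, motorMaxSpeedDPS=700 and
                    # maxDelta=30 (hypothetical values), an angle error of
                    # 15 deg yields (600 / 30) * 15 + 100 = 400 dps, capped at
                    # 700 dps once the error reaches 30 deg or more.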
# Calculate direction of motor rotation
thetaDot = int(-thetaDot) if self.armAngleDegX > targetAngleDegrees else int(thetaDot)
# Validate if motor speed needs to change
if thetaDotPrev != thetaDot:
log.debug(LOGGER_LINK_MTA_INSTANCE, f'Step {idx} - Updating motor {self.motor.address}\'s speed from {thetaDotPrev} to {thetaDot} dps.')
self.motor.on(SpeedDPS(thetaDot), brake=False, block=False)
# Note current angle error and last motor speed
thetaDotPrev = thetaDot
angleError = abs(targetAngleDegrees - self.armAngleDegX)
stepDuration = time.time() - loopStartTime
avgStepDuration += stepDuration
if idx % logEvery == 0:
avgStepDuration /= logEvery
infoLogString = f'Step {idx+1} - Avg. Loop : {avgStepDuration:.3f}s => Position/Target/Error = ' \
f'{self.armAngleDegX:.2f}/{targetAngleDegrees:.2f}/{angleError:.2f}'
log.debug(LOGGER_LINK_MTA_INSTANCE, infoLogString)
avgStepDuration = 0
idx += 1
# Stop the motor at this point - and hold position
log.debug(LOGGER_LINK_MTA_INSTANCE, f'Final position = {self.armAngleDegX:.2f} deg - set point is {targetAngleDegrees:.2f} deg')
except OutOfBoundAngle as error:
log.error(LOGGER_LINK_MTA_INSTANCE, f'Asked for an angle for this link out of bounds - asked : {error.targetAngle}, limits = [{degrees(self.angleLimit[0]):.2f}, {degrees(self.angleLimit[1]):.2f}]. Reason = {error.reason}')
except MotorRunningFastException as error:
log.error(LOGGER_LINK_MTA_INSTANCE, "Motor {} running too fast ({}) - exiting and stopping motor".format(error.motorName, error.speed))
raise
except ControlerRunningTooLongException as error:
log.error(LOGGER_LINK_MTA_INSTANCE, "MoveToAngle routine running for too long. Something is likely wrong.")
log.error(LOGGER_LINK_MTA_INSTANCE, "Detailed Error Message = {}".format(error.reason))
except MotorUrgentStopException as error:
log.error(LOGGER_LINK_MTA_INSTANCE, "Urgent Motor Stop called. {}".format(error.motorName))
self.resetLink()
except:
            log.error(LOGGER_LINK_MTA_INSTANCE, 'Error : {}'.format(traceback.format_exc()))
raise
finally:
self.motor.stop(stop_action=self.stopAction)
return
|
# -*- coding: utf-8 -*-
'''
In-memory caching used by Salt
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import salt libs
import salt.config
import salt.payload
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.files
import salt.utils.msgpack
# Import third party libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
from salt.utils.zeromq import zmq
log = logging.getLogger(__name__)
class CacheFactory(object):
'''
Cache which can use a number of backends
'''
@classmethod
def factory(cls, backend, ttl, *args, **kwargs):
log.info('Factory backend: %s', backend)
if backend == 'memory':
return CacheDict(ttl, *args, **kwargs)
elif backend == 'disk':
return CacheDisk(ttl, kwargs['minion_cache_path'], *args, **kwargs)
else:
log.error('CacheFactory received unrecognized cache type')
class CacheDict(dict):
'''
Subclass of dict that will lazily delete items past ttl
'''
def __init__(self, ttl, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self._ttl = ttl
self._key_cache_time = {}
def _enforce_ttl_key(self, key):
'''
Enforce the TTL to a specific key, delete if its past TTL
'''
if key not in self._key_cache_time:
return
if time.time() - self._key_cache_time[key] > self._ttl:
del self._key_cache_time[key]
dict.__delitem__(self, key)
def __getitem__(self, key):
'''
Check if the key is ttld out, then do the get
'''
self._enforce_ttl_key(key)
return dict.__getitem__(self, key)
def __setitem__(self, key, val):
'''
Make sure to update the key cache time
'''
self._key_cache_time[key] = time.time()
dict.__setitem__(self, key, val)
def __contains__(self, key):
self._enforce_ttl_key(key)
return dict.__contains__(self, key)
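# Minimal usage sketch (assumes a 60-second TTL):
#
#     cd = CacheDict(60)
#     cd['key'] = 'value'   # records the insertion time
#     'key' in cd           # True until 60 seconds have elapsed
#     cd['key']             # raises KeyError once the TTL has expired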
class CacheDisk(CacheDict):
'''
Class that represents itself as a dictionary to a consumer
but uses a disk-based backend. Serialization and de-serialization
is done with msgpack
'''
def __init__(self, ttl, path, *args, **kwargs):
super(CacheDisk, self).__init__(ttl, *args, **kwargs)
self._path = path
self._dict = {}
self._read()
def _enforce_ttl_key(self, key):
'''
Enforce the TTL to a specific key, delete if its past TTL
'''
if key not in self._key_cache_time:
return
if time.time() - self._key_cache_time[key] > self._ttl:
del self._key_cache_time[key]
self._dict.__delitem__(key)
def __contains__(self, key):
self._enforce_ttl_key(key)
return self._dict.__contains__(key)
def __getitem__(self, key):
'''
Check if the key is ttld out, then do the get
'''
self._enforce_ttl_key(key)
return self._dict.__getitem__(key)
def __setitem__(self, key, val):
'''
Make sure to update the key cache time
'''
self._key_cache_time[key] = time.time()
self._dict.__setitem__(key, val)
# Do the same as the parent but also persist
self._write()
def __delitem__(self, key):
'''
Make sure to remove the key cache time
'''
del self._key_cache_time[key]
self._dict.__delitem__(key)
# Do the same as the parent but also persist
self._write()
def _read(self):
'''
Read in from disk
'''
if not salt.utils.msgpack.HAS_MSGPACK or not os.path.exists(self._path):
return
with salt.utils.files.fopen(self._path, 'rb') as fp_:
cache = salt.utils.data.decode(salt.utils.msgpack.load(fp_, encoding=__salt_system_encoding__))
if "CacheDisk_cachetime" in cache: # new format
self._dict = cache["CacheDisk_data"]
self._key_cache_time = cache["CacheDisk_cachetime"]
else: # old format
self._dict = cache
timestamp = os.path.getmtime(self._path)
for key in self._dict:
self._key_cache_time[key] = timestamp
if log.isEnabledFor(logging.DEBUG):
log.debug('Disk cache retrieved: %s', cache)
def _write(self):
'''
Write out to disk
'''
if not salt.utils.msgpack.HAS_MSGPACK:
return
# TODO Add check into preflight to ensure dir exists
# TODO Dir hashing?
with salt.utils.files.fopen(self._path, 'wb+') as fp_:
cache = {
"CacheDisk_data": self._dict,
"CacheDisk_cachetime": self._key_cache_time
}
salt.utils.msgpack.dump(cache, fp_, use_bin_type=True)
class CacheCli(object):
'''
Connection client for the ConCache. Should be used by all
components that need the list of currently connected minions
'''
def __init__(self, opts):
'''
Sets up the zmq-connection to the ConCache
'''
self.opts = opts
self.serial = salt.payload.Serial(self.opts.get('serial', ''))
self.cache_sock = os.path.join(self.opts['sock_dir'], 'con_cache.ipc')
self.cache_upd_sock = os.path.join(
self.opts['sock_dir'], 'con_upd.ipc')
context = zmq.Context()
# the socket for talking to the cache
self.creq_out = context.socket(zmq.REQ)
self.creq_out.setsockopt(zmq.LINGER, 100)
self.creq_out.connect('ipc://' + self.cache_sock)
# the socket for sending updates to the cache
self.cupd_out = context.socket(zmq.PUB)
self.cupd_out.setsockopt(zmq.LINGER, 1)
self.cupd_out.connect('ipc://' + self.cache_upd_sock)
def put_cache(self, minions):
'''
published the given minions to the ConCache
'''
self.cupd_out.send(self.serial.dumps(minions))
def get_cached(self):
'''
queries the ConCache for a list of currently connected minions
'''
msg = self.serial.dumps('minions')
self.creq_out.send(msg)
min_list = self.serial.loads(self.creq_out.recv())
return min_list
class CacheRegex(object):
'''
Create a regular expression object cache for the most frequently
used patterns to minimize compilation of the same patterns over
and over again
'''
def __init__(self, prepend='', append='', size=1000,
keep_fraction=0.8, max_age=3600):
self.prepend = prepend
self.append = append
self.size = size
self.clear_size = int(size - size * (keep_fraction))
if self.clear_size >= size:
self.clear_size = int(size/2) + 1
if self.clear_size > size:
self.clear_size = size
self.max_age = max_age
self.cache = {}
self.timestamp = time.time()
def clear(self):
'''
Clear the cache
'''
self.cache.clear()
def sweep(self):
'''
Sweep the cache and remove the outdated or least frequently
used entries
'''
if self.max_age < time.time() - self.timestamp:
self.clear()
self.timestamp = time.time()
else:
            patterns = list(self.cache.values())
            patterns.sort()
            for idx in range(self.clear_size):
                del self.cache[patterns[idx][2]]
def get(self, pattern):
'''
Get a compiled regular expression object based on pattern and
cache it when it is not in the cache already
'''
try:
self.cache[pattern][0] += 1
return self.cache[pattern][1]
except KeyError:
pass
if len(self.cache) > self.size:
self.sweep()
regex = re.compile('{0}{1}{2}'.format(
self.prepend, pattern, self.append))
self.cache[pattern] = [1, regex, pattern, time.time()]
return regex
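# Minimal usage sketch: repeated lookups reuse the compiled object and bump
# its use count, which sweep() uses for least-frequently-used eviction.
#
#     cre = CacheRegex(prepend='^', append='$')
#     rex = cre.get('foo.*')    # compiles and caches '^foo.*$'
#     rex is cre.get('foo.*')   # True; served from the cache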
class ContextCache(object):
def __init__(self, opts, name):
'''
Create a context cache
'''
self.opts = opts
self.cache_path = os.path.join(opts['cachedir'], 'context', '{0}.p'.format(name))
self.serial = salt.payload.Serial(self.opts)
def cache_context(self, context):
'''
Cache the given context to disk
'''
if not os.path.isdir(os.path.dirname(self.cache_path)):
os.mkdir(os.path.dirname(self.cache_path))
with salt.utils.files.fopen(self.cache_path, 'w+b') as cache:
self.serial.dump(context, cache)
def get_cache_context(self):
'''
Retrieve a context cache from disk
'''
with salt.utils.files.fopen(self.cache_path, 'rb') as cache:
return salt.utils.data.decode(self.serial.load(cache))
def context_cache(func):
'''
    A decorator to be used by module functions which need to cache their
    context.
    To evaluate a __context__ and re-hydrate it if a given key
    is empty or contains no items, pass a list of keys to evaluate.
'''
def context_cache_wrap(*args, **kwargs):
func_context = func.__globals__['__context__']
func_opts = func.__globals__['__opts__']
func_name = func.__globals__['__name__']
context_cache = ContextCache(func_opts, func_name)
if not func_context and os.path.isfile(context_cache.cache_path):
salt.utils.dictupdate.update(func_context, context_cache.get_cache_context())
else:
context_cache.cache_context(func_context)
return func(*args, **kwargs)
return context_cache_wrap
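# Minimal usage sketch (illustrative; assumes a Salt loader-style module
# where __context__, __opts__ and __name__ are injected globals):
#
#     @context_cache
#     def my_func():
#         __context__.setdefault('counter', 0)
#         __context__['counter'] += 1
#         return __context__['counter']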
# test code for the CacheCli
if __name__ == '__main__':
opts = salt.config.master_config('/etc/salt/master')
ccli = CacheCli(opts)
ccli.put_cache(['test1', 'test10', 'test34'])
ccli.put_cache(['test12'])
ccli.put_cache(['test18'])
ccli.put_cache(['test21'])
print('minions: {0}'.format(ccli.get_cached()))
|
"""Sparse Dtype"""
import re
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type
import warnings
import numpy as np
from pandas._typing import Dtype, DtypeObj
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.base import ExtensionDtype, register_extension_dtype
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
is_bool_dtype,
is_extension_array_dtype,
is_object_dtype,
is_scalar,
is_string_dtype,
pandas_dtype,
)
from pandas.core.dtypes.missing import isna, na_value_for_dtype
if TYPE_CHECKING:
from pandas.core.arrays.sparse.array import SparseArray # noqa: F401
@register_extension_dtype
class SparseDtype(ExtensionDtype):
"""
Dtype for data stored in :class:`SparseArray`.
This dtype implements the pandas ExtensionDtype interface.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
The dtype of the underlying array storing the non-fill value values.
fill_value : scalar, optional
The scalar value not stored in the SparseArray. By default, this
depends on `dtype`.
=========== ==========
dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool ``False``
datetime64 ``pd.NaT``
timedelta64 ``pd.NaT``
=========== ==========
The default value may be overridden by specifying a `fill_value`.
Attributes
----------
None
Methods
-------
None
"""
# We include `_is_na_fill_value` in the metadata to avoid hash collisions
# between SparseDtype(float, 0.0) and SparseDtype(float, nan).
# Without is_na_fill_value in the comparison, those would be equal since
# hash(nan) is (sometimes?) 0.
_metadata = ("_dtype", "_fill_value", "_is_na_fill_value")
def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None):
if isinstance(dtype, type(self)):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
dtype = pandas_dtype(dtype)
if is_string_dtype(dtype):
dtype = np.dtype("object")
if fill_value is None:
fill_value = na_value_for_dtype(dtype)
if not is_scalar(fill_value):
raise ValueError(f"fill_value must be a scalar. Got {fill_value} instead")
self._dtype = dtype
self._fill_value = fill_value
def __hash__(self):
# Python3 doesn't inherit __hash__ when a base class overrides
# __eq__, so we explicitly do it here.
return super().__hash__()
def __eq__(self, other: Any) -> bool:
# We have to override __eq__ to handle NA values in _metadata.
# The base class does simple == checks, which fail for NA.
if isinstance(other, str):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if isinstance(other, type(self)):
subtype = self.subtype == other.subtype
if self._is_na_fill_value:
# this case is complicated by two things:
# SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
# SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
# i.e. we want to treat any floating-point NaN as equal, but
# not a floating-point NaN and a datetime NaT.
fill_value = (
other._is_na_fill_value
and isinstance(self.fill_value, type(other.fill_value))
or isinstance(other.fill_value, type(self.fill_value))
)
else:
fill_value = self.fill_value == other.fill_value
return subtype and fill_value
return False
@property
def fill_value(self):
"""
The fill value of the array.
Converting the SparseArray to a dense ndarray will fill the
array with this value.
.. warning::
It's possible to end up with a SparseArray that has ``fill_value``
values in ``sp_values``. This can occur, for example, when setting
``SparseArray.fill_value`` directly.
"""
return self._fill_value
@property
def _is_na_fill_value(self):
return isna(self.fill_value)
@property
def _is_numeric(self) -> bool:
return not is_object_dtype(self.subtype)
@property
def _is_boolean(self) -> bool:
return is_bool_dtype(self.subtype)
@property
def kind(self):
"""
The sparse kind. Either 'integer', or 'block'.
"""
return self.subtype.kind
@property
def type(self):
return self.subtype.type
@property
def subtype(self):
return self._dtype
@property
def name(self):
return f"Sparse[{self.subtype.name}, {repr(self.fill_value)}]"
def __repr__(self) -> str:
return self.name
@classmethod
def construct_array_type(cls) -> Type["SparseArray"]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
from pandas.core.arrays.sparse.array import SparseArray # noqa: F811
return SparseArray
@classmethod
def construct_from_string(cls, string: str) -> "SparseDtype":
"""
Construct a SparseDtype from a string form.
Parameters
----------
string : str
Can take the following forms.
string dtype
================ ============================
'int' SparseDtype[np.int64, 0]
'Sparse' SparseDtype[np.float64, nan]
'Sparse[int]' SparseDtype[np.int64, 0]
'Sparse[int, 0]' SparseDtype[np.int64, 0]
================ ============================
It is not possible to specify non-default fill values
with a string. An argument like ``'Sparse[int, 1]'``
will raise a ``TypeError`` because the default fill value
for integers is 0.
Returns
-------
SparseDtype
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
msg = f"Cannot construct a 'SparseDtype' from '{string}'"
if string.startswith("Sparse"):
try:
sub_type, has_fill_value = cls._parse_subtype(string)
except ValueError as err:
raise TypeError(msg) from err
else:
result = SparseDtype(sub_type)
msg = (
f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt "
"looks like the fill_value in the string is not "
"the default for the dtype. Non-default fill_values "
"are not supported. Use the 'SparseDtype()' "
"constructor instead."
)
if has_fill_value and str(result) != string:
raise TypeError(msg)
return result
else:
raise TypeError(msg)
@staticmethod
def _parse_subtype(dtype: str) -> Tuple[str, bool]:
"""
Parse a string to get the subtype
Parameters
----------
dtype : str
A string like
* Sparse[subtype]
* Sparse[subtype, fill_value]
Returns
-------
        subtype : str
        has_fill_value : bool
Raises
------
ValueError
When the subtype cannot be extracted.
"""
xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")
m = xpr.match(dtype)
has_fill_value = False
if m:
subtype = m.groupdict()["subtype"]
has_fill_value = bool(m.groupdict()["fill_value"])
elif dtype == "Sparse":
subtype = "float64"
else:
raise ValueError(f"Cannot parse {dtype}")
return subtype, has_fill_value
@classmethod
def is_dtype(cls, dtype: object) -> bool:
dtype = getattr(dtype, "dtype", dtype)
if isinstance(dtype, str) and dtype.startswith("Sparse"):
sub_type, _ = cls._parse_subtype(dtype)
dtype = np.dtype(sub_type)
elif isinstance(dtype, cls):
return True
return isinstance(dtype, np.dtype) or dtype == "Sparse"
def update_dtype(self, dtype):
"""
Convert the SparseDtype to a new dtype.
This takes care of converting the ``fill_value``.
Parameters
----------
dtype : Union[str, numpy.dtype, SparseDtype]
The new dtype to use.
* For a SparseDtype, it is simply returned
* For a NumPy dtype (or str), the current fill value
is converted to the new dtype, and a SparseDtype
with `dtype` and the new fill value is returned.
Returns
-------
SparseDtype
A new SparseDtype with the correct `dtype` and fill value
for that `dtype`.
Raises
------
ValueError
When the current fill value cannot be converted to the
new `dtype` (e.g. trying to convert ``np.nan`` to an
integer dtype).
Examples
--------
>>> SparseDtype(int, 0).update_dtype(float)
Sparse[float64, 0.0]
>>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
Sparse[float64, nan]
"""
cls = type(self)
dtype = pandas_dtype(dtype)
if not isinstance(dtype, cls):
if is_extension_array_dtype(dtype):
raise TypeError("sparse arrays of extension dtypes not supported")
fill_value = astype_nansafe(np.array(self.fill_value), dtype).item()
dtype = cls(dtype, fill_value=fill_value)
return dtype
@property
def _subtype_with_str(self):
"""
Whether the SparseDtype's subtype should be considered ``str``.
Typically, pandas will store string data in an object-dtype array.
When converting values to a dtype, e.g. in ``.astype``, we need to
be more specific, we need the actual underlying type.
Returns
-------
>>> SparseDtype(int, 1)._subtype_with_str
dtype('int64')
>>> SparseDtype(object, 1)._subtype_with_str
dtype('O')
>>> dtype = SparseDtype(str, '')
>>> dtype.subtype
dtype('O')
>>> dtype._subtype_with_str
<class 'str'>
"""
if isinstance(self.fill_value, str):
return type(self.fill_value)
return self.subtype
def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
# TODO for now only handle SparseDtypes and numpy dtypes => extend
        # with other compatible extension dtypes
if any(
isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype)
for x in dtypes
):
return None
fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)]
fill_value = fill_values[0]
# np.nan isn't a singleton, so we may end up with multiple
        # NaNs here, so we ignore the all-NA case too.
if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
warnings.warn(
"Concatenating sparse arrays with multiple fill "
f"values: '{fill_values}'. Picking the first and "
"converting the rest.",
PerformanceWarning,
stacklevel=6,
)
np_dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]
return SparseDtype(np.find_common_type(np_dtypes, []), fill_value=fill_value)
|
from random import randint
from utils import http
from utils.endpoint import Endpoint, setup
from utils.perspective import box_resize
from utils.glitch import soft_glitch
@setup
class SoftGlitch(Endpoint):
def generate(self, kwargs):
image_url = kwargs['image']
img = http.get_image(image_url)
img = box_resize(img, 500)
img = img.convert('RGBA')
img = soft_glitch(img, glitch_amount=randint(3, 5), color_offset=True, scan_lines=True)
return self.send_file(img)
|
# coding: utf-8
from __future__ import print_function, unicode_literals
import re
import socket
from .__init__ import MACOS, ANYWIN
from .util import chkcmd
class TcpSrv(object):
"""
tcplistener which forwards clients to Hub
which then uses the least busy HttpSrv to handle it
"""
def __init__(self, hub):
self.hub = hub
self.args = hub.args
self.log = hub.log
self.stopping = False
ip = "127.0.0.1"
eps = {ip: "local only"}
nonlocals = [x for x in self.args.i if x != ip]
if nonlocals:
eps = self.detect_interfaces(self.args.i)
if not eps:
for x in nonlocals:
eps[x] = "external"
msgs = []
m = "available @ http://{}:{}/ (\033[33m{}\033[0m)"
for ip, desc in sorted(eps.items(), key=lambda x: x[1]):
for port in sorted(self.args.p):
msgs.append(m.format(ip, port, desc))
if msgs:
msgs[-1] += "\n"
for m in msgs:
self.log("tcpsrv", m)
self.srv = []
for ip in self.args.i:
for port in self.args.p:
self.srv.append(self._listen(ip, port))
def _listen(self, ip, port):
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
srv.bind((ip, port))
return srv
except (OSError, socket.error) as ex:
if ex.errno in [98, 48]:
e = "\033[1;31mport {} is busy on interface {}\033[0m".format(port, ip)
elif ex.errno in [99, 49]:
e = "\033[1;31minterface {} does not exist\033[0m".format(ip)
else:
raise
raise Exception(e)
def run(self):
for srv in self.srv:
srv.listen(self.args.nc)
ip, port = srv.getsockname()
fno = srv.fileno()
msg = "listening @ {}:{} f{}".format(ip, port, fno)
self.log("tcpsrv", msg)
if self.args.q:
print(msg)
self.hub.broker.put(False, "listen", srv)
def shutdown(self):
self.stopping = True
try:
for srv in self.srv:
srv.close()
except:
pass
self.log("tcpsrv", "ok bye")
def ips_linux(self):
eps = {}
try:
txt, _ = chkcmd(["ip", "addr"])
except:
return eps
r = re.compile(r"^\s+inet ([^ ]+)/.* (.*)")
for ln in txt.split("\n"):
try:
ip, dev = r.match(ln.rstrip()).groups()
eps[ip] = dev
except:
pass
return eps
def ips_macos(self):
eps = {}
try:
txt, _ = chkcmd(["ifconfig"])
except:
return eps
rdev = re.compile(r"^([^ ]+):")
rip = re.compile(r"^\tinet ([0-9\.]+) ")
dev = None
for ln in txt.split("\n"):
m = rdev.match(ln)
if m:
dev = m.group(1)
m = rip.match(ln)
if m:
eps[m.group(1)] = dev
dev = None
return eps
def ips_windows_ipconfig(self):
eps = {}
try:
txt, _ = chkcmd(["ipconfig"])
except:
return eps
rdev = re.compile(r"(^[^ ].*):$")
rip = re.compile(r"^ +IPv?4? [^:]+: *([0-9\.]{7,15})$")
dev = None
for ln in txt.replace("\r", "").split("\n"):
m = rdev.match(ln)
if m:
dev = m.group(1).split(" adapter ", 1)[-1]
m = rip.match(ln)
if m and dev:
eps[m.group(1)] = dev
dev = None
return eps
def ips_windows_netsh(self):
eps = {}
try:
txt, _ = chkcmd("netsh interface ip show address".split())
except:
return eps
rdev = re.compile(r'.* "([^"]+)"$')
rip = re.compile(r".* IP\b.*: +([0-9\.]{7,15})$")
dev = None
for ln in txt.replace("\r", "").split("\n"):
m = rdev.match(ln)
if m:
dev = m.group(1)
m = rip.match(ln)
if m and dev:
eps[m.group(1)] = dev
dev = None
return eps
def detect_interfaces(self, listen_ips):
if MACOS:
eps = self.ips_macos()
elif ANYWIN:
eps = self.ips_windows_ipconfig() # sees more interfaces
eps.update(self.ips_windows_netsh()) # has better names
else:
eps = self.ips_linux()
if "0.0.0.0" not in listen_ips:
eps = {k: v for k, v in eps.items() if k in listen_ips}
default_route = None
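        # "Connecting" a UDP socket sends no packets, but it makes the
        # kernel pick an outgoing interface; getsockname() then reveals
        # the local address of the default route.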
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for ip in [
"10.255.255.255",
"172.31.255.255",
"192.168.255.255",
"239.255.255.255",
# could add 1.1.1.1 as a final fallback
            # but external connections are forbidden
]:
try:
s.connect((ip, 1))
# raise OSError(13, "a")
default_route = s.getsockname()[0]
break
except (OSError, socket.error) as ex:
if ex.errno == 13:
self.log("tcpsrv", "eaccess {} (trying next)".format(ip))
elif ex.errno not in [101, 10065, 10051]:
self.log("tcpsrv", "route lookup failed; err {}".format(ex.errno))
s.close()
for lip in listen_ips:
if default_route and lip in ["0.0.0.0", default_route]:
desc = "\033[32mexternal"
try:
eps[default_route] += ", " + desc
except:
eps[default_route] = desc
return eps
|
# Generated by Django 3.1.7 on 2021-03-04 08:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Pages', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='employee',
name='Esalary',
field=models.DecimalField(decimal_places=2, max_digits=5),
),
]
|
from pprint import pprint
from ttp import ttp
import json
import time
from netmiko import ConnectHandler
ssh = {
'device_type': 'alcatel_sros',
'ip': '135.243.92.119',
'username': 'admin',
'password': 'admin',
'port': '22'
}
net_connect = ConnectHandler(**ssh)
print('Connection successful')
output = net_connect.send_command('show port detail')
#print (output)
# NOTE: ttp_template (a TTP template string capturing Port_Number,
# Utilization_Input and Utilization_Output) must be defined before this
# point; its definition was not included in this file.
parser = ttp(data=output, template=ttp_template)
parser.parse()
results = parser.result(format='json')[0]
#converting str to json.
result = json.loads(results)
#print(result[0][1]['Port_Number'])
#print(len(result[0]))
for entry in result[0]:
    # print(entry['Port_Number'])
    if "Port_Number" in entry and "Utilization_Input" in entry and "Utilization_Output" in entry:
        print(f"{entry['Port_Number']} --> Utilization Input value: {entry['Utilization_Input']} Utilization Output value: {entry['Utilization_Output']}")
|
import metallurgy as mg
def test_melting_temperature():
assert mg.linear_mixture({"Cu": 0.5, "Zr": 0.5},
"melting_temperature") == 1742.885
assert mg.linear_mixture([{"Cu": 0.5, "Zr": 0.5},
{"Cu": 0.25, "Zr": 0.75}],
"melting_temperature") == [1742.885, 1935.4425]
|
"Run all tests."
from test_root import Root
from test_about import About
from test_user import User
if __name__ == '__main__':
import base
base.run()
|
# The Integrity Verification Proxy (IVP) additions are ...
#
# Copyright (c) 2012 The Pennsylvania State University
# Systems and Internet Infrastructure Security Laboratory
#
# they were developed by:
#
# Joshua Schiffman <jschiffm@cse.psu.edu>
# Hayawardh Vijayakumar <huv101@cse.psu.edu>
# Trent Jaeger <tjaeger@cse.psu.edu>
#
# Unless otherwise noted, all code additions are ...
#
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
"""
VM Integrity Monitor
Filename: ivc.py
Author: Joshua Schiffman <jschiffm@cse.psu.edu>
Description: VM Integrity Monitor for Integrity Verified Channels (IVC) project.
This server accepts connections from the IKE daemon to register
    client criteria and sends an accept / reject notice on registration
if the conditions are met. The server also flushes the client's
integrity association (IA) when its conditions are violated.
The monitor registers integrity monitoring modules that are
specified in its local configuration file "modules.cfg". Modules
subclass the Introspection_Module and have three functions:
Initialize, Callback, and Check. The modules can be registered on
two different hooks, HOOK_LOADPARAMS and
HOOK_WATCHPOINT_TRIGGER.
"""
import subprocess
from util import *
from ConfigParser import ConfigParser
CONF_FILE = "cfg/monitor.cfg"
if __name__ == "__main__":
cfg = ConfigParser()
cfg.read(CONF_FILE)
server = vmctl.VMServer(cfg)
server.register_introspection_functions()
try:
server.serve_forever()
except KeyboardInterrupt:
exit()
|
#!/usr/bin/env python
import pathlib
from setuptools import find_packages, setup
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
setup(
name="DRUGpy",
version="1.1.0",
description="Some PyMOL utilities",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/labimm/DRUGpy",
author="Pedro Sousa Lacerda",
author_email="pslacerda@gmail.com",
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Environment :: Plugins",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
packages=find_packages(),
install_requires=[
"lxml",
"pandas",
"scipy",
"requests",
"cached_property",
"matplotlib",
"seaborn",
"jinja2",
],
)
|
from os.path import realpath
from whisk.project import Project
project = Project.from_module(realpath(__file__))
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_base_ = './htc_hrnetv2p_w32_20e_coco.py'
model = dict(
pretrained='open-mmlab://msra/hrnetv2_w40',
backbone=dict(
type='HRNet',
extra=dict(
stage2=dict(num_channels=(40, 80)),
stage3=dict(num_channels=(40, 80, 160)),
stage4=dict(num_channels=(40, 80, 160, 320)))),
neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
|
"""
Adds support for generic thermostat units.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.generic_thermostat/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.core import DOMAIN as HA_DOMAIN
from homeassistant.components.climate import (
STATE_HEAT, STATE_COOL, STATE_IDLE, STATE_AUTO, ClimateDevice,
ATTR_OPERATION_MODE, ATTR_AWAY_MODE, SUPPORT_OPERATION_MODE,
SUPPORT_AWAY_MODE, SUPPORT_TARGET_TEMPERATURE, PLATFORM_SCHEMA)
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT, STATE_ON, STATE_OFF, ATTR_TEMPERATURE,
CONF_NAME, ATTR_ENTITY_ID, SERVICE_TURN_ON, SERVICE_TURN_OFF,
STATE_UNKNOWN)
from homeassistant.helpers import condition
from homeassistant.helpers.event import (
async_track_state_change, async_track_time_interval)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import async_get_last_state
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['switch', 'sensor']
DEFAULT_TOLERANCE = 0.3
DEFAULT_NAME = 'Generic Thermostat'
CONF_HEATER = 'heater'
CONF_SENSOR = 'target_sensor'
CONF_MIN_TEMP = 'min_temp'
CONF_MAX_TEMP = 'max_temp'
CONF_TARGET_TEMP = 'target_temp'
CONF_AC_MODE = 'ac_mode'
CONF_MIN_DUR = 'min_cycle_duration'
CONF_COLD_TOLERANCE = 'cold_tolerance'
CONF_HOT_TOLERANCE = 'hot_tolerance'
CONF_KEEP_ALIVE = 'keep_alive'
CONF_INITIAL_OPERATION_MODE = 'initial_operation_mode'
CONF_AWAY_TEMP = 'away_temp'
SUPPORT_FLAGS = (SUPPORT_TARGET_TEMPERATURE |
SUPPORT_OPERATION_MODE)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HEATER): cv.entity_id,
vol.Required(CONF_SENSOR): cv.entity_id,
vol.Optional(CONF_AC_MODE): cv.boolean,
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
vol.Optional(CONF_MIN_DUR): vol.All(cv.time_period, cv.positive_timedelta),
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_COLD_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(
float),
vol.Optional(CONF_HOT_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(
float),
vol.Optional(CONF_TARGET_TEMP): vol.Coerce(float),
vol.Optional(CONF_KEEP_ALIVE): vol.All(
cv.time_period, cv.positive_timedelta),
vol.Optional(CONF_INITIAL_OPERATION_MODE):
vol.In([STATE_AUTO, STATE_OFF]),
vol.Optional(CONF_AWAY_TEMP): vol.Coerce(float)
})
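# Example configuration.yaml entry (illustrative; the entity ids are made
# up, the keys follow the PLATFORM_SCHEMA above):
#
# climate:
#   - platform: generic_thermostat
#     name: Study
#     heater: switch.study_heater
#     target_sensor: sensor.study_temperature
#     min_temp: 15
#     max_temp: 21
#     target_temp: 19
#     cold_tolerance: 0.3
#     hot_tolerance: 0.3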
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the generic thermostat platform."""
name = config.get(CONF_NAME)
heater_entity_id = config.get(CONF_HEATER)
sensor_entity_id = config.get(CONF_SENSOR)
min_temp = config.get(CONF_MIN_TEMP)
max_temp = config.get(CONF_MAX_TEMP)
target_temp = config.get(CONF_TARGET_TEMP)
ac_mode = config.get(CONF_AC_MODE)
min_cycle_duration = config.get(CONF_MIN_DUR)
cold_tolerance = config.get(CONF_COLD_TOLERANCE)
hot_tolerance = config.get(CONF_HOT_TOLERANCE)
keep_alive = config.get(CONF_KEEP_ALIVE)
initial_operation_mode = config.get(CONF_INITIAL_OPERATION_MODE)
away_temp = config.get(CONF_AWAY_TEMP)
async_add_devices([GenericThermostat(
hass, name, heater_entity_id, sensor_entity_id, min_temp, max_temp,
target_temp, ac_mode, min_cycle_duration, cold_tolerance,
hot_tolerance, keep_alive, initial_operation_mode, away_temp)])
class GenericThermostat(ClimateDevice):
"""Representation of a Generic Thermostat device."""
def __init__(self, hass, name, heater_entity_id, sensor_entity_id,
min_temp, max_temp, target_temp, ac_mode, min_cycle_duration,
cold_tolerance, hot_tolerance, keep_alive,
initial_operation_mode, away_temp):
"""Initialize the thermostat."""
self.hass = hass
self._name = name
self.heater_entity_id = heater_entity_id
self.ac_mode = ac_mode
self.min_cycle_duration = min_cycle_duration
self._cold_tolerance = cold_tolerance
self._hot_tolerance = hot_tolerance
self._keep_alive = keep_alive
self._initial_operation_mode = initial_operation_mode
self._saved_target_temp = target_temp if target_temp is not None \
else away_temp
if self.ac_mode:
self._current_operation = STATE_COOL
self._operation_list = [STATE_COOL, STATE_OFF]
else:
self._current_operation = STATE_HEAT
self._operation_list = [STATE_HEAT, STATE_OFF]
if initial_operation_mode == STATE_OFF:
self._enabled = False
self._current_operation = STATE_OFF
else:
self._enabled = True
self._active = False
self._cur_temp = None
self._min_temp = min_temp
self._max_temp = max_temp
self._target_temp = target_temp
self._unit = hass.config.units.temperature_unit
self._support_flags = SUPPORT_FLAGS
if away_temp is not None:
self._support_flags = SUPPORT_FLAGS | SUPPORT_AWAY_MODE
self._away_temp = away_temp
self._is_away = False
async_track_state_change(
hass, sensor_entity_id, self._async_sensor_changed)
async_track_state_change(
hass, heater_entity_id, self._async_switch_changed)
if self._keep_alive:
async_track_time_interval(
hass, self._async_keep_alive, self._keep_alive)
sensor_state = hass.states.get(sensor_entity_id)
if sensor_state and sensor_state.state != STATE_UNKNOWN:
self._async_update_temp(sensor_state)
@asyncio.coroutine
def async_added_to_hass(self):
"""Run when entity about to be added."""
# Check If we have an old state
old_state = yield from async_get_last_state(self.hass,
self.entity_id)
if old_state is not None:
# If we have no initial temperature, restore
if self._target_temp is None:
                # If there is no previously saved target temperature,
                # fall back to the min/max default
                if old_state.attributes.get(ATTR_TEMPERATURE) is None:
if self.ac_mode:
self._target_temp = self.max_temp
else:
self._target_temp = self.min_temp
_LOGGER.warning("Undefined target temperature,"
"falling back to %s", self._target_temp)
else:
self._target_temp = float(
old_state.attributes[ATTR_TEMPERATURE])
if old_state.attributes.get(ATTR_AWAY_MODE) is not None:
self._is_away = str(
old_state.attributes[ATTR_AWAY_MODE]) == STATE_ON
if (self._initial_operation_mode is None and
old_state.attributes[ATTR_OPERATION_MODE] is not None):
self._current_operation = \
old_state.attributes[ATTR_OPERATION_MODE]
self._enabled = self._current_operation != STATE_OFF
else:
# No previous state, try and restore defaults
if self._target_temp is None:
if self.ac_mode:
self._target_temp = self.max_temp
else:
self._target_temp = self.min_temp
_LOGGER.warning("No previously saved temperature, setting to %s",
self._target_temp)
@property
def state(self):
"""Return the current state."""
if self._is_device_active:
return self.current_operation
if self._enabled:
return STATE_IDLE
return STATE_OFF
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def name(self):
"""Return the name of the thermostat."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._unit
@property
def current_temperature(self):
"""Return the sensor temperature."""
return self._cur_temp
@property
def current_operation(self):
"""Return current operation."""
return self._current_operation
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temp
@property
def operation_list(self):
"""List of available operation modes."""
return self._operation_list
async def async_set_operation_mode(self, operation_mode):
"""Set operation mode."""
if operation_mode == STATE_HEAT:
self._current_operation = STATE_HEAT
self._enabled = True
self._async_control_heating()
elif operation_mode == STATE_COOL:
self._current_operation = STATE_COOL
self._enabled = True
self._async_control_heating()
elif operation_mode == STATE_OFF:
self._current_operation = STATE_OFF
self._enabled = False
if self._is_device_active:
self._heater_turn_off()
else:
_LOGGER.error("Unrecognized operation mode: %s", operation_mode)
return
# Ensure we update the current operation after changing the mode
self.schedule_update_ha_state()
@asyncio.coroutine
def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
self._target_temp = temperature
self._async_control_heating()
yield from self.async_update_ha_state()
@property
def min_temp(self):
"""Return the minimum temperature."""
# pylint: disable=no-member
if self._min_temp:
return self._min_temp
# get default temp from super class
return super().min_temp
@property
def max_temp(self):
"""Return the maximum temperature."""
# pylint: disable=no-member
if self._max_temp:
return self._max_temp
# Get default temp from super class
return super().max_temp
@asyncio.coroutine
def _async_sensor_changed(self, entity_id, old_state, new_state):
"""Handle temperature changes."""
if new_state is None:
return
self._async_update_temp(new_state)
self._async_control_heating()
yield from self.async_update_ha_state()
@callback
def _async_switch_changed(self, entity_id, old_state, new_state):
"""Handle heater switch state changes."""
if new_state is None:
return
self.async_schedule_update_ha_state()
@callback
def _async_keep_alive(self, time):
"""Call at constant intervals for keep-alive purposes."""
if self._is_device_active:
self._heater_turn_on()
else:
self._heater_turn_off()
@callback
def _async_update_temp(self, state):
"""Update thermostat with latest state from sensor."""
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
try:
self._cur_temp = self.hass.config.units.temperature(
float(state.state), unit)
except ValueError as ex:
_LOGGER.error("Unable to update from sensor: %s", ex)
@callback
def _async_control_heating(self):
"""Check if we need to turn heating on or off."""
if not self._active and None not in (self._cur_temp,
self._target_temp):
self._active = True
_LOGGER.info("Obtained current and target temperature. "
"Generic thermostat active. %s, %s",
self._cur_temp, self._target_temp)
if not self._active:
return
if not self._enabled:
return
if self.min_cycle_duration:
if self._is_device_active:
current_state = STATE_ON
else:
current_state = STATE_OFF
long_enough = condition.state(
self.hass, self.heater_entity_id, current_state,
self.min_cycle_duration)
if not long_enough:
return
if self.ac_mode:
is_cooling = self._is_device_active
if is_cooling:
too_cold = self._target_temp - self._cur_temp >= \
self._cold_tolerance
if too_cold:
_LOGGER.info("Turning off AC %s", self.heater_entity_id)
self._heater_turn_off()
else:
too_hot = self._cur_temp - self._target_temp >= \
self._hot_tolerance
if too_hot:
_LOGGER.info("Turning on AC %s", self.heater_entity_id)
self._heater_turn_on()
else:
is_heating = self._is_device_active
if is_heating:
too_hot = self._cur_temp - self._target_temp >= \
self._hot_tolerance
if too_hot:
_LOGGER.info("Turning off heater %s",
self.heater_entity_id)
self._heater_turn_off()
else:
too_cold = self._target_temp - self._cur_temp >= \
self._cold_tolerance
if too_cold:
_LOGGER.info("Turning on heater %s", self.heater_entity_id)
self._heater_turn_on()
@property
def _is_device_active(self):
"""If the toggleable device is currently active."""
return self.hass.states.is_state(self.heater_entity_id, STATE_ON)
@property
def supported_features(self):
"""Return the list of supported features."""
return self._support_flags
@callback
def _heater_turn_on(self):
"""Turn heater toggleable device on."""
data = {ATTR_ENTITY_ID: self.heater_entity_id}
self.hass.async_add_job(
self.hass.services.async_call(HA_DOMAIN, SERVICE_TURN_ON, data))
@callback
def _heater_turn_off(self):
"""Turn heater toggleable device off."""
data = {ATTR_ENTITY_ID: self.heater_entity_id}
self.hass.async_add_job(
self.hass.services.async_call(HA_DOMAIN, SERVICE_TURN_OFF, data))
@property
def is_away_mode_on(self):
"""Return true if away mode is on."""
return self._is_away
def turn_away_mode_on(self):
"""Turn away mode on by setting it on away hold indefinitely."""
self._is_away = True
self._saved_target_temp = self._target_temp
self._target_temp = self._away_temp
self._async_control_heating()
self.schedule_update_ha_state()
def turn_away_mode_off(self):
"""Turn away off."""
self._is_away = False
self._target_temp = self._saved_target_temp
self._async_control_heating()
self.schedule_update_ha_state()
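# Hedged illustration (not part of the component): the hysteresis rule used by
# _async_control_heating above, extracted as a pure function. In heating mode
# the device turns on once the room is cold_tolerance below target and off once
# it is hot_tolerance above target; ac_mode mirrors the comparisons.
def _hysteresis_action(cur_temp, target_temp, cold_tolerance, hot_tolerance,
                       device_active, ac_mode=False):
    if ac_mode:
        if device_active and target_temp - cur_temp >= cold_tolerance:
            return 'turn_off'
        if not device_active and cur_temp - target_temp >= hot_tolerance:
            return 'turn_on'
    else:
        if device_active and cur_temp - target_temp >= hot_tolerance:
            return 'turn_off'
        if not device_active and target_temp - cur_temp >= cold_tolerance:
            return 'turn_on'
    return 'no_change'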
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyAzureMgmtIothubprovisioningservices(PythonPackage):
"""Microsoft Azure IoTHub Provisioning Services Client Library for Python.
"""
homepage = "https://github.com/Azure/azure-sdk-for-python"
pypi = "azure-mgmt-iothubprovisioningservices/azure-mgmt-iothubprovisioningservices-0.2.0.zip"
version('0.2.0', sha256='8c37acfd1c33aba845f2e0302ef7266cad31cba503cc990a48684659acb7b91d')
depends_on('py-setuptools', type='build')
depends_on('py-msrestazure@0.4.20:1', type=('build', 'run'))
depends_on('py-azure-common@1.1:1', type=('build', 'run'))
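# Usage sketch (assuming a working Spack installation):
#   spack install py-azure-mgmt-iothubprovisioningservices@0.2.0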
|
from __future__ import print_function
import json
print('Loading function')
def lambda_handler(event, context):
print("Received event: " + json.dumps(event, indent=2))
return
# print(event) # Echo back the first key value
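# Minimal local harness sketch (assumptions: a plain dict event, and this
# handler does not use the AWS context object, so None suffices).
if __name__ == '__main__':
    lambda_handler({'key1': 'value1'}, None)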
|
import os
import time
import sys
import glob
from gym_idsgame.config.runner_mode import RunnerMode
from gym_idsgame.agents.training_agents.policy_gradient.pg_agent_config import PolicyGradientAgentConfig
from gym_idsgame.agents.dao.agent_type import AgentType
from gym_idsgame.config.client_config import ClientConfig
from gym_idsgame.config.hp_tuning_config import HpTuningConfig
from gym_idsgame.runnner import Runner
from experiments.util import plotting_util, util, hp_tuning
def get_script_path():
"""
:return: the script path
"""
return os.path.dirname(os.path.realpath(sys.argv[0]))
def default_output_dir() -> str:
"""
:return: the default output dir
"""
script_dir = get_script_path()
return script_dir
def default_config_path() -> str:
"""
:return: the default path to configuration file
"""
config_path = os.path.join(default_output_dir(), './config.json')
return config_path
def hp_tuning_config(client_config: ClientConfig) -> ClientConfig:
"""
Setup config for hparam tuning
:param client_config: the client config
:return: the updated client config
"""
client_config.hp_tuning = True
client_config.hp_tuning_config = HpTuningConfig(param_1="alpha", param_2="num_hidden_layers",
alpha=[0.000001, 0.00001, 0.0001, 0.001, 0.01],
num_hidden_layers=[1, 2, 4, 8, 16])
client_config.run_many = False
return client_config
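# Grid note (sketch): the two lists above define a 5 x 5 = 25 point grid over
# (alpha, num_hidden_layers); hp_tuning.hype_grid presumably sweeps this grid
# when client_config.hp_tuning is enabled.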
def default_config() -> ClientConfig:
"""
:return: Default configuration for the experiment
"""
pg_agent_config = PolicyGradientAgentConfig(gamma=0.999, alpha_attacker=0.0001, alpha_defender=0.0001,
epsilon=1, render=False, eval_sleep=0.9,
min_epsilon=0.01, eval_episodes=100, train_log_frequency=100,
epsilon_decay=0.9999, video=True, eval_log_frequency=1,
video_fps=5, video_dir=default_output_dir() + "/results/videos",
num_episodes=450001,
eval_render=False, gifs=True,
gif_dir=default_output_dir() + "/results/gifs",
eval_frequency=1000, attacker=True, defender=True, video_frequency=101,
save_dir=default_output_dir() + "/results/data",
checkpoint_freq=5000, input_dim_attacker=(4 + 3) * 2,
input_dim_defender=(4+2)*3,
output_dim_attacker=4 * 2,
output_dim_defender=(4+1)*3,
hidden_dim=32,
num_hidden_layers=1, batch_size=16,
gpu=False, tensorboard=True,
tensorboard_dir=default_output_dir() + "/results/tensorboard",
optimizer="Adam", lr_exp_decay=False, lr_decay_rate=0.999,
normalize_features=False, merged_ad_features=True,
zero_mean_features=False, gpu_id=0
)
env_name = "idsgame-v16"
client_config = ClientConfig(env_name=env_name, attacker_type=AgentType.REINFORCE_AGENT.value,
defender_type=AgentType.REINFORCE_AGENT.value,
mode=RunnerMode.TRAIN_DEFENDER_AND_ATTACKER.value,
pg_agent_config=pg_agent_config, output_dir=default_output_dir(),
title="REINFORCE vs REINFORCE",
run_many=False, random_seeds=[0, 999, 299, 399, 499])
#client_config = hp_tuning_config(client_config)
return client_config
def write_default_config(path:str = None) -> None:
"""
Writes the default configuration to a json file
:param path: the path to write the configuration to
:return: None
"""
if path is None:
path = default_config_path()
config = default_config()
util.write_config_file(config, path)
def plot_csv(config: ClientConfig, eval_csv_path:str, train_csv_path: str, random_seed : int = 0) -> None:
"""
Plot results from csv files
:param config: client config
:param eval_csv_path: path to the csv file with evaluation results
:param train_csv_path: path to the csv file with training results
:param random_seed: the random seed of the experiment
:return: None
"""
plotting_util.read_and_plot_results(train_csv_path, eval_csv_path, config.pg_agent_config.train_log_frequency,
config.pg_agent_config.eval_frequency, config.pg_agent_config.eval_log_frequency,
config.pg_agent_config.eval_episodes, config.output_dir, sim=False,
random_seed = random_seed)
def plot_average_results(experiment_title: str, config: ClientConfig, eval_csv_paths: list,
                         train_csv_paths: list) -> None:
"""
Plots average results after training with different seeds
:param experiment_title: title of the experiment
:param config: experiment config
:param eval_csv_paths: paths to csv files with evaluation data
:param train_csv_paths: path to csv files with training data
:return: None
"""
plotting_util.read_and_plot_average_results(experiment_title, train_csv_paths, eval_csv_paths,
config.pg_agent_config.train_log_frequency,
config.pg_agent_config.eval_frequency,
config.output_dir,
plot_attacker_loss = True, plot_defender_loss = False)
def run_experiment(configpath: str, random_seed: int, noconfig: bool):
    """
    Runs one experiment and saves results and plots
    :param configpath: path to configfile
    :param random_seed: the random seed of the experiment
    :param noconfig: whether to override config
    :return: (train_csv_path, eval_csv_path)
    """
    if configpath is not None and not noconfig:
        if not os.path.exists(configpath):
            write_default_config()
        config = util.read_config(configpath)
else:
config = default_config()
time_str = str(time.time())
util.create_artefact_dirs(config.output_dir, random_seed)
logger = util.setup_logger("reinforce_vs_reinforce-v16", config.output_dir + "/results/logs/" +
str(random_seed) + "/",
time_str=time_str)
config.pg_agent_config.save_dir = default_output_dir() + "/results/data/" + str(random_seed) + "/"
config.pg_agent_config.video_dir= default_output_dir() + "/results/videos/" + str(random_seed) + "/"
config.pg_agent_config.gif_dir= default_output_dir() + "/results/gifs/" + str(random_seed) + "/"
config.pg_agent_config.tensorboard_dir = default_output_dir() + "/results/tensorboard/" \
+ str(random_seed) + "/"
config.logger = logger
config.pg_agent_config.logger = logger
config.pg_agent_config.random_seed = random_seed
config.random_seed = random_seed
config.pg_agent_config.to_csv(config.output_dir + "/results/hyperparameters/" + str(random_seed) + "/" + time_str + ".csv")
train_csv_path = ""
eval_csv_path = ""
if config.hp_tuning:
hp_tuning.hype_grid(config)
else:
train_result, eval_result = Runner.run(config)
if len(train_result.avg_episode_steps) > 0 and len(eval_result.avg_episode_steps) > 0:
train_csv_path = config.output_dir + "/results/data/" + str(random_seed) + "/" + time_str + "_train" + ".csv"
train_result.to_csv(train_csv_path)
eval_csv_path = config.output_dir + "/results/data/" + str(random_seed) + "/" + time_str + "_eval" + ".csv"
eval_result.to_csv(eval_csv_path)
plot_csv(config, eval_csv_path, train_csv_path, random_seed)
return train_csv_path, eval_csv_path
# Program entrypoint
if __name__ == '__main__':
args = util.parse_args(default_config_path())
experiment_title = "REINFORCE vs REINFORCE"
if args.configpath is not None and not args.noconfig:
if not os.path.exists(args.configpath):
write_default_config()
config = util.read_config(args.configpath)
else:
config = default_config()
if args.plotonly:
base_dir = default_output_dir() + "/results/data/"
train_csv_paths = []
eval_csv_paths = []
for seed in config.random_seeds:
train_csv_path = glob.glob(base_dir + str(seed) + "/*_train.csv")[0]
eval_csv_path = glob.glob(base_dir + str(seed) + "/*_eval.csv")[0]
train_csv_paths.append(train_csv_path)
eval_csv_paths.append(eval_csv_path)
plot_csv(config, eval_csv_path, train_csv_path, random_seed=seed)
try:
plot_average_results(experiment_title, config, eval_csv_paths, train_csv_paths)
except Exception as e:
print("Error when trying to plot summary: " + str(e))
else:
if not config.run_many:
run_experiment(args.configpath, 0, args.noconfig)
else:
train_csv_paths = []
eval_csv_paths = []
for seed in config.random_seeds:
train_csv_path, eval_csv_path = run_experiment(args.configpath, seed, args.noconfig)
train_csv_paths.append(train_csv_path)
eval_csv_paths.append(eval_csv_path)
try:
plot_average_results(experiment_title, config, eval_csv_paths, train_csv_paths)
except Exception as e:
print("Error when trying to plot summary: " + str(e))
|
print("Installing dependency modules.")
system("pip3 install -r requirements.txt")
|
from flask import render_template, request
from back.mongo.data.collect.clients import valid_client
def register_500_error_route(app):
@app.errorhandler(500)
def internal_server_error(error):
data = {"plot": {"type": "500"}, "code": 500, "message": "Internal Server Error"}
if "id" in request.cookies: data["client"] = valid_client(request.cookies.get("id"))
return render_template("tree/errors/_500_/page.html", data=data)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Userservice manages user account creation, user login, and related tasks
"""
import atexit
from datetime import datetime, timedelta
import logging
import os
from pathlib import Path
import sys
import re
import bcrypt
import jwt
from flask import Flask, jsonify, request
import bleach
from sqlalchemy.exc import OperationalError, SQLAlchemyError
from db import UserDb
from opentelemetry import trace
import tracing
from tracing import OpenTelemetryConfiguration
from flask_management_endpoints import ManagementEndpoints
APP_NAME = 'userservice'
def create_app():
"""Flask application factory to create instances
of the Userservice Flask App
"""
app = Flask('userservice')
@app.route('/users', methods=['POST'])
def create_user():
"""Create a user record.
Fails if that username already exists.
Generates a unique accountid.
request fields:
- username
- password
- password-repeat
- firstname
- lastname
- birthday
- timezone
- address
- state
- zip
- ssn
"""
span = trace.get_current_span()
try:
app.logger.debug('Sanitizing input')
req = {k: bleach.clean(v) for k, v in request.form.items()}
if 'username' in req:
span.set_attribute('username', req['username'])
__validate_new_user(req)
# Check if user already exists
if users_db.get_user(req['username']) is not None:
raise NameError(f"user {req['username']} already exists")
# Create password hash with salt
app.logger.debug("Creating password hash.")
password = req['password']
salt = bcrypt.gensalt()
passhash = bcrypt.hashpw(password.encode('utf-8'), salt)
accountid = users_db.generate_accountid()
# Create user data to be added to the database
user_data = {
'accountid': accountid,
'username': req['username'],
'passhash': passhash,
'firstname': req['firstname'],
'lastname': req['lastname'],
'birthday': req['birthday'],
'timezone': req['timezone'],
'address': req['address'],
'state': req['state'],
'zip': req['zip'],
'ssn': req['ssn'],
}
# Add user_data to database
app.logger.debug("Adding user to the database")
users_db.add_user(user_data)
app.logger.info("Successfully created user")
except UserWarning as warn:
app.logger.error("Error creating new user: %s", str(warn))
span.add_event(str(warn))
return str(warn), 400
except NameError as err:
app.logger.error("Error creating new user: %s", str(err))
span.record_exception(err)
return str(err), 409
except SQLAlchemyError as err:
app.logger.error("Error creating new user: %s", str(err))
span.record_exception(err)
return 'failed to create user', 500
return jsonify({}), 201
def __validate_new_user(req):
app.logger.debug('validating create user request: %s', str(req))
# Check if required fields are filled
fields = (
'username',
'password',
'password-repeat',
'firstname',
'lastname',
'birthday',
'timezone',
'address',
'state',
'zip',
'ssn',
)
if any(f not in req for f in fields):
raise UserWarning('missing required field(s)')
        if any(not req[f].strip() for f in fields):
raise UserWarning('missing value for input field(s)')
# Verify username contains only 2-15 alphanumeric or underscore characters
if not re.match(r"\A[a-zA-Z0-9_]{2,15}\Z", req['username']):
raise UserWarning('username must contain 2-15 alphanumeric characters or underscores')
# Check if passwords match
if not req['password'] == req['password-repeat']:
raise UserWarning('passwords do not match')
@app.route('/login', methods=['GET'])
def login():
"""Login a user and return a JWT token
Fails if username doesn't exist or password doesn't match hash
token expiry time determined by environment variable
request fields:
- username
- password
"""
span = trace.get_current_span()
app.logger.debug('Sanitizing login input')
username = bleach.clean(request.args.get('username'))
span.set_attribute('username', username)
password = bleach.clean(request.args.get('password'))
# Get user data
try:
app.logger.debug('Getting the user data')
user = users_db.get_user(username)
if user is None:
raise LookupError(f"user {username} does not exist")
# Validate the password
app.logger.debug('Validating the password')
if not bcrypt.checkpw(password.encode('utf-8'), user['passhash']):
raise PermissionError('invalid login')
full_name = f"{user['firstname']} {user['lastname']}"
exp_time = datetime.utcnow() + timedelta(seconds=app.config['EXPIRY_SECONDS'])
payload = {
'user': username,
'acct': user['accountid'],
'name': full_name,
'iat': datetime.utcnow(),
'exp': exp_time,
}
app.logger.debug('Creating jwt token.')
token = jwt.encode(payload, app.config['PRIVATE_KEY'], algorithm='RS256')
app.logger.info('Login Successful')
return jsonify({'token': token.decode("utf-8")}), 200
except LookupError as err:
app.logger.error('Error logging in: %s', str(err))
span.record_exception(err)
return str(err), 404
except PermissionError as err:
app.logger.error('Error logging in: %s', str(err))
span.record_exception(err)
return str(err), 401
except SQLAlchemyError as err:
app.logger.error('Error logging in: %s', str(err))
span.record_exception(err)
return 'failed to retrieve user information', 500
@atexit.register
def _shutdown():
"""Executed when web app is terminated."""
app.logger.info("Stopping userservice.")
# Set up logger
app.logger.handlers = logging.getLogger('gunicorn.error').handlers
app.logger.setLevel(logging.getLogger('gunicorn.error').level)
app.logger.info('Starting userservice.')
app.config['VERSION'] = os.environ.get('VERSION')
app.config['EXPIRY_SECONDS'] = int(os.environ.get('TOKEN_EXPIRY_SECONDS'))
private_key_path = os.environ.get('PRIV_KEY_PATH')
if private_key_path:
app.config['PRIVATE_KEY'] = Path(private_key_path).read_text(encoding='ascii')
    public_key_path = os.environ.get('PUB_KEY_PATH')
    if public_key_path:
app.config['PUBLIC_KEY'] = Path(public_key_path).read_text(encoding='ascii')
# Configure database connection
try:
users_db = UserDb(os.environ.get("ACCOUNTS_DB_URI"), app.logger)
except OperationalError:
app.logger.critical("users_db database connection failed")
sys.exit(1)
# Set up tracing and export spans to Open Telemetry
if tracing.config:
tracing.config.instrument_app(app)
# Setup health checks and management endpoints
ManagementEndpoints(app)
def db_check():
try:
engine = users_db.engine
result = engine.execute('SELECT 1')
return result.first()[0] == 1
except SQLAlchemyError as err:
app.logger.error(f'DB health check failed: {err}')
return False
app.config.update(
Z_ENDPOINTS={
'check_functions': {
'readiness': {
'db': db_check
}
}
}
)
return app
if __name__ == "__main__":
if not tracing.config:
tracing.config = OpenTelemetryConfiguration(APP_NAME)
tracing.config.setup_exporter()
# Create an instance of flask server when called directly
USERSERVICE = create_app()
USERSERVICE.run(port=os.getenv('FLASK_RUN_PORT', 5001))
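# Verification sketch (not part of the service): a token issued by /login can
# be checked with the matching RS256 public key. PyJWT 1.x semantics are
# assumed, consistent with the token.decode("utf-8") call above.
#   claims = jwt.decode(token, key=app.config['PUBLIC_KEY'], algorithms='RS256')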
|
# coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_generation_utils import GenerationTesterMixin
from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLConfig, CTRLLMHeadModel, CTRLModel
class CTRLModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 14
self.seq_length = 7
self.is_training = True
self.use_token_type_ids = True
self.use_input_mask = True
self.use_labels = True
self.use_mc_token_ids = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = CTRLConfig(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
# intermediate_size=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
n_ctx=self.max_position_embeddings,
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range,
)
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = CTRLModel(config=config)
model.to(torch_device)
model.eval()
model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values), config.n_layer)
def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = CTRLLMHeadModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (CTRLModel, CTRLLMHeadModel) if is_torch_available() else ()
all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
test_pruning = True
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = CTRLModelTester(self)
self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_ctrl_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
def test_ctrl_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CTRLModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_ctrl(self):
model = CTRLLMHeadModel.from_pretrained("ctrl")
model.to(torch_device)
input_ids = torch.tensor(
[[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
) # Legal the president is
expected_output_ids = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
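# To run these tests locally (pytest layout assumed):
#   python -m pytest tests/test_modeling_ctrl.py -k ctrl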
|
import os, sys
from read import VINTFile
abspath = os.path.abspath(sys.argv[0])
dname = os.path.dirname(abspath)
os.chdir(dname)
file=VINTFile("test.vmf")
file.parse()
print()
|
import numpy as np # type: ignore
import pandas as pd # type: ignore
import struct
from typing import cast, Iterable, Optional, Union
from typeguard import typechecked
from arkouda.client import generic_msg
from arkouda.dtypes import *
from arkouda.dtypes import structDtypeCodes, NUMBER_FORMAT_STRINGS
from arkouda.dtypes import dtype as akdtype
from arkouda.pdarrayclass import pdarray, create_pdarray
from arkouda.strings import Strings
from arkouda.strings import SArrays
from multipledispatch import dispatch
__all__ = ["array", "zeros", "ones", "zeros_like", "ones_like", "arange",
"linspace", "randint", "uniform", "standard_normal",
"random_strings_uniform", "random_strings_lognormal", "from_series",
"suffix_array"]
numericDTypes = frozenset(["bool", "int64", "float64"])
RANDINT_TYPES = {'int64','float64'}
series_dtypes = {'string' : np.str_,
"<class 'str'>" : np.str_,
'int64' : np.int64,
"<class 'numpy.int64'>" : np.int64,
'float64' : np.float64,
"<class 'numpy.float64'>" : np.float64,
'bool' : np.bool,
"<class 'bool'>" : np.bool,
'datetime64[ns]' : np.int64
}
@typechecked
def from_series(series : pd.Series, dtype : Optional[type]=None) -> Union[pdarray,Strings]:
"""
Converts a Pandas Series to an Arkouda pdarray or Strings object. If
dtype is None, the dtype is inferred from the Pandas Series. Otherwise,
the dtype parameter is set if the dtype of the Pandas Series is to be overridden or is
unknown (for example, in situations where the Series dtype is object).
Parameters
----------
series : Pandas Series
The Pandas Series with a dtype of bool, float64, int64, or string
dtype : Optional[type]
        The valid dtypes are np.bool, np.float64, np.int64, and np.str
Returns
-------
Union[pdarray,Strings]
Raises
------
TypeError
Raised if series is not a Pandas Series object
ValueError
Raised if the Series dtype is not bool, float64, int64, string, or datetime
Examples
--------
>>> ak.from_series(pd.Series(np.random.randint(0,10,5)))
array([9, 0, 4, 7, 9])
>>> ak.from_series(pd.Series(['1', '2', '3', '4', '5']),dtype=np.int64)
array([1, 2, 3, 4, 5])
>>> ak.from_series(pd.Series(np.random.uniform(low=0.0,high=1.0,size=3)))
array([0.57600036956445599, 0.41619265571741659, 0.6615356693784662])
>>> ak.from_series(pd.Series(['0.57600036956445599', '0.41619265571741659',
'0.6615356693784662']), dtype=np.float64)
array([0.57600036956445599, 0.41619265571741659, 0.6615356693784662])
>>> ak.from_series(pd.Series(np.random.choice([True, False],size=5)))
array([True, False, True, True, True])
>>> ak.from_series(pd.Series(['True', 'False', 'False', 'True', 'True']), dtype=np.bool)
array([True, True, True, True, True])
>>> ak.from_series(pd.Series(['a', 'b', 'c', 'd', 'e'], dtype="string"))
array(['a', 'b', 'c', 'd', 'e'])
>>> ak.from_series(pd.Series(['a', 'b', 'c', 'd', 'e']),dtype=np.str)
array(['a', 'b', 'c', 'd', 'e'])
>>> ak.from_series(pd.Series(pd.to_datetime(['1/1/2018', np.datetime64('2018-01-01')])))
array([1514764800000000000, 1514764800000000000])
Notes
-----
    The supported datatypes are bool, float64, int64, string, and datetime64[ns], which are
    either inferred from the Pandas Series or set via the dtype parameter.
    Series of datetime are converted to Arkouda arrays of dtype int64 (date in nanoseconds)
"""
if not dtype:
dt = series.dtype.name
else:
dt = str(dtype)
try:
n_array = series.to_numpy(dtype=series_dtypes[dt])
except KeyError:
raise ValueError(('dtype {} is unsupported. Supported dtypes are bool, ' +
'float64, int64, string, and datetime64[ns]').format(dt))
return array(n_array)
def array(a : Union[pdarray,np.ndarray, Iterable]) -> Union[pdarray, Strings]:
"""
Convert an iterable to a pdarray or Strings object, sending the corresponding
data to the arkouda server.
Parameters
----------
a : Union[pdarray, np.ndarray]
Rank-1 array of a supported dtype
Returns
-------
pdarray or Strings
A pdarray instance stored on arkouda server or Strings instance, which
is composed of two pdarrays stored on arkouda server
Raises
------
TypeError
Raised if a is not a pdarray, np.ndarray, or Python Iterable such as a
list, array, tuple, or deque
RuntimeError
If a is not one-dimensional, nbytes > maxTransferBytes, a.dtype is
not supported (not in DTypes), or if the product of a size and
a.itemsize > maxTransferBytes
See Also
--------
pdarray.to_ndarray
Notes
-----
The number of bytes in the input array cannot exceed `arkouda.maxTransferBytes`,
otherwise a RuntimeError will be raised. This is to protect the user
from overwhelming the connection between the Python client and the arkouda
server, under the assumption that it is a low-bandwidth connection. The user
may override this limit by setting ak.maxTransferBytes to a larger value,
but should proceed with caution.
    If the pdarray or ndarray is of type U, this method is called twice recursively
to create the Strings object and the two corresponding pdarrays for string
bytes and offsets, respectively.
Examples
--------
>>> a = [3, 5, 7]
>>> b = ak.array(a)
>>> b
array([3, 5, 7])
>>> type(b)
arkouda.pdarray
"""
# If a is already a pdarray, do nothing
if isinstance(a, pdarray):
return a
from arkouda.client import maxTransferBytes
# If a is not already a numpy.ndarray, convert it
if not isinstance(a, np.ndarray):
try:
a = np.array(a)
except:
raise TypeError(('a must be a pdarray, np.ndarray, or convertible to' +
' a numpy array'))
# Only rank 1 arrays currently supported
if a.ndim != 1:
raise RuntimeError("Only rank-1 pdarrays or ndarrays supported")
# Check if array of strings
    if a.dtype.kind == 'U':
encoded = np.array([elem.encode() for elem in a])
# Length of each string, plus null byte terminator
lengths = np.array([len(elem) for elem in encoded]) + 1
# Compute zero-up segment offsets
offsets = np.cumsum(lengths) - lengths
# Allocate and fill bytes array with string segments
nbytes = offsets[-1] + lengths[-1]
if nbytes > maxTransferBytes:
raise RuntimeError(("Creating pdarray would require transferring {} bytes," +
" which exceeds allowed transfer size. Increase " +
"ak.maxTransferBytes to force.").format(nbytes))
values = np.zeros(nbytes, dtype=np.uint8)
for s, o in zip(encoded, offsets):
for i, b in enumerate(s):
values[o+i] = b
# Recurse to create pdarrays for offsets and values, then return Strings object
return Strings(array(offsets), array(values))
# If not strings, then check that dtype is supported in arkouda
if a.dtype.name not in DTypes:
raise RuntimeError("Unhandled dtype {}".format(a.dtype))
# Do not allow arrays that are too large
size = a.size
if (size * a.itemsize) > maxTransferBytes:
raise RuntimeError(("Array exceeds allowed transfer size. Increase " +
"ak.maxTransferBytes to allow"))
# Pack binary array data into a bytes object with a command header
# including the dtype and size
fmt = ">{:n}{}".format(size, structDtypeCodes[a.dtype.name])
req_msg = "array {} {:n} ".\
format(a.dtype.name, size).encode() + struct.pack(fmt, *a)
repMsg = generic_msg(req_msg, send_bytes=True)
return create_pdarray(cast(str,repMsg))
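# Hedged illustration (not called by the library): the same offsets/values
# packing performed above for string arrays, shown on a tiny NumPy example.
# The sample strings are made up.
def _string_packing_demo():
    a = np.array(['hi', 'there'])
    encoded = np.array([elem.encode() for elem in a])
    lengths = np.array([len(elem) for elem in encoded]) + 1  # +1 per null terminator
    offsets = np.cumsum(lengths) - lengths                   # zero-up segment starts
    values = np.zeros(offsets[-1] + lengths[-1], dtype=np.uint8)
    for s, o in zip(encoded, offsets):
        values[o:o + len(s)] = list(s)
    return offsets, values  # offsets == [0, 3]; values holds b'hi\x00there\x00'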
def zeros(size : int, dtype : type=np.float64) -> pdarray:
"""
Create a pdarray filled with zeros.
Parameters
----------
size : int
Size of the array (only rank-1 arrays supported)
dtype : {float64, int64, bool}
Type of resulting array, default float64
Returns
-------
pdarray
Zeros of the requested size and dtype
Raises
------
TypeError
Raised if the supplied dtype is not supported or if the size
parameter is neither an int nor a str that is parseable to an int.
See Also
--------
ones, zeros_like
Examples
--------
>>> ak.zeros(5, dtype=ak.int64)
array([0, 0, 0, 0, 0])
>>> ak.zeros(5, dtype=ak.float64)
array([0, 0, 0, 0, 0])
>>> ak.zeros(5, dtype=ak.bool)
array([False, False, False, False, False])
"""
if not np.isscalar(size):
raise TypeError("size must be a scalar, not {}".\
format(size.__class__.__name__))
dtype = akdtype(dtype) # normalize dtype
# check dtype for error
if cast(np.dtype,dtype).name not in numericDTypes:
raise TypeError("unsupported dtype {}".format(dtype))
kind, itemsize = translate_np_dtype(dtype)
repMsg = generic_msg("create {} {}".format(cast(np.dtype,dtype).name, size))
return create_pdarray(cast(str, repMsg))
def ones(size : int, dtype : type=float64) -> pdarray:
"""
Create a pdarray filled with ones.
Parameters
----------
size : int
Size of the array (only rank-1 arrays supported)
dtype : {float64, int64, bool}
Resulting array type, default float64
Returns
-------
pdarray
Ones of the requested size and dtype
Raises
------
TypeError
Raised if the supplied dtype is not supported or if the size
parameter is neither an int nor a str that is parseable to an int.
See Also
--------
zeros, ones_like
Examples
--------
>>> ak.ones(5, dtype=ak.int64)
array([1, 1, 1, 1, 1])
>>> ak.ones(5, dtype=ak.float64)
array([1, 1, 1, 1, 1])
>>> ak.ones(5, dtype=ak.bool)
array([True, True, True, True, True])
"""
if not np.isscalar(size):
raise TypeError("size must be a scalar, not {}".\
format(size.__class__.__name__))
dtype = akdtype(dtype) # normalize dtype
# check dtype for error
if cast(np.dtype,dtype).name not in numericDTypes:
raise TypeError("unsupported dtype {}".format(dtype))
kind, itemsize = translate_np_dtype(dtype)
repMsg = generic_msg("create {} {}".format(cast(np.dtype,dtype).name, size))
a = create_pdarray(cast(str,repMsg))
a.fill(1)
return a
@typechecked
def zeros_like(pda : pdarray) -> pdarray:
"""
Create a zero-filled pdarray of the same size and dtype as an existing
pdarray.
Parameters
----------
pda : pdarray
Array to use for size and dtype
Returns
-------
pdarray
Equivalent to ak.zeros(pda.size, pda.dtype)
Raises
------
TypeError
Raised if the pda parameter is not a pdarray.
See Also
--------
zeros, ones_like
Examples
--------
>>> zeros = ak.zeros(5, dtype=ak.int64)
>>> ak.zeros_like(zeros)
array([0, 0, 0, 0, 0])
>>> zeros = ak.zeros(5, dtype=ak.float64)
>>> ak.zeros_like(zeros)
array([0, 0, 0, 0, 0])
>>> zeros = ak.zeros(5, dtype=ak.bool)
>>> ak.zeros_like(zeros)
array([False, False, False, False, False])
"""
return zeros(pda.size, pda.dtype)
@typechecked
def ones_like(pda : pdarray) -> pdarray:
"""
Create a one-filled pdarray of the same size and dtype as an existing
pdarray.
Parameters
----------
pda : pdarray
Array to use for size and dtype
Returns
-------
pdarray
Equivalent to ak.ones(pda.size, pda.dtype)
Raises
------
TypeError
Raised if the pda parameter is not a pdarray.
See Also
--------
ones, zeros_like
Notes
-----
Logic for generating the pdarray is delegated to the ak.ones method.
    Accordingly, the supported dtypes match those defined by the ak.ones method.
Examples
--------
>>> ones = ak.ones(5, dtype=ak.int64)
>>> ak.ones_like(ones)
array([1, 1, 1, 1, 1])
>>> ones = ak.ones(5, dtype=ak.float64)
>>> ak.ones_like(ones)
array([1, 1, 1, 1, 1])
>>> ones = ak.ones(5, dtype=ak.bool)
>>> ak.ones_like(ones)
array([True, True, True, True, True])
"""
return ones(pda.size, pda.dtype)
def arange(*args) -> pdarray:
"""
arange([start,] stop[, stride])
Create a pdarray of consecutive integers within the interval [start, stop).
If only one arg is given then arg is the stop parameter. If two args are given
then the first arg is start and second is stop. If three args are given
then the first arg is start, second is stop, third is stride.
Parameters
----------
start : int, optional
Starting value (inclusive), the default starting value is 0
stop : int
Stopping value (exclusive)
stride : int, optional
The difference between consecutive elements, the default stride is 1,
if stride is specified then start must also be specified.
Returns
-------
pdarray, int64
Integers from start (inclusive) to stop (exclusive) by stride
Raises
------
TypeError
Raised if start, stop, or stride is not an int object
ZeroDivisionError
Raised if stride == 0
See Also
--------
linspace, zeros, ones, randint
Notes
-----
Negative strides result in decreasing values. Currently, only int64 pdarrays
can be created with this function. For float64 arrays, use linspace.
Examples
--------
>>> ak.arange(0, 5, 1)
array([0, 1, 2, 3, 4])
>>> ak.arange(5, 0, -1)
array([5, 4, 3, 2, 1])
>>> ak.arange(0, 10, 2)
array([0, 2, 4, 6, 8])
"""
#if one arg is given then arg is stop
if len(args) == 1:
start = 0
stop = args[0]
stride = 1
#if two args are given then first arg is start and second is stop
if len(args) == 2:
start = args[0]
stop = args[1]
stride = 1
#if three args are given then first arg is start,
#second is stop, third is stride
if len(args) == 3:
start = args[0]
stop = args[1]
stride = args[2]
if not all((np.isscalar(start), np.isscalar(stop), np.isscalar(stride))):
raise TypeError("all arguments must be scalars")
if stride == 0:
raise ZeroDivisionError("division by zero")
if isinstance(start, int) and isinstance(stop, int) and isinstance(stride, int):
# TO DO: fix bug in server that goes 2 steps too far for negative strides
if stride < 0:
stop = stop + 2
repMsg = generic_msg("arange {} {} {}".format(start, stop, stride))
return create_pdarray(cast(str,repMsg))
else:
raise TypeError("start,stop,stride must be type int {} {} {}".\
format(start,stop,stride))
def linspace(start : Union[int,float], stop : Union[int,float], length : int) -> pdarray:
"""
Create a pdarray of linearly-spaced floats in a closed interval.
Parameters
----------
start : scalar
Start of interval (inclusive)
stop : scalar
End of interval (inclusive)
length : int
Number of points
Returns
-------
pdarray, float64
Array of evenly spaced float values along the interval
Raises
------
TypeError
Raised if start or stop is not a scalar or if length is not int
See Also
--------
arange
Notes
-----
    If start is greater than stop, the pdarray values are generated in
descending order.
Examples
--------
>>> ak.linspace(0, 1, 5)
array([0, 0.25, 0.5, 0.75, 1])
>>> ak.linspace(start=1, stop=0, length=5)
array([1, 0.75, 0.5, 0.25, 0])
>>> ak.linspace(start=-5, stop=0, length=5)
array([-5, -3.75, -2.5, -1.25, 0])
"""
if not all((np.isscalar(start), np.isscalar(stop), np.isscalar(length))):
raise TypeError("all arguments must be scalars")
starttype = resolve_scalar_dtype(start)
try:
startstr = NUMBER_FORMAT_STRINGS[starttype].format(start)
except KeyError as ke:
        raise TypeError(('The start parameter must be a scalar of a supported ' +
                         'numeric type, but is a {}'.format(ke)))
stoptype = resolve_scalar_dtype(stop)
try:
stopstr = NUMBER_FORMAT_STRINGS[stoptype].format(stop)
except KeyError as ke:
        raise TypeError(('The stop parameter must be a scalar of a supported ' +
                         'numeric type, but is a {}'.format(ke)))
lentype = resolve_scalar_dtype(length)
if lentype != 'int64':
raise TypeError("The length parameter must be an int64")
try:
lenstr = NUMBER_FORMAT_STRINGS[lentype].format(length)
except KeyError as ke:
raise TypeError(('The length parameter must be an int or a scalar that' +
' can be parsed to an int, but is a {}'.format(ke)))
repMsg = generic_msg("linspace {} {} {}".format(startstr, stopstr, lenstr))
return create_pdarray(cast(str,repMsg))
def randint(low : Union[int,float], high : Union[int,float], size : int, dtype=int64, seed : Union[None, int]=None) -> pdarray:
"""
Generate a pdarray of randomized int, float, or bool values in a specified range.
Parameters
----------
low : Union[int,float]
The low value (inclusive) of the range
high : Union[int,float]
The high value (exclusive for int, inclusive for float) of the range
size : int
The length of the returned array
dtype : {int64, float64, bool}
The dtype of the array
Returns
-------
pdarray
Values drawn uniformly from the specified range having the desired dtype
Raises
------
TypeError
        Raised if dtype.name not in DTypes, size is not an int, or if
        low or high is not an int or float
ValueError
Raised if size < 0 or if high < low
Notes
-----
Calling randint with dtype=float64 will result in uniform non-integral
floating point values.
Examples
--------
>>> ak.randint(0, 10, 5)
array([5, 7, 4, 8, 3])
>>> ak.randint(0, 1, 3, dtype=ak.float64)
array([0.92176432277231968, 0.083130710959903542, 0.68894208386667544])
>>> ak.randint(0, 1, 5, dtype=ak.bool)
array([True, False, True, True, True])
"""
if not all((np.isscalar(low), np.isscalar(high), np.isscalar(size))):
raise TypeError("all arguments must be scalars")
if resolve_scalar_dtype(size) != 'int64':
raise TypeError("The size parameter must be an integer")
if resolve_scalar_dtype(low) not in RANDINT_TYPES:
raise TypeError("The low parameter must be an integer or float")
if resolve_scalar_dtype(high) not in RANDINT_TYPES:
raise TypeError("The high parameter must be an integer or float")
    if size < 0 or high < low:
        raise ValueError("size must be >= 0 and high >= low")
dtype = akdtype(dtype) # normalize dtype
# check dtype for error
if dtype.name not in DTypes:
raise TypeError("unsupported dtype {}".format(dtype))
lowstr = NUMBER_FORMAT_STRINGS[dtype.name].format(low)
highstr = NUMBER_FORMAT_STRINGS[dtype.name].format(high)
sizestr = NUMBER_FORMAT_STRINGS['int64'].format(size)
repMsg = generic_msg("randint {} {} {} {} {}".\
format(sizestr, dtype.name, lowstr, highstr, seed))
return create_pdarray(repMsg)
@typechecked
def uniform(size : int, low : float=0.0, high : float=1.0, seed: Union[None, int]=None) -> pdarray:
"""
Generate a pdarray with uniformly distributed random values
in a specified range.
Parameters
----------
low : float
The low value (inclusive) of the range
high : float
The high value (inclusive) of the range
size : int
The length of the returned array
Returns
-------
pdarray, float64
Values drawn uniformly from the specified range
Raises
------
TypeError
Raised if dtype.name not in DTypes, size is not an int, or if
either low or high is not an int or float
ValueError
Raised if size < 0 or if high < low
Examples
--------
>>> ak.uniform(3)
array([0.92176432277231968, 0.083130710959903542, 0.68894208386667544])
"""
return randint(low=low, high=high, size=size, dtype='float64', seed=seed)
@typechecked
def standard_normal(size : int, seed : Union[None, int]=None) -> pdarray:
"""
Draw real numbers from the standard normal distribution.
Parameters
----------
size : int
The number of samples to draw (size of the returned array)
Returns
-------
pdarray, float64
The array of random numbers
Raises
------
TypeError
Raised if size is not an int
ValueError
Raised if size < 0
See Also
--------
randint
Notes
-----
For random samples from :math:`N(\mu, \sigma^2)`, use:
``(sigma * standard_normal(size)) + mu``
"""
    if size < 0:
        raise ValueError("The size parameter must be >= 0")
msg = "randomNormal {} {}".format(NUMBER_FORMAT_STRINGS['int64'].format(size), seed)
repMsg = generic_msg(msg)
return create_pdarray(cast(str,repMsg))
@typechecked
def random_strings_uniform(minlen : int, maxlen : int, size : int,
characters : str='uppercase', seed : Union[None, int]=None) -> Strings:
"""
Generate random strings with lengths uniformly distributed between
minlen and maxlen, and with characters drawn from a specified set.
Parameters
----------
minlen : int
The minimum allowed length of string
maxlen : int
The maximum allowed length of string
size : int
The number of strings to generate
characters : (uppercase, lowercase, numeric, printable, binary)
The set of characters to draw from
Returns
-------
Strings
The array of random strings
Raises
------
ValueError
Raised if minlen < 0, maxlen < minlen, or size < 0
See Also
--------
random_strings_lognormal, randint
"""
if minlen < 0 or maxlen < minlen or size < 0:
raise ValueError(("Incompatible arguments: minlen < 0, maxlen < minlen, " +
"or size < 0"))
msg = "randomStrings {} {} {} {} {} {}".\
format(NUMBER_FORMAT_STRINGS['int64'].format(size),
"uniform", characters,
NUMBER_FORMAT_STRINGS['int64'].format(minlen),
NUMBER_FORMAT_STRINGS['int64'].format(maxlen),
seed)
repMsg = generic_msg(msg)
return Strings(*(cast(str,repMsg).split('+')))
@typechecked
def random_strings_lognormal(logmean : Union[float, int], logstd : Union[float, int],
size : int, characters : str='uppercase',
seed : Union[None, int]=None) -> Strings:
"""
Generate random strings with log-normally distributed lengths and
with characters drawn from a specified set.
Parameters
----------
logmean : Union[float, int]
The log-mean of the length distribution
logstd : float
The log-standard-deviation of the length distribution
size : int
The number of strings to generate
characters : (uppercase, lowercase, numeric, printable, binary)
The set of characters to draw from
Returns
-------
Strings
The Strings object encapsulating a pdarray of random strings
Raises
------
TypeError
Raised if logmean is neither a float nor a int, logstd is not a float,
size is not an int, or if characters is not a str
ValueError
Raised if logstd <= 0 or size < 0
See Also
--------
random_strings_lognormal, randint
Notes
-----
    The lengths of the generated strings are distributed :math:`Lognormal(\mu, \sigma^2)`,
with :math:`\mu = logmean` and :math:`\sigma = logstd`. Thus, the strings will
have an average length of :math:`exp(\mu + 0.5*\sigma^2)`, a minimum length of
zero, and a heavy tail towards longer strings.
"""
if logstd <= 0 or size < 0:
raise ValueError("Incompatible arguments: logstd <= 0 or size < 0")
msg = "randomStrings {} {} {} {} {} {}".\
format(NUMBER_FORMAT_STRINGS['int64'].format(size),
"lognormal", characters,
NUMBER_FORMAT_STRINGS['float64'].format(logmean),
NUMBER_FORMAT_STRINGS['float64'].format(logstd),
seed)
repMsg = generic_msg(msg)
return Strings(*(cast(str,repMsg).split('+')))
#@typechecked
@dispatch(Strings)
def suffix_array(strings : Strings) -> SArrays:
    """
    Return the suffix arrays of the given strings. The size/shape of each suffix
    array is the same as that of the corresponding string.
    A simple example of a suffix array follows. Given the string "banana$",
    all of its suffixes are:
s[0]="banana$"
s[1]="anana$"
s[2]="nana$"
s[3]="ana$"
s[4]="na$"
s[5]="a$"
s[6]="$"
The suffix array of string "banana$" is the array of indices of sorted suffixes.
s[6]="$"
s[5]="a$"
s[3]="ana$"
s[1]="anana$"
s[0]="banana$"
s[4]="na$"
s[2]="nana$"
so sa=[6,5,3,1,0,4,2]
Returns
-------
pdarray
The suffix arrays of the given strings
See Also
--------
Notes
-----
Raises
------
RuntimeError
Raised if there is a server-side error in executing group request or
creating the pdarray encapsulating the return message
"""
msg = "segmentedSuffixAry {} {} {}".format( strings.objtype,
strings.offsets.name,
strings.bytes.name)
repMsg = generic_msg(msg)
pdarrays= SArrays(*(repMsg.split('+')))
return pdarrays
@dispatch(str)
def suffix_array(filename: str) -> SArrays:
"""
    This function is mainly used for testing correctness and performance.
    Return the suffix array of the given file's content as a string.
    A simple example of a suffix array follows. Given the string "banana$",
    all of its suffixes are:
s[0]="banana$"
s[1]="anana$"
s[2]="nana$"
s[3]="ana$"
s[4]="na$"
s[5]="a$"
s[6]="$"
The suffix array of string "banana$" is the array of indices of sorted suffixes.
s[6]="$"
s[5]="a$"
s[3]="ana$"
s[1]="anana$"
s[0]="banana$"
s[4]="na$"
s[2]="nana$"
so sa=[6,5,3,1,0,4,2]
Returns
-------
pdarray
The suffix arrays of the given strings
See Also
--------
Notes
-----
Raises
------
RuntimeError
Raised if there is a server-side error in executing group request or
creating the pdarray encapsulating the return message
"""
msg = "segmentedSAFile {}".format( filename )
repMsg = generic_msg(msg)
pdarrays= SArrays(*(repMsg.split('+')))
return pdarrays
|
'''
Classes to solve canonical consumption-savings models with idiosyncratic shocks
to income. All models here assume CRRA utility with geometric discounting, no
bequest motive, and income shocks that are fully transitory or fully permanent.
It currently solves three types of models:
1) A very basic "perfect foresight" consumption-savings model with no uncertainty.
2) A consumption-savings model with risk over transitory and permanent income shocks.
3) The model described in (2), with an interest rate for debt that differs
from the interest rate for savings.
See NARK for information on variable naming conventions.
See HARK documentation for mathematical descriptions of the models being solved.
'''
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import str
from builtins import range
from builtins import object
from copy import copy, deepcopy
import numpy as np
from scipy.optimize import newton
from HARK import AgentType, Solution, NullFunc, HARKobject
from HARK.utilities import warnings # Because of "patch" to warnings modules
from HARK.interpolation import CubicInterp, LowerEnvelope, LinearInterp
from HARK.simulation import drawDiscrete, drawBernoulli, drawLognormal, drawUniform
from HARK.utilities import approxMeanOneLognormal, addDiscreteOutcomeConstantMean,\
combineIndepDstns, makeGridExpMult, CRRAutility, CRRAutilityP, \
CRRAutilityPP, CRRAutilityP_inv, CRRAutility_invP, CRRAutility_inv, \
CRRAutilityP_invP
utility = CRRAutility
utilityP = CRRAutilityP
utilityPP = CRRAutilityPP
utilityP_inv = CRRAutilityP_inv
utility_invP = CRRAutility_invP
utility_inv = CRRAutility_inv
utilityP_invP = CRRAutilityP_invP
# =====================================================================
# === Classes that help solve consumption-saving models ===
# =====================================================================
class ConsumerSolution(Solution):
'''
A class representing the solution of a single period of a consumption-saving
problem. The solution must include a consumption function and marginal
value function.
Here and elsewhere in the code, Nrm indicates that variables are normalized
by permanent income.
'''
distance_criteria = ['vPfunc']
def __init__(self, cFunc=None, vFunc=None,
vPfunc=None, vPPfunc=None,
mNrmMin=None, hNrm=None, MPCmin=None, MPCmax=None):
'''
The constructor for a new ConsumerSolution object.
Parameters
----------
cFunc : function
The consumption function for this period, defined over market
resources: c = cFunc(m).
vFunc : function
The beginning-of-period value function for this period, defined over
market resources: v = vFunc(m).
vPfunc : function
The beginning-of-period marginal value function for this period,
defined over market resources: vP = vPfunc(m).
vPPfunc : function
The beginning-of-period marginal marginal value function for this
period, defined over market resources: vPP = vPPfunc(m).
mNrmMin : float
            The minimum allowable market resources for this period; the
            consumption function (etc.) is undefined for m < mNrmMin.
hNrm : float
Human wealth after receiving income this period: PDV of all future
income, ignoring mortality.
MPCmin : float
Infimum of the marginal propensity to consume this period.
MPC --> MPCmin as m --> infinity.
MPCmax : float
Supremum of the marginal propensity to consume this period.
MPC --> MPCmax as m --> mNrmMin.
Returns
-------
None
'''
# Change any missing function inputs to NullFunc
if cFunc is None:
cFunc = NullFunc()
if vFunc is None:
vFunc = NullFunc()
if vPfunc is None:
vPfunc = NullFunc()
if vPPfunc is None:
vPPfunc = NullFunc()
self.cFunc = cFunc
self.vFunc = vFunc
self.vPfunc = vPfunc
self.vPPfunc = vPPfunc
self.mNrmMin = mNrmMin
self.hNrm = hNrm
self.MPCmin = MPCmin
self.MPCmax = MPCmax
def appendSolution(self,new_solution):
'''
Appends one solution to another to create a ConsumerSolution whose
attributes are lists. Used in ConsMarkovModel, where we append solutions
*conditional* on a particular value of a Markov state to each other in
order to get the entire solution.
Parameters
----------
new_solution : ConsumerSolution
The solution to a consumption-saving problem; each attribute is a
list representing state-conditional values or functions.
Returns
-------
None
'''
if type(self.cFunc)!=list:
# Then we assume that self is an empty initialized solution instance.
# Begin by checking this is so.
assert NullFunc().distance(self.cFunc) == 0, 'appendSolution called incorrectly!'
# We will need the attributes of the solution instance to be lists. Do that here.
self.cFunc = [new_solution.cFunc]
self.vFunc = [new_solution.vFunc]
self.vPfunc = [new_solution.vPfunc]
self.vPPfunc = [new_solution.vPPfunc]
self.mNrmMin = [new_solution.mNrmMin]
else:
self.cFunc.append(new_solution.cFunc)
self.vFunc.append(new_solution.vFunc)
self.vPfunc.append(new_solution.vPfunc)
self.vPPfunc.append(new_solution.vPPfunc)
self.mNrmMin.append(new_solution.mNrmMin)
class ValueFunc(HARKobject):
'''
A class for representing a value function. The underlying interpolation is
in the space of (m,u_inv(v)); this class "re-curves" to the value function.
'''
distance_criteria = ['func','CRRA']
def __init__(self,vFuncNvrs,CRRA):
'''
Constructor for a new value function object.
Parameters
----------
vFuncNvrs : function
A real function representing the value function composed with the
inverse utility function, defined on market resources: u_inv(vFunc(m))
CRRA : float
Coefficient of relative risk aversion.
Returns
-------
None
'''
self.func = deepcopy(vFuncNvrs)
self.CRRA = CRRA
def __call__(self,m):
'''
Evaluate the value function at given levels of market resources m.
Parameters
----------
m : float or np.array
Market resources (normalized by permanent income) whose value is to
be found.
Returns
-------
v : float or np.array
Lifetime value of beginning this period with market resources m; has
same size as input m.
'''
return utility(self.func(m),gam=self.CRRA)
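# Hedged illustration (not used by the library): construct a ValueFunc from a
# toy interpolation in "inverse utility" space and evaluate it. The grid and
# the pretend value function v(m) = u(0.5*m) are made-up assumptions.
def _valuefunc_demo(CRRA=2.0):
    m_grid = np.array([1.0, 2.0, 3.0])
    v_grid = utility(0.5*m_grid, gam=CRRA)                   # pretend true values
    vFuncNvrs = LinearInterp(m_grid, utility_inv(v_grid, gam=CRRA))
    vFunc = ValueFunc(vFuncNvrs, CRRA)
    return vFunc(2.5)                                        # approximately u(1.25)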
class MargValueFunc(HARKobject):
'''
A class for representing a marginal value function in models where the
standard envelope condition of v'(m) = u'(c(m)) holds (with CRRA utility).
'''
distance_criteria = ['cFunc','CRRA']
def __init__(self,cFunc,CRRA):
'''
Constructor for a new marginal value function object.
Parameters
----------
cFunc : function
A real function representing the marginal value function composed
with the inverse marginal utility function, defined on market
resources: uP_inv(vPfunc(m)). Called cFunc because when standard
envelope condition applies, uP_inv(vPfunc(m)) = cFunc(m).
CRRA : float
Coefficient of relative risk aversion.
Returns
-------
None
'''
self.cFunc = deepcopy(cFunc)
self.CRRA = CRRA
def __call__(self,m):
'''
Evaluate the marginal value function at given levels of market resources m.
Parameters
----------
m : float or np.array
Market resources (normalized by permanent income) whose marginal
value is to be found.
Returns
-------
vP : float or np.array
Marginal lifetime value of beginning this period with market
resources m; has same size as input m.
'''
return utilityP(self.cFunc(m),gam=self.CRRA)
def derivative(self,m):
'''
Evaluate the derivative of the marginal value function at given levels
of market resources m; this is the marginal marginal value function.
Parameters
----------
m : float or np.array
Market resources (normalized by permanent income) whose marginal
marginal value is to be found.
Returns
-------
vPP : float or np.array
Marginal marginal lifetime value of beginning this period with market
resources m; has same size as input m.
'''
c, MPC = self.cFunc.eval_with_derivative(m)
return MPC*utilityPP(c,gam=self.CRRA)
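# Illustrative sketch (comments only): by the envelope condition, evaluating
# the marginal value function reduces to marginal utility of consumption.
# With a 45-degree consumption function c(m) = m (and assuming LinearInterp
# extrapolates linearly past its last gridpoint):
#   cFunc45 = LinearInterp(np.array([0.0, 1.0]), np.array([0.0, 1.0]))
#   vPfunc = MargValueFunc(cFunc45, CRRA=2.0)
#   vPfunc(2.0)   # = utilityP(2.0, gam=2.0) = 2.0**(-2.0) = 0.25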
class MargMargValueFunc(HARKobject):
'''
A class for representing a marginal marginal value function in models where
the standard envelope condition of v'(m) = u'(c(m)) holds (with CRRA utility).
'''
distance_criteria = ['cFunc','CRRA']
def __init__(self,cFunc,CRRA):
'''
Constructor for a new marginal marginal value function object.
Parameters
----------
cFunc : function
A real function representing the marginal value function composed
with the inverse marginal utility function, defined on market
resources: uP_inv(vPfunc(m)). Called cFunc because when standard
envelope condition applies, uP_inv(vPfunc(m)) = cFunc(m).
CRRA : float
Coefficient of relative risk aversion.
Returns
-------
None
'''
self.cFunc = deepcopy(cFunc)
self.CRRA = CRRA
def __call__(self,m):
'''
Evaluate the marginal marginal value function at given levels of market
resources m.
Parameters
----------
m : float or np.array
Market resources (normalized by permanent income) whose marginal
marginal value is to be found.
Returns
-------
vPP : float or np.array
Marginal marginal lifetime value of beginning this period with market
resources m; has same size as input m.
'''
c, MPC = self.cFunc.eval_with_derivative(m)
return MPC*utilityPP(c,gam=self.CRRA)
# =====================================================================
# === Classes and functions that solve consumption-saving models ===
# =====================================================================
class ConsPerfForesightSolver(object):
'''
A class for solving a one period perfect foresight consumption-saving problem.
An instance of this class is created by the function solvePerfForesight in each period.
'''
def __init__(self,solution_next,DiscFac,LivPrb,CRRA,Rfree,PermGroFac):
'''
Constructor for a new ConsPerfForesightSolver.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one-period problem.
DiscFac : float
Intertemporal discount factor for future utility.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the next period.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
Returns
-------
None
'''
# We ask that HARK users define single-letter variables they use in a dictionary
# attribute called notation.
# Do that first.
self.notation = {'a': 'assets after all actions',
'm': 'market resources at decision time',
'c': 'consumption'}
self.assignParameters(solution_next,DiscFac,LivPrb,CRRA,Rfree,PermGroFac)
def assignParameters(self,solution_next,DiscFac,LivPrb,CRRA,Rfree,PermGroFac):
'''
Saves necessary parameters as attributes of self for use by other methods.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
DiscFac : float
Intertemporal discount factor for future utility.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
Returns
-------
none
'''
self.solution_next = solution_next
self.DiscFac = DiscFac
self.LivPrb = LivPrb
self.CRRA = CRRA
self.Rfree = Rfree
self.PermGroFac = PermGroFac
def defUtilityFuncs(self):
'''
Defines CRRA utility function for this period (and its derivatives),
saving them as attributes of self for other methods to use.
Parameters
----------
none
Returns
-------
none
'''
self.u = lambda c : utility(c,gam=self.CRRA) # utility function
self.uP = lambda c : utilityP(c,gam=self.CRRA) # marginal utility function
self.uPP = lambda c : utilityPP(c,gam=self.CRRA)# marginal marginal utility function
def defValueFuncs(self):
r'''
Defines the value and marginal value functions for this period.
Uses the fact that for a perfect foresight CRRA utility problem,
if the MPC in period t is :math:`\kappa_{t}`, and relative risk
aversion :math:`\rho`, then the inverse value vFuncNvrs has a
constant slope of :math:`\kappa_{t}^{-\rho/(1-\rho)}` and
vFuncNvrs has value of zero at the lower bound of market resources
mNrmMin. See PerfForesightConsumerType.ipynb documentation notebook
for a brief explanation and the links below for a fuller treatment.
https://econ.jhu.edu/people/ccarroll/public/lecturenotes/consumption/PerfForesightCRRA/#vFuncAnalytical
https://econ.jhu.edu/people/ccarroll/SolvingMicroDSOPs/#vFuncPF
Parameters
----------
none
Returns
-------
none
'''
# See the PerfForesightConsumerType.ipynb documentation notebook for the derivations
vFuncNvrsSlope = self.MPC**(-self.CRRA/(1.0-self.CRRA))
vFuncNvrs = LinearInterp(np.array([self.mNrmMin, self.mNrmMin+1.0]),np.array([0.0, vFuncNvrsSlope]))
self.vFunc = ValueFunc(vFuncNvrs,self.CRRA)
self.vPfunc = MargValueFunc(self.cFunc,self.CRRA)
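# Worked example of the slope formula above (illustrative): with CRRA rho = 2
# and MPC kappa = 0.1, vFuncNvrsSlope = 0.1**(-2/(1 - 2)) = 0.1**2 = 0.01, so
# the inverse value function rises very slowly in m when the MPC is small.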
def makePFcFunc(self):
'''
Makes the (linear) consumption function for this period.
Parameters
----------
none
Returns
-------
none
'''
# Calculate human wealth this period (and lower bound of m)
self.hNrmNow = (self.PermGroFac/self.Rfree)*(self.solution_next.hNrm + 1.0)
self.mNrmMin = -self.hNrmNow
# Calculate the (constant) marginal propensity to consume
PatFac = ((self.Rfree*self.DiscFacEff)**(1.0/self.CRRA))/self.Rfree
self.MPC = 1.0/(1.0 + PatFac/self.solution_next.MPCmin)
# Construct the consumption function
self.cFunc = LinearInterp([self.mNrmMin, self.mNrmMin+1.0],[0.0, self.MPC])
# Add two attributes to enable calculation of steady state market resources
self.ExIncNext = 1.0 # Perfect foresight income of 1
self.mNrmMinNow = self.mNrmMin # Relabeling for compatibility with addSSmNrm
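# Worked example (illustrative numbers): with Rfree = 1.03, DiscFacEff = 0.96,
# CRRA = 2, PermGroFac = 1.0 and a terminal solution_next (hNrm = 0,
# MPCmin = 1), hNrmNow = (1.0/1.03)*(0 + 1) ~= 0.9709, mNrmMin ~= -0.9709,
# PatFac = sqrt(1.03*0.96)/1.03 ~= 0.9654 and MPC = 1/(1 + 0.9654) ~= 0.5088.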
def addSSmNrm(self,solution):
'''
Finds steady state (normalized) market resources and adds it to the
solution. This is the level of market resources such that the expectation
of market resources in the next period is unchanged. This value doesn't
necessarily exist.
Parameters
----------
solution : ConsumerSolution
Solution to this period's problem, which must have attribute cFunc.
Returns
-------
solution : ConsumerSolution
Same solution that was passed, but now with the attribute mNrmSS.
'''
# Make a linear function of all combinations of c and m that yield mNext = mNow
mZeroChangeFunc = lambda m : (1.0-self.PermGroFac/self.Rfree)*m + (self.PermGroFac/self.Rfree)*self.ExIncNext
# Find the steady state level of market resources
searchSSfunc = lambda m : solution.cFunc(m) - mZeroChangeFunc(m) # A zero of this is SS market resources
m_init_guess = self.mNrmMinNow + self.ExIncNext # Minimum market resources plus next income is okay starting guess
try:
mNrmSS = newton(searchSSfunc,m_init_guess)
except Exception:  # newton did not converge: no steady state exists
mNrmSS = None
# Add mNrmSS to the solution and return it
solution.mNrmSS = mNrmSS
return solution
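# The fixed point being sought (illustrative restatement): mNrmSS satisfies
# c(mNrmSS) = (1 - PermGroFac/Rfree)*mNrmSS + (PermGroFac/Rfree)*ExIncNext,
# i.e. consumption exactly offsets interest and expected income so that
# E[m_{t+1}] = m_t. If the consumption function never crosses this line,
# newton() fails and mNrmSS is recorded as None.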
def solve(self):
'''
Solves the one period perfect foresight consumption-saving problem.
Parameters
----------
none
Returns
-------
solution : ConsumerSolution
The solution to this period's problem.
'''
self.defUtilityFuncs()
self.DiscFacEff = self.DiscFac*self.LivPrb
self.makePFcFunc()
self.defValueFuncs()
solution = ConsumerSolution(cFunc=self.cFunc, vFunc=self.vFunc, vPfunc=self.vPfunc,
mNrmMin=self.mNrmMin, hNrm=self.hNrmNow,
MPCmin=self.MPC, MPCmax=self.MPC)
#solution = self.addSSmNrm(solution)
return solution
def solvePerfForesight(solution_next,DiscFac,LivPrb,CRRA,Rfree,PermGroFac):
'''
Solves a single period consumption-saving problem for a consumer with perfect foresight.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
DiscFac : float
Intertemporal discount factor for future utility.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
Returns
-------
solution : ConsumerSolution
The solution to this period's problem.
'''
solver = ConsPerfForesightSolver(solution_next,DiscFac,LivPrb,CRRA,Rfree,PermGroFac)
solution = solver.solve()
return solution
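# Usage sketch (illustrative; a hand-built terminal solution, not a calibrated
# model, relying on ConsumerSolution's defaults for the omitted attributes):
#   solution_T = ConsumerSolution(cFunc=LinearInterp([0.0, 1.0], [0.0, 1.0]),
#                                 mNrmMin=0.0, hNrm=0.0, MPCmin=1.0, MPCmax=1.0)
#   sol = solvePerfForesight(solution_T, DiscFac=0.96, LivPrb=1.0, CRRA=2.0,
#                            Rfree=1.03, PermGroFac=1.0)
#   sol.cFunc(1.0)   # consumption at m = 1 in the second-to-last period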
###############################################################################
###############################################################################
class ConsIndShockSetup(ConsPerfForesightSolver):
'''
A superclass for solvers of one period consumption-saving problems with
constant relative risk aversion utility and permanent and transitory shocks
to income. Has methods to set up but not solve the one period problem.
'''
def __init__(self,solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool):
'''
Constructor for a new solver-setup for problems with income subject to
permanent and transitory shocks.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
None
'''
self.assignParameters(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool)
self.defUtilityFuncs()
def assignParameters(self,solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool):
'''
Assigns period parameters as attributes of self for use by other methods
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
none
'''
ConsPerfForesightSolver.assignParameters(self,solution_next,DiscFac,LivPrb,
CRRA,Rfree,PermGroFac)
self.BoroCnstArt = BoroCnstArt
self.IncomeDstn = IncomeDstn
self.aXtraGrid = aXtraGrid
self.vFuncBool = vFuncBool
self.CubicBool = CubicBool
def defUtilityFuncs(self):
'''
Defines CRRA utility function for this period (and its derivatives,
and their inverses), saving them as attributes of self for other methods
to use.
Parameters
----------
none
Returns
-------
none
'''
ConsPerfForesightSolver.defUtilityFuncs(self)
self.uPinv = lambda u : utilityP_inv(u,gam=self.CRRA)
self.uPinvP = lambda u : utilityP_invP(u,gam=self.CRRA)
self.uinvP = lambda u : utility_invP(u,gam=self.CRRA)
if self.vFuncBool:
self.uinv = lambda u : utility_inv(u,gam=self.CRRA)
def setAndUpdateValues(self,solution_next,IncomeDstn,LivPrb,DiscFac):
'''
Unpacks some of the inputs (and calculates simple objects based on them),
storing the results in self for use by other methods. These include:
income shocks and probabilities, next period's marginal value function
(etc), the probability of getting the worst income shock next period,
the patience factor, human wealth, and the bounding MPCs.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
Returns
-------
None
'''
self.DiscFacEff = DiscFac*LivPrb # "effective" discount factor
self.ShkPrbsNext = IncomeDstn[0]
self.PermShkValsNext = IncomeDstn[1]
self.TranShkValsNext = IncomeDstn[2]
self.PermShkMinNext = np.min(self.PermShkValsNext)
self.TranShkMinNext = np.min(self.TranShkValsNext)
self.vPfuncNext = solution_next.vPfunc
self.WorstIncPrb = np.sum(self.ShkPrbsNext[
(self.PermShkValsNext*self.TranShkValsNext)==
(self.PermShkMinNext*self.TranShkMinNext)])
if self.CubicBool:
self.vPPfuncNext = solution_next.vPPfunc
if self.vFuncBool:
self.vFuncNext = solution_next.vFunc
# Update the bounding MPCs and PDV of human wealth:
self.PatFac = ((self.Rfree*self.DiscFacEff)**(1.0/self.CRRA))/self.Rfree
self.MPCminNow = 1.0/(1.0 + self.PatFac/solution_next.MPCmin)
self.ExIncNext = np.dot(self.ShkPrbsNext,self.TranShkValsNext*self.PermShkValsNext)
self.hNrmNow = self.PermGroFac/self.Rfree*(self.ExIncNext + solution_next.hNrm)
self.MPCmaxNow = 1.0/(1.0 + (self.WorstIncPrb**(1.0/self.CRRA))*
self.PatFac/solution_next.MPCmax)
self.cFuncLimitIntercept = self.MPCminNow*self.hNrmNow
self.cFuncLimitSlope = self.MPCminNow
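# Worked example of the bounding MPCs (illustrative): with Rfree = 1.03,
# DiscFacEff = 0.96, CRRA = 2 and solution_next.MPCmin = solution_next.MPCmax = 1,
# PatFac ~= 0.9654 and MPCminNow ~= 0.5088, exactly as in the perfect foresight
# case; income risk tightens only MPCmaxNow, through WorstIncPrb**(1/CRRA).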
def defBoroCnst(self,BoroCnstArt):
'''
Defines the constrained portion of the consumption function as cFuncNowCnst,
an attribute of self. Uses the artificial and natural borrowing constraints.
Parameters
----------
BoroCnstArt : float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
Returns
-------
none
'''
# Calculate the minimum allowable value of money resources in this period
self.BoroCnstNat = (self.solution_next.mNrmMin - self.TranShkMinNext)*\
(self.PermGroFac*self.PermShkMinNext)/self.Rfree
# Note: need to be sure to handle BoroCnstArt==None appropriately.
# In Py2, this would evaluate to 5.0: np.max([None, 5.0]).
# However in Py3, this raises a TypeError. Thus here we need to directly
# address the situation in which BoroCnstArt == None:
if BoroCnstArt is None:
self.mNrmMinNow = self.BoroCnstNat
else:
self.mNrmMinNow = np.max([self.BoroCnstNat,BoroCnstArt])
if self.BoroCnstNat < self.mNrmMinNow:
self.MPCmaxEff = 1.0 # If actually constrained, MPC near limit is 1
else:
self.MPCmaxEff = self.MPCmaxNow
# Define the borrowing constraint (limiting consumption function)
self.cFuncNowCnst = LinearInterp(np.array([self.mNrmMinNow, self.mNrmMinNow+1]),
np.array([0.0, 1.0]))
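# Worked example of the natural constraint (illustrative numbers): if next
# period's mNrmMin = 0, TranShkMinNext = 0.3, PermShkMinNext = 0.9,
# PermGroFac = 1.0 and Rfree = 1.03, then
# BoroCnstNat = (0 - 0.3)*(1.0*0.9)/1.03 ~= -0.262: the agent can borrow up to
# what he could surely repay even after the worst possible income draw.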
def prepareToSolve(self):
'''
Perform preparatory work before calculating the unconstrained consumption
function.
Parameters
----------
none
Returns
-------
none
'''
self.setAndUpdateValues(self.solution_next,self.IncomeDstn,self.LivPrb,self.DiscFac)
self.defBoroCnst(self.BoroCnstArt)
####################################################################################################
####################################################################################################
class ConsIndShockSolverBasic(ConsIndShockSetup):
'''
This class solves a single period of a standard consumption-saving problem,
using linear interpolation and without the ability to calculate the value
function. ConsIndShockSolver inherits from this class and adds the ability
to perform cubic interpolation and to calculate the value function.
Note that this class does not have its own initializing method. It initial-
izes the same problem in the same way as ConsIndShockSetup, from which it
inherits.
'''
def prepareToCalcEndOfPrdvP(self):
'''
Prepare to calculate end-of-period marginal value by creating an array
of market resources that the agent could have next period, considering
the grid of end-of-period assets and the distribution of shocks he might
experience next period.
Parameters
----------
none
Returns
-------
aNrmNow : np.array
A 1D array of end-of-period assets; also stored as attribute of self.
'''
# We define aNrmNow all the way from BoroCnstNat up to max(self.aXtraGrid)
# even if BoroCnstNat < BoroCnstArt, so we can construct the consumption
# function as the lower envelope of the (by the artificial borrowing con-
# straint) unconstrained consumption function, and the artificially con-
# strained consumption function.
aNrmNow = np.asarray(self.aXtraGrid) + self.BoroCnstNat
ShkCount = self.TranShkValsNext.size
aNrm_temp = np.tile(aNrmNow,(ShkCount,1))
# Tile arrays of the income shocks and put them into useful shapes
aNrmCount = aNrmNow.shape[0]
PermShkVals_temp = (np.tile(self.PermShkValsNext,(aNrmCount,1))).transpose()
TranShkVals_temp = (np.tile(self.TranShkValsNext,(aNrmCount,1))).transpose()
ShkPrbs_temp = (np.tile(self.ShkPrbsNext,(aNrmCount,1))).transpose()
# Get cash on hand next period
mNrmNext = self.Rfree/(self.PermGroFac*PermShkVals_temp)*aNrm_temp + TranShkVals_temp
# Store and report the results
self.PermShkVals_temp = PermShkVals_temp
self.ShkPrbs_temp = ShkPrbs_temp
self.mNrmNext = mNrmNext
self.aNrmNow = aNrmNow
return aNrmNow
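# Shape sketch (illustrative): with ShkCount = 7 discrete shock outcomes and
# aNrmCount = 48 asset gridpoints, aNrm_temp, PermShkVals_temp,
# TranShkVals_temp, ShkPrbs_temp and mNrmNext are all (7, 48) arrays; column j
# of mNrmNext holds every possible next-period m for end-of-period assets
# aNrmNow[j].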
def calcEndOfPrdvP(self):
'''
Calculate end-of-period marginal value of assets at each point in aNrmNow.
Does so by taking a weighted sum of next period marginal values across
income shocks (in a preconstructed grid self.mNrmNext).
Parameters
----------
none
Returns
-------
EndOfPrdvP : np.array
A 1D array of end-of-period marginal value of assets
'''
EndOfPrdvP = self.DiscFacEff*self.Rfree*self.PermGroFac**(-self.CRRA)*np.sum(
self.PermShkVals_temp**(-self.CRRA)*
self.vPfuncNext(self.mNrmNext)*self.ShkPrbs_temp,axis=0)
return EndOfPrdvP
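# In equation form (restating the line above), this computes
#   EndOfPrdvP(a) = DiscFacEff * Rfree * PermGroFac**(-CRRA)
#                   * sum_i ShkPrbs_i * PermShk_i**(-CRRA) * vPfuncNext(mNext_i),
# the expected discounted marginal value of ending the period with assets a.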
def getPointsForInterpolation(self,EndOfPrdvP,aNrmNow):
'''
Finds interpolation points (c,m) for the consumption function.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aNrmNow : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
Returns
-------
c_for_interpolation : np.array
Consumption points for interpolation.
m_for_interpolation : np.array
Corresponding market resource points for interpolation.
'''
cNrmNow = self.uPinv(EndOfPrdvP)
mNrmNow = cNrmNow + aNrmNow
# Limiting consumption is zero as m approaches mNrmMin
c_for_interpolation = np.insert(cNrmNow,0,0.,axis=-1)
m_for_interpolation = np.insert(mNrmNow,0,self.BoroCnstNat,axis=-1)
# Store these for calcvFunc
self.cNrmNow = cNrmNow
self.mNrmNow = mNrmNow
return c_for_interpolation,m_for_interpolation
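# EGM core step (illustrative numbers): each gridpoint is inverted through the
# first order condition uP(c) = EndOfPrdvP. With CRRA = 2, an end-of-period
# marginal value of 4.0 at a = 0.5 yields c = uPinv(4.0) = 4.0**(-0.5) = 0.5
# and the endogenous gridpoint m = c + a = 1.0.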
def usePointsForInterpolation(self,cNrm,mNrm,interpolator):
'''
Constructs a basic solution for this period, including the consumption
function and marginal value function.
Parameters
----------
cNrm : np.array
(Normalized) consumption points for interpolation.
mNrm : np.array
(Normalized) corresponding market resource points for interpolation.
interpolator : function
A function that constructs and returns a consumption function.
Returns
-------
solution_now : ConsumerSolution
The solution to this period's consumption-saving problem, with a
consumption function, marginal value function, and minimum m.
'''
# Construct the unconstrained consumption function
cFuncNowUnc = interpolator(mNrm,cNrm)
# Combine the constrained and unconstrained functions into the true consumption function
cFuncNow = LowerEnvelope(cFuncNowUnc,self.cFuncNowCnst)
# Make the marginal value function and the marginal marginal value function
vPfuncNow = MargValueFunc(cFuncNow,self.CRRA)
# Pack up the solution and return it
solution_now = ConsumerSolution(cFunc=cFuncNow, vPfunc=vPfuncNow, mNrmMin=self.mNrmMinNow)
return solution_now
def makeBasicSolution(self,EndOfPrdvP,aNrm,interpolator):
'''
Given end of period assets and end of period marginal value, construct
the basic solution for this period.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aNrm : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
interpolator : function
A function that constructs and returns a consumption function.
Returns
-------
solution_now : ConsumerSolution
The solution to this period's consumption-saving problem, with a
consumption function, marginal value function, and minimum m.
'''
cNrm,mNrm = self.getPointsForInterpolation(EndOfPrdvP,aNrm)
solution_now = self.usePointsForInterpolation(cNrm,mNrm,interpolator)
return solution_now
def addMPCandHumanWealth(self,solution):
'''
Take a solution and add human wealth and the bounding MPCs to it.
Parameters
----------
solution : ConsumerSolution
The solution to this period's consumption-saving problem.
Returns
-------
solution : ConsumerSolution
The solution to this period's consumption-saving problem, but now
with human wealth and the bounding MPCs.
'''
solution.hNrm = self.hNrmNow
solution.MPCmin = self.MPCminNow
solution.MPCmax = self.MPCmaxEff
return solution
def makeLinearcFunc(self,mNrm,cNrm):
'''
Makes a linear interpolation to represent the (unconstrained) consumption function.
Parameters
----------
mNrm : np.array
Corresponding market resource points for interpolation.
cNrm : np.array
Consumption points for interpolation.
Returns
-------
cFuncUnc : LinearInterp
The unconstrained consumption function for this period.
'''
cFuncUnc = LinearInterp(mNrm, cNrm, self.cFuncLimitIntercept, self.cFuncLimitSlope)
return cFuncUnc
def solve(self):
'''
Solves a one period consumption saving problem with risky income.
Parameters
----------
None
Returns
-------
solution : ConsumerSolution
The solution to the one period problem.
'''
aNrm = self.prepareToCalcEndOfPrdvP()
EndOfPrdvP = self.calcEndOfPrdvP()
solution = self.makeBasicSolution(EndOfPrdvP,aNrm,self.makeLinearcFunc)
solution = self.addMPCandHumanWealth(solution)
return solution
###############################################################################
###############################################################################
class ConsIndShockSolver(ConsIndShockSolverBasic):
'''
This class solves a single period of a standard consumption-saving problem.
It inherits from ConsIndShockSolverBasic, adding the ability to perform cubic
interpolation and to calculate the value function.
'''
def makeCubiccFunc(self,mNrm,cNrm):
'''
Makes a cubic spline interpolation of the unconstrained consumption
function for this period.
Parameters
----------
mNrm : np.array
Corresponding market resource points for interpolation.
cNrm : np.array
Consumption points for interpolation.
Returns
-------
cFuncUnc : CubicInterp
The unconstrained consumption function for this period.
'''
EndOfPrdvPP = self.DiscFacEff*self.Rfree*self.Rfree*self.PermGroFac**(-self.CRRA-1.0)* \
np.sum(self.PermShkVals_temp**(-self.CRRA-1.0)*
self.vPPfuncNext(self.mNrmNext)*self.ShkPrbs_temp,axis=0)
dcda = EndOfPrdvPP/self.uPP(np.array(cNrm[1:]))
MPC = dcda/(dcda+1.)
MPC = np.insert(MPC,0,self.MPCmaxNow)
cFuncNowUnc = CubicInterp(mNrm,cNrm,MPC,self.MPCminNow*self.hNrmNow,self.MPCminNow)
return cFuncNowUnc
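# Derivation sketch for the MPC above (illustrative): differentiating the first
# order condition uP(c(m)) = EndOfPrdvP(m - c(m)) gives
#   uPP(c)*c'(m) = EndOfPrdvPP*(1 - c'(m)),
# so with dcda = EndOfPrdvPP/uPP(c), the MPC is c'(m) = dcda/(dcda + 1).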
def makeEndOfPrdvFunc(self,EndOfPrdvP):
'''
Construct the end-of-period value function for this period, storing it
as an attribute of self for use by other methods.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal value of assets corresponding to the
asset values in self.aNrmNow.
Returns
-------
none
'''
VLvlNext = (self.PermShkVals_temp**(1.0-self.CRRA)*\
self.PermGroFac**(1.0-self.CRRA))*self.vFuncNext(self.mNrmNext)
EndOfPrdv = self.DiscFacEff*np.sum(VLvlNext*self.ShkPrbs_temp,axis=0)
EndOfPrdvNvrs = self.uinv(EndOfPrdv) # value transformed through inverse utility
EndOfPrdvNvrsP = EndOfPrdvP*self.uinvP(EndOfPrdv)
EndOfPrdvNvrs = np.insert(EndOfPrdvNvrs,0,0.0)
EndOfPrdvNvrsP = np.insert(EndOfPrdvNvrsP,0,EndOfPrdvNvrsP[0]) # This is a very good approximation, vNvrsPP = 0 at the asset minimum
aNrm_temp = np.insert(self.aNrmNow,0,self.BoroCnstNat)
EndOfPrdvNvrsFunc = CubicInterp(aNrm_temp,EndOfPrdvNvrs,EndOfPrdvNvrsP)
self.EndOfPrdvFunc = ValueFunc(EndOfPrdvNvrsFunc,self.CRRA)
def addvFunc(self,solution,EndOfPrdvP):
'''
Creates the value function for this period and adds it to the solution.
Parameters
----------
solution : ConsumerSolution
The solution to this single period problem, likely including the
consumption function, marginal value function, etc.
EndOfPrdvP : np.array
Array of end-of-period marginal value of assets corresponding to the
asset values in self.aNrmNow.
Returns
-------
solution : ConsumerSolution
The single period solution passed as an input, but now with the
value function (defined over market resources m) as an attribute.
'''
self.makeEndOfPrdvFunc(EndOfPrdvP)
solution.vFunc = self.makevFunc(solution)
return solution
def makevFunc(self,solution):
'''
Creates the value function for this period, defined over market resources m.
self must have the attribute EndOfPrdvFunc in order to execute.
Parameters
----------
solution : ConsumerSolution
The solution to this single period problem, which must include the
consumption function.
Returns
-------
vFuncNow : ValueFunc
A representation of the value function for this period, defined over
normalized market resources m: v = vFuncNow(m).
'''
# Compute expected value and marginal value on a grid of market resources
mNrm_temp = self.mNrmMinNow + self.aXtraGrid
cNrmNow = solution.cFunc(mNrm_temp)
aNrmNow = mNrm_temp - cNrmNow
vNrmNow = self.u(cNrmNow) + self.EndOfPrdvFunc(aNrmNow)
vPnow = self.uP(cNrmNow)
# Construct the beginning-of-period value function
vNvrs = self.uinv(vNrmNow) # value transformed through inverse utility
vNvrsP = vPnow*self.uinvP(vNrmNow)
mNrm_temp = np.insert(mNrm_temp,0,self.mNrmMinNow)
vNvrs = np.insert(vNvrs,0,0.0)
vNvrsP = np.insert(vNvrsP,0,self.MPCmaxEff**(-self.CRRA/(1.0-self.CRRA)))
MPCminNvrs = self.MPCminNow**(-self.CRRA/(1.0-self.CRRA))
vNvrsFuncNow = CubicInterp(mNrm_temp,vNvrs,vNvrsP,MPCminNvrs*self.hNrmNow,MPCminNvrs)
vFuncNow = ValueFunc(vNvrsFuncNow,self.CRRA)
return vFuncNow
def addvPPfunc(self,solution):
'''
Adds the marginal marginal value function to an existing solution, so
that the next solver can evaluate vPP and thus use cubic interpolation.
Parameters
----------
solution : ConsumerSolution
The solution to this single period problem, which must include the
consumption function.
Returns
-------
solution : ConsumerSolution
The same solution passed as input, but with the marginal marginal
value function for this period added as the attribute vPPfunc.
'''
vPPfuncNow = MargMargValueFunc(solution.cFunc,self.CRRA)
solution.vPPfunc = vPPfuncNow
return solution
def solve(self):
'''
Solves the single period consumption-saving problem using the method of
endogenous gridpoints. Solution includes a consumption function cFunc
(using cubic or linear splines), a marginal value function vPfunc, a min-
imum acceptable level of normalized market resources mNrmMin, normalized
human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also
have a value function vFunc and marginal marginal value function vPPfunc.
Parameters
----------
none
Returns
-------
solution : ConsumerSolution
The solution to the single period consumption-saving problem.
'''
# Make arrays of end-of-period assets and end-of-period marginal value
aNrm = self.prepareToCalcEndOfPrdvP()
EndOfPrdvP = self.calcEndOfPrdvP()
# Construct a basic solution for this period
if self.CubicBool:
solution = self.makeBasicSolution(EndOfPrdvP,aNrm,interpolator=self.makeCubiccFunc)
else:
solution = self.makeBasicSolution(EndOfPrdvP,aNrm,interpolator=self.makeLinearcFunc)
solution = self.addMPCandHumanWealth(solution) # add a few things
solution = self.addSSmNrm(solution) # find steady state m
# Add the value function if requested, as well as the marginal marginal
# value function if cubic splines were used (to prepare for next period)
if self.vFuncBool:
solution = self.addvFunc(solution,EndOfPrdvP)
if self.CubicBool:
solution = self.addvPPfunc(solution)
return solution
def solveConsIndShock(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,PermGroFac,
BoroCnstArt,aXtraGrid,vFuncBool,CubicBool):
'''
Solves a single period consumption-saving problem with CRRA utility and risky
income (subject to permanent and transitory shocks). Can generate a value
function if requested; consumption function can be linear or cubic splines.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
Indicator for whether the solver should use cubic or linear interpolation.
Returns
-------
solution_now : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using cubic or linear splines), a marginal
value function vPfunc, a minimum acceptable level of normalized market
resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin
and MPCmax. It might also have a value function vFunc and marginal mar-
ginal value function vPPfunc.
'''
# Use the basic solver if user doesn't want cubic splines or the value function
if (not CubicBool) and (not vFuncBool):
solver = ConsIndShockSolverBasic(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,
Rfree,PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,
CubicBool)
else: # Use the "advanced" solver if either is requested
solver = ConsIndShockSolver(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool)
solver.prepareToSolve() # Do some preparatory work
solution_now = solver.solve() # Solve the period
return solution_now
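# Usage sketch (illustrative; the shock arrays are made up rather than a
# calibrated discretization, and solution_next is assumed to come from a
# later period):
#   IncomeDstn = [np.array([0.5, 0.5]),   # event probabilities
#                 np.array([0.9, 1.1]),   # permanent shocks
#                 np.array([0.7, 1.3])]   # transitory shocks
#   sol = solveConsIndShock(solution_next, IncomeDstn, LivPrb=0.98,
#                           DiscFac=0.96, CRRA=2.0, Rfree=1.03, PermGroFac=1.01,
#                           BoroCnstArt=0.0, aXtraGrid=np.linspace(0.001, 20, 48),
#                           vFuncBool=False, CubicBool=False)
#   sol.cFunc(1.0)   # consumption at m = 1 for this period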
####################################################################################################
####################################################################################################
class ConsKinkedRsolver(ConsIndShockSolver):
'''
A class to solve a single period consumption-saving problem where the interest
rate on debt differs from the interest rate on savings. Inherits from
ConsIndShockSolver, with nearly identical inputs and outputs. The key diff-
erence is that Rfree is replaced by Rsave (a>0) and Rboro (a<0). The solver
can handle Rboro == Rsave, which makes it identical to ConsIndShockSolver, but
it terminates immediately if Rboro < Rsave, as this has a different solution.
'''
def __init__(self,solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,
Rboro,Rsave,PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool):
'''
Constructor for a new solver for problems with risky income and a different
interest rate on borrowing and saving.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rboro: float
Interest factor on assets between this period and the succeeding
period when assets are negative.
Rsave: float
Interest factor on assets between this period and the succeeding
period when assets are positive.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
None
'''
assert Rboro>=Rsave, 'Interest factor on debt less than interest factor on savings!'
# Initialize the solver. Most of the steps are exactly the same as in
# the non-kinked-R basic case, so start with that.
ConsIndShockSolver.__init__(self,solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rboro,
PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool)
# Assign the interest rates as class attributes, to use them later.
self.Rboro = Rboro
self.Rsave = Rsave
def makeCubiccFunc(self,mNrm,cNrm):
'''
Makes a cubic spline interpolation that contains the kink of the unconstrained
consumption function for this period.
Parameters
----------
mNrm : np.array
Corresponding market resource points for interpolation.
cNrm : np.array
Consumption points for interpolation.
Returns
-------
cFuncUnc : CubicInterp
The unconstrained consumption function for this period.
'''
# Call the makeCubiccFunc from ConsIndShockSolver.
cFuncNowUncKink = super().makeCubiccFunc(mNrm, cNrm)
# Change the coeffients at the kinked points.
cFuncNowUncKink.coeffs[self.i_kink + 1] = [cNrm[self.i_kink], mNrm[self.i_kink + 1] - mNrm[self.i_kink], 0, 0]
return cFuncNowUncKink
def prepareToCalcEndOfPrdvP(self):
'''
Prepare to calculate end-of-period marginal value by creating an array
of market resources that the agent could have next period, considering
the grid of end-of-period assets and the distribution of shocks he might
experience next period. This differs from the baseline case because
different savings choices yield different interest rates.
Parameters
----------
none
Returns
-------
aNrmNow : np.array
A 1D array of end-of-period assets; also stored as attribute of self.
'''
KinkBool = self.Rboro > self.Rsave # Boolean indicating that there is actually a kink.
# When Rboro == Rsave, this method acts just like it did in IndShock.
# When Rboro < Rsave, the solver would have terminated when it was called.
# Make a grid of end-of-period assets, including *two* copies of a=0
if KinkBool:
aNrmNow = np.sort(np.hstack((np.asarray(self.aXtraGrid) + self.mNrmMinNow,
np.array([0.0,0.0]))))
else:
aNrmNow = np.asarray(self.aXtraGrid) + self.mNrmMinNow
aXtraCount = aNrmNow.size
# Make tiled versions of the assets grid and income shocks
ShkCount = self.TranShkValsNext.size
aNrm_temp = np.tile(aNrmNow,(ShkCount,1))
PermShkVals_temp = (np.tile(self.PermShkValsNext,(aXtraCount,1))).transpose()
TranShkVals_temp = (np.tile(self.TranShkValsNext,(aXtraCount,1))).transpose()
ShkPrbs_temp = (np.tile(self.ShkPrbsNext,(aXtraCount,1))).transpose()
# Make a 1D array of the interest factor at each asset gridpoint
Rfree_vec = self.Rsave*np.ones(aXtraCount)
if KinkBool:
self.i_kink = np.sum(aNrmNow<=0)-1 # Save the index of the kink point as an attribute
Rfree_vec[0:self.i_kink] = self.Rboro
self.Rfree = Rfree_vec
Rfree_temp = np.tile(Rfree_vec,(ShkCount,1))
# Make an array of market resources that we could have next period,
# considering the grid of assets and the income shocks that could occur
mNrmNext = Rfree_temp/(self.PermGroFac*PermShkVals_temp)*aNrm_temp + TranShkVals_temp
# Recalculate the minimum MPC and human wealth using the interest factor on saving.
# This overwrites values from setAndUpdateValues, which were based on Rboro instead.
if KinkBool:
PatFacTop = ((self.Rsave*self.DiscFacEff)**(1.0/self.CRRA))/self.Rsave
self.MPCminNow = 1.0/(1.0 + PatFacTop/self.solution_next.MPCmin)
self.hNrmNow = self.PermGroFac/self.Rsave*(np.dot(self.ShkPrbsNext,
self.TranShkValsNext*self.PermShkValsNext) + self.solution_next.hNrm)
# Store some of the constructed arrays for later use and return the assets grid
self.PermShkVals_temp = PermShkVals_temp
self.ShkPrbs_temp = ShkPrbs_temp
self.mNrmNext = mNrmNext
self.aNrmNow = aNrmNow
return aNrmNow
def solveConsKinkedR(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rboro,Rsave,
PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool):
'''
Solves a single period consumption-saving problem with CRRA utility and risky
income (subject to permanent and transitory shocks), and different interest
factors on borrowing and saving. Restriction: Rboro >= Rsave. The consumption
function can use linear or cubic splines; the cubic case handles the kink at
a = 0 explicitly. Can generate a value function if requested.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rboro: float
Interest factor on assets between this period and the succeeding
period when assets are negative.
Rsave: float
Interest factor on assets between this period and the succeeding
period when assets are positive.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
Indicator for whether the solver should use cubic or linear interpolation.
Returns
-------
solution_now : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using cubic or linear splines), a marginal
value function vPfunc, a minimum acceptable level of normalized market
resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin
and MPCmax. It might also have a value function vFunc.
'''
solver = ConsKinkedRsolver(solution_next,IncomeDstn,LivPrb,
DiscFac,CRRA,Rboro,Rsave,PermGroFac,BoroCnstArt,
aXtraGrid,vFuncBool,CubicBool)
solver.prepareToSolve()
solution = solver.solve()
return solution
# ============================================================================
# == Classes for representing types of consumer agents (and things they do) ==
# ============================================================================
class PerfForesightConsumerType(AgentType):
'''
A perfect foresight consumer type who has no uncertainty other than mortality.
His problem is defined by a coefficient of relative risk aversion, intertemporal
discount factor, interest factor, and time sequences of the permanent income
growth rate and survival probability.
'''
# Define some universal values for all consumer types
cFunc_terminal_ = LinearInterp([0.0, 1.0],[0.0,1.0]) # c=m in terminal period
vFunc_terminal_ = LinearInterp([0.0, 1.0],[0.0,0.0]) # This is overwritten
solution_terminal_ = ConsumerSolution(cFunc = cFunc_terminal_,
vFunc = vFunc_terminal_, mNrmMin=0.0, hNrm=0.0,
MPCmin=1.0, MPCmax=1.0)
time_vary_ = ['LivPrb','PermGroFac']
time_inv_ = ['CRRA','Rfree','DiscFac']
poststate_vars_ = ['aNrmNow','pLvlNow']
shock_vars_ = []
def __init__(self,cycles=1, time_flow=True,verbose=False,quiet=False, **kwds):
'''
Instantiate a new consumer type with given data.
See ConsumerParameters.init_perfect_foresight for a dictionary of
the keywords that should be passed to the constructor.
Parameters
----------
cycles : int
Number of times the sequence of periods should be solved.
time_flow : boolean
Whether time is currently "flowing" forward for this instance.
Returns
-------
None
'''
# Initialize a basic AgentType
AgentType.__init__(self,solution_terminal=deepcopy(self.solution_terminal_),
cycles=cycles,time_flow=time_flow,pseudo_terminal=False,**kwds)
# Add consumer-type specific objects, copying to create independent versions
self.time_vary = deepcopy(self.time_vary_)
self.time_inv = deepcopy(self.time_inv_)
self.poststate_vars = deepcopy(self.poststate_vars_)
self.shock_vars = deepcopy(self.shock_vars_)
self.verbose = verbose
self.quiet = quiet
self.solveOnePeriod = solvePerfForesight # solver for perfect foresight model
def preSolve(self):
self.updateSolutionTerminal()
def checkRestrictions(self):
"""
A method to check that various restrictions are met for the model class.
"""
if self.DiscFac < 0:
raise Exception('DiscFac is below zero with value: ' + str(self.DiscFac))
return
def updateSolutionTerminal(self):
'''
Update the terminal period solution. This method should be run when a
new AgentType is created or when CRRA changes.
Parameters
----------
none
Returns
-------
none
'''
self.solution_terminal.vFunc = ValueFunc(self.cFunc_terminal_,self.CRRA)
self.solution_terminal.vPfunc = MargValueFunc(self.cFunc_terminal_,self.CRRA)
self.solution_terminal.vPPfunc = MargMargValueFunc(self.cFunc_terminal_,self.CRRA)
def unpackcFunc(self):
'''
"Unpacks" the consumption functions into their own field for easier access.
After the model has been solved, the consumption functions reside in the
attribute cFunc of each element of ConsumerType.solution. This method
creates a (time varying) attribute cFunc that contains a list of consumption
functions.
Parameters
----------
none
Returns
-------
none
'''
self.cFunc = []
for solution_t in self.solution:
self.cFunc.append(solution_t.cFunc)
self.addToTimeVary('cFunc')
def initializeSim(self):
self.PlvlAggNow = 1.0
self.PermShkAggNow = self.PermGroFacAgg # This never changes during simulation
AgentType.initializeSim(self)
def simBirth(self,which_agents):
'''
Makes new consumers for the given indices. Initialized variables include aNrm and pLvl, as
well as time variables t_age and t_cycle. Normalized assets and permanent income levels
are drawn from lognormal distributions given by aNrmInitMean and aNrmInitStd (etc).
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None
'''
# Get and store states for newly born agents
N = np.sum(which_agents) # Number of new consumers to make
self.aNrmNow[which_agents] = drawLognormal(N,mu=self.aNrmInitMean,sigma=self.aNrmInitStd,seed=self.RNG.randint(0,2**31-1))
pLvlInitMeanNow = self.pLvlInitMean + np.log(self.PlvlAggNow) # Account for newer cohorts having higher permanent income
self.pLvlNow[which_agents] = drawLognormal(N,mu=pLvlInitMeanNow,sigma=self.pLvlInitStd,seed=self.RNG.randint(0,2**31-1))
self.t_age[which_agents] = 0 # How many periods since each agent was born
self.t_cycle[which_agents] = 0 # Which period of the cycle each agent is currently in
return None
def simDeath(self):
'''
Determines which agents die this period and must be replaced. Uses the sequence in LivPrb
to determine survival probabilities for each agent.
Parameters
----------
None
Returns
-------
which_agents : np.array(bool)
Boolean array of size AgentCount indicating which agents die.
'''
# Determine who dies
DiePrb_by_t_cycle = 1.0 - np.asarray(self.LivPrb)
DiePrb = DiePrb_by_t_cycle[self.t_cycle-1] # Time has already advanced, so look back one
DeathShks = drawUniform(N=self.AgentCount,seed=self.RNG.randint(0,2**31-1))
which_agents = DeathShks < DiePrb
if self.T_age is not None: # Kill agents that have lived for too many periods
too_old = self.t_age >= self.T_age
which_agents = np.logical_or(which_agents,too_old)
return which_agents
def getShocks(self):
'''
Finds permanent and transitory income "shocks" for each agent this period. As this is a
perfect foresight model, there are no stochastic shocks: PermShkNow = PermGroFac for each
agent (according to their t_cycle) and TranShkNow = 1.0 for all agents.
Parameters
----------
None
Returns
-------
None
'''
PermGroFac = np.array(self.PermGroFac)
self.PermShkNow = PermGroFac[self.t_cycle-1] # cycle time has already been advanced
self.TranShkNow = np.ones(self.AgentCount)
def getRfree(self):
'''
Returns an array of size self.AgentCount with self.Rfree in every entry.
Parameters
----------
None
Returns
-------
RfreeNow : np.array
Array of size self.AgentCount with risk free interest rate for each agent.
'''
RfreeNow = self.Rfree*np.ones(self.AgentCount)
return RfreeNow
def getStates(self):
'''
Calculates updated values of normalized market resources and permanent income level for each
agent. Uses pLvlNow, aNrmNow, PermShkNow, TranShkNow.
Parameters
----------
None
Returns
-------
None
'''
pLvlPrev = self.pLvlNow
aNrmPrev = self.aNrmNow
RfreeNow = self.getRfree()
# Calculate new states: normalized market resources and permanent income level
self.pLvlNow = pLvlPrev*self.PermShkNow # Updated permanent income level
self.PlvlAggNow = self.PlvlAggNow*self.PermShkAggNow # Updated aggregate permanent productivity level
ReffNow = RfreeNow/self.PermShkNow # "Effective" interest factor on normalized assets
self.bNrmNow = ReffNow*aNrmPrev # Bank balances before labor income
self.mNrmNow = self.bNrmNow + self.TranShkNow # Market resources after income
return None
def getControls(self):
'''
Calculates consumption for each consumer of this type using the consumption functions.
Parameters
----------
None
Returns
-------
None
'''
cNrmNow = np.zeros(self.AgentCount) + np.nan
MPCnow = np.zeros(self.AgentCount) + np.nan
for t in range(self.T_cycle):
these = t == self.t_cycle
cNrmNow[these], MPCnow[these] = self.solution[t].cFunc.eval_with_derivative(self.mNrmNow[these])
self.cNrmNow = cNrmNow
self.MPCnow = MPCnow
return None
def getPostStates(self):
'''
Calculates end-of-period assets for each consumer of this type.
Parameters
----------
None
Returns
-------
None
'''
self.aNrmNow = self.mNrmNow - self.cNrmNow
self.aLvlNow = self.aNrmNow*self.pLvlNow # Useful in some cases to precalculate asset level
return None
def checkConditions(self,verbose=False,verbose_reference=False,public_call=False):
'''
This method checks whether the instance's type satisfies the Growth Impatience Condition
(GIC), Return Impatience Condition (RIC), Absolute Impatience Condition (AIC), Weak Return
Impatience Condition (WRIC), Finite Human Wealth Condition (FHWC) and Finite Value of
Autarky Condition (FVAC). These are the conditions that are sufficient for nondegenerate
solutions under infinite horizon with a 1 period cycle. Depending on the model at hand, a
different combination of these conditions must be satisfied. To check which conditions are
relevant to the model at hand, a reference to the relevant theoretical literature is made.
Parameters
----------
verbose : boolean
Specifies different levels of verbosity of feedback. When False, it only reports whether the
instance's type fails to satisfy a particular condition. When True, it reports all results, i.e.
the factor values for all conditions.
Returns
-------
None
'''
# This method only checks for the conditions for infinite horizon models
# with a 1 period cycle. If these conditions are not met, we exit early.
if self.cycles!=0 or self.T_cycle > 1:
return
violated = False
Thorn = (self.Rfree*self.DiscFac*self.LivPrb[0])**(1/self.CRRA)
AIF = Thorn
#Evaluate and report on the Absolute Impatience Condition
self.Thorn = Thorn
self.AIF = AIF
if AIF<1:
self.AIC = True
if public_call:
print('The value of the absolute impatience factor for the supplied parameter values satisfies the Absolute Impatience Condition.', end = " ")
if verbose:
violated = False
print('Therefore, the absolute amount of consumption is expected to fall over time.')
print()
else:
self.AIC = False
print('The given type violates the Absolute Impatience Condition with the supplied parameter values; the AIF is %1.5f ' % (AIF), end=" ")
if verbose:
violated = True
print('Therefore, the absolute amount of consumption is expected to grow over time.')
print()
#Evaluate and report on the Growth Impatience Condition
GIF = Thorn/self.PermGroFac[0]
self.GIF = GIF
if GIF<1:
self.GIC = True
if public_call:
print('The value of the growth impatience factor for the supplied parameter values satisfies the Growth Impatience Condition.', end = " ")
if verbose:
print(' Therefore, the ratio of individual wealth to permanent income will fall indefinitely.')
print()
else:
self.GIC = False
violated = True
print('The given parameter values violate the Growth Impatience Condition for this consumer type; the GIF is: %2.4f' % (GIF), end = " ")
if verbose:
print(' Therefore, the ratio of individual wealth to permanent income will grow toward infinity.')
print()
#Evaluate and report on the Return Impatience Condition
RIF = Thorn/self.Rfree
self.RIF = RIF
if RIF<1:
self.RIC = True
if public_call:
print('The return impatience factor value for the supplied parameter values satisfies the Return Impatience Condition.', end = " ")
if verbose:
print('Therefore, the limiting consumption function is not c(m)=0')
print()
else:
self.RIC = False
violated = True
print('The given type violates the Return Impatience Condition with the supplied parameter values; the factor is %1.5f ' % (RIF), end = " ")
if verbose:
print('Therefore, the limiting consumption function is c(m)=0')
print()
#Evaluate and report on the Finite Human Wealth Condition
FHWF = self.PermGroFac[0]/self.Rfree
self.FHWF = FHWF
if FHWF<1:
self.hNrm = 1.0/(1.0-self.PermGroFac[0]/self.Rfree)
self.FHWC = True
if public_call:
print('The Finite Human wealth factor value for the supplied parameter values satisfies the Finite Human Wealth Condition.', end = " ")
if verbose:
print('Therefore, the limiting consumption function is not c(m)=Infinity')
print('and human wealth normalized by permanent income is %2.5f' % (self.hNrm))
self.cNrmPDV = 1.0/(1.0-self.Thorn/self.Rfree)
print('and the PDV of future consumption growth is %2.5f' % (self.cNrmPDV) )
print()
else:
self.FHWC = False
print('The given type violates the Finite Human Wealth Condition; the Finite Human wealth factor value %2.5f ' % (FHWF), end = " ")
violated = True
if verbose:
print('Therefore, the limiting consumption function is c(m)=Infinity for all m')
print()
if verbose and violated and verbose_reference:
print('[!] For more information on the conditions, see Table 3 in "Theoretical Foundations of Buffer Stock Saving" at http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/')
return violated
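# Worked example of the factors above (illustrative parameters): with
# Rfree = 1.03, DiscFac = 0.96, LivPrb = [1.0], CRRA = 2 and PermGroFac = [1.01]:
#   Thorn = AIF = (1.03*0.96*1.0)**(1/2) ~= 0.9944  -> AIC satisfied
#   GIF = 0.9944/1.01 ~= 0.9845                     -> GIC satisfied
#   RIF = 0.9944/1.03 ~= 0.9654                     -> RIC satisfied
#   FHWF = 1.01/1.03 ~= 0.9806                      -> FHWC satisfied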
class IndShockConsumerType(PerfForesightConsumerType):
'''
A consumer type with idiosyncratic shocks to permanent and transitory income.
His problem is defined by a sequence of income distributions, survival prob-
abilities, and permanent income growth rates, as well as time invariant values
for risk aversion, discount factor, the interest rate, the grid of end-of-
period assets, and an artificial borrowing constraint.
'''
time_inv_ = PerfForesightConsumerType.time_inv_ + ['BoroCnstArt','vFuncBool','CubicBool']
shock_vars_ = ['PermShkNow','TranShkNow']
def __init__(self,cycles=1,time_flow=True,verbose=False,quiet=False,**kwds):
'''
Instantiate a new ConsumerType with given data.
See ConsumerParameters.init_idiosyncratic_shocks for a dictionary of
the keywords that should be passed to the constructor.
Parameters
----------
cycles : int
Number of times the sequence of periods should be solved.
time_flow : boolean
Whether time is currently "flowing" forward for this instance.
Returns
-------
None
'''
# Initialize a basic AgentType
PerfForesightConsumerType.__init__(self,cycles=cycles,time_flow=time_flow,
verbose=verbose,quiet=quiet, **kwds)
# Add consumer-type specific objects, copying to create independent versions
self.solveOnePeriod = solveConsIndShock # idiosyncratic shocks solver
self.update() # Make assets grid, income process, terminal solution
def updateIncomeProcess(self):
'''
Updates this agent's income process based on his own attributes.
Parameters
----------
none
Returns
-------
none
'''
original_time = self.time_flow
self.timeFwd()
IncomeDstn, PermShkDstn, TranShkDstn = constructLognormalIncomeProcessUnemployment(self)
self.IncomeDstn = IncomeDstn
self.PermShkDstn = PermShkDstn
self.TranShkDstn = TranShkDstn
self.addToTimeVary('IncomeDstn','PermShkDstn','TranShkDstn')
if not original_time:
self.timeRev()
def updateAssetsGrid(self):
'''
Updates this agent's end-of-period assets grid by constructing a multi-
exponentially spaced grid of aXtra values.
Parameters
----------
none
Returns
-------
none
'''
aXtraGrid = constructAssetsGrid(self)
self.aXtraGrid = aXtraGrid
self.addToTimeInv('aXtraGrid')
def update(self):
'''
Update the income process, the assets grid, and the terminal solution.
Parameters
----------
none
Returns
-------
none
'''
self.updateIncomeProcess()
self.updateAssetsGrid()
self.updateSolutionTerminal()
def getShocks(self):
'''
Gets permanent and transitory income shocks for this period. Samples from IncomeDstn for
each period in the cycle.
Parameters
----------
None
Returns
-------
None
'''
PermShkNow = np.zeros(self.AgentCount) # Initialize shock arrays
TranShkNow = np.zeros(self.AgentCount)
newborn = self.t_age == 0
for t in range(self.T_cycle):
these = t == self.t_cycle
N = np.sum(these)
if N > 0:
IncomeDstnNow = self.IncomeDstn[t-1] # set current income distribution
PermGroFacNow = self.PermGroFac[t-1] # and permanent growth factor
Indices = np.arange(IncomeDstnNow[0].size) # just a list of integers
# Get random draws of income shocks from the discrete distribution
EventDraws = drawDiscrete(N,X=Indices,P=IncomeDstnNow[0],exact_match=False,seed=self.RNG.randint(0,2**31-1))
PermShkNow[these] = IncomeDstnNow[1][EventDraws]*PermGroFacNow # permanent "shock" includes expected growth
TranShkNow[these] = IncomeDstnNow[2][EventDraws]
# That procedure used the *last* period in the sequence for newborns, but that's not right
# Redraw shocks for newborns, using the *first* period in the sequence. Approximation.
N = np.sum(newborn)
if N > 0:
these = newborn
IncomeDstnNow = self.IncomeDstn[0] # set current income distribution
PermGroFacNow = self.PermGroFac[0] # and permanent growth factor
Indices = np.arange(IncomeDstnNow[0].size) # just a list of integers
# Get random draws of income shocks from the discrete distribution
EventDraws = drawDiscrete(N,X=Indices,P=IncomeDstnNow[0],exact_match=False,seed=self.RNG.randint(0,2**31-1))
PermShkNow[these] = IncomeDstnNow[1][EventDraws]*PermGroFacNow # permanent "shock" includes expected growth
TranShkNow[these] = IncomeDstnNow[2][EventDraws]
# PermShkNow[newborn] = 1.0
TranShkNow[newborn] = 1.0
# Store the shocks in self
self.EmpNow = np.ones(self.AgentCount,dtype=bool)
self.EmpNow[TranShkNow == self.IncUnemp] = False
self.PermShkNow = PermShkNow
self.TranShkNow = TranShkNow
def calcBoundingValues(self):
'''
Calculate human wealth plus minimum and maximum MPC in an infinite
horizon model with only one period repeated indefinitely. Store results
as attributes of self. Human wealth is the present discounted value of
expected future income after receiving income this period, ignoring mort-
ality. The maximum MPC is the limit of the MPC as m --> mNrmMin. The
minimum MPC is the limit of the MPC as m --> infty.
Parameters
----------
None
Returns
-------
None
'''
# Unpack the income distribution and get average and worst outcomes
PermShkValsNext = self.IncomeDstn[0][1]
TranShkValsNext = self.IncomeDstn[0][2]
ShkPrbsNext = self.IncomeDstn[0][0]
ExIncNext = np.dot(ShkPrbsNext,PermShkValsNext*TranShkValsNext)
PermShkMinNext = np.min(PermShkValsNext)
TranShkMinNext = np.min(TranShkValsNext)
WorstIncNext = PermShkMinNext*TranShkMinNext
WorstIncPrb = np.sum(ShkPrbsNext[(PermShkValsNext*TranShkValsNext)==WorstIncNext])
# Calculate human wealth and the infinite horizon natural borrowing constraint
hNrm = (ExIncNext*self.PermGroFac[0]/self.Rfree)/(1.0-self.PermGroFac[0]/self.Rfree)
temp = self.PermGroFac[0]*PermShkMinNext/self.Rfree
BoroCnstNat = -TranShkMinNext*temp/(1.0-temp)
PatFac = (self.DiscFac*self.LivPrb[0]*self.Rfree)**(1.0/self.CRRA)/self.Rfree
if BoroCnstNat < self.BoroCnstArt:
MPCmax = 1.0 # if natural borrowing constraint is overridden by artificial one, MPCmax is 1
else:
MPCmax = 1.0 - WorstIncPrb**(1.0/self.CRRA)*PatFac
MPCmin = 1.0 - PatFac
# Store the results as attributes of self
self.hNrm = hNrm
self.MPCmin = MPCmin
self.MPCmax = MPCmax
def makeEulerErrorFunc(self,mMax=100,approx_inc_dstn=True):
'''
Creates a "normalized Euler error" function for this instance, mapping
from market resources to "consumption error per dollar of consumption."
Stores result in attribute eulerErrorFunc as an interpolated function.
Has option to use approximate income distribution stored in self.IncomeDstn
or to use a (temporary) very dense approximation.
Only works on (one period) infinite horizon models at this time, will
be generalized later.
Parameters
----------
mMax : float
Maximum normalized market resources for the Euler error function.
approx_inc_dstn : Boolean
Indicator for whether to use the approximate discrete income distri-
bution stored in self.IncomeDstn[0], or to use a very accurate
discrete approximation instead. When True, uses approximation in
IncomeDstn; when False, makes and uses a very dense approximation.
Returns
-------
None
'''
# Get the income distribution (or make a very dense one)
if approx_inc_dstn:
IncomeDstn = self.IncomeDstn[0]
else:
TranShkDstn = approxMeanOneLognormal(N=200,sigma=self.TranShkStd[0],
tail_N=50,tail_order=1.3, tail_bound=[0.05,0.95])
TranShkDstn = addDiscreteOutcomeConstantMean(TranShkDstn,self.UnempPrb,self.IncUnemp)
PermShkDstn = approxMeanOneLognormal(N=200,sigma=self.PermShkStd[0],
tail_N=50,tail_order=1.3, tail_bound=[0.05,0.95])
IncomeDstn = combineIndepDstns(PermShkDstn,TranShkDstn)
# Make a grid of market resources
mNowMin = self.solution[0].mNrmMin + 10**(-15) # add tiny bit to get around 0/0 problem
mNowMax = mMax
mNowGrid = np.linspace(mNowMin,mNowMax,1000)
# Get the consumption function this period and the marginal value function
# for next period. Note that this part assumes a one period cycle.
cFuncNow = self.solution[0].cFunc
vPfuncNext = self.solution[0].vPfunc
# Calculate consumption this period at each gridpoint (and assets)
cNowGrid = cFuncNow(mNowGrid)
aNowGrid = mNowGrid - cNowGrid
# Tile the grids for fast computation
ShkCount = IncomeDstn[0].size
aCount = aNowGrid.size
aNowGrid_tiled = np.tile(aNowGrid,(ShkCount,1))
PermShkVals_tiled = (np.tile(IncomeDstn[1],(aCount,1))).transpose()
TranShkVals_tiled = (np.tile(IncomeDstn[2],(aCount,1))).transpose()
ShkPrbs_tiled = (np.tile(IncomeDstn[0],(aCount,1))).transpose()
# Calculate marginal value next period for each gridpoint and each shock
mNextArray = self.Rfree/(self.PermGroFac[0]*PermShkVals_tiled)*aNowGrid_tiled + TranShkVals_tiled
vPnextArray = vPfuncNext(mNextArray)
# Calculate expected marginal value and implied optimal consumption
ExvPnextGrid = self.DiscFac*self.Rfree*self.LivPrb[0]*self.PermGroFac[0]**(-self.CRRA)* \
np.sum(PermShkVals_tiled**(-self.CRRA)*vPnextArray*ShkPrbs_tiled,axis=0)
cOptGrid = ExvPnextGrid**(-1.0/self.CRRA)
# Calculate Euler error and store an interpolated function
EulerErrorNrmGrid = (cNowGrid - cOptGrid)/cOptGrid
eulerErrorFunc = LinearInterp(mNowGrid,EulerErrorNrmGrid)
self.eulerErrorFunc = eulerErrorFunc
def preSolve(self):
# AgentType.preSolve(self)
# Update all income process variables to match any attributes that might
# have been changed since `__init__` or `solve()` was last called.
# self.updateIncomeProcess()
self.updateSolutionTerminal()
if not self.quiet:
self.checkConditions(verbose=self.verbose,public_call=False)
def checkConditions(self,verbose=False,public_call=True):
'''
This method checks whether the instance's type satisfies the Growth Impatience Condition
(GIC), Return Impatience Condition (RIC), Absolute Impatience Condition (AIC), Weak Return
Impatience Condition (WRIC), Finite Human Wealth Condition (FHWC) and Finite Value of
Autarky Condition (FVAC). These are the conditions that are sufficient for nondegenerate
infinite horizon solutions when there is a 1 period cycle. Depending on the model at hand, a
different combination of these conditions must be satisfied. (For an exposition of the
conditions, see http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/)
Parameters
----------
verbose : boolean
Specifies different levels of verbosity of feedback. When False, it only reports whether the
instance's type fails to satisfy a particular condition. When True, it reports all results, i.e.
the factor values for all conditions.
Returns
-------
None
'''
violated = PerfForesightConsumerType.checkConditions(self, verbose=verbose, verbose_reference=False)
if self.cycles!=0 or self.T_cycle > 1:
return
EPermShkInv=np.dot(self.PermShkDstn[0][0],1/self.PermShkDstn[0][1])
PermGroFacAdj=self.PermGroFac[0]*EPermShkInv
Thorn = (self.Rfree*self.DiscFac*self.LivPrb[0])**(1/self.CRRA)
GIF = Thorn/PermGroFacAdj
self.GIF = GIF
self.Thorn = Thorn
self.PermGroFacAdj = PermGroFacAdj
self.EPermShkInv = EPermShkInv
#Evaluate and report on the Growth Impatience Condition
if GIF<1:
self.GIC = True
if public_call:
print('The value of the growth impatience factor for the supplied parameter values satisfies the Growth Impatience Condition.', end = " ")
if verbose:
print('Therefore, a target level of wealth exists.')
print()
else:
self.GIC = False
violated = True
print('The given parameter values violate the Growth Impatience Condition for this consumer type; the GIF is: %2.4f' % (GIF), end = " ")
if verbose:
print('Therefore, a target level of wealth does not exist.')
print()
#Evaluate and report on the Weak Return Impatience Condition
WRIF=(self.LivPrb[0]*(self.UnempPrb**(1/self.CRRA))*(self.Rfree*self.DiscFac)**(1/self.CRRA))/self.Rfree
self.WRIF = WRIF
if WRIF<1:
self.WRIC = True
if public_call:
print('The Weak Return Impatience Factor value for the supplied parameter values satisfies the Weak Return Impatience Condition.')
else:
self.WRIC = False
violated = True
print('The given type violates the Weak Return Impatience Condition with the supplied parameter values. The WRIF is: %2.4f' % (WRIF), end = " ")
if verbose:
print('Therefore, a nondegenerate solution is not available.')
print()
#Evaluate and report on the Finite Value of Autarky Condition
EPermShkValFunc=np.dot(self.PermShkDstn[0][0],self.PermShkDstn[0][1]**(1-self.CRRA))
self.EPermShkValFunc = EPermShkValFunc
FVAF=self.LivPrb[0]*self.DiscFac*EPermShkValFunc*(self.PermGroFac[0]**(1-self.CRRA))
self.FVAF = FVAF
if FVAF<1:
self.FVAC = True
if public_call:
print('The Finite Value of autarky factor value for the supplied parameter values satisfies the Finite Value of Autarky Condition.')
else:
self.FVAC = False
print('The given type violates the Finite Value of Autarky Condition with the supplied parameter values. The FVAF is %2.4f' %(FVAF), end = " ")
violated = True
if verbose:
print('Therefore, a nondegenerate solution is not available.')
print()
if verbose and violated:
print('\n[!] For more information on the conditions, see Table 3 in "Theoretical Foundations of Buffer Stock Saving" at http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/')
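# Quick usage sketch (illustrative; mirrors the example in main() further below):
# the impatience/value factors computed in checkConditions are reported for an
# infinite-horizon type.
#   agent = IndShockConsumerType(cycles=0, **Params.init_idiosyncratic_shocks)
#   agent.checkConditions(verbose=True)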
class KinkedRconsumerType(IndShockConsumerType):
'''
A consumer type that faces idiosyncratic shocks to income and has a different
interest factor on saving vs borrowing. Extends IndShockConsumerType, with
very small changes. Solver for this class is currently only compatible with
linear spline interpolation.
'''
time_inv_ = copy(IndShockConsumerType.time_inv_)
time_inv_.remove('Rfree')
time_inv_ += ['Rboro', 'Rsave']
def __init__(self,cycles=1,time_flow=True,**kwds):
'''
Instantiate a new ConsumerType with given data.
See ConsumerParameters.init_kinked_R for a dictionary of
the keywords that should be passed to the constructor.
Parameters
----------
cycles : int
Number of times the sequence of periods should be solved.
time_flow : boolean
Whether time is currently "flowing" forward for this instance.
Returns
-------
None
'''
# Initialize a basic AgentType
PerfForesightConsumerType.__init__(self,cycles=cycles,time_flow=time_flow,**kwds)
# Add consumer-type specific objects, copying to create independent versions
self.solveOnePeriod = solveConsKinkedR # kinked R solver
self.update() # Make assets grid, income process, terminal solution
def preSolve(self):
# AgentType.preSolve(self)
self.updateSolutionTerminal()
def calcBoundingValues(self):
'''
Calculate human wealth plus minimum and maximum MPC in an infinite
horizon model with only one period repeated indefinitely. Store results
as attributes of self. Human wealth is the present discounted value of
expected future income after receiving income this period, ignoring mort-
ality. The maximum MPC is the limit of the MPC as m --> mNrmMin. The
minimum MPC is the limit of the MPC as m --> infty. This version deals
with the different interest rates on borrowing vs saving.
Parameters
----------
None
Returns
-------
None
'''
# Unpack the income distribution and get average and worst outcomes
PermShkValsNext = self.IncomeDstn[0][1]
TranShkValsNext = self.IncomeDstn[0][2]
ShkPrbsNext = self.IncomeDstn[0][0]
ExIncNext = np.dot(ShkPrbsNext,PermShkValsNext*TranShkValsNext)
PermShkMinNext = np.min(PermShkValsNext)
TranShkMinNext = np.min(TranShkValsNext)
WorstIncNext = PermShkMinNext*TranShkMinNext
WorstIncPrb = np.sum(ShkPrbsNext[(PermShkValsNext*TranShkValsNext)==WorstIncNext])
# Calculate human wealth and the infinite horizon natural borrowing constraint
hNrm = (ExIncNext*self.PermGroFac[0]/self.Rsave)/(1.0-self.PermGroFac[0]/self.Rsave)
temp = self.PermGroFac[0]*PermShkMinNext/self.Rboro
BoroCnstNat = -TranShkMinNext*temp/(1.0-temp)
PatFacTop = (self.DiscFac*self.LivPrb[0]*self.Rsave)**(1.0/self.CRRA)/self.Rsave
PatFacBot = (self.DiscFac*self.LivPrb[0]*self.Rboro)**(1.0/self.CRRA)/self.Rboro
if BoroCnstNat < self.BoroCnstArt:
MPCmax = 1.0 # if natural borrowing constraint is overridden by artificial one, MPCmax is 1
else:
MPCmax = 1.0 - WorstIncPrb**(1.0/self.CRRA)*PatFacBot
MPCmin = 1.0 - PatFacTop
# Store the results as attributes of self
self.hNrm = hNrm
self.MPCmin = MPCmin
self.MPCmax = MPCmax
def makeEulerErrorFunc(self,mMax=100,approx_inc_dstn=True):
'''
Creates a "normalized Euler error" function for this instance, mapping
from market resources to "consumption error per dollar of consumption."
Stores result in attribute eulerErrorFunc as an interpolated function.
Has option to use approximate income distribution stored in self.IncomeDstn
or to use a (temporary) very dense approximation.
SHOULD BE INHERITED FROM ConsIndShockModel
Parameters
----------
mMax : float
Maximum normalized market resources for the Euler error function.
approx_inc_dstn : Boolean
Indicator for whether to use the approximate discrete income distri-
bution stored in self.IncomeDstn[0], or to use a very accurate
discrete approximation instead. When True, uses approximation in
IncomeDstn; when False, makes and uses a very dense approximation.
Returns
-------
None
'''
raise NotImplementedError()
def getRfree(self):
'''
        Returns an array of size self.AgentCount with self.Rboro or self.Rsave in each entry,
        depending on the sign of self.aNrmNow: Rsave where end-of-period assets are positive,
        Rboro otherwise.
Parameters
----------
None
Returns
-------
RfreeNow : np.array
Array of size self.AgentCount with risk free interest rate for each agent.
'''
RfreeNow = self.Rboro*np.ones(self.AgentCount)
RfreeNow[self.aNrmNow > 0] = self.Rsave
return RfreeNow
def checkConditions(self,verbose=False):
'''
This method checks whether the instance's type satisfies the Growth Impatience Condition
(GIC), Return Impatience Condition (RIC), Absolute Impatience Condition (AIC), Weak Return
Impatience Condition (WRIC), Finite Human Wealth Condition (FHWC) and Finite Value of
Autarky Condition (FVAC). These are the conditions that are sufficient for nondegenerate
infinite horizon solutions with a 1 period cycle. Depending on the model at hand, a
different combination of these conditions must be satisfied. To check which conditions are
relevant to the model at hand, a reference to the relevant theoretical literature is made.
SHOULD BE INHERITED FROM ConsIndShockModel
Parameters
----------
verbose : boolean
Specifies different levels of verbosity of feedback. When False, it only reports whether the
instance's type fails to satisfy a particular condition. When True, it reports all results, i.e.
the factor values for all conditions.
Returns
-------
None
'''
raise NotImplementedError()
# ==================================================================================
# = Functions for generating discrete income processes and simulated income shocks =
# ==================================================================================
def constructLognormalIncomeProcessUnemployment(parameters):
'''
Generates a list of discrete approximations to the income process for each
life period, from end of life to beginning of life. Permanent shocks are mean
one lognormally distributed with standard deviation PermShkStd[t] during the
working life, and degenerate at 1 in the retirement period. Transitory shocks
are mean one lognormally distributed with a point mass at IncUnemp with
probability UnempPrb while working; they are mean one with a point mass at
IncUnempRet with probability UnempPrbRet. Retirement occurs
after t=T_retire periods of working.
Note 1: All time in this function runs forward, from t=0 to t=T
Note 2: All parameters are passed as attributes of the input parameters.
Parameters (passed as attributes of the input parameters)
----------
PermShkStd : [float]
List of standard deviations in log permanent income uncertainty during
the agent's life.
PermShkCount : int
The number of approximation points to be used in the discrete approxima-
tion to the permanent income shock distribution.
TranShkStd : [float]
List of standard deviations in log transitory income uncertainty during
the agent's life.
TranShkCount : int
The number of approximation points to be used in the discrete approxima-
        tion to the transitory income shock distribution.
UnempPrb : float
The probability of becoming unemployed during the working period.
UnempPrbRet : float
The probability of not receiving typical retirement income when retired.
T_retire : int
The index value for the final working period in the agent's life.
If T_retire <= 0 then there is no retirement.
IncUnemp : float
Transitory income received when unemployed.
IncUnempRet : float
Transitory income received while "unemployed" when retired.
T_cycle : int
Total number of non-terminal periods in the consumer's sequence of periods.
Returns
-------
IncomeDstn : [[np.array]]
A list with T_cycle elements, each of which is a list of three arrays
representing a discrete approximation to the income process in a period.
Order: probabilities, permanent shocks, transitory shocks.
PermShkDstn : [[np.array]]
A list with T_cycle elements, each of which is a list of two arrays
representing a discrete approximation to the permanent income shocks.
TranShkDstn : [[np.array]]
A list with T_cycle elements, each of which is a list of two arrays
representing a discrete approximation to the transitory income shocks.
'''
# Unpack the parameters from the input
PermShkStd = parameters.PermShkStd
PermShkCount = parameters.PermShkCount
TranShkStd = parameters.TranShkStd
TranShkCount = parameters.TranShkCount
T_cycle = parameters.T_cycle
T_retire = parameters.T_retire
UnempPrb = parameters.UnempPrb
IncUnemp = parameters.IncUnemp
UnempPrbRet = parameters.UnempPrbRet
IncUnempRet = parameters.IncUnempRet
IncomeDstn = [] # Discrete approximations to income process in each period
PermShkDstn = [] # Discrete approximations to permanent income shocks
TranShkDstn = [] # Discrete approximations to transitory income shocks
# Fill out a simple discrete RV for retirement, with value 1.0 (mean of shocks)
# in normal times; value 0.0 in "unemployment" times with small prob.
if T_retire > 0:
if UnempPrbRet > 0:
PermShkValsRet = np.array([1.0, 1.0]) # Permanent income is deterministic in retirement (2 states for temp income shocks)
TranShkValsRet = np.array([IncUnempRet,
(1.0-UnempPrbRet*IncUnempRet)/(1.0-UnempPrbRet)])
ShkPrbsRet = np.array([UnempPrbRet, 1.0-UnempPrbRet])
else:
PermShkValsRet = np.array([1.0])
TranShkValsRet = np.array([1.0])
ShkPrbsRet = np.array([1.0])
IncomeDstnRet = [ShkPrbsRet,PermShkValsRet,TranShkValsRet]
# Loop to fill in the list of IncomeDstn random variables.
for t in range(T_cycle): # Iterate over all periods, counting forward
if T_retire > 0 and t >= T_retire:
# Then we are in the "retirement period" and add a retirement income object.
IncomeDstn.append(deepcopy(IncomeDstnRet))
PermShkDstn.append([np.array([1.0]),np.array([1.0])])
TranShkDstn.append([ShkPrbsRet,TranShkValsRet])
else:
# We are in the "working life" periods.
TranShkDstn_t = approxMeanOneLognormal(N=TranShkCount, sigma=TranShkStd[t], tail_N=0)
if UnempPrb > 0:
TranShkDstn_t = addDiscreteOutcomeConstantMean(TranShkDstn_t, p=UnempPrb, x=IncUnemp)
PermShkDstn_t = approxMeanOneLognormal(N=PermShkCount, sigma=PermShkStd[t], tail_N=0)
IncomeDstn.append(combineIndepDstns(PermShkDstn_t,TranShkDstn_t)) # mix the independent distributions
PermShkDstn.append(PermShkDstn_t)
TranShkDstn.append(TranShkDstn_t)
return IncomeDstn, PermShkDstn, TranShkDstn
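# Usage sketch (not part of the original module; parameter values are illustrative).
# `parameters` only needs the attributes unpacked above, so a SimpleNamespace is
# enough to inspect the output shape.
def _example_income_process():
    from types import SimpleNamespace
    params = SimpleNamespace(
        PermShkStd=[0.1], PermShkCount=7, TranShkStd=[0.1], TranShkCount=7,
        T_cycle=1, T_retire=0, UnempPrb=0.05, IncUnemp=0.3,
        UnempPrbRet=0.0, IncUnempRet=0.0)
    IncomeDstn, PermShkDstn, TranShkDstn = \
        constructLognormalIncomeProcessUnemployment(params)
    # Each period's element is [probabilities, permanent shocks, transitory shocks];
    # the probabilities should sum to one.
    assert abs(IncomeDstn[0][0].sum() - 1.0) < 1e-8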
def applyFlatIncomeTax(IncomeDstn,tax_rate,T_retire,unemployed_indices=[],transitory_index=2):
'''
Applies a flat income tax rate to all employed income states during the working
period of life (those before T_retire). Time runs forward in this function.
Parameters
----------
IncomeDstn : [income distributions]
The discrete approximation to the income distribution in each time period.
tax_rate : float
A flat income tax rate to be applied to all employed income.
T_retire : int
The time index after which the agent retires.
unemployed_indices : [int]
Indices of transitory shocks that represent unemployment states (no tax).
transitory_index : int
The index of each element of IncomeDstn representing transitory shocks.
Returns
-------
IncomeDstn_new : [income distributions]
The updated income distributions, after applying the tax.
'''
IncomeDstn_new = deepcopy(IncomeDstn)
i = transitory_index
for t in range(len(IncomeDstn)):
if t < T_retire:
for j in range((IncomeDstn[t][i]).size):
if j not in unemployed_indices:
IncomeDstn_new[t][i][j] = IncomeDstn[t][i][j]*(1-tax_rate)
return IncomeDstn_new
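# Example (illustrative values): tax employed transitory income at 20% before a
# retirement date at t=40; which indices count as "unemployed" depends on how the
# distribution above was assembled.
#   IncomeDstn_taxed = applyFlatIncomeTax(IncomeDstn, tax_rate=0.2, T_retire=40,
#                                         unemployed_indices=[0])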
# =======================================================
# ================ Other useful functions ===============
# =======================================================
def constructAssetsGrid(parameters):
'''
Constructs the base grid of post-decision states, representing end-of-period
assets above the absolute minimum.
All parameters are passed as attributes of the single input parameters. The
input can be an instance of a ConsumerType, or a custom Parameters class.
Parameters
----------
aXtraMin: float
Minimum value for the a-grid
aXtraMax: float
Maximum value for the a-grid
aXtraCount: int
Size of the a-grid
aXtraExtra: [float]
Extra values for the a-grid.
exp_nest: int
Level of nesting for the exponentially spaced grid
Returns
-------
aXtraGrid: np.ndarray
Base array of values for the post-decision-state grid.
'''
# Unpack the parameters
aXtraMin = parameters.aXtraMin
aXtraMax = parameters.aXtraMax
aXtraCount = parameters.aXtraCount
aXtraExtra = parameters.aXtraExtra
grid_type = 'exp_mult'
exp_nest = parameters.aXtraNestFac
# Set up post decision state grid:
aXtraGrid = None
if grid_type == "linear":
aXtraGrid = np.linspace(aXtraMin, aXtraMax, aXtraCount)
elif grid_type == "exp_mult":
aXtraGrid = makeGridExpMult(ming=aXtraMin, maxg=aXtraMax, ng=aXtraCount, timestonest=exp_nest)
else:
raise Exception("grid_type not recognized in __init__." + \
"Please ensure grid_type is 'linear' or 'exp_mult'")
# Add in additional points for the grid:
for a in aXtraExtra:
if (a is not None):
if a not in aXtraGrid:
j = aXtraGrid.searchsorted(a)
aXtraGrid = np.insert(aXtraGrid, j, a)
return aXtraGrid
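# Usage sketch (illustrative): any object exposing the attributes unpacked above
# works as `parameters`, e.g.
#   from types import SimpleNamespace
#   grid = constructAssetsGrid(SimpleNamespace(aXtraMin=0.001, aXtraMax=20.0,
#                                              aXtraCount=48, aXtraExtra=[None],
#                                              aXtraNestFac=3))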
####################################################################################################
def main():
import HARK.ConsumptionSaving.ConsumerParameters as Params
from HARK.utilities import plotFuncsDer, plotFuncs
    from time import perf_counter as clock  # time.clock was removed in Python 3.8
mystr = lambda number : "{:.4f}".format(number)
do_simulation = True
# Make and solve an example perfect foresight consumer
PFexample = PerfForesightConsumerType(**Params.init_perfect_foresight)
PFexample.cycles = 0 # Make this type have an infinite horizon
start_time = clock()
PFexample.solve()
end_time = clock()
print('Solving a perfect foresight consumer took ' + mystr(end_time-start_time) + ' seconds.')
PFexample.unpackcFunc()
PFexample.timeFwd()
# Plot the perfect foresight consumption function
print('Linear consumption function:')
mMin = PFexample.solution[0].mNrmMin
plotFuncs(PFexample.cFunc[0],mMin,mMin+10)
if do_simulation:
PFexample.T_sim = 120 # Set number of simulation periods
PFexample.track_vars = ['mNrmNow']
PFexample.initializeSim()
PFexample.simulate()
###############################################################################
# Make and solve an example consumer with idiosyncratic income shocks
IndShockExample = IndShockConsumerType(**Params.init_idiosyncratic_shocks)
IndShockExample.cycles = 0 # Make this type have an infinite horizon
start_time = clock()
IndShockExample.solve()
end_time = clock()
print('Solving a consumer with idiosyncratic shocks took ' + mystr(end_time-start_time) + ' seconds.')
IndShockExample.unpackcFunc()
IndShockExample.timeFwd()
# Plot the consumption function and MPC for the infinite horizon consumer
print('Concave consumption function:')
plotFuncs(IndShockExample.cFunc[0],IndShockExample.solution[0].mNrmMin,5)
print('Marginal consumption function:')
plotFuncsDer(IndShockExample.cFunc[0],IndShockExample.solution[0].mNrmMin,5)
# Compare the consumption functions for the perfect foresight and idiosyncratic
# shock types. Risky income cFunc asymptotically approaches perfect foresight cFunc.
print('Consumption functions for perfect foresight vs idiosyncratic shocks:')
plotFuncs([PFexample.cFunc[0],IndShockExample.cFunc[0]],IndShockExample.solution[0].mNrmMin,100)
# Compare the value functions for the two types
if IndShockExample.vFuncBool:
print('Value functions for perfect foresight vs idiosyncratic shocks:')
plotFuncs([PFexample.solution[0].vFunc,IndShockExample.solution[0].vFunc],
IndShockExample.solution[0].mNrmMin+0.5,10)
# Simulate some data; results stored in mNrmNow_hist, cNrmNow_hist, and pLvlNow_hist
if do_simulation:
IndShockExample.T_sim = 120
IndShockExample.track_vars = ['mNrmNow','cNrmNow','pLvlNow']
IndShockExample.makeShockHistory() # This is optional, simulation will draw shocks on the fly if it isn't run.
IndShockExample.initializeSim()
IndShockExample.simulate()
###########################################################################
# Make and solve an idiosyncratic shocks consumer with a finite lifecycle
LifecycleExample = IndShockConsumerType(**Params.init_lifecycle)
LifecycleExample.cycles = 1 # Make this consumer live a sequence of periods exactly once
start_time = clock()
LifecycleExample.solve()
end_time = clock()
print('Solving a lifecycle consumer took ' + mystr(end_time-start_time) + ' seconds.')
LifecycleExample.unpackcFunc()
LifecycleExample.timeFwd()
# Plot the consumption functions during working life
print('Consumption functions while working:')
mMin = min([LifecycleExample.solution[t].mNrmMin for t in range(LifecycleExample.T_cycle)])
plotFuncs(LifecycleExample.cFunc[:LifecycleExample.T_retire],mMin,5)
# Plot the consumption functions during retirement
print('Consumption functions while retired:')
plotFuncs(LifecycleExample.cFunc[LifecycleExample.T_retire:],0,5)
LifecycleExample.timeRev()
# Simulate some data; results stored in mNrmNow_hist, cNrmNow_hist, pLvlNow_hist, and t_age_hist
if do_simulation:
LifecycleExample.T_sim = 120
LifecycleExample.track_vars = ['mNrmNow','cNrmNow','pLvlNow','t_age']
LifecycleExample.initializeSim()
LifecycleExample.simulate()
###############################################################################
# Make and solve a "cyclical" consumer type who lives the same four quarters repeatedly.
# The consumer has income that greatly fluctuates throughout the year.
CyclicalExample = IndShockConsumerType(**Params.init_cyclical)
CyclicalExample.cycles = 0
start_time = clock()
CyclicalExample.solve()
end_time = clock()
print('Solving a cyclical consumer took ' + mystr(end_time-start_time) + ' seconds.')
CyclicalExample.unpackcFunc()
CyclicalExample.timeFwd()
# Plot the consumption functions for the cyclical consumer type
print('Quarterly consumption functions:')
mMin = min([X.mNrmMin for X in CyclicalExample.solution])
plotFuncs(CyclicalExample.cFunc,mMin,5)
# Simulate some data; results stored in cHist, mHist, bHist, aHist, MPChist, and pHist
if do_simulation:
CyclicalExample.T_sim = 480
CyclicalExample.track_vars = ['mNrmNow','cNrmNow','pLvlNow','t_cycle']
CyclicalExample.initializeSim()
CyclicalExample.simulate()
###############################################################################
# Make and solve an agent with a kinky interest rate
KinkyExample = KinkedRconsumerType(**Params.init_kinked_R)
KinkyExample.cycles = 0 # Make the Example infinite horizon
start_time = clock()
KinkyExample.solve()
end_time = clock()
print('Solving a kinky consumer took ' + mystr(end_time-start_time) + ' seconds.')
KinkyExample.unpackcFunc()
print('Kinky consumption function:')
KinkyExample.timeFwd()
plotFuncs(KinkyExample.cFunc[0],KinkyExample.solution[0].mNrmMin,5)
if do_simulation:
KinkyExample.T_sim = 120
KinkyExample.track_vars = ['mNrmNow','cNrmNow','pLvlNow']
KinkyExample.initializeSim()
KinkyExample.simulate()
if __name__ == '__main__':
main()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .domain_service import *
from .get_domain_service import *
from .get_ou_container import *
from .ou_container import *
from ._inputs import *
from . import outputs
# Make subpackages available:
if typing.TYPE_CHECKING:
import pulumi_azure_native.aad.v20170101 as v20170101
import pulumi_azure_native.aad.v20170601 as v20170601
import pulumi_azure_native.aad.v20200101 as v20200101
import pulumi_azure_native.aad.v20210301 as v20210301
else:
v20170101 = _utilities.lazy_import('pulumi_azure_native.aad.v20170101')
v20170601 = _utilities.lazy_import('pulumi_azure_native.aad.v20170601')
v20200101 = _utilities.lazy_import('pulumi_azure_native.aad.v20200101')
v20210301 = _utilities.lazy_import('pulumi_azure_native.aad.v20210301')
|
from ..db import engine, BaseModel
from ..models import *
async def db_startup():
async with engine.begin() as conn:
await conn.run_sync(BaseModel.metadata.create_all)
async def db_shutdown():
await engine.dispose()
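# Wiring sketch (assumption: these hooks target an ASGI framework such as
# FastAPI/Starlette; `app` is hypothetical and not defined in this module):
#   app.add_event_handler("startup", db_startup)
#   app.add_event_handler("shutdown", db_shutdown)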
|
#!/usr/bin/env python
# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""execution script."""
import code
import argparse
import os, warnings
import time
import pandas as pd
import pickle
import torch
import utils.exp_utils as utils
from evaluator import Evaluator
from predictor import Predictor
from plotting import plot_batch_prediction
for msg in ["Attempting to set identical bottom==top results",
"This figure includes Axes that are not compatible with tight_layout",
"Data has no positive values, and therefore cannot be log-scaled.",
".*invalid value encountered in double_scalars.*",
".*Mean of empty slice.*"]:
warnings.filterwarnings("ignore", msg)
def train(logger):
"""
perform the training routine for a given fold. saves plots and selected parameters to the experiment dir
specified in the configs.
"""
logger.info('performing training in {}D over fold {} on experiment {} with model {}'.format(
cf.dim, cf.fold, cf.exp_dir, cf.model))
net = model.net(cf, logger).cuda()
if hasattr(cf, "optimizer") and cf.optimizer.lower() == "adam":
logger.info("Using Adam optimizer.")
optimizer = torch.optim.Adam(utils.parse_params_for_optim(net, weight_decay=cf.weight_decay,
exclude_from_wd=cf.exclude_from_wd),
lr=cf.learning_rate[0])
else:
logger.info("Using AdamW optimizer.")
optimizer = torch.optim.AdamW(utils.parse_params_for_optim(net, weight_decay=cf.weight_decay,
exclude_from_wd=cf.exclude_from_wd),
lr=cf.learning_rate[0])
if cf.dynamic_lr_scheduling:
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode=cf.scheduling_mode, factor=cf.lr_decay_factor,
patience=cf.scheduling_patience)
model_selector = utils.ModelSelector(cf, logger)
train_evaluator = Evaluator(cf, logger, mode='train')
val_evaluator = Evaluator(cf, logger, mode=cf.val_mode)
starting_epoch = 1
# prepare monitoring
monitor_metrics = utils.prepare_monitoring(cf)
if cf.resume:
checkpoint_path = os.path.join(cf.fold_dir, "last_checkpoint")
starting_epoch, net, optimizer, monitor_metrics = \
utils.load_checkpoint(checkpoint_path, net, optimizer)
logger.info('resumed from checkpoint {} to epoch {}'.format(checkpoint_path, starting_epoch))
####### Use this to create hdf5
logger.info('loading dataset and initializing batch generators...')
print ("Start data loading...",time.time())
batch_gen = data_loader.get_train_generators(cf, logger)
print ("Finished batch gen data loading...",time.time())
####### Writing out train data to file
#train_data = dict()
#print ('Write training data to json')
#for bix in range(cf.num_train_batches):
# batch = next(batch_gen['train'])
# train_data.update(batch)
#with open('train_data.json', 'w') as outfile:
# json.dump(train_data, outfile)
#####################################
for epoch in range(starting_epoch, cf.num_epochs + 1):
logger.info('starting training epoch {}'.format(epoch))
start_time = time.time()
net.train()
train_results_list = []
for bix in range(cf.num_train_batches):
######### Insert call to grab right training data fold from hdf5
print ("Grab next batch from batch gen data loader ...",time.time())
##Stalled
batch = next(batch_gen['train']) ######## Instead of this line, grab a batch from training data fold
tic_fw = time.time()
print ("Start forward pass...",time.time())
results_dict = net.train_forward(batch)
tic_bw = time.time()
optimizer.zero_grad()
print ("Start backward pass..",time.time())
results_dict['torch_loss'].backward()
print ("Start optimizing...",time.time())
optimizer.step()
print('\rtr. batch {0}/{1} (ep. {2}) fw {3:.2f}s / bw {4:.2f} s / total {5:.2f} s || '.format(
bix + 1, cf.num_train_batches, epoch, tic_bw - tic_fw, time.time() - tic_bw,
time.time() - tic_fw) + results_dict['logger_string'], flush=True, end="")
train_results_list.append(({k:v for k,v in results_dict.items() if k != "seg_preds"}, batch["pid"]))
print()
_, monitor_metrics['train'] = train_evaluator.evaluate_predictions(train_results_list, monitor_metrics['train'])
logger.info('generating training example plot.')
utils.split_off_process(plot_batch_prediction, batch, results_dict, cf, outfile=os.path.join(
cf.plot_dir, 'pred_example_{}_train.png'.format(cf.fold)))
train_time = time.time() - start_time
logger.info('starting validation in mode {}.'.format(cf.val_mode))
with torch.no_grad():
net.eval()
if cf.do_validation:
val_results_list = []
val_predictor = Predictor(cf, net, logger, mode='val')
for _ in range(batch_gen['n_val']):
########## Insert call to grab right validation data fold from hdf5
batch = next(batch_gen[cf.val_mode])
if cf.val_mode == 'val_patient':
results_dict = val_predictor.predict_patient(batch)
elif cf.val_mode == 'val_sampling':
results_dict = net.train_forward(batch, is_validation=True)
#val_results_list.append([results_dict['boxes'], batch['pid']])
val_results_list.append(({k:v for k,v in results_dict.items() if k != "seg_preds"}, batch["pid"]))
_, monitor_metrics['val'] = val_evaluator.evaluate_predictions(val_results_list, monitor_metrics['val'])
model_selector.run_model_selection(net, optimizer, monitor_metrics, epoch)
# update monitoring and prediction plots
monitor_metrics.update({"lr":
{str(g): group['lr'] for (g, group) in enumerate(optimizer.param_groups)}})
logger.metrics2tboard(monitor_metrics, global_step=epoch)
epoch_time = time.time() - start_time
logger.info('trained epoch {}: took {} ({} train / {} val)'.format(
epoch, utils.get_formatted_duration(epoch_time, "ms"), utils.get_formatted_duration(train_time, "ms"),
utils.get_formatted_duration(epoch_time-train_time, "ms")))
########### Insert call to grab right validation data fold from hdf5
batch = next(batch_gen['val_sampling'])
results_dict = net.train_forward(batch, is_validation=True)
logger.info('generating validation-sampling example plot.')
utils.split_off_process(plot_batch_prediction, batch, results_dict, cf, outfile=os.path.join(
cf.plot_dir, 'pred_example_{}_val.png'.format(cf.fold)))
# -------------- scheduling -----------------
if cf.dynamic_lr_scheduling:
scheduler.step(monitor_metrics["val"][cf.scheduling_criterion][-1])
else:
for param_group in optimizer.param_groups:
param_group['lr'] = cf.learning_rate[epoch-1]
def test(logger):
"""
perform testing for a given fold (or hold out set). save stats in evaluator.
"""
logger.info('starting testing model of fold {} in exp {}'.format(cf.fold, cf.exp_dir))
net = model.net(cf, logger).cuda()
test_predictor = Predictor(cf, net, logger, mode='test')
test_evaluator = Evaluator(cf, logger, mode='test')
################ Insert call to grab right test data (fold?) from hdf5
batch_gen = data_loader.get_test_generator(cf, logger)
####code.interact(local=locals())
test_results_list = test_predictor.predict_test_set(batch_gen, return_results=True)
test_evaluator.evaluate_predictions(test_results_list)
test_evaluator.score_test_df()
if __name__ == '__main__':
stime = time.time()
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mode', type=str, default='train_test',
help='one out of: train / test / train_test / analysis / create_exp')
parser.add_argument('-f','--folds', nargs='+', type=int, default=None,
help='None runs over all folds in CV. otherwise specify list of folds.')
parser.add_argument('--exp_dir', type=str, default='/path/to/experiment/directory',
help='path to experiment dir. will be created if non existent.')
parser.add_argument('--server_env', default=False, action='store_true',
help='change IO settings to deploy models on a cluster.')
parser.add_argument('--data_dest', type=str, default=None, help="path to final data folder if different from config.")
parser.add_argument('--use_stored_settings', default=False, action='store_true',
help='load configs from existing exp_dir instead of source dir. always done for testing, '
'but can be set to true to do the same for training. useful in job scheduler environment, '
'where source code might change before the job actually runs.')
parser.add_argument('--resume', action="store_true", default=False,
help='if given, resume from checkpoint(s) of the specified folds.')
parser.add_argument('--exp_source', type=str, default='experiments/toy_exp',
help='specifies, from which source experiment to load configs and data_loader.')
parser.add_argument('--no_benchmark', action='store_true', help="Do not use cudnn.benchmark.")
parser.add_argument('--cuda_device', type=int, default=0, help="Index of CUDA device to use.")
parser.add_argument('-d', '--dev', default=False, action='store_true', help="development mode: shorten everything")
args = parser.parse_args()
folds = args.folds
torch.backends.cudnn.benchmark = not args.no_benchmark
########### Creating hdf5
    #if args.mode == 'create_hdf5':
# if folds is None:
# folds = range(cf.n_cv_splits)
# for fold in folds:
# create_hdf_foldwise_with_batch_generator_for_train/val/test
if args.mode == 'train' or args.mode == 'train_test':
cf = utils.prep_exp(args.exp_source, args.exp_dir, args.server_env, args.use_stored_settings)
if args.dev:
folds = [0,1]
cf.batch_size, cf.num_epochs, cf.min_save_thresh, cf.save_n_models = 3 if cf.dim==2 else 1, 1, 0, 2
cf.num_train_batches, cf.num_val_batches, cf.max_val_patients = 5, 1, 1
cf.test_n_epochs = cf.save_n_models
cf.max_test_patients = 2
cf.data_dest = args.data_dest
logger = utils.get_logger(cf.exp_dir, cf.server_env)
logger.info("cudnn benchmark: {}, deterministic: {}.".format(torch.backends.cudnn.benchmark,
torch.backends.cudnn.deterministic))
logger.info("sending tensors to CUDA device: {}.".format(torch.cuda.get_device_name(args.cuda_device)))
data_loader = utils.import_module('dl', os.path.join(args.exp_source, 'data_loader.py'))
model = utils.import_module('model', cf.model_path)
logger.info("loaded model from {}".format(cf.model_path))
if folds is None:
folds = range(cf.n_cv_splits)
with torch.cuda.device(args.cuda_device):
for fold in folds:
cf.fold_dir = os.path.join(cf.exp_dir, 'fold_{}'.format(fold))
cf.fold = fold
cf.resume = args.resume
if not os.path.exists(cf.fold_dir):
os.mkdir(cf.fold_dir)
logger.set_logfile(fold=fold)
train(logger)
cf.resume = False
if args.mode == 'train_test':
test(logger)
#Concatenate test results by detection
        if not cf.hold_out_test_set:
test_frames = [pd.read_pickle(os.path.join(cf.test_dir,f)) for f in os.listdir(cf.test_dir) if '_test_df.pickle' in f]
all_preds = pd.concat(test_frames)
all_preds.to_csv(os.path.join(cf.test_dir,"all_folds_test.csv"))
#Concatenate detection raw boxes across folds
det_frames = [pd.read_pickle(os.path.join(cf.exp_dir,f,'raw_pred_boxes_list.pickle')) for f in os.listdir(cf.exp_dir) if 'fold_' in f]
all_dets=list()
for i in det_frames:
all_dets.extend(i)
with open(os.path.join(cf.exp_dir, 'all_raw_dets.pickle'), 'wb') as handle:
pickle.dump(all_dets, handle)
#Concatenate detection wbc boxes across folds
det_frames = [pd.read_pickle(os.path.join(cf.exp_dir,f,'wbc_pred_boxes_list.pickle')) for f in os.listdir(cf.exp_dir) if 'fold_' in f]
all_dets=list()
for i in det_frames:
all_dets.extend(i)
with open(os.path.join(cf.exp_dir, 'all_wbc_dets.pickle'), 'wb') as handle:
pickle.dump(all_dets, handle)
elif args.mode == 'test':
cf = utils.prep_exp(args.exp_source, args.exp_dir, args.server_env, is_training=False, use_stored_settings=True)
if args.dev:
folds = [0,1]
cf.test_n_epochs = 2; cf.max_test_patients = 2
cf.data_dest = args.data_dest
logger = utils.get_logger(cf.exp_dir, cf.server_env)
data_loader = utils.import_module('dl', os.path.join(args.exp_source, 'data_loader.py'))
model = utils.import_module('model', cf.model_path)
logger.info("loaded model from {}".format(cf.model_path))
if folds is None:
folds = range(cf.n_cv_splits)
with torch.cuda.device(args.cuda_device):
for fold in folds:
cf.fold_dir = os.path.join(cf.exp_dir, 'fold_{}'.format(fold))
cf.fold = fold
logger.set_logfile(fold=fold)
test(logger)
        if not cf.hold_out_test_set:
test_frames = [pd.read_pickle(os.path.join(cf.test_dir,f)) for f in os.listdir(cf.test_dir) if '_test_df.pickle' in f]
all_preds = pd.concat(test_frames)
all_preds.to_csv(os.path.join(cf.test_dir,"all_folds_test.csv"))
#Concatenate detection raw boxes across folds
det_frames = [pd.read_pickle(os.path.join(cf.exp_dir,f,'raw_pred_boxes_list.pickle')) for f in os.listdir(cf.exp_dir) if 'fold_' in f]
all_dets=list()
for i in det_frames:
all_dets.extend(i)
with open(os.path.join(cf.exp_dir, 'all_raw_dets.pickle'), 'wb') as handle:
pickle.dump(all_dets, handle)
#Concatenate detection wbc boxes across folds
det_frames = [pd.read_pickle(os.path.join(cf.exp_dir,f,'wbc_pred_boxes_list.pickle')) for f in os.listdir(cf.exp_dir) if 'fold_' in f]
all_dets=list()
for i in det_frames:
all_dets.extend(i)
with open(os.path.join(cf.exp_dir, 'all_wbc_dets.pickle'), 'wb') as handle:
pickle.dump(all_dets, handle)
# load raw predictions saved by predictor during testing, run aggregation algorithms and evaluation.
elif args.mode == 'analysis':
cf = utils.prep_exp(args.exp_source, args.exp_dir, args.server_env, is_training=False, use_stored_settings=True)
logger = utils.get_logger(cf.exp_dir, cf.server_env)
if args.dev:
cf.test_n_epochs = 2
if cf.hold_out_test_set and cf.ensemble_folds:
# create and save (unevaluated) predictions across all folds
predictor = Predictor(cf, net=None, logger=logger, mode='analysis')
results_list = predictor.load_saved_predictions(apply_wbc=True)
utils.create_csv_output([(res_dict["boxes"], pid) for res_dict, pid in results_list], cf, logger)
logger.info('starting evaluation...')
cf.fold = 'overall_hold_out'
evaluator = Evaluator(cf, logger, mode='test')
evaluator.evaluate_predictions(results_list)
evaluator.score_test_df()
else:
fold_dirs = sorted([os.path.join(cf.exp_dir, f) for f in os.listdir(cf.exp_dir) if
os.path.isdir(os.path.join(cf.exp_dir, f)) and f.startswith("fold")])
if folds is None:
folds = range(cf.n_cv_splits)
for fold in folds:
cf.fold_dir = os.path.join(cf.exp_dir, 'fold_{}'.format(fold))
cf.fold = fold
logger.set_logfile(fold=fold)
if cf.fold_dir in fold_dirs:
predictor = Predictor(cf, net=None, logger=logger, mode='analysis')
results_list = predictor.load_saved_predictions(apply_wbc=True)
logger.info('starting evaluation...')
evaluator = Evaluator(cf, logger, mode='test')
evaluator.evaluate_predictions(results_list)
evaluator.score_test_df()
else:
logger.info("Skipping fold {} since no model parameters found.".format(fold))
# create experiment folder and copy scripts without starting job.
# useful for cloud deployment where configs might change before job actually runs.
elif args.mode == 'create_exp':
cf = utils.prep_exp(args.exp_source, args.exp_dir, args.server_env, use_stored_settings=False)
logger = utils.get_logger(cf.exp_dir)
logger.info('created experiment directory at {}'.format(cf.exp_dir))
else:
raise RuntimeError('mode specified in args is not implemented...')
t = utils.get_formatted_duration(time.time() - stime)
logger.info("{} total runtime: {}".format(os.path.split(__file__)[1], t))
del logger
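# Example invocations (script name and paths are placeholders; the flags match
# the parser defined above):
#   python exec.py --mode train_test --exp_source experiments/toy_exp \
#       --exp_dir /path/to/experiment/directory --folds 0 1
#   python exec.py --mode analysis --exp_dir /path/to/experiment/directory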
|
# coding: utf-8
from uuid import uuid4
import hashlib
import re
import unicodedata
import urllib
from google.appengine.datastore.datastore_query import Cursor
from google.appengine.ext import ndb
import flask
import config
###############################################################################
# Request Parameters
###############################################################################
def param(name, cast=None):
    # JSON bodies arrive already typed, so they are returned without casting.
    if flask.request.json:
        return flask.request.json.get(name, None)
    value = flask.request.args.get(name, None)
    if value is None and flask.request.form:
        value = flask.request.form.get(name, None)
    if cast and value is not None:
        if cast is bool:
            return value.lower() in ['true', 'yes', 'y', '1', '']
        if cast is list:
            return value.split(',') if len(value) > 0 else []
        if cast is ndb.Key:
            return ndb.Key(urlsafe=value)
        return cast(value)
    return value
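# Usage sketch (hypothetical handler code): casts apply to query/form strings,
# while JSON payloads come back as-is because they are already typed.
#   limit = param('limit', int)      # "10"  -> 10
#   tags = param('tags', list)       # "a,b" -> ['a', 'b']
#   active = param('active', bool)   # "yes" -> True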
def get_next_url(next_url=''):
next_url = next_url or param('next') or param('next_url')
do_not_redirect_urls = [flask.url_for(u) for u in [
'signin', 'signup', 'user_forgot', 'user_reset',
]]
if next_url:
if any(url in next_url for url in do_not_redirect_urls):
return flask.url_for('welcome')
return next_url
referrer = flask.request.referrer
if referrer and referrer.startswith(flask.request.host_url):
return referrer
return flask.url_for('welcome')
###############################################################################
# Model manipulations
###############################################################################
def get_dbs(
query, order=None, limit=None, cursor=None, keys_only=None, **filters
):
limit = limit or config.DEFAULT_DB_LIMIT
cursor = Cursor.from_websafe_string(cursor) if cursor else None
model_class = ndb.Model._kind_map[query.kind]
if order:
for o in order.split(','):
if o.startswith('-'):
query = query.order(-model_class._properties[o[1:]])
else:
query = query.order(model_class._properties[o])
for prop in filters:
if filters.get(prop, None) is None:
continue
if isinstance(filters[prop], list):
for value in filters[prop]:
query = query.filter(model_class._properties[prop] == value)
else:
query = query.filter(model_class._properties[prop] == filters[prop])
model_dbs, next_cursor, more = query.fetch_page(
limit, start_cursor=cursor, keys_only=keys_only,
)
next_cursor = next_cursor.to_websafe_string() if more else None
return list(model_dbs), next_cursor
def get_keys(*arg, **kwargs):
return get_dbs(*arg, keys_only=True, **kwargs)
###############################################################################
# JSON Response Helpers
###############################################################################
def jsonpify(*args, **kwargs):
if param('callback'):
content = '%s(%s)' % (
param('callback'), flask.jsonify(*args, **kwargs).data,
)
mimetype = 'application/javascript'
return flask.current_app.response_class(content, mimetype=mimetype)
return flask.jsonify(*args, **kwargs)
###############################################################################
# Helpers
###############################################################################
def is_iterable(value):
return isinstance(value, (tuple, list))
def check_form_fields(*fields):
fields_data = []
for field in fields:
if is_iterable(field):
fields_data.extend([field.data for field in field])
else:
fields_data.append(field.data)
return all(fields_data)
def generate_next_url(next_cursor, base_url=None, cursor_name='cursor'):
if not next_cursor:
return None
base_url = base_url or flask.request.base_url
args = flask.request.args.to_dict()
args[cursor_name] = next_cursor
return '%s?%s' % (base_url, urllib.urlencode(args))
def uuid():
return uuid4().hex
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
def slugify(text):
if not isinstance(text, unicode):
text = unicode(text)
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore')
text = unicode(_slugify_strip_re.sub('', text).strip().lower())
return _slugify_hyphenate_re.sub('-', text)
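# e.g. slugify(u'Hello, World!') -> u'hello-world' (Python 2 unicode semantics,
# like the rest of this module)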
_username_re = re.compile(r'^[a-z0-9]+(?:[\.][a-z0-9]+)*$')
def is_valid_username(username):
return bool(_username_re.match(username))
def create_name_from_email(email):
return re.sub(r'_+|-+|\.+|\++', ' ', email.split('@')[0]).title()
def password_hash(user_db, password):
m = hashlib.sha256()
m.update(user_db.key.urlsafe())
m.update(user_db.created.isoformat())
m.update(m.hexdigest())
m.update(password.encode('utf-8'))
m.update(config.CONFIG_DB.salt)
return m.hexdigest()
def update_query_argument(name, value=None, ignore='cursor', is_list=False):
ignore = ignore.split(',') if isinstance(ignore, str) else ignore or []
arguments = {}
for key, val in flask.request.args.items():
if key not in ignore and (is_list and value is not None or key != name):
arguments[key] = val
if value is not None:
if is_list:
values = []
if name in arguments:
values = arguments[name].split(',')
del arguments[name]
if value in values:
values.remove(value)
else:
values.append(value)
if values:
arguments[name] = ','.join(values)
else:
arguments[name] = value
query = '&'.join('%s=%s' % item for item in sorted(arguments.items()))
return '%s%s' % (flask.request.path, '?%s' % query if query else '')
###############################################################################
# Lambdas
###############################################################################
strip_filter = lambda x: x.strip() if x else ''
email_filter = lambda x: x.lower().strip() if x else ''
sort_filter = lambda x: sorted(x) if x else []
|
#!/usr/bin/env python
"""Tests for `sktools` package."""
import unittest
import sktools
import pandas as pd
from category_encoders import MEstimateEncoder
import numpy as np
class TestQuantileEncoder(unittest.TestCase):
"""Tests for percentile encoder."""
def setUp(self):
"""Create dataframe with categories and a target variable"""
self.df = pd.DataFrame(
{"categories": ["a", "b", "c", "a", "b", "c", "a", "b"]}
)
self.target = np.array([1, 2, 0, 4, 5, 0, 6, 7])
def test_median_works(self):
"""
Expected output of percentile 50 in df:
- a median is 4 (a values are 1, 4, 6)
- b median is 5 (b values are 2, 5, 7)
- c median is 0 (c values are 0)
"""
expected_output_median = pd.DataFrame(
{"categories": [4.0, 5, 0, 4, 5, 0, 4, 5]}
)
pd.testing.assert_frame_equal(
sktools.QuantileEncoder(quantile=0.5, m=0.0).fit_transform(
self.df, self.target
),
expected_output_median,
)
def test_max_works(self):
"""
Expected output of percentile 100 in df:
- a max is 6
- b max is 7
- c max is 0
"""
expected_output_max = pd.DataFrame(
{"categories": [6.0, 7, 0, 6, 7, 0, 6, 7]}
)
pd.testing.assert_frame_equal(
sktools.QuantileEncoder(quantile=1, m=0.0).fit_transform(
self.df, self.target
),
expected_output_max,
)
def test_new_category(self):
"""
The global median of the target is 3. If new categories are passed to
the transformer, then the output should be 3
"""
transformer_median = sktools.QuantileEncoder(quantile=0.5, m=0.0)
transformer_median.fit(self.df, self.target)
new_df = pd.DataFrame({"categories": ["d", "e"]})
new_medians = pd.DataFrame({"categories": [3.0, 3.0]})
pd.testing.assert_frame_equal(
transformer_median.transform(new_df), new_medians
)
class TestNestedTargetEncoder(unittest.TestCase):
"""Tests for nested target encoder."""
def setUp(self):
"""Create dataframe with categories and a target variable"""
self.col = "col_1"
self.parent_col = "parent_col_1"
self.X = pd.DataFrame(
{
self.col: ["a", "a", "b", "b", "b", "c", "c", "d", "d", "d"],
self.parent_col: [
"e",
"e",
"e",
"e",
"e",
"f",
"f",
"f",
"f",
"f",
],
}
)
self.X_array = pd.DataFrame(
{
self.col: ["a", "a", "b", "b", "b", "c", "c", "d", "d", "d"],
self.parent_col: [
"e",
"e",
"e",
"e",
"e",
"f",
"f",
"f",
"f",
"f",
],
}
).values
self.y = pd.Series([1, 2, 3, 1, 2, 4, 4, 5, 4, 4.5])
self.parent_means = list(self.y.groupby(self.X[self.parent_col]).mean())
self.parents = ["e", "f"]
def test_parent_prior(self):
"""
Simple case:
There is no prior from the global to the group mean (m_prior = 0).
As the m_parent is 1, the mean for group a is (as mean_group_e = 1.8):
(1 + 2 + mean_group_e ) / 3 = (1 + 2 + 1.8) / 3 = 1.6
The same works for b, c and d
"""
expected_output = pd.DataFrame(
dict(
col_1=[1.6, 1.6, 1.95, 1.95, 1.95, 4.1, 4.1, 4.45, 4.45, 4.45],
parent_col_1=self.X[self.parent_col],
)
)
te = sktools.NestedTargetEncoder(
cols=self.col,
feature_mapping=dict(col_1=self.parent_col),
m_prior=0,
)
pd.testing.assert_frame_equal(
te.fit_transform(self.X, self.y), expected_output
)
def test_numpy_array(self):
"""
Check that nested target encoder also works for numpy arrays
"""
expected_output = pd.DataFrame(
dict(
col_1=[1.6, 1.6, 1.95, 1.95, 1.95, 4.1, 4.1, 4.45, 4.45, 4.45],
parent_col_1=self.X[self.parent_col],
)
).values
te = sktools.NestedTargetEncoder(
cols=0, feature_mapping={0: 1}, m_prior=0
)
te.fit(self.X_array, self.y)
output = te.transform(self.X_array).values
np.testing.assert_almost_equal(output[:, 0], expected_output[:, 0])
np.testing.assert_equal(output[:, 1], expected_output[:, 1])
def test_no_parent(self):
"""
When using no priors, the functionalities should be the same as for
m estimator.
"""
te = sktools.NestedTargetEncoder(
cols=self.col,
feature_mapping=dict(col_1=self.parent_col),
m_prior=0,
m_parent=0,
)
m_te = MEstimateEncoder(cols=self.col, m=0)
pd.testing.assert_frame_equal(
te.fit_transform(self.X, self.y), m_te.fit_transform(self.X, self.y)
)
def test_unknown_missing_imputation(self):
"""
When new categories and unknown values are given, we expect the encoder
to give the parent means (at least with default configuration).
"""
# First two rows are new categories
# Last two rows are missing values
# Parents are e, f, e, f
new_x = pd.DataFrame(
{
self.col: ["x", "y", np.NaN, np.NaN],
self.parent_col: self.parents + self.parents,
}
)
# We expect to get parent means
expected_output_df = pd.DataFrame(
{
self.col: self.parent_means + self.parent_means,
self.parent_col: self.parents + self.parents,
}
)
te = sktools.NestedTargetEncoder(
cols=self.col,
feature_mapping=dict(col_1=self.parent_col),
m_prior=0,
)
te.fit(self.X, self.y)
pd.testing.assert_frame_equal(te.transform(new_x), expected_output_df)
def test_missing_na(self):
"""
When new categories and unknown values are given, we expect the encoder
to give the parent means. If we specify return_nan, we want it to
return nan
"""
# First two rows are new categories
# Last two rows are missing values
# Parents are e, f, e, f
new_x = pd.DataFrame(
{
self.col: ["x", "y", np.nan, np.nan],
self.parent_col: self.parents + self.parents,
}
)
# In the transformer we specify unknown -> return nan
# We expect to get:
# - nan for the unknown
# - parent means for the missing
expected_output_df = pd.DataFrame(
{
self.col: [np.nan, np.nan] + self.parent_means,
self.parent_col: self.parents + self.parents,
}
)
te = sktools.NestedTargetEncoder(
cols=self.col,
feature_mapping=dict(col_1=self.parent_col),
m_prior=0,
handle_missing="value",
handle_unknown="return_nan",
)
te.fit(self.X, self.y)
pd.testing.assert_frame_equal(te.transform(new_x), expected_output_df)
def test_all_missing(self):
"""
        If everything's missing or unknown, we expect by default to return
global mean
"""
new_x = pd.DataFrame(
{
self.col: ["x", np.nan, "x", np.nan],
self.parent_col: ["z", "z", np.nan, np.nan],
}
)
te = sktools.NestedTargetEncoder(
cols=self.col,
feature_mapping=dict(col_1=self.parent_col),
m_prior=0,
)
te.fit(self.X, self.y)
expected_output_df = pd.DataFrame(
{
self.col: self.y.mean(),
self.parent_col: ["z", "z", np.nan, np.nan],
}
)
pd.testing.assert_frame_equal(te.transform(new_x), expected_output_df)
class TestSummaryEncoder(unittest.TestCase):
"""Tests for percentile encoder."""
def setUp(self):
"""Create dataframe with categories and a target variable"""
self.df = pd.DataFrame(
{"categories": ["a", "b", "c", "a", "b", "c", "a", "b"]}
)
self.target = np.array([1, 2, 0, 4, 5, 0, 6, 7])
self.col = 'categories'
def assert_same_quantile(self, quantile):
quantile_results = sktools.QuantileEncoder(
cols=[self.col],
quantile=quantile
).fit_transform(self.df, self.target)
summary_results = sktools.SummaryEncoder(
cols=[self.col],
quantiles=[quantile]
).fit_transform(self.df, self.target)
percentile = round(quantile * 100)
np.testing.assert_allclose(
quantile_results[self.col].values,
summary_results[f"{self.col}_{percentile}"].values
)
def test_several_quantiles(self):
for quantile in [0.1, 0.5, 0.9]:
self.assert_same_quantile(quantile)
def test_several_quantiles_list(self):
quantile_list = [0.2, 0.1, 0.8]
summary_results = sktools.SummaryEncoder(
cols=[self.col],
quantiles=quantile_list
).fit_transform(self.df, self.target)
for quantile in quantile_list:
quantile_results = sktools.QuantileEncoder(
cols=[self.col],
quantile=quantile
).fit_transform(self.df, self.target)
percentile = round(quantile * 100)
np.testing.assert_allclose(
quantile_results[self.col].values,
summary_results[f"{self.col}_{percentile}"].values
)
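# Hedged usage sketch (not part of the test suite): the tests above rely on
# SummaryEncoder appending one column per requested quantile, named
# "<col>_<percentile>". Assuming the same sktools API used above:
#
#   demo_df = pd.DataFrame({"categories": ["a", "b", "c", "a"]})
#   demo_target = np.array([1.0, 2.0, 0.0, 4.0])
#   encoded = sktools.SummaryEncoder(
#       cols=["categories"], quantiles=[0.5]
#   ).fit_transform(demo_df, demo_target)
#   assert "categories_50" in encoded.columns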
|
import dataclasses
import json
from glob import glob
import os
from multiprocessing import Pool
from typing import Tuple, List
import music21 as m21
from datatypes import Chord
def process(path):
out_name = "preprocessed-json/" + os.path.basename(path) + ".json"
if os.path.exists(out_name):
return
print(path)
try:
notes, chords, bars, duration = load_wikifonia_score(path)
except Exception as e:
print("Error processing %s: %s" % (path, e))
return
out_name = "preprocessed-json/" + os.path.basename(path) + ".json"
try:
with open(out_name, "w") as f:
json.dump(
{
"notes": notes,
"chords": [(t, dataclasses.asdict(c)) for t, c in chords],
"bars": bars,
"duration": duration,
},
f,
)
except Exception as e:
try:
os.unlink(out_name)
except OSError:
pass
print("Failed to save %s: %s" % (out_name, e))
def load_wikifonia_score(
path: str,
) -> Tuple[
List[Tuple[float, int]], List[Tuple[float, Chord]], List[Tuple[float, int]], float
]:
s = m21.converter.parse(path)
if len(s.parts) != 1:
print(f"Wikifonia {path} has {len(s.parts)} parts")
part = s.parts[0]
return process_m21_part(part)
def process_m21_part(
part,
) -> Tuple[
List[Tuple[float, int]], List[Tuple[float, Chord]], List[Tuple[float, int]], float
]:
notes: List[Tuple[float, int]] = []
chords: List[Tuple[float, Chord]] = []
bars: List[Tuple[float, int]] = []
measures = part.getElementsByClass(m21.stream.Measure)
duration = 0
for measure in measures:
bars.append((measure.offset, measure.number))
offsets = measure.offsetMap()
for offset in offsets:
t = float(offset.offset + measure.offset)
el = offset.element
if isinstance(el, m21.harmony.ChordSymbol):
chords.append((t, Chord.from_m21_chord(el)))
elif isinstance(el, m21.note.Note):
ties_prev = (
len(notes) > 0
and el.tie is not None
and el.tie.type == "stop"
and el.pitch.midi == notes[-1][1]
)
if len(notes) > 0:
if notes[-1][0] >= t:
raise Exception("Notes are not sorted")
if not ties_prev and not el.duration.isGrace:
notes.append((t, el.pitch.midi))
duration = t + offset.endTime - offset.offset
return notes, chords, bars, duration
def main():
paths = list(sorted(glob("Wikifonia/*.mxl")))
with Pool(32) as p:
p.map(process, paths)
if __name__ == "__main__":
main()
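# Output format sketch: each JSON file written by process() above holds the
# four fields dumped there. Reading one back (the path is illustrative):
#
#   with open("preprocessed-json/SomeSong.mxl.json") as f:
#       data = json.load(f)
#   data["notes"]     # list of [offset, midi_pitch] pairs
#   data["chords"]    # list of [offset, chord_dict] pairs
#   data["bars"]      # list of [offset, bar_number] pairs
#   data["duration"]  # total duration as a float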
|
from setuptools import find_packages
import setuptools
setuptools.setup(
name="jina-executors",
version="0.0.1",
author='Jina Dev Team',
author_email='dev-team@jina.ai',
description="A selection of Executors for Jina",
url="https://github.com/jina-ai/executors",
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
packages=find_packages(where='.', include=['jinahub.*']),
python_requires=">=3.7",
)
|
from django.db import models
from django.contrib.auth.models import User
from PIL import Image
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
profilePic = models.ImageField(default='default.jpg', upload_to='profile_pics')
def __str__(self):
return f'{self.user.username} Profile'
def save(self, *args, **kwargs):
# Accept and forward save() arguments (e.g. using=..., update_fields=...).
super().save(*args, **kwargs)
img = Image.open(self.profilePic.path)
if img.height > 300 or img.width > 300:
output_size = (300, 300)
img.thumbnail(output_size)
img.save(self.profilePic.path)
|
# Taken from https://raw.githubusercontent.com/Newmu/dcgan_code/master/lib/theano_utils.py
import numpy as np
import theano
def intX(X):
return np.asarray(X, dtype=np.int32)
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def sharedX(X, dtype=theano.config.floatX, name=None):
return theano.shared(np.asarray(X, dtype=dtype), name=name, borrow=True)
def shared0s(shape, dtype=theano.config.floatX, name=None):
return sharedX(np.zeros(shape), dtype=dtype, name=name)
def sharedNs(shape, n, dtype=theano.config.floatX, name=None):
return sharedX(np.ones(shape)*n, dtype=dtype, name=name)
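# Hedged usage sketch: these helpers cast plain numpy data to the configured
# float type and wrap parameters as borrowed shared variables, e.g.
#
#   W = sharedX(np.random.randn(128, 64), name='W')
#   b = shared0s((64,), name='b')
#   ones = sharedNs((64,), 1.0, name='ones')
#   labels = intX([0, 1, 2])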
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
"""Test the publicly available ingredients API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login is required to access the endpoint"""
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
"""Test the private ingredients API"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@gmail.com',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredient_list(self):
"""Test retrieving a list of ingredients"""
Ingredient.objects.create(user=self.user, name='Kale')
Ingredient.objects.create(user=self.user, name='Salt')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
"""Test that ingredients for authenticated user are returned"""
user2 = get_user_model().objects.create_user(
'other@gmail.com',
'testpass'
)
Ingredient.objects.create(user=user2, name="Vinegar")
Ingredient.objects.create(user=self.user, name='Turmeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], 'Turmeric')
def test_create_ingredient_successful(self):
"""Test create a new ingredient"""
payload = {'name': 'Cabbage'}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
"""Test creating invalid ingredient fails"""
payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
|
from __future__ import absolute_import, division, print_function
# pylint: disable=wildcard-import,redefined-builtin,unused-wildcard-import
from builtins import *
# pylint: enable=wildcard-import,redefined-builtin,unused-wildcard-import
from future.utils import native_str
from io import StringIO
import pandas as pd
# pylint: disable=import-error
from .base import ServerBase, BiomartException, DEFAULT_SCHEMA
# pylint: enable=import-error
# xml
try:
from xml.etree import cElementTree as ET
except ImportError:
from xml.etree import ElementTree as ET
class Dataset(ServerBase):
"""Class representing a biomart dataset.
This class is responsible for handling queries to biomart
datasets. Queries can select a subset of attributes and can be filtered
using any available filters. A list of valid attributes is available in
the attributes property. If no attributes are given, a set of default
attributes is used. A list of valid filters is available in the filters
property. The type of value that can be specified for a given filter
depends on the filter as some filters accept single values, whilst others
can take lists of values.
Args:
name (str): Id of the dataset.
display_name (str): Display name of the dataset.
host (str): Url of host to connect to.
path (str): Path on the host to access to the biomart service.
port (int): Port to use for the connection.
use_cache (bool): Whether to cache requests.
virtual_schema (str): The virtual schema of the dataset.
Examples:
Directly connecting to a dataset:
>>> dataset = Dataset(name='hsapiens_gene_ensembl',
>>> host='http://www.ensembl.org')
Querying the dataset:
>>> dataset.query(attributes=['ensembl_gene_id',
>>> 'external_gene_name'],
>>> filters={'chromosome_name': ['1','2']})
Listing available attributes:
>>> dataset.attributes
>>> dataset.list_attributes()
Listing available filters:
>>> dataset.filters
>>> dataset.list_filters()
"""
def __init__(self,
name,
display_name='',
host=None,
path=None,
port=None,
use_cache=True,
virtual_schema=DEFAULT_SCHEMA):
super().__init__(host=host, path=path, port=port, use_cache=use_cache)
self._name = name
self._display_name = display_name
self._virtual_schema = virtual_schema
self._filters = None
self._attributes = None
self._default_attributes = None
@property
def name(self):
"""Name of the dataset (used as dataset id)."""
return self._name
@property
def display_name(self):
"""Display name of the dataset."""
return self._display_name
@property
def filters(self):
"""List of filters available for the dataset."""
if self._filters is None:
self._filters, self._attributes = self._fetch_configuration()
return self._filters
@property
def attributes(self):
"""List of attributes available for the dataset (cached)."""
if self._attributes is None:
self._filters, self._attributes = self._fetch_configuration()
return self._attributes
@property
def default_attributes(self):
"""List of default attributes for the dataset."""
if self._default_attributes is None:
self._default_attributes = {
name: attr
for name, attr in self.attributes.items()
if attr.default is True
}
return self._default_attributes
def list_attributes(self):
"""Lists available attributes in a readable DataFrame format.
Returns:
pd.DataFrame: Frame listing available attributes.
"""
def _row_gen(attributes):
for attr in attributes.values():
yield (attr.name, attr.display_name, attr.description)
return pd.DataFrame.from_records(
_row_gen(self.attributes),
columns=['name', 'display_name', 'description'])
def list_filters(self):
"""Lists available filters in a readable DataFrame format.
Returns:
pd.DataFrame: Frame listing available filters.
"""
def _row_gen(attributes):
for attr in attributes.values():
yield (attr.name, attr.type, attr.description)
return pd.DataFrame.from_records(
_row_gen(self.filters), columns=['name', 'type', 'description'])
def _fetch_configuration(self):
# Get datasets using biomart.
response = self.get(type='configuration', dataset=self._name)
# Check response for problems.
if 'Problem retrieving configuration' in response.text:
raise BiomartException('Failed to retrieve dataset configuration, '
'check the dataset name and schema.')
# Get filters and attributes from xml.
xml = ET.fromstring(response.content)
filters = {f.name: f for f in self._filters_from_xml(xml)}
attributes = {a.name: a for a in self._attributes_from_xml(xml)}
return filters, attributes
@staticmethod
def _filters_from_xml(xml):
for node in xml.iter('FilterDescription'):
attrib = node.attrib
# remove the id_list and boolean_list type
if attrib.get('type', 'None') in ['id_list', 'boolean_list']:
continue
if attrib.get('type') is None:
yield Filter(name=attrib['pointerFilter'], type='text')
continue
yield Filter(
name=attrib['internalName'], type=attrib['type'])
# tag:Option with type
# python2.7 iter() takes no keyword arguments
for node in xml.iter('Option'):
if node.attrib.get('type'):
yield Filter(name=node.attrib['internalName'], type=node.attrib['type'])
@staticmethod
def _attributes_from_xml(xml):
for page_index, page in enumerate(xml.iter('AttributePage')):
for desc in page.iter('AttributeDescription'):
attrib = desc.attrib
# Default attributes can only be from the first page.
default = (page_index == 0 and
attrib.get('default', '') == 'true')
yield Attribute(
name=attrib['internalName'],
display_name=attrib.get('displayName', ''),
description=attrib.get('description', ''),
default=default)
def query(self,
attributes=None,
filters=None,
only_unique=True,
use_attr_names=False,
dtypes=None):
"""Queries the dataset to retrieve the contained data.
Args:
attributes (list[str]): Names of attributes to fetch in query.
Attribute names must correspond to valid attributes. See
the attributes property for a list of valid attributes.
filters (dict[str,any]): Dictionary of filters --> values
to filter the dataset by. Filter names and values must
correspond to valid filters and filter values. See the
filters property for a list of valid filters.
only_unique (bool): Whether to return only rows containing
unique values (True) or to include duplicate rows (False).
use_attr_names (bool): Whether to use the attribute names
as column names in the result (True) or the attribute
display names (False).
dtypes (dict[str,any]): Dictionary of attributes --> data types,
describing to pandas how the columns should be handled.
Returns:
pandas.DataFrame: DataFrame containing the query results.
"""
# Example query from Ensembl biomart:
#
# <?xml version="1.0" encoding="UTF-8"?>
# <!DOCTYPE Query>
# <Query virtualSchemaName = "default" formatter = "TSV" header = "0"
# uniqueRows = "0" count = "" datasetConfigVersion = "0.6" >
# <Dataset name = "hsapiens_gene_ensembl" interface = "default" >
# <Filter name = "chromosome_name" value = "1,2"/>
# <Filter name = "end" value = "10000000"/>
# <Filter name = "start" value = "1"/>
# <Attribute name = "ensembl_gene_id" />
# <Attribute name = "ensembl_transcript_id" />
# </Dataset>
# </Query>
# Setup query element.
root = ET.Element('Query')
root.set('virtualSchemaName', self._virtual_schema)
root.set('formatter', 'TSV')
root.set('header', '1')
root.set('uniqueRows', native_str(int(only_unique)))
root.set('datasetConfigVersion', '0.6')
# Add dataset element.
dataset = ET.SubElement(root, 'Dataset')
dataset.set('name', self.name)
dataset.set('interface', 'default')
# Default to default attributes if none requested.
if attributes is None:
attributes = list(self.default_attributes.keys())
# Add attribute elements.
for name in attributes:
try:
attr = self.attributes[name]
self._add_attr_node(dataset, attr)
except KeyError:
raise BiomartException(
'Unknown attribute {}, check dataset attributes '
'for a list of valid attributes.'.format(name))
if filters is not None:
# Add filter elements.
for name, value in filters.items():
try:
filter_ = self.filters[name]
self._add_filter_node(dataset, filter_, value)
except KeyError:
raise BiomartException(
'Unknown filter {}, check dataset filters '
'for a list of valid filters.'.format(name))
# Fetch response.
response = self.get(query=ET.tostring(root))
# Raise exception if an error occurred.
if 'Query ERROR' in response.text:
raise BiomartException(response.text)
# Parse results into a DataFrame.
try:
result = pd.read_csv(StringIO(response.text), sep='\t', dtype=dtypes)
# A TypeError is raised if a data type is not understood by pandas.
except TypeError:
raise ValueError('Invalid data type passed in dtypes.')
if use_attr_names:
# Rename columns with attribute names instead of display names.
column_map = {
self.attributes[attr].display_name: attr
for attr in attributes
}
result.rename(columns=column_map, inplace=True)
return result
@staticmethod
def _add_attr_node(root, attr):
attr_el = ET.SubElement(root, 'Attribute')
attr_el.set('name', attr.name)
@staticmethod
def _add_filter_node(root, filter_, value):
"""Adds filter xml node to root."""
filter_el = ET.SubElement(root, 'Filter')
filter_el.set('name', filter_.name)
# Set filter value depending on type.
if filter_.type == 'boolean':
# Boolean case.
# Guard against calling .lower() on a bool value.
if value is True or (not isinstance(value, bool)
and value.lower() in {'included', 'only'}):
filter_el.set('excluded', '0')
elif value is False or value.lower() == 'excluded':
filter_el.set('excluded', '1')
else:
raise ValueError('Invalid value for boolean filter ({})'
.format(value))
elif isinstance(value, (list, tuple)):
# List case.
filter_el.set('value', ','.join(map(str, value)))
else:
# Default case.
filter_el.set('value', str(value))
def __repr__(self):
return ('<biomart.Dataset name={!r}, display_name={!r}>'
.format(self._name, self._display_name))
class Attribute(object):
"""Biomart dataset attribute.
Attributes:
name (str): Attribute name.
display_name (str): Attribute display name.
description (str): Attribute description.
"""
def __init__(self, name, display_name='', description='', default=False):
"""Attribute constructor.
Args:
name (str): Attribute name.
display_name (str): Attribute display name.
description (str): Attribute description.
default (bool): Whether the attribute is a default
attribute of the corresponding datasets.
"""
self._name = name
self._display_name = display_name
self._description = description
self._default = default
@property
def name(self):
"""Name of the attribute."""
return self._name
@property
def display_name(self):
"""Display name of the attribute."""
return self._display_name
@property
def description(self):
"""Description of the attribute."""
return self._description
@property
def default(self):
"""Whether this is a default attribute."""
return self._default
def __repr__(self):
return (('<biomart.Attribute name={!r},'
' display_name={!r}, description={!r}>')
.format(self._name, self._display_name, self._description))
class Filter(object):
"""Biomart dataset filter.
Attributes:
name (str): Filter name.
type (str): Type of the filter (boolean, int, etc.).
description (str): Filter description.
"""
def __init__(self, name, type, description=''):
""" Filter constructor.
Args:
name (str): Filter name.
type (str): Type of the filter (boolean, int, etc.).
description (str): Filter description.
"""
self._name = name
self._type = type
self._description = description
@property
def name(self):
"""Filter name."""
return self._name
@property
def type(self):
"""Filter type."""
return self._type
@property
def description(self):
"""Filter description."""
return self._description
def __repr__(self):
return ('<biomart.Filter name={!r}, type={!r}>'
.format(self.name, self.type))
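# Hedged usage sketch, mirroring the examples in the Dataset docstring above:
#
#   dataset = Dataset(name='hsapiens_gene_ensembl',
#                     host='http://www.ensembl.org')
#   df = dataset.query(attributes=['ensembl_gene_id', 'external_gene_name'],
#                      filters={'chromosome_name': ['1', '2']})
#
# query() serialises the request into the XML <Query> document shown in its
# docstring, sends it to the server and parses the TSV response with pandas.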
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
if __name__ == '__main__':
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from .ufxtract_hcalendar1 import hCalendar1
from .ufxtract_hcalendar3 import hCalendar3
if __name__ == '__main__':
unittest.main()
|
from django.db.models import Q
from .pagination import PostLimitOffsetPagination, PostPageNumberPagination
from rest_framework.authentication import TokenAuthentication, BasicAuthentication
from rest_framework.filters import (
SearchFilter,
OrderingFilter
)
from rest_framework.permissions import (
IsAuthenticatedOrReadOnly,
IsAuthenticated,
)
from rest_framework.generics import (ListAPIView,
CreateAPIView,
RetrieveAPIView,
DestroyAPIView,
UpdateAPIView,
RetrieveUpdateAPIView)
from django.contrib.auth import get_user_model
User = get_user_model()
from django.contrib.auth.models import Permission
from ...product.models import (
Product,
ProductVariant,
Stock,
)
from .serializers import (
CreateStockSerializer,
ProductStockListSerializer,
ProductListSerializer,
UserListSerializer,
UserCreateSerializer,
PermissionListSerializer,
)
from rest_framework import generics
class UserCreateAPIView(CreateAPIView):
serializer_class = UserCreateSerializer
permission_classes = [IsAuthenticatedOrReadOnly]
queryset = User.objects.all()
class CreateStockAPIView(CreateAPIView):
serializer_class = CreateStockSerializer
#permission_classes = [IsAuthenticatedOrReadOnly]
queryset = Stock.objects.all()
# class UserCreateAPIView(CreateAPIView):
# queryset = User.objects.all()
# serializer_class = UserCreateSerializer
#
# def perform_create(self, serializer):
# #user = User.objects.create_user('john', 'lennon@thebeatles.com', 'johnpassword')
# serializer.save(user=self.request.user)
class UserDetailAPIView(generics.RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserListSerializer
class UserDeleteAPIView(DestroyAPIView):
queryset = User.objects.all()
serializer_class = UserListSerializer
class ProductListAPIView(generics.ListAPIView):
#permission_classes = [IsAuthenticatedOrReadOnly]
pagination_class = PostLimitOffsetPagination
serializer_class = ProductListSerializer
def get_queryset(self, *args, **kwargs):
queryset_list = Product.objects.all().select_related()
query = self.request.GET.get('q')
if query:
queryset_list = queryset_list.filter(
Q(name__icontains=query)|
Q(variants__sku__icontains=query)|
Q(categories__name__icontains=query)
).distinct()
return queryset_list
class ProductStockListAPIView(generics.ListAPIView):
#permission_classes = [IsAuthenticatedOrReadOnly]
pagination_class = PostLimitOffsetPagination
serializer_class = ProductStockListSerializer
def get_queryset(self, *args, **kwargs):
queryset_list = ProductVariant.objects.all().select_related()
query = self.request.GET.get('q')
if query:
queryset_list = queryset_list.filter(
Q(sku__icontains=query) |
Q(product__name__icontains=query)
).distinct()
return queryset_list
class UserListAPIView(generics.ListAPIView):
#permission_classes = [IsAuthenticatedOrReadOnly]
queryset = User.objects.all()
serializer_class = UserListSerializer
# Permissions views
class PermissionListView(generics.ListAPIView):
serializer_class = PermissionListSerializer
queryset = Permission.objects.all()
class PermissionDetailAPIView(generics.RetrieveAPIView):
serializer_class = PermissionListSerializer
queryset = Permission.objects.all()
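# Hedged usage sketch: ProductListAPIView and ProductStockListAPIView both
# accept a ?q= query parameter (see get_queryset above), e.g.
#
#   GET <api-prefix>/products/?q=shirt
#
# filters products by name, variant SKU or category name; the exact URL
# prefix depends on this app's urls.py, which is not shown here.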
|
from collections import defaultdict
from src.abstract_classifier import AbstractClassifier
import lib.sequence_lib as seq_lib
class AlignmentAbutsLeft(AbstractClassifier):
"""
Does the alignment extend off the 3' end of a scaffold?
(regardless of transcript orientation)
aligned: # unaligned: - whatever: . edge: |
query |---#####....
target |#####....
Since sqlite3 lacks a BOOL type, reports 1 if TRUE and 0 if FALSE
"""
@staticmethod
def __type__():
return "INTEGER"
def run(self):
self.get_alignment_dict()
s_dict = defaultdict(int)
for a_id, aln in self.alignment_dict.items():
if aln.strand == "+" and aln.tStart == 0 and aln.qStart != 0:
s_dict[a_id] = 1
elif aln.strand == "-" and aln.tEnd == aln.tSize and aln.qEnd != aln.qSize:
s_dict[a_id] = 1
self.upsert_dict_wrapper(s_dict)
|
# Copyright 2014 Rackspace Inc.
#
# Author: Tim Simmons <tim.simmons@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
cfg.CONF.register_group(cfg.OptGroup(
name='service:agent', title="Configuration for the Agent Service"
))
OPTS = [
cfg.IntOpt('workers', default=None,
help='Number of agent worker processes to spawn'),
cfg.IntOpt('threads', default=1000,
help='Number of agent greenthreads to spawn'),
cfg.StrOpt('host', default='0.0.0.0',
help='The Agent Bind Host'),
cfg.IntOpt('port', default=5358,
help='mDNS Port Number'),
cfg.IntOpt('tcp-backlog', default=100,
help='The Agent TCP Backlog'),
cfg.FloatOpt('tcp-recv-timeout', default=0.5,
help='Agent TCP Receive Timeout'),
cfg.ListOpt('allow-notify', default=[],
help='List of IP addresses allowed to NOTIFY The Agent'),
cfg.ListOpt('masters', default=[],
help='List of masters for the Agent, format ip:port'),
cfg.StrOpt('backend-driver', default='bind9',
help='The backend driver to use'),
cfg.StrOpt('transfer-source', default=None,
help='An IP address to be used to fetch zones transferred in'),
]
cfg.CONF.register_opts(OPTS, group='service:agent')
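# Hedged configuration sketch (option names from OPTS above; the values are
# illustrative, not defaults):
#
#   [service:agent]
#   port = 5358
#   allow-notify = 192.0.2.1, 192.0.2.2
#   masters = 192.0.2.10:53
#   backend-driver = bind9
#   transfer-source = 192.0.2.5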
|
"""
Basic data structure used for general trading function in VN Trader.
"""
from dataclasses import dataclass
from datetime import datetime
from logging import INFO
from .constant import Direction, Exchange, Interval, Offset, Status, Product, OptionType, OrderType
ACTIVE_STATUSES = {Status.SUBMITTING, Status.NOTTRADED, Status.PARTTRADED}
@dataclass
class BaseData:
"""
Any data object needs a gateway_name as source
and should inherit base data.
"""
gateway_name: str
@dataclass
class TickData(BaseData):
"""
Tick data contains information about:
* last trade in market
* orderbook snapshot
* intraday market statistics.
"""
symbol: str
exchange: Exchange
datetime: datetime
name: str = ""
volume: float = 0
turnover: float = 0
open_interest: float = 0
last_price: float = 0
last_volume: float = 0
limit_up: float = 0
limit_down: float = 0
open_price: float = 0
high_price: float = 0
low_price: float = 0
pre_close: float = 0
bid_price_1: float = 0
bid_price_2: float = 0
bid_price_3: float = 0
bid_price_4: float = 0
bid_price_5: float = 0
ask_price_1: float = 0
ask_price_2: float = 0
ask_price_3: float = 0
ask_price_4: float = 0
ask_price_5: float = 0
bid_volume_1: float = 0
bid_volume_2: float = 0
bid_volume_3: float = 0
bid_volume_4: float = 0
bid_volume_5: float = 0
ask_volume_1: float = 0
ask_volume_2: float = 0
ask_volume_3: float = 0
ask_volume_4: float = 0
ask_volume_5: float = 0
localtime: datetime = None
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class BarData(BaseData):
"""
Candlestick bar data of a certain trading period.
"""
symbol: str
exchange: Exchange
datetime: datetime
interval: Interval = None
volume: float = 0
turnover: float = 0
open_interest: float = 0
open_price: float = 0
high_price: float = 0
low_price: float = 0
close_price: float = 0
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class OrderData(BaseData):
"""
Order data contains information for tracking the latest status
of a specific order.
"""
symbol: str
exchange: Exchange
orderid: str
type: OrderType = OrderType.LIMIT
direction: Direction = None
offset: Offset = Offset.NONE
price: float = 0
volume: float = 0
traded: float = 0
status: Status = Status.SUBMITTING
datetime: datetime = None
reference: str = ""
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
self.vt_orderid = f"{self.gateway_name}.{self.orderid}"
def is_active(self) -> bool:
"""
Check if the order is active.
"""
return self.status in ACTIVE_STATUSES
def create_cancel_request(self) -> "CancelRequest":
"""
Create cancel request object from order.
"""
req = CancelRequest(
orderid=self.orderid, symbol=self.symbol, exchange=self.exchange
)
return req
@dataclass
class TradeData(BaseData):
"""
Trade data contains information of a fill of an order. One order
can have several trade fills.
"""
symbol: str
exchange: Exchange
orderid: str
tradeid: str
direction: Direction = None
offset: Offset = Offset.NONE
price: float = 0
volume: float = 0
datetime: datetime = None
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
self.vt_orderid = f"{self.gateway_name}.{self.orderid}"
self.vt_tradeid = f"{self.gateway_name}.{self.tradeid}"
@dataclass
class PositionData(BaseData):
"""
Position data is used for tracking each individual position holding.
"""
symbol: str
exchange: Exchange
direction: Direction
volume: float = 0
frozen: float = 0
price: float = 0
pnl: float = 0
yd_volume: float = 0
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
self.vt_positionid = f"{self.vt_symbol}.{self.direction.value}"
@dataclass
class AccountData(BaseData):
"""
Account data contains information about balance, frozen and
available.
"""
accountid: str
balance: float = 0
frozen: float = 0
def __post_init__(self):
""""""
self.available = self.balance - self.frozen
self.vt_accountid = f"{self.gateway_name}.{self.accountid}"
@dataclass
class LogData(BaseData):
"""
Log data is used for recording log messages on GUI or in log files.
"""
msg: str
level: int = INFO
def __post_init__(self):
""""""
self.time = datetime.now()
@dataclass
class ContractData(BaseData):
"""
Contract data contains basic information about each contract traded.
"""
symbol: str
exchange: Exchange
name: str
product: Product
size: float
pricetick: float
min_volume: float = 1 # minimum trading volume of the contract
stop_supported: bool = False # whether server supports stop order
net_position: bool = False # whether gateway uses net position volume
history_data: bool = False # whether gateway provides bar history data
option_strike: float = 0
option_underlying: str = "" # vt_symbol of underlying contract
option_type: OptionType = None
option_expiry: datetime = None
option_portfolio: str = ""
option_index: str = "" # for identifying options with same strike price
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class QuoteData(BaseData):
"""
Quote data contains information for tracking the latest status
of a specific quote.
"""
symbol: str
exchange: Exchange
quoteid: str
bid_price: float = 0.0
bid_volume: int = 0
ask_price: float = 0.0
ask_volume: int = 0
bid_offset: Offset = Offset.NONE
ask_offset: Offset = Offset.NONE
status: Status = Status.SUBMITTING
datetime: datetime = None
reference: str = ""
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
self.vt_quoteid = f"{self.gateway_name}.{self.quoteid}"
def is_active(self) -> bool:
"""
Check if the quote is active.
"""
return self.status in ACTIVE_STATUSES
def create_cancel_request(self) -> "CancelRequest":
"""
Create cancel request object from quote.
"""
req = CancelRequest(
orderid=self.quoteid, symbol=self.symbol, exchange=self.exchange
)
return req
@dataclass
class SubscribeRequest:
"""
Request sending to specific gateway for subscribing tick data update.
"""
symbol: str
exchange: Exchange
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class OrderRequest:
"""
Request sending to specific gateway for creating a new order.
"""
symbol: str
exchange: Exchange
direction: Direction
type: OrderType
volume: float
price: float = 0
offset: Offset = Offset.NONE
reference: str = ""
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
def create_order_data(self, orderid: str, gateway_name: str) -> OrderData:
"""
Create order data from request.
"""
order = OrderData(
symbol=self.symbol,
exchange=self.exchange,
orderid=orderid,
type=self.type,
direction=self.direction,
offset=self.offset,
price=self.price,
volume=self.volume,
reference=self.reference,
gateway_name=gateway_name,
)
return order
@dataclass
class CancelRequest:
"""
Request sending to specific gateway for canceling an existing order.
"""
orderid: str
symbol: str
exchange: Exchange
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class HistoryRequest:
"""
Request sending to specific gateway for querying history data.
"""
symbol: str
exchange: Exchange
start: datetime
end: datetime = None
interval: Interval = None
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class QuoteRequest:
"""
Request sending to specific gateway for creating a new quote.
"""
symbol: str
exchange: Exchange
bid_price: float
bid_volume: int
ask_price: float
ask_volume: int
bid_offset: Offset = Offset.NONE
ask_offset: Offset = Offset.NONE
reference: str = ""
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
def create_quote_data(self, quoteid: str, gateway_name: str) -> QuoteData:
"""
Create quote data from request.
"""
quote = QuoteData(
symbol=self.symbol,
exchange=self.exchange,
quoteid=quoteid,
bid_price=self.bid_price,
bid_volume=self.bid_volume,
ask_price=self.ask_price,
ask_volume=self.ask_volume,
bid_offset=self.bid_offset,
ask_offset=self.ask_offset,
reference=self.reference,
gateway_name=gateway_name,
)
return quote
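# Hedged usage sketch: a gateway typically builds an OrderData from an
# OrderRequest once the order is accepted (symbol, exchange and ids below
# are illustrative):
#
#   req = OrderRequest(
#       symbol="IF2109", exchange=Exchange.CFFEX,
#       direction=Direction.LONG, type=OrderType.LIMIT,
#       volume=1, price=4500.0,
#   )
#   order = req.create_order_data(orderid="1", gateway_name="CTP")
#   assert order.vt_orderid == "CTP.1" and order.is_active()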
|
from askapdev.rbuild.setup import setup
from askapdev.rbuild.dependencies import Dependency
from setuptools import find_packages
dep = Dependency()
dep.add_package()
ROOTPKG = 'askap'
COMPONENT = 'analysis'
PKGNAME = 'data'
setup(name = '%s.%s.%s' % (ROOTPKG, COMPONENT, PKGNAME),
version = 'current',
description = 'Scripts to create simulated data catalogues and images',
author = 'MatthewWhiting',
author_email = 'Matthew.Whiting@csiro.au',
url = 'http://svn.atnf.csiro.au/askap',
keywords = ['ASKAP', ],
long_description = '''
This package contains some simulated data catalogues,
as well as scripts to make more from the SKADS simulations.
There are also scripts to make subsets of the catalogues and
annotation file for use with the karma package.
''',
packages = find_packages(),
namespace_packages = [ROOTPKG, '%s.%s' % (ROOTPKG, COMPONENT)],
license = 'GPL',
zip_safe = 0,
dependency = dep,
# Uncomment if using scripts (applications which go in bin)
scripts = ["scripts/createSKADS.py",
"scripts/createSKADSspectralline.py",
"scripts/createSubLists.py",
"scripts/setupAllModels.py",
"scripts/convolveModel.py",
"scripts/createComparisonCatalogue.py",
"scripts/build_S3SAX_mySQLdb.py",
"scripts/makeScintillationLightcurves.py",
"scripts/scintillationLightcurveTest.py"],
# Uncomment if using unit tests
# test_suite = "nose.collector",
)
|
from .sgns import SGNS # noqa
|
#
#
# Copyright (C) 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Job filter rule commands"""
# pylint: disable=W0401,W0614
# W0401: Wildcard import ganeti.cli
# W0614: Unused import %s from wildcard import (since we need cli)
from ganeti.cli import *
from ganeti import constants
from ganeti import utils
#: default list of fields for L{ListFilters}
_LIST_DEF_FIELDS = ["uuid", "watermark", "priority",
"predicates", "action", "reason_trail"]
def AddFilter(opts, args):
"""Add a job filter rule.
@param opts: the command line options selected by the user
@type args: list
@param args: should be an empty list
@rtype: int
@return: the desired exit code
"""
assert args == []
reason = []
if opts.reason:
reason = [(constants.OPCODE_REASON_SRC_USER,
opts.reason,
utils.EpochNano())]
cl = GetClient()
result = cl.ReplaceFilter(None, opts.priority, opts.predicates, opts.action,
reason)
print(result)  # Prints the UUID of the replaced/created filter
return 0
def ListFilters(opts, args):
"""List job filter rules and their properties.
@param opts: the command line options selected by the user
@type args: list
@param args: filters to list, or empty for all
@rtype: int
@return: the desired exit code
"""
desired_fields = ParseFields(opts.output, _LIST_DEF_FIELDS)
cl = GetClient()
return GenericList(constants.QR_FILTER, desired_fields, args, None,
opts.separator, not opts.no_headers,
verbose=opts.verbose, cl=cl, namefield="uuid")
def ListFilterFields(opts, args):
"""List filter rule fields.
@param opts: the command line options selected by the user
@type args: list
@param args: fields to list, or empty for all
@rtype: int
@return: the desired exit code
"""
cl = GetClient()
return GenericListFields(constants.QR_FILTER, args, opts.separator,
not opts.no_headers, cl=cl)
def ReplaceFilter(opts, args):
"""Replaces a job filter rule with the given UUID, or creates it, if it
doesn't exist already.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the UUID of the filter
@rtype: int
@return: the desired exit code
"""
(uuid,) = args
reason = []
if opts.reason:
reason = [(constants.OPCODE_REASON_SRC_USER,
opts.reason,
utils.EpochNano())]
cl = GetClient()
result = cl.ReplaceFilter(uuid,
priority=opts.priority,
predicates=opts.predicates,
action=opts.action,
reason=reason)
print(result) # Prints the UUID of the replaced/created filter
return 0
def ShowFilter(_, args):
"""Show filter rule details.
@type args: list
@param args: should either be an empty list, in which case
we show information about all filters, or should contain
a list of filter UUIDs to be queried for information
@rtype: int
@return: the desired exit code
"""
cl = GetClient()
result = cl.QueryFilters(fields=["uuid", "watermark", "priority",
"predicates", "action", "reason_trail"],
uuids=args)
for (uuid, watermark, priority, predicates, action, reason_trail) in result:
ToStdout("UUID: %s", uuid)
ToStdout(" Watermark: %s", watermark)
ToStdout(" Priority: %s", priority)
ToStdout(" Predicates: %s", predicates)
ToStdout(" Action: %s", action)
ToStdout(" Reason trail: %s", reason_trail)
return 0
def DeleteFilter(_, args):
"""Remove a job filter rule.
@type args: list
@param args: a list of length 1 with the UUID of the filter to remove
@rtype: int
@return: the desired exit code
"""
(uuid,) = args
cl = GetClient()
result = cl.DeleteFilter(uuid)
assert result is None
return 0
FILTER_PRIORITY_OPT = \
cli_option("--priority",
dest="priority", action="store", default=0, type="int",
help="Priority for filter processing")
FILTER_PREDICATES_OPT = \
cli_option("--predicates",
dest="predicates", action="store", default=[], type="json",
help="List of predicates in the Ganeti query language,"
" given as a JSON list.")
FILTER_ACTION_OPT = \
cli_option("--action",
dest="action", action="store", default="CONTINUE",
type="filteraction",
help="The effect of the filter. Can be one of 'ACCEPT',"
" 'PAUSE', 'REJECT', 'CONTINUE' and '[RATE_LIMIT, n]',"
" where n is a positive integer.")
commands = {
"add": (
AddFilter, ARGS_NONE,
[FILTER_PRIORITY_OPT, FILTER_PREDICATES_OPT, FILTER_ACTION_OPT],
"",
"Adds a new filter rule"),
"list": (
ListFilters, ARGS_MANY_FILTERS,
[NOHDR_OPT, SEP_OPT, FIELDS_OPT, VERBOSE_OPT],
"[<filter-uuid>...]",
"Lists the job filter rules. The available fields can be shown"
" using the \"list-fields\" command (see the man page for details)."
" The default list is (in order): %s." % utils.CommaJoin(_LIST_DEF_FIELDS)),
"list-fields": (
ListFilterFields, [ArgUnknown()],
[NOHDR_OPT, SEP_OPT],
"[<fields>...]",
"Lists all available fields for filters"),
"info": (
ShowFilter, ARGS_MANY_FILTERS,
[],
"[<filter-uuid>...]",
"Shows information about the filter(s)"),
"replace": (
ReplaceFilter, ARGS_ONE_FILTER,
[FILTER_PRIORITY_OPT, FILTER_PREDICATES_OPT, FILTER_ACTION_OPT],
"<filter-uuid>",
"Replaces a filter"),
"delete": (
DeleteFilter, ARGS_ONE_FILTER,
[],
"<filter-uuid>",
"Removes a filter"),
}
def Main():
return GenericMain(commands)
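# Hedged CLI sketch (subcommand names come from the commands table above; the
# UUID and predicate list are illustrative placeholders):
#
#   gnt-filter add --priority 1 --action PAUSE --predicates '[...]'
#   gnt-filter list
#   gnt-filter info <filter-uuid>
#   gnt-filter delete <filter-uuid>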
|
import numpy as np
import random
from collections import defaultdict
from numpy import *
from random import random
counts = defaultdict(int)
height = 32
width = 32
M = 2
std = 0.05
for i in range(0,1000):
print(i)
zp=[complex(height*random(),width*random()), complex(height*random(),width*random()),complex(height*random(),width*random())]
wa=[complex(height*random(),width*random()), complex(height*random(),width*random()),complex(height*random(),width*random())]
# transformation parameters
a = linalg.det([[zp[0]*wa[0], wa[0], 1],
[zp[1]*wa[1], wa[1], 1],
[zp[2]*wa[2], wa[2], 1]])
b = linalg.det([[zp[0]*wa[0], zp[0], wa[0]],
[zp[1]*wa[1], zp[1], wa[1]],
[zp[2]*wa[2], zp[2], wa[2]]])
c = linalg.det([[zp[0], wa[0], 1],
[zp[1], wa[1], 1],
[zp[2], wa[2], 1]])
d = linalg.det([[zp[0]*wa[0], zp[0], 1],
[zp[1]*wa[1], zp[1], 1],
[zp[2]*wa[2], zp[2], 1]])
# cond1
v1 = np.absolute(a) ** 2 / np.absolute(a*d - b*c)
if not (v1 < M and v1 > 1/M):
counts['failed'] += 1
continue
v2 = np.absolute(a - 32*c) ** 2 / np.absolute(a*d - b*c)
if not (v2 < M and v2 > 1/M):
counts['failed'] += 1
continue
v3 = np.absolute(complex(a, -32*c)) ** 2 / np.absolute(a*d - b*c)
if not (v3 < M and v3 > 1/M):
counts['failed'] += 1
continue
v4 = np.absolute(complex(a - 32*c, -32*c)) ** 2 / np.absolute(a*d - b*c)
if not (v4 < M and v4 > 1/M):
counts['failed'] += 1
continue
v5 = np.absolute(complex(a - 16*c, -16*c)) ** 2 / np.absolute(a*d - b*c)
if not (v5 < M and v5 > 1/M):
counts['failed'] += 1
continue
v6 = real(complex(16 - b, 16*d) / complex(a - 16*c, -16*c))
if not (v6 > 0 and v6 < 32):
counts['failed'] += 1
continue
v7 = imag(complex(16 - b, 16*d) / complex(a - 16*c, -16*c))
if not (v7 > 0 and v7 < 32):
counts['failed'] += 1
continue
counts['passed'] += 1
print(counts)
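# Note: a, b, c, d above are the standard determinant formulas for fitting a
# Mobius transform w = (a*z + b) / (c*z + d) through the three random point
# pairs zp[i] -> wa[i]; the v1..v7 checks then accept a sample only when the
# transform's distortion over the 32x32 domain stays within the bound M.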
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example JSON segments and filings that are known to work.
These can be used in other tests as basis for the JSON filings.
"""
from .schema_data import ANNUAL_REPORT, BUSINESS, CHANGE_OF_ADDRESS, CHANGE_OF_DIRECTORS, FILING_HEADER
|
"""TEMP."""
from pyisc.shared.nodes import RootNode, Node, PropertyNode
expected_bind = RootNode('Root')
child1 = Node(
type='options',
value=None,
parameters=None,
children=[
PropertyNode(
type='directory',
value='"/var/lib/named"',
parameters=None),
PropertyNode(
type='dump-file',
value='"/var/log/named_dump.db"',
parameters=None),
PropertyNode(
type='statistics-file',
value='"/var/log/named.stats"',
parameters=None),
Node(
type='forwarders',
value=None,
parameters=None,
children=[
PropertyNode(
type='62.31.176.39',
value=None,
parameters=None),
PropertyNode(
type='193.38.113.3',
value=None,
parameters=None)
]
),
Node(
type='listen-on-v6',
value=None,
parameters=None,
children=[
PropertyNode(
type='any',
value=None,
parameters=None)
]
),
PropertyNode(
type='notify',
value='no',
parameters=None)
])
child2 = Node(
type='zone',
value='"."',
parameters='in',
children=[
PropertyNode(
type='type',
value='hint',
parameters=None),
PropertyNode(
type='file',
value='"root.hint"',
parameters=None)
]
)
child3 = Node(
type='zone',
value='"localhost"',
parameters='in',
children=[
PropertyNode(
type='type',
value='master',
parameters=None),
PropertyNode(
type='file',
value='"localhost.zone"',
parameters=None)
]
)
child4 = Node(
type='zone',
value='"0.0.127.in-addr.arpa"',
parameters='in',
children=[
PropertyNode(
type='type',
value='master',
parameters=None),
PropertyNode('file', '"127.0.0.zone"', None)
]
)
child5 = Node(
type='zone',
value='"spring.wellho.net"',
parameters='in',
children=[
PropertyNode(
type='type',
value='master',
parameters=None),
PropertyNode(
type='file',
value='"/var/lib/named/wellho.zone"',
parameters=None)
]
)
child6 = PropertyNode(
type='include',
value='"/etc/named.conf.include"',
parameters=None
)
expected_bind.extend(
[
child1,
child2,
child3,
child4,
child5,
child6
]
)
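# For reference, the expected tree above corresponds roughly to a named.conf
# like the following (reconstructed from the node values; abbreviated):
#
#   options {
#       directory "/var/lib/named";
#       dump-file "/var/log/named_dump.db";
#       statistics-file "/var/log/named.stats";
#       forwarders { 62.31.176.39; 193.38.113.3; };
#       listen-on-v6 { any; };
#       notify no;
#   };
#   zone "." in { type hint; file "root.hint"; };
#   zone "localhost" in { type master; file "localhost.zone"; };
#   include "/etc/named.conf.include";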
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import tensorflow as tf
from copy import deepcopy
from tqdm import tqdm
from pathlib import Path
from sklearn.preprocessing import MinMaxScaler
import datetime
import sys
import math
import util
import config
import indicators
weight_decay_beta = 10e-9
run_set = ['goldman', 'index', '^BVSP', '^TWII', '^IXIC', 'index_sampled']
stocks = ['^BVSP', '^TWII', '^IXIC']
# stocks = ['GGSIX', 'GOIIX', 'GIPIX']
path = "data/rl/rl_model_1/lagged"
choose_set_num = 1
price_period = 20
risk_level = 1
load_model = False
save_rl_data = True
save_passive = True
save_algo_data = True
df_list, date_range, trend_list, _ = util.get_algo_dataset(choose_set_num)
max_ep_length = len(trend_list)
action_taken_list = []
batch_size = 32
update_freq = 10
gamma = .99
start_e = 1
end_e = 0.1
annealing_steps = 5000
num_episodes = 350
pre_train_steps = 84400 #160000
max_epLength = len(trend_list) - 1
h_size = 100
tau = 0.0005
num_actions = 4
state_dimension = 5
#Set the rate of random action decrease.
e_rate = start_e
step_drop = (start_e - end_e)/annealing_steps
# #BVSP
# stock_pred_model = tf.keras.models.load_model(f'data/rl/{run_set[choose_set_num]}/stock_pred.hdf5')
# scaler = MinMaxScaler(feature_range=(0,1))
class Qnetwork():
def __init__(self, H):
sum_regularization = 0
self.x = tf.compat.v1.placeholder(tf.float32, [1, state_dimension])
self.W0 = tf.Variable(tf.random.uniform([state_dimension, H], 0, 1))
self.b0 = tf.Variable(tf.constant(0.1, shape=[H]))
self.y_hidden = tf.nn.relu(tf.matmul(self.x, self.W0) + self.b0)
sum_regularization += weight_decay_beta * tf.nn.l2_loss(self.W0)
self.W1 = tf.Variable(tf.random.uniform([H, num_actions], 0, 1))
self.b1 = tf.Variable(tf.constant(0.1, shape=[num_actions]))
sum_regularization += weight_decay_beta * tf.nn.l2_loss(self.W1)
# q out
self.q_values = tf.matmul(self.y_hidden, self.W1) + self.b1
# predict
self.best_action = tf.argmax(self.q_values, 1)
# next q
self.target = tf.compat.v1.placeholder(tf.float32, [1 ,num_actions])
self.loss = tf.reduce_sum(tf.square(self.target - self.q_values) + sum_regularization)
# self.update = tf.compat.v1.train.AdamOptimizer(learning_rate=0.00005).minimize(self.loss)
self.update = tf.compat.v1.train.AdamOptimizer(learning_rate=0.001).minimize(self.loss)
# TODO: try higher lr or dropout
# self.update = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.00005).minimize(self.loss)
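# Hedged usage sketch: one forward pass through the network to pick an action
# (graph mode, matching the compat.v1 session used further below):
#
#   tf.compat.v1.disable_eager_execution()
#   qnet = Qnetwork(h_size)
#   with tf.compat.v1.Session() as demo_sess:
#       demo_sess.run(tf.compat.v1.global_variables_initializer())
#       q = demo_sess.run(qnet.q_values,
#                         feed_dict={qnet.x: np.zeros((1, state_dimension))})
#       action = int(np.argmax(q))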
def norm_state(state):
temp = deepcopy(state)
return np.reshape(np.hstack(temp), (1, state_dimension))
def process_action(action, portfolio_composition):
new_portfolio_composition = deepcopy(portfolio_composition)
####################### Full switch ##############################
# high risk up, med risk up
if action == 0:
new_portfolio_composition[0] = 0.8
new_portfolio_composition[1] = 0.1
new_portfolio_composition[2] = 0.1
elif action == 1:
new_portfolio_composition[0] = 0.1
new_portfolio_composition[1] = 0.8
new_portfolio_composition[2] = 0.1
elif action == 2:
new_portfolio_composition[0] = 0.45
new_portfolio_composition[1] = 0.45
new_portfolio_composition[2] = 0.1
elif action == 3:
new_portfolio_composition[0] = 0.1
new_portfolio_composition[1] = 0.1
new_portfolio_composition[2] = 0.8
###############################################################
####################### Gradual ##############################
# # high risk up, med risk up
# if action == 0:
# for _ in range(3):
# # low risk base rate enough, L -> H
# if new_portfolio_composition[2] - 0.1 >= 0.1:
# new_portfolio_composition[0] += 0.1
# new_portfolio_composition[2] -= 0.1
# # med risk base rate enough, M -> H
# if new_portfolio_composition[1] - 0.1 >= 0.1:
# new_portfolio_composition[0] += 0.1
# new_portfolio_composition[1] -= 0.1
# elif action ==1:
# for _ in range(3):
# # high risk base rate enough, H -> M
# if new_portfolio_composition[0] - 0.1 >= 0.1:
# new_portfolio_composition[1] += 0.1
# new_portfolio_composition[0] -= 0.1
# # low risk base rate enough, L -> M
# if new_portfolio_composition[2] - 0.1 >= 0.1:
# new_portfolio_composition[1] += 0.1
# new_portfolio_composition[2] -= 0.1
# elif action == 2:
# for _ in range(3):
# # low risk base rate enough, L -> H
# if new_portfolio_composition[0] - 0.1 >= 0.1:
# new_portfolio_composition[2] += 0.1
# new_portfolio_composition[0] -= 0.1
# # low risk base rate enough, L -> M
# if new_portfolio_composition[1] - 0.1 >= 0.1:
# new_portfolio_composition[2] += 0.1
# new_portfolio_composition[1] -= 0.1
# elif action == 3:
# for _ in range(3):
# # high risk base rate enough, H -> L
# if new_portfolio_composition[0] - 0.1 >= 0.1:
# new_portfolio_composition[2] += 0.1
# new_portfolio_composition[0] -= 0.1
# # med risk base rate enough, M -> L
# if new_portfolio_composition[1] - 0.1 >= 0.1:
# new_portfolio_composition[2] += 0.1
# new_portfolio_composition[1] -= 0.1
################################################
return new_portfolio_composition
def get_next_state(current_index, trend_list, date_range, df_list):
date = trend_list[current_index+1]
date_idx = [i for i, cur_date in enumerate(date_range) if date == cur_date][0]
state_ = ()
for i in range(2):
# Check price data for state. Get the price_period num of days price before period
price_list = []
if date_idx - price_period >= 0:
price_dates = date_range[date_idx-price_period:date_idx]
else:
price_dates = date_range[0:date_idx]
for _ in range(price_period - date_idx):
price_list.append(0)
for date in price_dates:
price_list.append(df_list[i][df_list[i]['Date'] == date]['Close'].values[0])
df = pd.DataFrame({'Close': price_list})
df['EMA'] = indicators.exponential_moving_avg(df, window_size=6, center=False)
df['MACD_Line'] = indicators.macd_line(df, ema1_window_size=3, ema2_window_size=6, center=False)
df['MACD_Signal'] = indicators.macd_signal(df, window_size=6, ema1_window_size=3, ema2_window_size=6, center=False)
# df['Rsi'] = indicators.rsi(df, window_size=10, center=False)
# Predict using lstm model and get centered indicators
# if i == 0:
# df = get_predicted_indicator_df(df, price_list, scaler, high_risk_pred_model)
# elif i == 1:
# df = get_predicted_indicator_df(df, price_list, scaler, med_risk_pred_model)
ema_price = util.z_score_normalization(df.iloc[-1]['EMA'], df['EMA'].tolist())
macd_line = df['MACD_Line']
macd_signal = df['MACD_Signal']
macd = [macd_line.iloc[i] - macd_signal.iloc[i] for i in range(len(macd_line))]
macd = util.scale(macd[-1], macd)
if (math.isnan(ema_price) or math.isnan(macd)):
print(f'nan encountered: ema = {ema_price}, macd = {macd}')
state_ += (ema_price, macd)
if current_index == -1:
last_date_delta = 0
else:
last_date_delta = (trend_list[current_index+1] - trend_list[current_index]).days
state_ += (last_date_delta,)
return state_
def get_predicted_indicator_df(df, price_list, scaler, model):
scaled_df = scaler.fit_transform(df)
temp_data1 = []
# predict 3 days after. There must be a better way!
for j in range(3):
temp_data2 =[]
# lookback period of 7
for k in range(7):
temp_data3 = scaled_df[-j-k-1].tolist()[1:]
temp_data2.append(temp_data3)
temp_data1.append(temp_data2)
temp_data1 = np.array(temp_data1)
prediction = model.predict(temp_data1)
temp_data = np.concatenate((prediction, temp_data1[:,0,:]), axis=1)
pred_close = scaler.inverse_transform(temp_data)[:, 0]
# print(pred_close, price_list[-1])
for close in pred_close:
price_list.append(close)
df = pd.DataFrame({'Close': price_list})
df['EMA'] = indicators.exponential_moving_avg(df, window_size=6, center=True)
df['MACD_Line'] = indicators.macd_line(df, ema1_window_size=3, ema2_window_size=6, center=True)
df['MACD_Signal'] = indicators.macd_signal(df, window_size=6, ema1_window_size=3, ema2_window_size=6, center=True)
# print(df.iloc[3:-3])
return df.iloc[3:-3]
def get_reward(asset_list, action, current_index, trend_list, date_range, portfolio_composition, df_list):
new_asset_list = deepcopy(asset_list)
reward_period = 10
commisson_rate= 1.0/800
# get reward from reward_period number of days
date = trend_list[current_index]
date_idx = [i for i, cur_date in enumerate(date_range) if date == cur_date][0]
# Check price data for state. Get the price_period num of days price before period
if date_idx + reward_period < len(date_range):
reward_date = date_range[date_idx+reward_period]
else:
# Not checked in gradual approach. Fix if time allows
reward_date = date_range[-1]
passive_asset_sum, _ = get_reward_asset_sum(new_asset_list, portfolio_composition, date, reward_date, commisson_rate)
# ####### Gradual ##################
# changed_asset_sum = 0
# changed_composition_rates = portfolio_composition
# changed_asset_list = deepcopy(asset_list)
# for i, cur_date in enumerate(date_range[date_idx:date_idx+reward_period]):
# if cur_date != trend_list[current_index + 1] and i < 3:
# # Composition rate 1 day after
# changed_composition_rates = process_action(action, portfolio_composition)
# changed_asset_sum, changed_asset_list = get_reward_asset_sum(changed_asset_list, changed_composition_rates, cur_date, date_range[date_idx+i+1], commisson_rate)
# # print('From: ',cur_date,' to ',date_range[date_idx+i+1])
# else:
# date = cur_date
# break
# if changed_asset_sum - passive_asset_sum == 0 or passive_asset_sum == 0:
# new_asset_list, nav_reward = calc_actions_nav(asset_list, portfolio_composition, trend_list, current_index, date_range)
# return 0 , changed_composition_rates, new_asset_list
# new_asset_list, nav_reward = changed_asset_list, changed_asset_sum
##########################
# #### Full swing #############
changed_composition_rates = process_action(action, portfolio_composition)
changed_asset_sum, _ = get_reward_asset_sum(new_asset_list, changed_composition_rates, date, reward_date, commisson_rate)
if changed_asset_sum - passive_asset_sum == 0 or passive_asset_sum == 0:
new_asset_list, nav_reward = calc_actions_nav(asset_list, portfolio_composition, trend_list, current_index, date_range)
return 0 , changed_composition_rates, new_asset_list
new_asset_list, nav_reward = calc_actions_nav(asset_list, portfolio_composition, trend_list, current_index, date_range)
# ########################
trend_list_len = len(trend_list)
# scale to 0.5-1 depending on trend position
time_scaling_factor = 0.5 * (trend_list_len - current_index) / trend_list_len + 0.5
# print('Asset difference: ', changed_asset_sum - passive_asset_sum)
# print('old portfolio: ', portfolio_composition, 'New: ', changed_composition_rates)
# print('Date: ', date, 'Change:', (changed_asset_sum - passive_asset_sum) / passive_asset_sum * 100)
# print('Changed sum: ', changed_asset_sum, 'Passive sum: ', passive_asset_sum)
reward = (changed_asset_sum - passive_asset_sum) / passive_asset_sum * time_scaling_factor
# print('Reward: ', reward)
# print('nav Reward: ', nav_reward/10000000)
return reward + nav_reward/10000000, changed_composition_rates, new_asset_list
def get_reward_asset_sum(asset_list, changed_composition_rates, current_date, reward_date, commisson_rate):
temp_asset_list = deepcopy(asset_list)
# print(temp_asset_list)
for i in range(3):
# Update asset values
previous_close_price = df_list[i][df_list[i]['Date'] == current_date]['Close'].values[0]
current_close_price = df_list[i][df_list[i]['Date'] == reward_date]['Close'].values[0]
# print('Prev_close',previous_close_price,'Current_close',current_close_price)
temp_asset_list[i] = temp_asset_list[i] * current_close_price / previous_close_price
# print(temp_asset_list[i])
total_assets = sum(temp_asset_list)
# print('total assets',total_assets)
for i in range(3):
amount_change = changed_composition_rates[i] * total_assets - asset_list[i]
if amount_change <= 0:
temp_asset_list[i] = temp_asset_list[i] + amount_change
else:
temp_asset_list[i] = temp_asset_list[i] + amount_change * (1 - commisson_rate)**2
return sum(temp_asset_list), temp_asset_list
def get_action_counts(action_taken_list: list) -> list:
counts = [0] * num_actions
for action in action_taken_list:
counts[action] += 1
return counts
def calc_actions_nav(asset_list, portfolio_composition, trend_list, index, date_range, final_nav=False, commisson_rate= 1.0/800):
new_asset_list = deepcopy(asset_list)
if final_nav:
# Update for end of date
for j in range(3):
# Update asset values
previous_close_price = df_list[j][df_list[j]['Date'] == trend_list[-1]]['Close'].values[0]
current_close_price = df_list[j][df_list[j]['Date'] == date_range[-1]]['Close'].values[0]
new_asset_list[j] = new_asset_list[j] * current_close_price / previous_close_price
return new_asset_list, sum(new_asset_list)
# training starts at trend_list index 10, so the step before it uses the first date in date_range
if index == 10 - 1:
prev_date = date_range[0]
else:
prev_date = trend_list[index-1]
date = trend_list[index]
# Update asset values by passive market movement
for j in range(3):
previous_close_price = df_list[j][df_list[j]['Date'] == prev_date]['Close'].values[0]
current_close_price = df_list[j][df_list[j]['Date'] == date]['Close'].values[0]
new_asset_list[j] = new_asset_list[j] * current_close_price / previous_close_price
total_assets = sum(new_asset_list)
# Update asset values by portfolio adjustment
for j in range(3):
amount_change = portfolio_composition[j] * total_assets - new_asset_list[j]
# Reduce composition
if amount_change <= 0:
new_asset_list[j] = new_asset_list[j] + amount_change
# Increase composition. Incur buy and sell commission
else:
new_asset_list[j] = new_asset_list[j] + amount_change * (1 - commisson_rate)**2
return new_asset_list, sum(new_asset_list)
def get_action(q_values: list) -> int:
return np.argmax(q_values)
main_QN = Qnetwork(h_size)
saver = tf.compat.v1.train.Saver()
# Make a path for our model to be saved in.
Path(path).mkdir(parents=True, exist_ok=True)
with tf.compat.v1.Session() as sess:
if load_model:
print('Loading model')
ckpt = tf.train.get_checkpoint_state(path)
print(ckpt.model_checkpoint_path)
saver.restore(sess,ckpt.model_checkpoint_path)
# #BVSP
# high_risk_pred_model = tf.keras.models.load_model(f'data/rl/{run_set[choose_set_num]}/stock_pred_^BVSP.hdf5')
# med_risk_pred_model = tf.keras.models.load_model(f'data/rl/{run_set[choose_set_num]}/stock_pred_^TWII.hdf5')
# scaler = MinMaxScaler(feature_range=(0,1))
ep_reward = 0
start = 10
end = len(trend_list) -1
state = get_next_state(start - 1, trend_list, date_range, df_list)
base_rate_list = []
portfolio_composition = [0.1+0.3, 0.1+0.2, 0.1+0.2]
portfolio_composition_list = []
action_taken_list = []
reward_list = []
asset_list = [100000, 100000, 100000]
# prev_action = ACTION_CLOSE
for i in range(start, end):
# print(norm_state(s))
qv = sess.run(main_QN.q_values, feed_dict={main_QN.x: norm_state(state)})
# Remove the extra [] for action
action = get_action(qv)
# action = action[0]
action_taken_list.append(action)
state_ = get_next_state(i, trend_list, date_range, df_list)
step_reward, portfolio_composition, asset_list = get_reward(asset_list, action, i, trend_list, date_range, portfolio_composition, df_list)
reward_list.append(step_reward)
portfolio_composition_list.append(portfolio_composition)
state = state_
# calc_actions_nav returns (asset_list, nav); the original unpacking was swapped
asset_list, nav = calc_actions_nav(asset_list, portfolio_composition, trend_list, i, date_range, final_nav=True)
print(get_action_counts(action_taken_list))
print(nav)
action_df = pd.DataFrame({'Date': trend_list[start:-1], 'Reward': reward_list, 'Action': action_taken_list, 'Portfolio_composition': portfolio_composition_list})
action_df.to_csv(f'data/rl/{run_set[choose_set_num]}/lagged/actions_taken.csv')
nav_daily_dates_list = []
nav_daily_composition_list = [[], [], []]
nav_daily_net_list = []
daily_price_list = []
commisson_rate = 1.0/800
asset_list = [100000, 100000, 100000]
nav_daily_adjust_list = [date in trend_list[:-1] for date in date_range]
j = 0
last_trade_date = date_range[0]
for date in date_range:
# Generate daily NAV value for visualisation
current_nav_list = []
if date in trend_list[start:-1]:
# Update asset composition
for i in range(3):
previous_close_price = df_list[i][df_list[i]['Date'] == last_trade_date]['Close'].values[0]
current_close_price = df_list[i][df_list[i]['Date'] == date]['Close'].values[0]
asset_list[i] = asset_list[i] * current_close_price / previous_close_price
total_assets = sum(asset_list)
# Rebalance portfolio
for i in range(3):
amount_change = portfolio_composition_list[j][i] * total_assets - asset_list[i]
# Reduce composition
if amount_change <= 0:
asset_list[i] = asset_list[i] + amount_change
# Increase composition. Incur buy and sell commission
else:
asset_list[i] = asset_list[i] + amount_change * (1 - commisson_rate)**2
current_nav_list.append(asset_list[i])
last_trade_date = date
j+=1
else:
for i in range(3):
previous_close_price = df_list[i][df_list[i]['Date'] == last_trade_date]['Close'].values[0]
current_close_price = df_list[i][df_list[i]['Date'] == date]['Close'].values[0]
current_nav_list.append(asset_list[i] * current_close_price / previous_close_price)
nav_daily_dates_list.append(date)
for i in range(3):
nav_daily_composition_list[i].append(current_nav_list[i])
# index the daily NAV to a base of 100 (initial total assets are 300000)
daily_price_list.append(sum(current_nav_list) / 300000 * 100)
nav_daily_net_list.append(sum(current_nav_list))
# Note that we are using the Laspeyres Price Index for calculation
daily_price_df = pd.DataFrame({'Date': nav_daily_dates_list, 'Close': daily_price_list})
daily_df = pd.DataFrame({'Date': nav_daily_dates_list,\
stocks[0]: nav_daily_composition_list[0],\
stocks[1]: nav_daily_composition_list[1],\
stocks[2]: nav_daily_composition_list[2],\
'Net': nav_daily_net_list,\
'Adjusted': nav_daily_adjust_list})
# Generate quarterly NAV returns for visualisation
quarterly_df = util.cal_fitness_with_quarterly_returns(daily_df, [], price_col='Net')
# Generate passive NAV returns for comparison (buy and hold)
# assets are all 300000 to be able to compare to algo
asset_list = [300000, 300000, 300000]
last_date = nav_daily_dates_list[0]
passive_nav_daily_composition_list = [[],[],[]]
for date in nav_daily_dates_list:
for i in range(len(stocks)):
previous_close_price = df_list[i][df_list[i]['Date'] == last_date]['Close'].values[0]
current_close_price = df_list[i][df_list[i]['Date'] == date]['Close'].values[0]
asset_list[i] = asset_list[i] * current_close_price / previous_close_price
passive_nav_daily_composition_list[i].append(asset_list[i])
last_date = date
passive_daily_df = pd.DataFrame({'Date': nav_daily_dates_list,\
stocks[0]: passive_nav_daily_composition_list[0],\
stocks[1]: passive_nav_daily_composition_list[1],\
stocks[2]: passive_nav_daily_composition_list[2]})
passive_quarterly_df = pd.DataFrame()
for i in range(len(stocks)):
if i == 0:
passive_quarterly_df = util.cal_fitness_with_quarterly_returns(passive_daily_df, [], price_col=stocks[i])
passive_quarterly_df = passive_quarterly_df.rename(columns={"quarterly_return": stocks[i]})
else:
passive_quarterly_df[stocks[i]] = util.cal_fitness_with_quarterly_returns(passive_daily_df, [], price_col=stocks[i])['quarterly_return']
# print(passive_quarterly_df)
# Print some quarterly difference statistics
for symbol in stocks:
difference = quarterly_df['quarterly_return'].values - passive_quarterly_df[symbol].values
# print('Stock {}: {}'.format(symbol, difference))
print('Stock {} total return difference = {}'.format(symbol,sum(difference)))
for symbol in stocks:
symbol_cvar = abs(util.cvar_percent(passive_daily_df, len(passive_daily_df)-1, len(passive_daily_df)-1, price_col=symbol))
print('Stock cvar {}: {}'.format(symbol, symbol_cvar))
# print('Stock {} cvar difference = {}'.format(symbol, cvar - symbol_cvar))
path_str = 'data/rl/{}'.format(run_set[choose_set_num])
path = Path(path_str)
path.mkdir(parents=True, exist_ok=True)
if save_passive:
passive_daily_df.to_csv(f'data/rl/{run_set[choose_set_num]}/lagged/passive_daily_nav.csv')
passive_quarterly_df.to_csv(f'data/rl/{run_set[choose_set_num]}/lagged/passive_quarterly_nav_return.csv')
print('Passive data saved for {}'.format(run_set[choose_set_num]))
if save_rl_data:
daily_df.to_csv(f'data/rl/{run_set[choose_set_num]}/lagged/daily_nav.csv')
quarterly_df.to_csv(f'data/rl/{run_set[choose_set_num]}/lagged/quarterly_nav_return.csv')
daily_price_df.to_csv(f'data/rl/{run_set[choose_set_num]}/lagged/daily_price.csv')
print('Data saved for {}'.format(run_set[choose_set_num]))
sys.exit(0)
sess.run(tf.compat.v1.global_variables_initializer())
# #BVSP
# high_risk_pred_model = tf.keras.models.load_model(f'data/rl/{run_set[choose_set_num]}/stock_pred_^BVSP.hdf5')
# med_risk_pred_model = tf.keras.models.load_model(f'data/rl/{run_set[choose_set_num]}/stock_pred_^TWII.hdf5')
# scaler = MinMaxScaler(feature_range=(0,1))
total_steps = 0
reward_list = []
position_idx = 0
portfolio_composition = [0.1, 0.1, 0.1 + 0.7]
ep_reward = 0
ep_10_reward = 0
nav = 0
nav_10_eps = 0
for j in tqdm(range(num_episodes)):
print(f'Total Steps taken: {total_steps}')
# reward_list.append(ep_reward / ((i % 10) + 1))
if j % 10 == 0:
print(f"Episode {j}, Total Steps: {total_steps} Average Reward {ep_10_reward / 10}, Average Nav {nav_10_eps / 10}")
print(f'exploration rate: {e_rate}')
ep_10_reward = 0
nav_10_eps = 0
# episode_buffer = experience_buffer()
start = 10
position_idx = start
state = get_next_state(position_idx-1, trend_list, date_range, df_list)
portfolio_composition = [0.1, 0.1, 0.8]
portfolio_composition_list = []
action_taken_list = []
reward_list = []
nav = 0
ep_reward = 0
asset_list = [100000,100000,100000]
while position_idx < max_ep_length - 1:
action, q_values = sess.run([main_QN.best_action, main_QN.q_values],feed_dict={main_QN.x:norm_state(state)})
# Remove the extra [] for action
action = action[0]
# print(f'Action: {action}, q_values: {q_values}')
# Explore
if np.random.rand(1) < e_rate or total_steps < pre_train_steps:
action = np.random.randint(0, num_actions)
action_taken_list.append(action)
step_reward, portfolio_composition, asset_list = get_reward(asset_list, action, position_idx, trend_list, date_range, portfolio_composition, df_list)
portfolio_composition_list.append(portfolio_composition)
ep_reward += step_reward
reward_list.append(step_reward)
# print(f'Reward for step: {step_reward}')
# print(state_)
# Feed new state to obtain new q_value
state_ = get_next_state(position_idx, trend_list, date_range, df_list)
q_values_ = sess.run(main_QN.q_values,feed_dict={main_QN.x:norm_state(state_)})
# print(f'New q_values {q_values_}')
# Get max q_value
max_q_value = np.max(q_values_)
position_idx += 1
# print(f'Norm state: {norm_state(state)}')
total_steps += 1
target_q = q_values
target_q[0, action] = step_reward + gamma*max_q_value
# print(f'Target q: {target_q}')
_,W1 = sess.run([main_QN.update, main_QN.W1],feed_dict={main_QN.x:norm_state(state), main_QN.target:target_q})
# Calculate profit at end of episode
# Reduce exploration rate
if total_steps > pre_train_steps:
if e_rate > end_e:
e_rate -= step_drop
state = state_
asset_list, nav = calc_actions_nav(asset_list, portfolio_composition, trend_list, position_idx, date_range, final_nav=True)
print(nav)
print(get_action_counts(action_taken_list))
nav_10_eps += nav
ep_10_reward += ep_reward
print(f'Reward for episode: {ep_reward}')
# Save the model every 50 episodes from episode 200 onwards, and at the end of training
if (j % 50 == 0 and j >= 200) or j == num_episodes - 1:
saver.save(sess,path+'/model.cptk')
print("Saved model")
# df = pd.DataFrame({'Date': trend_list[start:-1], 'Reward': reward_list, 'Action': action_taken_list, 'Portfolio_composition': portfolio_composition_list})
# df.to_csv(f'data/rl/{run_set[choose_set_num]}/iteration{j}.csv')
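# A minimal numpy sketch of the one-step Q-learning target computed in the
# training loop above (array values are illustrative; the real q_values come
# from the TensorFlow Qnetwork). Only the taken action's entry is replaced
# with r + gamma * max Q(s'); the other actions keep their current estimates.
def _q_target_sketch():
    gamma = 0.99
    q_values = np.array([[0.2, 0.5, 0.1]])       # Q(s, .), shape (1, num_actions)
    q_values_next = np.array([[0.3, 0.7, 0.0]])  # Q(s', .) after the step
    action, step_reward = 1, 0.05
    target_q = q_values.copy()                   # copy to keep the sketch pure
    target_q[0, action] = step_reward + gamma * np.max(q_values_next)
    return target_q                              # [[0.2, 0.743, 0.1]]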
|
""" Financial Modeling Prep Controller """
__docformat__ = "numpy"
import argparse
import os
from typing import List
from prompt_toolkit.completion import NestedCompleter
from gamestonk_terminal.fundamental_analysis.financial_modeling_prep import fmp_view
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.helper_funcs import (
get_flair,
)
from gamestonk_terminal.menu import session
class FinancialModelingPrepController:
"""Financial Modeling Prep Controller"""
# Command choices
CHOICES = [
"cls",
"?",
"help",
"q",
"quit",
"profile",
"quote",
"enterprise",
"dcf",
"income",
"balance",
"cash",
"metrics",
"ratios",
"growth",
]
def __init__(self, ticker: str, start: str, interval: str):
"""Constructor
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
start : str
Start date of the stock data
interval : str
Stock data interval
"""
self.ticker = ticker
self.start = start
self.interval = interval
self.fmp_parser = argparse.ArgumentParser(add_help=False, prog="fmp")
self.fmp_parser.add_argument(
"cmd",
choices=self.CHOICES,
)
def print_help(self):
"""Print help"""
print(
"https://github.com/GamestonkTerminal/GamestonkTerminal/"
"tree/main/gamestonk_terminal/fundamental_analysis/financial_modeling_prep"
)
intraday = "Daily" if self.interval == "1440min" else f"Intraday {self.interval}"
if self.start:
print(
f"\n{intraday} Stock: {self.ticker} (from {self.start.strftime('%Y-%m-%d')})"
)
else:
print(f"\n{intraday} Stock: {self.ticker}")
print("\nFinancial Modeling Prep API")
print(" cls clear screen")
print(" ?/help show this menu again")
print(" q quit this menu and return to the main menu")
print(" quit quit and exit the program")
print("")
print(" profile profile of the company")
print(" quote quote of the company")
print(" enterprise enterprise value of the company over time")
print(" dcf discounted cash flow of the company over time")
print(" income income statements of the company")
print(" balance balance sheet of the company")
print(" cash cash flow statement of the company")
print(" metrics key metrics of the company")
print(" ratios financial ratios of the company")
print(" growth financial statement growth of the company")
print("")
def switch(self, an_input: str):
"""Process and dispatch input
Returns
-------
True, False or None
False - quit the menu
True - quit the program
None - continue in the menu
"""
# Empty command
if not an_input:
print("")
return None
(known_args, other_args) = self.fmp_parser.parse_known_args(an_input.split())
# Help menu again
if known_args.cmd == "?":
self.print_help()
return None
# Clear screen
if known_args.cmd == "cls":
os.system("cls||clear")
return None
# the fallback must accept other_args, or unknown commands would raise a TypeError
return getattr(
    self, "call_" + known_args.cmd, lambda _: "Command not recognized!"
)(other_args)
def call_help(self, _):
"""Process Help command"""
self.print_help()
def call_q(self, _):
"""Process Q command - quit the menu"""
return False
def call_quit(self, _):
"""Process Quit command - quit the program"""
return True
def call_profile(self, other_args: List[str]):
"""Process profile command"""
fmp_view.profile(other_args, self.ticker)
def call_quote(self, other_args: List[str]):
"""Process quote command"""
fmp_view.quote(other_args, self.ticker)
def call_enterprise(self, other_args: List[str]):
"""Process income command"""
fmp_view.enterprise(other_args, self.ticker)
def call_dcf(self, other_args: List[str]):
"""Process dcf command"""
fmp_view.discounted_cash_flow(other_args, self.ticker)
def call_income(self, other_args: List[str]):
"""Process income command"""
fmp_view.income_statement(other_args, self.ticker)
def call_balance(self, other_args: List[str]):
"""Process balance command"""
fmp_view.balance_sheet(other_args, self.ticker)
def call_cash(self, other_args: List[str]):
"""Process cash command"""
fmp_view.cash_flow(other_args, self.ticker)
def call_metrics(self, other_args: List[str]):
"""Process metrics command"""
fmp_view.key_metrics(other_args, self.ticker)
def call_ratios(self, other_args: List[str]):
"""Process cash command"""
fmp_view.financial_ratios(other_args, self.ticker)
def call_growth(self, other_args: List[str]):
"""Process cash command"""
fmp_view.financial_statement_growth(other_args, self.ticker)
def menu(ticker: str, start: str, interval: str):
"""Financial Modeling Prep menu
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
start : str
Start date of the stock data
interval : str
Stock data interval
"""
fmp_controller = FinancialModelingPrepController(ticker, start, interval)
fmp_controller.call_help(None)
while True:
# Get input command from user
if session and gtff.USE_PROMPT_TOOLKIT:
completer = NestedCompleter.from_nested_dict(
{c: None for c in fmp_controller.CHOICES}
)
an_input = session.prompt(
f"{get_flair()} (fa)>(fmp)> ",
completer=completer,
)
else:
an_input = input(f"{get_flair()} (fa)>(fmp)> ")
try:
process_input = fmp_controller.switch(an_input)
if process_input is not None:
return process_input
except SystemExit:
print("The command selected doesn't exist\n")
continue
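# A minimal sketch of the getattr-based dispatch used in switch() above;
# the class and command names here are illustrative, not part of the module.
class _DispatchSketch:
    def call_quote(self, other_args):
        return f"quote called with {other_args}"

    def dispatch(self, cmd, other_args):
        # unknown commands fall through to a lambda that accepts the same argument
        return getattr(self, "call_" + cmd, lambda _: "Command not recognized!")(other_args)

# _DispatchSketch().dispatch("quote", ["-h"]) -> "quote called with ['-h']"
# _DispatchSketch().dispatch("nope", [])      -> "Command not recognized!"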
|
from abc import ABC, abstractmethod
from typing import Any, Callable, Optional, Tuple, TypeVar
ProfilingResult = Tuple[str, float]
T = TypeVar("T")
class AbstractProfiler(ABC):
@abstractmethod
def time(self,
name: str,
handler: Callable[..., T],
*args: Any
) -> T:
...
@abstractmethod
def dump(self) -> str:
...
@abstractmethod
def is_alive(self) -> bool:
...
@abstractmethod
def join_from(self, joiner: str) -> None:
...
ProfilerGetter = Callable[[],
Optional[AbstractProfiler]]
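# A minimal concrete implementation sketch of the interface above
# (WallClockProfiler is an illustrative name, not part of the original module):
# it times each handler call and accumulates ProfilingResult tuples.
import time

class WallClockProfiler(AbstractProfiler):
    def __init__(self) -> None:
        self._results = []  # list of ProfilingResult tuples
        self._alive = True

    def time(self, name: str, handler: Callable[..., T], *args: Any) -> T:
        start = time.perf_counter()
        try:
            return handler(*args)
        finally:
            self._results.append((name, time.perf_counter() - start))

    def dump(self) -> str:
        return "\n".join(f"{name}: {seconds:.6f}s" for name, seconds in self._results)

    def is_alive(self) -> bool:
        return self._alive

    def join_from(self, joiner: str) -> None:
        self._alive = False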
|
#!/usr/bin/env python
# Copyright (C) 2016 the V8 project authors. All rights reserved.
# This code is governed by the BSD license found in the LICENSE file.
from __future__ import print_function
import argparse
import glob, os, sys
from lib.expander import Expander
from lib.test import Test
# base name of the files to be ignored
ignored_files = [
'.DS_Store',
]
def print_error(*values):
print('ERROR:', *values, file=sys.stderr)
# When a directory contains at least one file with a `.case` extension, it
# should be interpreted as a "case directory"
def is_case_dir(location):
for file_name in os.listdir(location):
if file_name.lower().endswith('.case'):
return True
return False
def find_cases(location):
# single file
if os.path.isfile(location):
return location, [os.path.dirname(location)]
# directory with case files
if is_case_dir(location):
return None, [location]
# other directory, return all contents other than hidden "dot files"
return None, glob.glob(os.path.join(location, '*'))
def clean(args):
for (subdir, _, fileNames) in os.walk(args.directory):
for fileName in map(lambda x: os.path.join(subdir, x), fileNames):
if os.path.basename(fileName) in ignored_files:
continue
test = Test(fileName)
test.load()
if test.is_generated():
print('Deleting file "' + fileName + '"...')
os.remove(fileName)
def create(args):
caseFile, caseDirs = find_cases(args.cases)
for caseDir in caseDirs:
exp = Expander(caseDir)
for test in exp.expand('utf-8', caseFile):
if args.out:
try:
test_file = os.path.join(args.out, test.file_name)
test_mtime = os.path.getmtime(test_file)
if args.no_clobber:
print_error(
'Refusing to overwrite file: ' + test.file_name)
exit(1)
if not args.regenerate:
source_files = test.source_file_names
if all(test_mtime > os.path.getmtime(f) for f in source_files):
continue
existing = Test(test_file)
existing.load()
if not existing.is_generated():
print_error(
'Refusing to overwrite non-generated file: ' +
test.file_name)
exit(1)
except (OSError, IOError):
pass
test.write(args.out, parents=args.parents)
else:
print(test.to_string())
parser = argparse.ArgumentParser(description='Test262 test generator tool')
subparsers = parser.add_subparsers()
create_parser = subparsers.add_parser('create',
help='''Generate test material''')
create_parser.add_argument('-o', '--out', help='''The directory in which to write
compiled tests. If unspecified, tests will be written to standard output.''')
create_parser.add_argument('-p', '--parents', action='store_true',
help='''Create non-existent directories as necessary.''')
create_parser.add_argument('-n', '--no-clobber', action='store_true',
help='''Abort if any test file already exists.''')
create_parser.add_argument('-r', '--regenerate', action='store_true',
help='''Regenerate test files that are already newer than their source data.''')
create_parser.add_argument('cases',
help='''Test cases to generate. May be a file or a directory.''')
create_parser.set_defaults(func=create)
clean_parser = subparsers.add_parser('clean',
help='''Remove previously-generated files''')
clean_parser.add_argument('directory',
help='''Remove any generated tests from this directory''')
clean_parser.set_defaults(func=clean)
args = parser.parse_args()
args.func(args)
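# Example invocations (script and path names are illustrative):
#   python make.py create -o out/ -p src/spread/     # expand cases into out/
#   python make.py create src/spread/default.case    # print a single case to stdout
#   python make.py clean out/                        # delete generated files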
|
#!/usr/bin/env python
import logging as lg
import stau_utils as utils
# CONFIG
NAME = "job_goodbye_work"
LOG = lg.getLogger(NAME)
CHUNKSIZE = 2
STAU_CONFIG = utils.ReportConfig(
job_type=NAME,
chunk_size=CHUNKSIZE,
dependencies=[
# This job depends on having said hello first
utils.ReportDependencies(
"job_hello_work", # Name of job to call
0 # job must have executed 0s ago - i.e. always execute the dependency
),
],
main_func=f"{NAME}_func",
)
def select_work(**kwargs):
"""Get work IDs to run the Goodbye job against"""
return [0, 1]
def job_goodbye_work_func(work_ids=None, final_message=None, **kwargs):
"""This is the main entry point for the Goodbye job"""
if work_ids is None:
# Default to run against all work IDs
work_ids = select_work()
for wid in work_ids:
LOG.warning(f"Goodbye work ID {wid}")
if final_message is not None:
LOG.warning(final_message)
if __name__ == "__main__":
# All Stau jobs simply run through their main function, so this file can be executed locally without issue
job_goodbye_work_func()
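# Example (hypothetical values): run against a subset of work IDs with a
# closing message, just as the scheduler would via STAU_CONFIG's main_func:
#
#   job_goodbye_work_func(work_ids=[1], final_message="Shift over")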
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
"""
cd /Users/brunoflaven/Documents/02_copy/_random_is_all_about
python random_array_2.py
"""
import random
number_list = [7, 14, 21, 28, 35, 42, 49, 56, 63, 70]
print("Original list:", number_list)
random.shuffle(number_list)
print("List after first shuffle:", number_list)
random.shuffle(number_list)
print("List after second shuffle:", number_list)
|
import asyncio
import datetime
import re
import textwrap
from io import BytesIO
import sys
import json
import aiohttp
import discord
from utils import checks
DISCORD_INVITE = r'discord(?:app\.com|\.gg)[\/invite\/]?(?:(?!.*[Ii10OolL]).[a-zA-Z0-9]{5,6}|[a-zA-Z0-9\-]{2,32})'
# each entry needs its own trailing comma; without them Python silently
# concatenates adjacent string literals into a single whitelist entry
INVITE_WHITELIST = [
    "https://discord.gg/CMnKYPA",
    "http://discord.gg/Sg3Wznq",
    "https://discord.gg/AJMHQCM",
    "https://discord.gg/atKZZ7y",
]
OPUS_LIBS = [
'libopus-0.x86.dll',
'libopus-0.x64.dll',
'libopus-0.dll',
'libopus.so.0',
'libopus.0.dylib',
'libopus.so'
]
class Core:
def __init__(self, bot):
self.bot = bot
self.mod_log = None
self.blacklist = []
@staticmethod
def get_invites(message):
"""Fetches all invites from message"""
regex = re.match(DISCORD_INVITE, message.content)
return regex
def check_testing(self):
    return len(sys.argv) >= 2 and sys.argv[1] == "-test"
@staticmethod
def load_opus_lib(opus_libs=OPUS_LIBS):
"""Loads LibOpus For `bot`."""
if discord.opus.is_loaded():
return True
for opus_lib in opus_libs:
try:
discord.opus.load_opus(opus_lib)
return True
except OSError:
pass
@staticmethod
async def filify(attachments):
"""Converts attachments of an instance of `discord.Message` to a list of instances of `discord.File`."""
ret = []
for file in attachments:
byte = BytesIO()
await file.save(byte)
byte.seek(0)
ret.append(discord.File(byte, filename=file.filename))
return ret
async def on_ready(self):
"""A `bot` event triggered when the bot authentication has been successful.
Notifies console when `bot` is ready."""
if self.check_testing():
testingservers = json.load(open('testingdata.json'))
self.bot.main_server = self.bot.get_guild(testingservers["main_server"])
self.bot.backup_server = self.bot.get_guild(testingservers["backup_server"])
else:
self.bot.main_server = self.bot.get_guild(212982046992105473)
self.bot.backup_server = self.bot.get_guild(349652162948759555)
self.bot.whitelisted_servers = [
self.bot.main_server,
self.bot.get_guild(173152280634327040), #avinchtest
self.bot.get_guild(338732924893659137) #cassbotpy
]
self.mod_log = discord.utils.get(self.bot.main_server.channels, name="mod-log")
self.bot.session = aiohttp.ClientSession()
print(textwrap.dedent(f"""
=====================================
discord.py Version: {discord.__version__}
Python Version: {sys.version}
Bot Username: {self.bot.user.name}
Bot User ID: {self.bot.user.id}
Started: {datetime.datetime.utcnow()} UTC
Opus: {'Loaded' if self.load_opus_lib() else 'Failed'}
====================================="""))
async def on_message(self, message):
"""A `bot` event triggered when a message is sent."""
ping_role = discord.utils.get(message.guild.roles, name="ping")
gamenight_role = discord.utils.get(message.guild.roles, name="gamenight")
# user IDs are ints in the discord.py rewrite, so compare against an int
if "leveled up" in message.content and message.author.id == 172002275412279296:
await message.channel.send("<:ding:230664475554873344>")
# Bot Checker
if message.author.bot:
return
# Main Server Checker
if message.guild not in self.bot.whitelisted_servers:
return
# Non-Mod Checker
if not checks.is_mod(message.guild, message.author):
# Non-Mod Ping Mention Checker
if ping_role.id in message.raw_role_mentions or gamenight_role.id in message.raw_role_mentions:
msg = f"**Do not abuse pingable roles!** {message.author.mention}"
# Ping Mention Consequence
await message.channel.send(msg)
await message.delete()
await message.author.edit(roles=[], reason="Pingable Role Mention")
# Ping Mention Mod Log Embed
alert_embed = discord.Embed(
title="Pingable Role Mention",
description=f'User: **{message.author.name}** \nChannel: {message.channel.name}',
color=discord.Color.red()
)
alert_embed.add_field(
name="User:",
value=message.author.name
)
alert_embed.add_field(
name="Channel",
value=f"{message.channel.name}({message.channel.id})"
)
alert_embed.set_thumbnail(url=message.author.avatar_url)
alert_embed.set_footer(text='Abuse Notification')
await discord.utils.get(message.guild.channels, name="mod-log").send(embed=alert_embed)
# Non-Mod Invite Checker
if self.get_invites(message):
msg = f"**Do not send invites!** {message.author.mention}"
await message.channel.send(msg)
await message.delete()
member_voice = message.author.voice
# Lower-Case System;Start 1
if str(message.content).lower() in ["cassandra can you hear me", "cassandra, can you hear me?", "cassandra can you hear me?", "cassandra, can you hear me"] : # todo: replace with regex
if member_voice:
vc = await message.author.voice.channel.connect()
source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio('audio/ss1.mp3'))
vc.play(source)
await asyncio.sleep(4)
await vc.disconnect()
else:
await message.channel.send("Yes.")
# Lower-Case System;Start 2
elif str(message.content).lower() in ["cassandra are you ready to begin", "are you ready to begin", "cassandra, are you ready to begin", "cassandra are you ready to begin?", "cassandra, are you ready to begin?"]: # todo: replace with regex
if member_voice:
vc = await message.author.voice.channel.connect()
source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio('audio/ss2.mp3'))
vc.play(source)
await asyncio.sleep(5)
await vc.disconnect()
else:
await message.channel.send("Yes,")
await asyncio.sleep(1)
await message.channel.send("I'm ready.")
# Archiver
if message.author.id in self.blacklist:
await message.channel.send(f'Thank you for your time at {message.guild.name}. Understandable, have a nice day, {message.author.mention}.')
await message.author.edit(roles=[], reason='r/Area11Banned Special')
await message.author.send(f'You have been banned from {message.guild.name}. Hope you have a good evening.')
await message.author.ban(reason='r/Area11Banned Special')
def setup(bot):
bot.add_cog(Core(bot))
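# A quick standalone check of the DISCORD_INVITE pattern (the sample strings
# are made up); re.search mirrors the get_invites helper above.
if __name__ == "__main__":
    for text in ("join us at discord.gg/AbCdEf", "no invite here"):
        print(text, "->", bool(re.search(DISCORD_INVITE, text)))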
|
# -----------------------------------------------------------------------------
# calclex.py
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.lex as lex
tokens = (
'NAME','NUMBER',
'PLUS','MINUS','TIMES','DIVIDE','EQUALS',
'LPAREN','RPAREN',
)
# Tokens
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_EQUALS = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
def t_NUMBER(t):
r'\d+'
try:
t.value = int(t.value)
except ValueError:
print("Integer value too large %s" % t.value)
t.value = 0
return t
t_ignore = " \t"
def t_newline(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Build the lexer
lexer = lex.lex()
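# A short usage sketch of the standard ply.lex API: feed the lexer a string
# and iterate over the tokens it produces.
if __name__ == "__main__":
    lexer.input("x = 3 * (4 + y)")
    for tok in lexer:
        print(tok.type, tok.value)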
|