def create_registry(attr_name):
"""Create a new registry type that tracks objects by the given attribute name.
Arguments:
attr_name -- the string name of the attribute by which to key the registry
"""
class Registry(type):
"""An abstract registry for objects keyed by an attribute value."""
_registered = {}
def __init__(self, name, bases, attrs):
"""Update our object registry, keyed by an attribute value."""
super(Registry, self).__init__(name, bases, attrs)
if bases[0] == object:
return
if not attrs.get(attr_name, None):
raise TypeError("You must define a '%(attr)s' for the %(class)s class" % {'attr': attr_name, 'class': name})
attr_val = attrs[attr_name]
if attr_val in self._registered:
raise TypeError("You cannot have more than one %(class) class with a '%(attr)s' of '%(val)s'" % {'class': name, 'attr': attr_name, 'val': attr_val})
self._registered[attr_val] = self
@classmethod
        def get_registry_item(cls, attr_val):
            """Return the class registered under the given attribute value.
            If no class is registered under that value, a ValueError is raised.
            """
            try:
                return cls._registered[attr_val]
except KeyError:
raise ValueError("No class instance could be found for '%(attr)s'" % {'attr': attr_val})
return Registry
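
if __name__ == '__main__':
    # A minimal usage sketch (Python 3 class syntax; the 'slug' attribute and
    # the Page/HomePage names are illustrative, not part of this module).
    SlugRegistry = create_registry('slug')

    class Page(object, metaclass=SlugRegistry):
        """Abstract base: its first base is object, so it is not registered."""
        slug = None

    class HomePage(Page):
        slug = 'home'

    assert SlugRegistry.get_registry_item('home') is HomePage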
|
import torch
import torch.nn as nn
from collections import deque
from mol_tree import Vocab, MolTree
from nnutils import create_var, GRU
MAX_NB = 8
class JTNNEncoder(nn.Module):
def __init__(self, vocab, hidden_size, embedding=None):
super(JTNNEncoder, self).__init__()
self.hidden_size = hidden_size
self.vocab_size = vocab.size()
self.vocab = vocab
if embedding is None:
self.embedding = nn.Embedding(self.vocab_size, hidden_size)
else:
self.embedding = embedding
self.W_z = nn.Linear(2 * hidden_size, hidden_size)
self.W_r = nn.Linear(hidden_size, hidden_size, bias=False)
self.U_r = nn.Linear(hidden_size, hidden_size)
self.W_h = nn.Linear(2 * hidden_size, hidden_size)
self.W = nn.Linear(2 * hidden_size, hidden_size)
def forward(self, root_batch):
orders = []
for root in root_batch:
order = get_prop_order(root)
orders.append(order)
h = {}
max_depth = max([len(x) for x in orders])
padding = create_var(torch.zeros(self.hidden_size), False)
        for t in range(max_depth):
prop_list = []
for order in orders:
if t < len(order):
prop_list.extend(order[t])
cur_x = []
cur_h_nei = []
for node_x,node_y in prop_list:
x,y = node_x.idx,node_y.idx
cur_x.append(node_x.wid)
h_nei = []
for node_z in node_x.neighbors:
z = node_z.idx
if z == y: continue
h_nei.append(h[(z,x)])
pad_len = MAX_NB - len(h_nei)
h_nei.extend([padding] * pad_len)
cur_h_nei.extend(h_nei)
cur_x = create_var(torch.LongTensor(cur_x))
cur_x = self.embedding(cur_x)
cur_h_nei = torch.cat(cur_h_nei, dim=0).view(-1,MAX_NB,self.hidden_size)
new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)
for i,m in enumerate(prop_list):
x,y = m[0].idx,m[1].idx
h[(x,y)] = new_h[i]
root_vecs = node_aggregate(root_batch, h, self.embedding, self.W)
return h, root_vecs
"""
Helper functions
"""
def get_prop_order(root):
queue = deque([root])
visited = set([root.idx])
root.depth = 0
order1,order2 = [],[]
while len(queue) > 0:
x = queue.popleft()
for y in x.neighbors:
if y.idx not in visited:
queue.append(y)
visited.add(y.idx)
y.depth = x.depth + 1
if y.depth > len(order1):
order1.append([])
order2.append([])
order1[y.depth-1].append( (x,y) )
order2[y.depth-1].append( (y,x) )
order = order2[::-1] + order1
return order
def node_aggregate(nodes, h, embedding, W):
x_idx = []
h_nei = []
hidden_size = embedding.embedding_dim
padding = create_var(torch.zeros(hidden_size), False)
for node_x in nodes:
x_idx.append(node_x.wid)
nei = [ h[(node_y.idx,node_x.idx)] for node_y in node_x.neighbors ]
pad_len = MAX_NB - len(nei)
nei.extend([padding] * pad_len)
h_nei.extend(nei)
h_nei = torch.cat(h_nei, dim=0).view(-1,MAX_NB,hidden_size)
sum_h_nei = h_nei.sum(dim=1)
x_vec = create_var(torch.LongTensor(x_idx))
x_vec = embedding(x_vec)
node_vec = torch.cat([x_vec, sum_h_nei], dim=1)
return nn.ReLU()(W(node_vec))
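
if __name__ == '__main__':
    # A minimal sketch of the propagation order. It assumes tree nodes only
    # need the `idx`, `wid` and `neighbors` attributes that get_prop_order
    # touches; real inputs come from mol_tree.MolTree.
    class _ToyNode(object):
        def __init__(self, idx, wid):
            self.idx, self.wid, self.neighbors = idx, wid, []

    root, child = _ToyNode(0, 5), _ToyNode(1, 7)
    root.neighbors.append(child)
    child.neighbors.append(root)

    # Levels run leaf-to-root first (bottom-up messages), then root-to-leaf:
    # [[(child, root)], [(root, child)]]
    print(get_prop_order(root))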
|
import os
from src.models import _PROCESSED_DATA, _RAW_DATA, _MODEL_DIR
import torch
from torch import nn
import pytorch_lightning as pl
from torch.utils.data import DataLoader, random_split
from torch.nn import functional as F
from torchvision import transforms
from src.libs.utils import load_raw_data
from tests.test_data import test_all_labels_represented
class MNISTData(pl.LightningDataModule):
    def __init__(self, loader_batch_size, normalize_mean, normalize_std):
        super().__init__()
        self.loader_batch_size = loader_batch_size
        self.normalize_mean = normalize_mean
        self.normalize_std = normalize_std
def process_and_save_raw_data(self):
loaded_data = load_raw_data(_RAW_DATA)
norm = transforms.Normalize((self.normalize_mean,), (self.normalize_std,))
tensor_train = norm(torch.from_numpy(loaded_data['images_train']).float())
tensor_train_labels = torch.from_numpy(loaded_data['labels_train'])
tensor_test = norm(torch.from_numpy(loaded_data['images_test']).float())
tensor_test_labels = torch.from_numpy(loaded_data['labels_test'])
torch.save(tensor_train, os.path.join(_PROCESSED_DATA, 'tensor_train.pt'))
torch.save(tensor_train_labels, os.path.join(_PROCESSED_DATA, 'tensor_train_labels.pt'))
torch.save(tensor_test, os.path.join(_PROCESSED_DATA, 'tensor_test.pt'))
torch.save(tensor_test_labels, os.path.join(_PROCESSED_DATA, 'tensor_test_labels.pt'))
    def setup(self, stage=None):
if stage == "fit" or stage is None:
data_path = os.path.join(_PROCESSED_DATA, 'tensor_train.pt')
label_path = os.path.join(_PROCESSED_DATA, 'tensor_train_labels.pt')
if not os.path.exists(data_path):
self.process_and_save_raw_data()
tensor = torch.load(data_path)
labels = torch.load(label_path)
self.trainloader_data = torch.utils.data.TensorDataset(tensor, labels)
if stage == "validate" or stage is None:
data_path = os.path.join(_PROCESSED_DATA, 'tensor_test.pt')
label_path = os.path.join(_PROCESSED_DATA, 'tensor_test_labels.pt')
if not os.path.exists(data_path):
self.process_and_save_raw_data()
tensor = torch.load(data_path)
labels = torch.load(label_path)
self.valloader_data = torch.utils.data.TensorDataset(tensor, labels)
if stage == "test" or stage is None:
data_path = os.path.join(_PROCESSED_DATA, 'tensor_test.pt')
label_path = os.path.join(_PROCESSED_DATA, 'tensor_test_labels.pt')
if not os.path.exists(data_path):
self.process_and_save_raw_data()
tensor = torch.load(data_path)
labels = torch.load(label_path)
self.testloader_data = torch.utils.data.TensorDataset(tensor, labels)
if stage == "predict" or stage is None:
data_path = os.path.join(_PROCESSED_DATA, 'tensor_test.pt')
label_path = os.path.join(_PROCESSED_DATA, 'tensor_test_labels.pt')
if not os.path.exists(data_path):
self.process_and_save_raw_data()
tensor = torch.load(data_path)
labels = torch.load(label_path)
self.predictloader_data = torch.utils.data.TensorDataset(tensor, labels)
def train_dataloader(self):
return DataLoader(self.trainloader_data, batch_size=self.loader_batch_size)
def val_dataloader(self):
return DataLoader(self.valloader_data, batch_size=self.loader_batch_size)
def test_dataloader(self):
return DataLoader(self.testloader_data, batch_size=self.loader_batch_size)
def predict_dataloader(self):
return DataLoader(self.predictloader_data, batch_size=self.loader_batch_size)
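
if __name__ == "__main__":
    # A minimal usage sketch. The normalization constants below are the usual
    # MNIST mean/std and only an assumption about this project's data;
    # `SomeLightningModule` is a hypothetical stand-in for the actual model.
    dm = MNISTData(loader_batch_size=64, normalize_mean=0.1307, normalize_std=0.3081)
    # trainer = pl.Trainer(max_epochs=1)
    # trainer.fit(SomeLightningModule(), datamodule=dm)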
|
#!/usr/bin/env python
import sys
import time
import roslibpy
import rospy
from twisted.internet import reactor
from sensor_msgs.msg import JointState
from rospy_message_converter import message_converter
from robot_arm_dvrk import RobotArmDVRK
"""
Rosbridge node for PSMs.
"""
class RobotArmPSM(RobotArmDVRK):
# Mapping between ROS and rosbridge joint names.
JOINT_DICT = {'outer_yaw': 'yaw_joint',
'outer_pitch': 'pitch_back_joint',
'outer_insertion': 'main_insertion_joint',
'outer_roll': 'tool_roll_joint',
'outer_wrist_pitch': 'tool_pitch_joint',
'outer_wrist_yaw': 'tool_yaw_joint',
'jaw': 'tool_gripper2_joint'}
    # Array of the rosbridge joint names in the order they are published.
JOINT_NAMES_BRIDGE = ['rev_joint','yaw_joint','pitch_back_joint',
'pitch_bottom_joint','pitch_end_joint',
'main_insertion_joint', 'tool_roll_joint',
'tool_pitch_joint','tool_yaw_joint',
'tool_gripper1_joint', 'tool_gripper2_joint',
'pitch_top_joint','pitch_front_joint']
"""Constructor.
:param name: name of the arm
:param ros: ros object
"""
def __init__(self, name, ros):
RobotArmDVRK.__init__(self, name, ros,
self.JOINT_DICT, self.JOINT_NAMES_BRIDGE)
"""Overrided subscription method. The state_jaw_current topic
is needed besides the state_joint_current.
"""
def subscribe_to_topics(self):
RobotArmDVRK.subscribe_to_topics(self)
rospy.Subscriber("/dvrk/" + self.name + "/state_jaw_current", JointState, self.jaw_cb)
"""Callback for the jaw position.
"""
def jaw_cb(self, msg):
self.store_joint_states_from_msg(msg)
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'allswap.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
import time
import array
import math
import audioio
import board
import digitalio
button = digitalio.DigitalInOut(board.A1)
button.switch_to_input(pull=digitalio.Pull.UP)
tone_volume = 0.1 # Increase this to increase the volume of the tone.
frequency = 440 # Set this to the Hz of the tone you want to generate.
length = 8000 // frequency
sine_wave = array.array("H", [0] * length)
for i in range(length):
    sine_wave[i] = int((1 + math.sin(math.pi * 2 * i / length)) * tone_volume * (2 ** 15 - 1))
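# With frequency = 440 and RawSample's default 8000 Hz sample rate, length is
# 8000 // 440 = 18 samples, i.e. the buffer holds one full sine period.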
audio = audioio.AudioOut(board.A0)
sine_wave_sample = audioio.RawSample(sine_wave)
while True:
if not button.value:
audio.play(sine_wave_sample, loop=True)
time.sleep(1)
audio.stop()
|
#!/usr/bin/env python3
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create e2e test definitions.
Usage example:
In $GOPATH/src/k8s.io/test-infra,
$ bazel run //releng:generate_tests -- \
--yaml-config-path=releng/test_config.yaml \
"""
import argparse
import hashlib
import os
import ruamel.yaml
yaml = ruamel.yaml.YAML(typ='rt')
yaml.width = float("inf")
PROW_CONFIG_TEMPLATE = """
tags:
- generated # AUTO-GENERATED by releng/generate_tests.py - DO NOT EDIT!
interval:
cron:
labels:
preset-service-account: "true"
preset-k8s-ssh: "true"
name:
spec:
containers:
- args:
env:
image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20211005-ca067fbd9f-master
resources:
requests:
cpu: 1000m
memory: 3Gi
limits:
cpu: 1000m
memory: 3Gi
"""
E2E_TESTGRID_CONFIG_TEMPLATE = """
name:
gcs_prefix:
column_header:
- configuration_value: node_os_image
- configuration_value: master_os_image
- configuration_value: Commit
- configuration_value: infra-commit
"""
GCS_LOG_PREFIX = "kubernetes-jenkins/logs/"
COMMENT = 'AUTO-GENERATED by releng/generate_tests.py - DO NOT EDIT.'
def get_sha1_hash(data):
"""Returns the SHA1 hash of the specified data."""
sha1_hash = hashlib.sha1()
sha1_hash.update(data.encode('utf-8'))
return sha1_hash.hexdigest()
def substitute(job_name, lines):
"""Replace '${job_name_hash}' in lines with the SHA1 hash of job_name."""
return [line.replace('${job_name_hash}', get_sha1_hash(job_name)[:10]) \
for line in lines]
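# For example (job name and template line are illustrative only):
#   substitute('ci-foo-e2e', ['--gcs=gs://bucket/${job_name_hash}'])
# returns ['--gcs=gs://bucket/<first 10 hex chars of sha1("ci-foo-e2e")>'].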
def get_args(job_name, field):
"""Returns a list of args for the given field."""
if not field:
return []
return substitute(job_name, field.get('args', []))
def write_prow_configs_file(output_file, job_defs):
"""Writes the Prow configurations into output_file."""
print(f'writing prow configuration to: {output_file}')
with open(output_file, 'w') as fp:
yaml.dump(job_defs, fp)
def write_testgrid_config_file(output_file, testgrid_config):
"""Writes the TestGrid test group configurations into output_file."""
print(f'writing testgrid configuration to: {output_file}')
with open(output_file, 'w') as fp:
fp.write('# ' + COMMENT + '\n\n')
yaml.dump(testgrid_config, fp)
def apply_job_overrides(envs_or_args, job_envs_or_args):
'''Applies the envs or args overrides defined in the job level'''
original_envs_or_args = envs_or_args[:]
for job_env_or_arg in job_envs_or_args:
name = job_env_or_arg.split('=', 1)[0]
env_or_arg = next(
(x for x in original_envs_or_args if (x.strip().startswith('%s=' % name) or
x.strip() == name)), None)
if env_or_arg:
envs_or_args.remove(env_or_arg)
envs_or_args.append(job_env_or_arg)
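# For example (values are illustrative only): with envs_or_args of
# ['--timeout=60', '--provider=gce'] and job overrides of ['--timeout=90'],
# the existing '--timeout=60' entry is removed and '--timeout=90' appended,
# leaving ['--provider=gce', '--timeout=90'].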
class E2ENodeTest:
def __init__(self, job_name, job, config):
self.job_name = job_name
self.job = job
self.common = config['nodeCommon']
self.images = config['nodeImages']
self.k8s_versions = config['nodeK8sVersions']
self.test_suites = config['nodeTestSuites']
def __get_job_def(self, args):
"""Returns the job definition from the given args."""
return {
'scenario': 'kubernetes_e2e',
'args': args,
'sigOwners': self.job.get('sigOwners') or ['UNNOWN'],
# Indicates that this job definition is auto-generated.
'tags': ['generated'],
'_comment': COMMENT,
}
def __get_prow_config(self, test_suite, k8s_version):
"""Returns the Prow config for the job from the given fields."""
prow_config = yaml.load(PROW_CONFIG_TEMPLATE)
prow_config['name'] = self.job_name
# use cluster from test_suite, or job, or not at all
if 'cluster' in test_suite:
prow_config['cluster'] = test_suite['cluster']
elif 'cluster' in self.job:
prow_config['cluster'] = self.job['cluster']
# use resources from test_suite, or job, or default
if 'resources' in test_suite:
prow_config['resources'] = test_suite['resources']
elif 'resources' in self.job:
prow_config['resources'] = self.job['resources']
# pull interval or cron from job
if 'interval' in self.job:
del prow_config['cron']
prow_config['interval'] = self.job['interval']
        elif 'cron' in self.job:
            del prow_config['interval']
            prow_config['cron'] = self.job['cron']
else:
raise Exception("no interval or cron definition found")
        # Assumes that the value of --timeout is in minutes.
timeout = int(next(
x[10:-1] for x in test_suite['args'] if (
x.startswith('--timeout='))))
container = prow_config['spec']['containers'][0]
if not container['args']:
container['args'] = []
if not container['env']:
container['env'] = []
# Prow timeout = job timeout + 20min
container['args'].append('--timeout=%d' % (timeout + 20))
container['args'].extend(k8s_version.get('args', []))
container['args'].append('--root=/go/src')
container['env'].extend([{'name':'GOPATH', 'value': '/go'}])
# Specify the appropriate kubekins-e2e image. This allows us to use a
# specific image (containing a particular Go version) to build and
# trigger the node e2e test to avoid issues like
# https://github.com/kubernetes/kubernetes/issues/43534.
if k8s_version.get('prowImage', None):
container['image'] = k8s_version['prowImage']
return prow_config
def generate(self):
'''Returns the job and the Prow configurations for this test.'''
print(f'generating e2enode job: {self.job_name}')
fields = self.job_name.split('-')
if len(fields) != 6:
raise ValueError('Expected 6 fields in job name', self.job_name)
image = self.images[fields[3]]
k8s_version = self.k8s_versions[fields[4][3:]]
test_suite = self.test_suites[fields[5]]
# envs are disallowed in node e2e tests.
if 'envs' in self.common or 'envs' in image or 'envs' in test_suite:
raise ValueError(
'envs are disallowed in node e2e test', self.job_name)
# Generates args.
args = []
args.extend(get_args(self.job_name, self.common))
args.extend(get_args(self.job_name, image))
args.extend(get_args(self.job_name, test_suite))
# Generates job config.
job_config = self.__get_job_def(args)
# Generates prow config.
prow_config = self.__get_prow_config(test_suite, k8s_version)
# Combine --node-args
node_args = []
job_args = []
for arg in job_config['args']:
if '--node-args=' in arg:
node_args.append(arg.split('=', 1)[1])
else:
job_args.append(arg)
if node_args:
flag = '--node-args='
for node_arg in node_args:
flag += '%s ' % node_arg
job_args.append(flag.strip())
job_config['args'] = job_args
if image.get('testgrid_prefix') is not None:
dashboard = '%s-%s-%s' % (image['testgrid_prefix'], fields[3],
fields[4])
annotations = prow_config.setdefault('annotations', {})
annotations['testgrid-dashboards'] = dashboard
tab_name = '%s-%s-%s' % (fields[3], fields[4], fields[5])
annotations['testgrid-tab-name'] = tab_name
return job_config, prow_config, None
class E2ETest:
def __init__(self, output_dir, job_name, job, config):
self.env_filename = os.path.join(output_dir, '%s.env' % job_name)
self.job_name = job_name
self.job = job
self.common = config['common']
self.cloud_providers = config['cloudProviders']
self.images = config['images']
self.k8s_versions = config['k8sVersions']
self.test_suites = config['testSuites']
def __get_job_def(self, args):
"""Returns the job definition from the given args."""
return {
'scenario': 'kubernetes_e2e',
'args': args,
'sigOwners': self.job.get('sigOwners') or ['UNNOWN'],
# Indicates that this job definition is auto-generated.
'tags': ['generated'],
'_comment': COMMENT,
}
def __get_prow_config(self, test_suite):
"""Returns the Prow config for the e2e job from the given fields."""
prow_config = yaml.load(PROW_CONFIG_TEMPLATE)
prow_config['name'] = self.job_name
# use cluster from test_suite, or job, or not at all
if 'cluster' in test_suite:
prow_config['cluster'] = test_suite['cluster']
elif 'cluster' in self.job:
prow_config['cluster'] = self.job['cluster']
# use resources from test_suite, or job, or default
if 'resources' in test_suite:
prow_config['resources'] = test_suite['resources']
elif 'resources' in self.job:
prow_config['resources'] = self.job['resources']
if 'interval' in self.job:
del prow_config['cron']
prow_config['interval'] = self.job['interval']
elif 'cron' in self.job:
del prow_config['interval']
prow_config['cron'] = self.job['cron']
else:
raise Exception("no interval or cron definition found")
        # Assumes that the value of --timeout is in minutes.
timeout = int(next(
x[10:-1] for x in test_suite['args'] if (
x.startswith('--timeout='))))
container = prow_config['spec']['containers'][0]
if not container['args']:
container['args'] = []
container['args'].append('--bare')
# Prow timeout = job timeout + 20min
container['args'].append('--timeout=%d' % (timeout + 20))
return prow_config
def __get_testgrid_config(self):
tg_config = yaml.load(E2E_TESTGRID_CONFIG_TEMPLATE)
tg_config['name'] = self.job_name
tg_config['gcs_prefix'] = GCS_LOG_PREFIX + self.job_name
return tg_config
def initialize_dashboards_with_release_blocking_info(self, version):
dashboards = []
if self.job.get('releaseBlocking'):
dashboards.append('sig-release-%s-blocking' % version)
elif self.job.get('releaseInforming'):
dashboards.append('sig-release-%s-informing' % version)
else:
dashboards.append('sig-release-generated')
return dashboards
def generate(self):
'''Returns the job and the Prow configurations for this test.'''
print(f'generating e2e job: {self.job_name}')
fields = self.job_name.split('-')
if len(fields) != 7:
raise ValueError('Expected 7 fields in job name', self.job_name)
cloud_provider = self.cloud_providers[fields[3]]
image = self.images[fields[4]]
k8s_version = self.k8s_versions[fields[5][3:]]
test_suite = self.test_suites[fields[6]]
# Generates args.
args = []
args.extend(get_args(self.job_name, self.common))
args.extend(get_args(self.job_name, cloud_provider))
args.extend(get_args(self.job_name, image))
args.extend(get_args(self.job_name, k8s_version))
args.extend(get_args(self.job_name, test_suite))
# Generates job config.
job_config = self.__get_job_def(args)
# Generates Prow config.
prow_config = self.__get_prow_config(test_suite)
tg_config = self.__get_testgrid_config()
annotations = prow_config.setdefault('annotations', {})
tab_name = '%s-%s-%s-%s' % (fields[3], fields[4], fields[5], fields[6])
annotations['testgrid-tab-name'] = tab_name
dashboards = self.initialize_dashboards_with_release_blocking_info(k8s_version['version'])
if image.get('testgrid_prefix') is not None:
dashboard = '%s-%s-%s' % (image['testgrid_prefix'], fields[4],
fields[5])
dashboards.append(dashboard)
annotations['testgrid-dashboards'] = ', '.join(dashboards)
if 'testgridNumFailuresToAlert' in self.job:
annotations['testgrid-num-failures-to-alert'] = ('%s' %
self.job['testgridNumFailuresToAlert'])
return job_config, prow_config, tg_config
def for_each_job(output_dir, job_name, job, yaml_config):
"""Returns the job config and the Prow config for one test job."""
fields = job_name.split('-')
if len(fields) < 3:
raise ValueError('Expected at least 3 fields in job name', job_name)
job_type = fields[2]
# Generates configurations.
if job_type == 'e2e':
generator = E2ETest(output_dir, job_name, job, yaml_config)
elif job_type == 'e2enode':
generator = E2ENodeTest(job_name, job, yaml_config)
else:
        raise ValueError(f'Job {job_name} has unexpected job type', job_type)
job_config, prow_config, testgrid_config = generator.generate()
# Applies job-level overrides.
apply_job_overrides(job_config['args'], get_args(job_name, job))
# merge job_config into prow_config
args = prow_config['spec']['containers'][0]['args']
args.append('--scenario=' + job_config['scenario'])
args.append('--')
args.extend(job_config['args'])
return prow_config, testgrid_config
def main(yaml_config_path, output_dir, testgrid_output_path):
"""Creates test job definitions.
Converts the test configurations in yaml_config_path to the job definitions
in output_dir/generated.yaml.
"""
# TODO(yguo0905): Validate the configurations from yaml_config_path.
with open(yaml_config_path) as fp:
yaml_config = yaml.load(fp)
output_config = {}
output_config['periodics'] = []
testgrid_config = {'test_groups': []}
job_names = sorted(yaml_config['jobs'].keys())
for job_name in job_names:
# Get the envs and args for each job defined under "jobs".
prow, testgrid = for_each_job(
output_dir, job_name, yaml_config['jobs'][job_name], yaml_config)
output_config['periodics'].append(prow)
if testgrid is not None:
testgrid_config['test_groups'].append(testgrid)
# Write the job definitions to --output-dir/generated.yaml
    write_prow_configs_file(os.path.join(output_dir, 'generated.yaml'), output_config)
write_testgrid_config_file(testgrid_output_path, testgrid_config)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(
description='Create test definitions from the given yaml config')
PARSER.add_argument('--yaml-config-path', help='Path to config.yaml')
PARSER.add_argument(
'--output-dir',
help='Prowjob config output dir',
default='config/jobs/kubernetes/generated/')
PARSER.add_argument(
'--testgrid-output-path',
help='Path to testgrid output file',
default='config/testgrids/generated-test-config.yaml')
ARGS = PARSER.parse_args()
main(
ARGS.yaml_config_path,
ARGS.output_dir,
ARGS.testgrid_output_path)
|
from setuptools import setup, find_packages
def readme():
with open('README.md') as f:
return f.read()
setup(
name='flask-expects-json',
version='1.4.0',
description='Decorator for REST endpoints in flask. Validate JSON request data.',
long_description=readme(),
long_description_content_type="text/markdown",
url='https://github.com/fischerfredl/flask-expects-json',
author='Alfred Melch',
author_email='dev@melch.pro',
license='MIT',
classifiers=[
'Framework :: Flask',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
keywords=['flask', 'json', 'validation', 'schema', 'jsonschema'],
packages=find_packages(exclude=['tests.*', 'tests']),
install_requires=[
'flask>=1.0.2',
'jsonschema>=3.0.1'
],
test_suite='tests.test_suite'
)
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import acl
from atlas_utils.utils import *
class AclResource(object):
def __init__(self, device_id=0):
self.device_id = device_id
self.context = None
self.stream = None
self.run_mode = None
def init(self):
print("[Sample] init resource stage:")
        ret = acl.init()
        check_ret("acl.init", ret)
ret = acl.rt.set_device(self.device_id)
check_ret("acl.rt.set_device", ret)
self.context, ret = acl.rt.create_context(self.device_id)
check_ret("acl.rt.create_context", ret)
self.stream, ret = acl.rt.create_stream()
check_ret("acl.rt.create_stream", ret)
self.run_mode, ret = acl.rt.get_run_mode()
check_ret("acl.rt.get_run_mode", ret)
print("Init resource success")
def __del__(self):
if self.stream:
acl.rt.destroy_stream(self.stream)
if self.context:
acl.rt.destroy_context(self.context)
acl.rt.reset_device(self.device_id)
acl.finalize()
print("Release acl resource success")
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Project information -----------------------------------------------------
project = 'bioutils'
copyright = '2019, bioutils Contributors'
author = 'bioutils Contributors'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
import datetime as dt
import time
import random
from termcolor import colored
def random_color_print(text='hello world'):
color_str = 'red, green, yellow, blue, magenta, cyan, white'
color_lis = color_str.split(', ')
color_dic = {i:color_lis[i] for i in range(len(color_lis))}
    seed = int(time.time())
    random.seed(seed)
    num = random.randint(0, len(color_dic) - 1)
    print(colored(text, color_dic[num]))
for i in range(10):
random_color_print()
time.sleep(1)
|
# coding: utf-8
"""
Hydrogen Nucleus API
The Hydrogen Nucleus API # noqa: E501
OpenAPI spec version: 1.9.4
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from nucleus_api.configuration import Configuration
class OrderTrack(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'commission': 'float',
'create_date': 'datetime',
'_date': 'date',
'external_track_id': 'str',
'fee': 'float',
'id': 'str',
'metadata': 'dict(str, str)',
'order_id': 'str',
'order_status_id': 'str',
'price': 'float',
'quantity': 'float',
'secondary_id': 'str',
'update_date': 'datetime'
}
attribute_map = {
'commission': 'commission',
'create_date': 'create_date',
'_date': 'date',
'external_track_id': 'external_track_id',
'fee': 'fee',
'id': 'id',
'metadata': 'metadata',
'order_id': 'order_id',
'order_status_id': 'order_status_id',
'price': 'price',
'quantity': 'quantity',
'secondary_id': 'secondary_id',
'update_date': 'update_date'
}
def __init__(self, commission=None, create_date=None, _date=None, external_track_id=None, fee=None, id=None, metadata=None, order_id=None, order_status_id=None, price=None, quantity=None, secondary_id=None, update_date=None, _configuration=None): # noqa: E501
"""OrderTrack - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._commission = None
self._create_date = None
self.__date = None
self._external_track_id = None
self._fee = None
self._id = None
self._metadata = None
self._order_id = None
self._order_status_id = None
self._price = None
self._quantity = None
self._secondary_id = None
self._update_date = None
self.discriminator = None
if commission is not None:
self.commission = commission
if create_date is not None:
self.create_date = create_date
self._date = _date
if external_track_id is not None:
self.external_track_id = external_track_id
if fee is not None:
self.fee = fee
if id is not None:
self.id = id
if metadata is not None:
self.metadata = metadata
self.order_id = order_id
self.order_status_id = order_status_id
if price is not None:
self.price = price
if quantity is not None:
self.quantity = quantity
if secondary_id is not None:
self.secondary_id = secondary_id
if update_date is not None:
self.update_date = update_date
@property
def commission(self):
"""Gets the commission of this OrderTrack. # noqa: E501
commission # noqa: E501
:return: The commission of this OrderTrack. # noqa: E501
:rtype: float
"""
return self._commission
@commission.setter
def commission(self, commission):
"""Sets the commission of this OrderTrack.
commission # noqa: E501
:param commission: The commission of this OrderTrack. # noqa: E501
:type: float
"""
self._commission = commission
@property
def create_date(self):
"""Gets the create_date of this OrderTrack. # noqa: E501
:return: The create_date of this OrderTrack. # noqa: E501
:rtype: datetime
"""
return self._create_date
@create_date.setter
def create_date(self, create_date):
"""Sets the create_date of this OrderTrack.
:param create_date: The create_date of this OrderTrack. # noqa: E501
:type: datetime
"""
self._create_date = create_date
@property
def _date(self):
"""Gets the _date of this OrderTrack. # noqa: E501
date # noqa: E501
:return: The _date of this OrderTrack. # noqa: E501
:rtype: date
"""
return self.__date
@_date.setter
def _date(self, _date):
"""Sets the _date of this OrderTrack.
date # noqa: E501
:param _date: The _date of this OrderTrack. # noqa: E501
:type: date
"""
if self._configuration.client_side_validation and _date is None:
raise ValueError("Invalid value for `_date`, must not be `None`") # noqa: E501
self.__date = _date
@property
def external_track_id(self):
"""Gets the external_track_id of this OrderTrack. # noqa: E501
externalTrackId # noqa: E501
:return: The external_track_id of this OrderTrack. # noqa: E501
:rtype: str
"""
return self._external_track_id
@external_track_id.setter
def external_track_id(self, external_track_id):
"""Sets the external_track_id of this OrderTrack.
externalTrackId # noqa: E501
:param external_track_id: The external_track_id of this OrderTrack. # noqa: E501
:type: str
"""
self._external_track_id = external_track_id
@property
def fee(self):
"""Gets the fee of this OrderTrack. # noqa: E501
fee # noqa: E501
:return: The fee of this OrderTrack. # noqa: E501
:rtype: float
"""
return self._fee
@fee.setter
def fee(self, fee):
"""Sets the fee of this OrderTrack.
fee # noqa: E501
:param fee: The fee of this OrderTrack. # noqa: E501
:type: float
"""
self._fee = fee
@property
def id(self):
"""Gets the id of this OrderTrack. # noqa: E501
:return: The id of this OrderTrack. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this OrderTrack.
:param id: The id of this OrderTrack. # noqa: E501
:type: str
"""
self._id = id
@property
def metadata(self):
"""Gets the metadata of this OrderTrack. # noqa: E501
metadata # noqa: E501
:return: The metadata of this OrderTrack. # noqa: E501
:rtype: dict(str, str)
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this OrderTrack.
metadata # noqa: E501
:param metadata: The metadata of this OrderTrack. # noqa: E501
:type: dict(str, str)
"""
self._metadata = metadata
@property
def order_id(self):
"""Gets the order_id of this OrderTrack. # noqa: E501
orderId # noqa: E501
:return: The order_id of this OrderTrack. # noqa: E501
:rtype: str
"""
return self._order_id
@order_id.setter
def order_id(self, order_id):
"""Sets the order_id of this OrderTrack.
orderId # noqa: E501
:param order_id: The order_id of this OrderTrack. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and order_id is None:
raise ValueError("Invalid value for `order_id`, must not be `None`") # noqa: E501
self._order_id = order_id
@property
def order_status_id(self):
"""Gets the order_status_id of this OrderTrack. # noqa: E501
orderStatusId # noqa: E501
:return: The order_status_id of this OrderTrack. # noqa: E501
:rtype: str
"""
return self._order_status_id
@order_status_id.setter
def order_status_id(self, order_status_id):
"""Sets the order_status_id of this OrderTrack.
orderStatusId # noqa: E501
:param order_status_id: The order_status_id of this OrderTrack. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and order_status_id is None:
raise ValueError("Invalid value for `order_status_id`, must not be `None`") # noqa: E501
self._order_status_id = order_status_id
@property
def price(self):
"""Gets the price of this OrderTrack. # noqa: E501
price # noqa: E501
:return: The price of this OrderTrack. # noqa: E501
:rtype: float
"""
return self._price
@price.setter
def price(self, price):
"""Sets the price of this OrderTrack.
price # noqa: E501
:param price: The price of this OrderTrack. # noqa: E501
:type: float
"""
self._price = price
@property
def quantity(self):
"""Gets the quantity of this OrderTrack. # noqa: E501
quantity # noqa: E501
:return: The quantity of this OrderTrack. # noqa: E501
:rtype: float
"""
return self._quantity
@quantity.setter
def quantity(self, quantity):
"""Sets the quantity of this OrderTrack.
quantity # noqa: E501
:param quantity: The quantity of this OrderTrack. # noqa: E501
:type: float
"""
self._quantity = quantity
@property
def secondary_id(self):
"""Gets the secondary_id of this OrderTrack. # noqa: E501
:return: The secondary_id of this OrderTrack. # noqa: E501
:rtype: str
"""
return self._secondary_id
@secondary_id.setter
def secondary_id(self, secondary_id):
"""Sets the secondary_id of this OrderTrack.
:param secondary_id: The secondary_id of this OrderTrack. # noqa: E501
:type: str
"""
self._secondary_id = secondary_id
@property
def update_date(self):
"""Gets the update_date of this OrderTrack. # noqa: E501
:return: The update_date of this OrderTrack. # noqa: E501
:rtype: datetime
"""
return self._update_date
@update_date.setter
def update_date(self, update_date):
"""Sets the update_date of this OrderTrack.
:param update_date: The update_date of this OrderTrack. # noqa: E501
:type: datetime
"""
self._update_date = update_date
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(OrderTrack, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OrderTrack):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, OrderTrack):
return True
return self.to_dict() != other.to_dict()
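
# A minimal usage sketch (field values are illustrative only):
#     import datetime
#     track = OrderTrack(_date=datetime.date(2021, 1, 4), order_id='order-1',
#                        order_status_id='status-1', price=99.5, quantity=10.0)
#     print(track)  # __repr__ -> to_str() -> pprint of to_dict()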
|
from django.core.exceptions import PermissionDenied
from django.test import TestCase
from django_marina.db import DisableMigrations
from .models import ProtectedModel
class ProtectedModelMixinTestCase(TestCase):
def test_update_protected(self):
protected = ProtectedModel(name="protected", is_update_protected=True)
protected.save()
with self.assertRaises(PermissionDenied):
protected.save()
def test_update_unprotected(self):
unprotected = ProtectedModel(name="protected", is_update_protected=False)
unprotected.save()
unprotected.name = "changed"
unprotected.save()
self.assertEqual(unprotected.name, "changed")
def test_get_protected_against_update_message(self):
protected = ProtectedModel(name="protected", is_update_protected=True)
protected.save()
message = protected.get_update_protection_message()
self.assertEqual(message, "This object has update protection.")
def test_delete_protected(self):
protected = ProtectedModel(name="protected", is_delete_protected=True)
protected.save()
with self.assertRaises(PermissionDenied):
protected.delete()
def test_delete_unprotected(self):
unprotected = ProtectedModel(name="protected", is_delete_protected=False)
unprotected.save()
unprotected.delete()
self.assertIsNone(ProtectedModel.objects.filter(pk=unprotected.pk).first())
def test_get_protected_against_delete_message(self):
protected = ProtectedModel(name="protected", is_delete_protected=True)
protected.save()
message = protected.get_delete_protection_message()
self.assertEqual(message, "This object has delete protection.")
class DisableMigrationsTestCase(TestCase):
def test_disable_migrations(self):
instance = DisableMigrations()
self.assertTrue("anything" in instance)
self.assertIsNone(instance["anything"])
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from io import BytesIO
import six
from ._quick_query_helper import DataLakeFileQueryReader
from ._shared.base_client import parse_connection_str
from ._shared.request_handlers import get_length, read_length
from ._shared.response_handlers import return_response_headers
from ._shared.uploads import IterStreamer
from ._upload_helper import upload_datalake_file
from ._generated.models import StorageErrorException
from ._download import StorageStreamDownloader
from ._path_client import PathClient
from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions, add_metadata_headers
from ._deserialize import process_storage_error
from ._models import FileProperties, DataLakeFileQueryError
class DataLakeFileClient(PathClient):
"""A client to interact with the DataLake file, even if the file may not yet exist.
:ivar str url:
The full endpoint URL to the file system, including SAS token if used.
:ivar str primary_endpoint:
The full primary endpoint URL.
:ivar str primary_hostname:
The hostname of the primary endpoint.
:param str account_url:
The URI to the storage account.
:param file_system_name:
The file system for the directory or files.
:type file_system_name: str
:param file_path:
        The whole file path, used to interact with a specific file,
        e.g. "{directory}/{subdirectory}/{file}".
:type file_path: str
:param credential:
The credentials with which to authenticate. This is optional if the
        account URL already has a SAS token. The value can be a SAS token string, an account
shared access key, or an instance of a TokenCredentials class from azure.identity.
If the URL already has a SAS token, specifying an explicit credential will take priority.
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_instantiate_client.py
:start-after: [START instantiate_file_client_from_conn_str]
:end-before: [END instantiate_file_client_from_conn_str]
:language: python
:dedent: 4
:caption: Creating the DataLakeServiceClient from connection string.
"""
def __init__(
self, account_url, # type: str
file_system_name, # type: str
file_path, # type: str
credential=None, # type: Optional[Any]
**kwargs # type: Any
):
# type: (...) -> None
super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path,
credential=credential, **kwargs)
@classmethod
def from_connection_string(
cls, conn_str, # type: str
file_system_name, # type: str
file_path, # type: str
credential=None, # type: Optional[Any]
**kwargs # type: Any
): # type: (...) -> DataLakeFileClient
"""
Create DataLakeFileClient from a Connection String.
:param str conn_str:
A connection string to an Azure Storage account.
:param file_system_name: The name of file system to interact with.
:type file_system_name: str
        :param file_path:
            The whole file path, used to interact with a specific file,
            e.g. "{directory}/{subdirectory}/{file}".
        :type file_path: str
:param credential:
The credentials with which to authenticate. This is optional if the
account URL already has a SAS token, or the connection string already has shared
            access key values. The value can be a SAS token string, an account shared access
key, or an instance of a TokenCredentials class from azure.identity.
Credentials provided here will take precedence over those in the connection string.
        :return: A DataLakeFileClient.
        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
"""
account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
return cls(
account_url, file_system_name=file_system_name, file_path=file_path,
credential=credential, **kwargs)
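    # A minimal usage sketch (the connection string and paths are illustrative only):
    #     client = DataLakeFileClient.from_connection_string(
    #         "<connection string>", file_system_name="my-fs",
    #         file_path="dir/sub/file.txt")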
def create_file(self, content_settings=None, # type: Optional[ContentSettings]
metadata=None, # type: Optional[Dict[str, str]]
**kwargs):
# type: (...) -> Dict[str, Union[str, datetime]]
"""
Create a new file.
:param ~azure.storage.filedatalake.ContentSettings content_settings:
ContentSettings object used to set path properties.
:param metadata:
Name-value pairs associated with the file as metadata.
:type metadata: dict(str, str)
:keyword lease:
Required if the file has an active lease. Value can be a DataLakeLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:keyword str umask:
Optional and only valid if Hierarchical Namespace is enabled for the account.
When creating a file or directory and the parent folder does not have a default ACL,
the umask restricts the permissions of the file or directory to be created.
The resulting permission is given by p & ^u, where p is the permission and u is the umask.
For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
The umask must be specified in 4-digit octal notation (e.g. 0766).
:keyword str permissions:
Optional and only valid if Hierarchical Namespace
is enabled for the account. Sets POSIX access permissions for the file
owner, the file owning group, and others. Each class may be granted
read, write, or execute permission. The sticky bit is also supported.
Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
supported.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:return: response dict (Etag and last modified).
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_upload_download.py
:start-after: [START create_file]
:end-before: [END create_file]
:language: python
:dedent: 4
:caption: Create file.
"""
return self._create('file', content_settings=content_settings, metadata=metadata, **kwargs)
def delete_file(self, **kwargs):
# type: (...) -> None
"""
Marks the specified file for deletion.
:keyword lease:
Required if the file has an active lease. Value can be a LeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:return: None
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_upload_download.py
:start-after: [START delete_file]
:end-before: [END delete_file]
:language: python
:dedent: 4
:caption: Delete file.
"""
return self._delete(**kwargs)
def get_file_properties(self, **kwargs):
# type: (**Any) -> FileProperties
"""Returns all user-defined metadata, standard HTTP properties, and
system properties for the file. It does not return the content of the file.
:keyword lease:
Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
or the lease ID as a string.
:type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: FileProperties
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_upload_download.py
:start-after: [START get_file_properties]
:end-before: [END get_file_properties]
:language: python
:dedent: 4
:caption: Getting the properties for a file.
"""
blob_properties = self._get_path_properties(**kwargs)
return FileProperties._from_blob_properties(blob_properties) # pylint: disable=protected-access
def _upload_options( # pylint:disable=too-many-statements
self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
length=None, # type: Optional[int]
**kwargs
):
# type: (...) -> Dict[str, Any]
encoding = kwargs.pop('encoding', 'UTF-8')
if isinstance(data, six.text_type):
data = data.encode(encoding) # type: ignore
if length is None:
length = get_length(data)
if isinstance(data, bytes):
data = data[:length]
if isinstance(data, bytes):
stream = BytesIO(data)
elif hasattr(data, 'read'):
stream = data
elif hasattr(data, '__iter__'):
stream = IterStreamer(data, encoding=encoding)
else:
raise TypeError("Unsupported data type: {}".format(type(data)))
validate_content = kwargs.pop('validate_content', False)
content_settings = kwargs.pop('content_settings', None)
metadata = kwargs.pop('metadata', None)
max_concurrency = kwargs.pop('max_concurrency', 1)
kwargs['properties'] = add_metadata_headers(metadata)
kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None))
kwargs['modified_access_conditions'] = get_mod_conditions(kwargs)
if content_settings:
kwargs['path_http_headers'] = get_path_http_headers(content_settings)
kwargs['stream'] = stream
kwargs['length'] = length
kwargs['validate_content'] = validate_content
kwargs['max_concurrency'] = max_concurrency
kwargs['client'] = self._client.path
return kwargs
def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
length=None, # type: Optional[int]
overwrite=False, # type: Optional[bool]
**kwargs):
# type: (...) -> Dict[str, Any]
"""
Upload data to a file.
:param data: Content to be uploaded to file
:param int length: Size of the data in bytes.
:param bool overwrite: to overwrite an existing file or not.
:keyword ~azure.storage.filedatalake.ContentSettings content_settings:
ContentSettings object used to set path properties.
:keyword metadata:
Name-value pairs associated with the blob as metadata.
:paramtype metadata: dict(str, str)
:keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
Required if the blob has an active lease. Value can be a DataLakeLeaseClient object
or the lease ID as a string.
:keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
When creating a file or directory and the parent folder does not have a default ACL,
the umask restricts the permissions of the file or directory to be created.
The resulting permission is given by p & ^u, where p is the permission and u is the umask.
For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
The umask must be specified in 4-digit octal notation (e.g. 0766).
:keyword str permissions: Optional and only valid if Hierarchical Namespace
is enabled for the account. Sets POSIX access permissions for the file
owner, the file owning group, and others. Each class may be granted
read, write, or execute permission. The sticky bit is also supported.
Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
supported.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:keyword int chunk_size:
The maximum chunk size for uploading a file in chunks.
Defaults to 100*1024*1024, or 100MB.
:return: response dict (Etag and last modified).
"""
options = self._upload_options(
data,
length=length,
overwrite=overwrite,
**kwargs)
return upload_datalake_file(**options)
@staticmethod
def _append_data_options(data, offset, length=None, **kwargs):
        # type: (Union[AnyStr, Iterable[AnyStr], IO[AnyStr]], int, Optional[int], **Any) -> Dict[str, Any]
if isinstance(data, six.text_type):
data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore
if length is None:
length = get_length(data)
if length is None:
length, data = read_length(data)
if isinstance(data, bytes):
data = data[:length]
access_conditions = get_access_conditions(kwargs.pop('lease', None))
options = {
'body': data,
'position': offset,
'content_length': length,
'lease_access_conditions': access_conditions,
'validate_content': kwargs.pop('validate_content', False),
'timeout': kwargs.pop('timeout', None),
'cls': return_response_headers}
options.update(kwargs)
return options
def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
offset, # type: int
length=None, # type: Optional[int]
**kwargs):
# type: (...) -> Dict[str, Union[str, datetime, int]]
"""Append data to the file.
:param data: Content to be appended to file
:param offset: start position of the data to be appended to.
:param length: Size of the data in bytes.
:keyword bool validate_content:
If true, calculates an MD5 hash of the block content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
file.
:keyword lease:
Required if the file has an active lease. Value can be a DataLakeLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:return: dict of the response header
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_upload_download.py
:start-after: [START append_data]
:end-before: [END append_data]
:language: python
:dedent: 4
:caption: Append data to the file.
"""
options = self._append_data_options(
data,
offset,
length=length,
**kwargs)
try:
return self._client.path.append_data(**options)
except StorageErrorException as error:
process_storage_error(error)
@staticmethod
def _flush_data_options(offset, content_settings=None, retain_uncommitted_data=False, **kwargs):
        # type: (int, Optional[ContentSettings], Optional[bool], **Any) -> Dict[str, Any]
access_conditions = get_access_conditions(kwargs.pop('lease', None))
mod_conditions = get_mod_conditions(kwargs)
path_http_headers = None
if content_settings:
path_http_headers = get_path_http_headers(content_settings)
options = {
'position': offset,
'content_length': 0,
'path_http_headers': path_http_headers,
'retain_uncommitted_data': retain_uncommitted_data,
'close': kwargs.pop('close', False),
'lease_access_conditions': access_conditions,
'modified_access_conditions': mod_conditions,
'timeout': kwargs.pop('timeout', None),
'cls': return_response_headers}
options.update(kwargs)
return options
def flush_data(self, offset, # type: int
retain_uncommitted_data=False, # type: Optional[bool]
**kwargs):
# type: (...) -> Dict[str, Union[str, datetime]]
""" Commit the previous appended data.
:param offset: offset is equal to the length of the file after commit the
previous appended data.
:param bool retain_uncommitted_data: Valid only for flush operations. If
"true", uncommitted data is retained after the flush operation
completes; otherwise, the uncommitted data is deleted after the flush
operation. The default is false. Data at offsets less than the
specified position are written to the file when flush succeeds, but
this optional parameter allows data after the flush position to be
retained for a future flush operation.
:keyword ~azure.storage.filedatalake.ContentSettings content_settings:
ContentSettings object used to set path properties.
:keyword bool close: Azure Storage Events allow applications to receive
notifications when files change. When Azure Storage Events are
enabled, a file changed event is raised. This event has a property
indicating whether this is the final change to distinguish the
difference between an intermediate flush to a file stream and the
final close of a file stream. The close query parameter is valid only
when the action is "flush" and change notifications are enabled. If
the value of close is "true" and the flush operation completes
successfully, the service raises a file change notification with a
property indicating that this is the final update (the file stream has
been closed). If "false" a change notification is raised indicating
the file has changed. The default is false. This query parameter is
set to true by the Hadoop ABFS driver to indicate that the file stream
has been closed."
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:return: response header in dict
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_file_system.py
:start-after: [START upload_file_to_file_system]
:end-before: [END upload_file_to_file_system]
:language: python
:dedent: 8
:caption: Commit the previous appended data.
"""
options = self._flush_data_options(
offset,
retain_uncommitted_data=retain_uncommitted_data, **kwargs)
try:
return self._client.path.flush_data(**options)
except StorageErrorException as error:
process_storage_error(error)
def download_file(self, offset=None, length=None, **kwargs):
# type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
"""Downloads a file to the StorageStreamDownloader. The readall() method must
be used to read all the content, or readinto() must be used to download the file into
a stream.
:param int offset:
Start of byte range to use for downloading a section of the file.
Must be set if length is provided.
:param int length:
Number of bytes to read from the stream. This is optional, but
should be supplied for optimal performance.
:keyword lease:
If specified, download only succeeds if the file's lease is active
and matches this ID. Required if the file has an active lease.
:paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int max_concurrency:
The number of parallel connections with which to download.
:keyword int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:returns: A streaming object (StorageStreamDownloader)
:rtype: ~azure.storage.filedatalake.StorageStreamDownloader
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_upload_download.py
:start-after: [START read_file]
:end-before: [END read_file]
:language: python
:dedent: 4
:caption: Return the downloaded data.
"""
downloader = self._blob_client.download_blob(offset=offset, length=length, **kwargs)
return StorageStreamDownloader(downloader)
def rename_file(self, new_name, # type: str
**kwargs):
# type: (**Any) -> DataLakeFileClient
"""
Rename the source file.
        :param str new_name: the new file name the user wants to rename to.
The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}".
:keyword ~azure.storage.filedatalake.ContentSettings content_settings:
ContentSettings object used to set path properties.
:keyword source_lease: A lease ID for the source path. If specified,
            the source path must have an active lease and the lease ID must
match.
:paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:keyword lease:
Required if the file/directory has an active lease. Value can be a LeaseClient object
or the lease ID as a string.
        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword ~datetime.datetime source_if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime source_if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str source_etag:
The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions source_match_condition:
The source match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:return: the renamed file client
:rtype: DataLakeFileClient
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_upload_download.py
:start-after: [START rename_file]
:end-before: [END rename_file]
:language: python
:dedent: 4
:caption: Rename the source file.
"""
new_name = new_name.strip('/')
new_file_system = new_name.split('/')[0]
path = new_name[len(new_file_system):]
new_directory_client = DataLakeFileClient(
self.url, new_file_system, file_path=path, credential=self._raw_credential,
_hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
_location_mode=self._location_mode, require_encryption=self.require_encryption,
key_encryption_key=self.key_encryption_key,
key_resolver_function=self.key_resolver_function)
new_directory_client._rename_path('/'+self.file_system_name+'/'+self.path_name, # pylint: disable=protected-access
**kwargs)
return new_directory_client
def query_file(self, query_expression, **kwargs):
# type: (str, **Any) -> DataLakeFileQueryReader
"""Enables users to select/project on datalake file data by providing simple query expressions.
        This operation returns a DataLakeFileQueryReader; use readall() or readinto() to retrieve the query data.
:param str query_expression:
Required. a query statement.
:keyword Callable[Exception] on_error:
A function to be called on any processing errors returned by the service.
:keyword file_format:
Optional. Defines the serialization of the data currently stored in the file. The default is to
treat the file data as CSV data formatted in the default dialect. This can be overridden with
a custom DelimitedTextDialect, or alternatively a DelimitedJSON.
:paramtype file_format:
~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJSON
:keyword output_format:
Optional. Defines the output serialization for the data stream. By default the data will be returned
as it is represented in the file. By providing an output format, the file data will be reformatted
according to that profile. This value can be a DelimitedTextDialect or a DelimitedJSON.
:paramtype output_format:
~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJSON
:keyword lease:
Required if the file has an active lease. Value can be a DataLakeLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: A streaming object (DataLakeFileQueryReader)
:rtype: ~azure.storage.filedatalake.DataLakeFileQueryReader
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_query.py
:start-after: [START query]
:end-before: [END query]
:language: python
:dedent: 4
                :caption: Select/project on datalake file data by providing simple query expressions.
"""
blob_quick_query_reader = self._blob_client.query_blob(query_expression,
blob_format=kwargs.pop('file_format', None),
error_cls=DataLakeFileQueryError,
**kwargs)
return DataLakeFileQueryReader(blob_quick_query_reader)
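# A minimal, hedged usage sketch of the append/flush pattern documented in
# append_data() and flush_data() above: appended data only becomes readable
# after a flush at the total committed length. The connection string, file
# system name and file path below are placeholders, not part of this module.
def _example_append_and_flush(connection_string):
    from azure.storage.filedatalake import DataLakeServiceClient
    service = DataLakeServiceClient.from_connection_string(connection_string)
    file_client = service.get_file_client(file_system="my-file-system", file_path="dir/data.txt")
    file_client.create_file()
    chunk = b"hello datalake"
    file_client.append_data(chunk, offset=0, length=len(chunk))
    file_client.flush_data(offset=len(chunk))
    return file_client.download_file().readall()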
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch - TF 2.0 general utilities."""
import os
import re
import numpy
from .utils import logging
logger = logging.get_logger(__name__)
def convert_tf_weight_name_to_pt_weight_name(tf_name, start_prefix_to_remove=""):
"""Convert a TF 2.0 model variable name in a pytorch model weight name.
Conventions for TF2.0 scopes -> PyTorch attribute names conversions:
- '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
- '_._' is replaced by a new level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList)
return tuple with:
- pytorch model weight name
        - transpose: boolean indicating whether the TF 2.0 and PyTorch weight matrices are transposed with regard to each other
"""
tf_name = tf_name.replace(":0", "") # device ids
tf_name = re.sub(
r"/[^/]*___([^/]*)/", r"/\1/", tf_name
) # '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
tf_name = tf_name.replace(
"_._", "/"
) # '_._' is replaced by a level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList)
tf_name = re.sub(r"//+", "/", tf_name) # Remove empty levels at the end
tf_name = tf_name.split("/") # Convert from TF2.0 '/' separators to PyTorch '.' separators
tf_name = tf_name[1:] # Remove level zero
# When should we transpose the weights
transpose = bool(tf_name[-1] == "kernel" or "emb_projs" in tf_name or "out_projs" in tf_name)
# Convert standard TF2.0 names in PyTorch names
if tf_name[-1] == "kernel" or tf_name[-1] == "embeddings" or tf_name[-1] == "gamma":
tf_name[-1] = "weight"
if tf_name[-1] == "beta":
tf_name[-1] = "bias"
# Remove prefix if needed
tf_name = ".".join(tf_name)
if start_prefix_to_remove:
tf_name = tf_name.replace(start_prefix_to_remove, "", 1)
return tf_name, transpose
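# Hedged worked example of the conversion rules above (the TF variable name is
# illustrative): the ':0' suffix is stripped, '_._' becomes a level separator,
# the root scope is dropped, and a trailing 'kernel' maps to 'weight' with
# transposition.
#
# >>> convert_tf_weight_name_to_pt_weight_name("bert/encoder/layer_._0/attention/self/query/kernel:0")
# ('encoder.layer.0.attention.self.query.weight', True)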
#####################
# PyTorch => TF 2.0 #
#####################
def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
"""Load pytorch checkpoints in a TF 2.0 model"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
pt_path = os.path.abspath(pytorch_checkpoint_path)
logger.info("Loading PyTorch weights from {}".format(pt_path))
pt_state_dict = torch.load(pt_path, map_location="cpu")
logger.info("PyTorch checkpoint contains {:,} parameters".format(sum(t.numel() for t in pt_state_dict.values())))
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False):
"""Load pytorch checkpoints in a TF 2.0 model"""
pt_state_dict = pt_model.state_dict()
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False):
"""Load pytorch state_dict in a TF 2.0 model."""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
from tensorflow.python.keras import backend as K
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
# Adapt state dict - TODO remove this and update the AWS weights files instead
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in pt_state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
pt_state_dict[new_key] = pt_state_dict.pop(old_key)
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()):
start_prefix_to_remove = tf_model.base_model_prefix + "."
symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights
tf_loaded_numel = 0
weight_value_tuples = []
all_pytorch_weights = set(list(pt_state_dict.keys()))
missing_keys = []
for symbolic_weight in symbolic_weights:
sw_name = symbolic_weight.name
name, transpose = convert_tf_weight_name_to_pt_weight_name(
sw_name, start_prefix_to_remove=start_prefix_to_remove
)
# Find associated numpy array in pytorch model state dict
if name not in pt_state_dict:
if allow_missing_keys:
missing_keys.append(name)
continue
raise AttributeError("{} not found in PyTorch model".format(name))
array = pt_state_dict[name].numpy()
if transpose:
array = numpy.transpose(array)
if len(symbolic_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(symbolic_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(symbolic_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (symbolic_weight.shape, array.shape)
raise e
tf_loaded_numel += array.size
# logger.warning("Initialize TF weight {}".format(symbolic_weight.name))
weight_value_tuples.append((symbolic_weight, array))
all_pytorch_weights.discard(name)
K.batch_set_value(weight_value_tuples)
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure restore ops are run
logger.info("Loaded {:,} parameters in the TF 2.0 model.".format(tf_loaded_numel))
unexpected_keys = list(all_pytorch_weights)
if len(unexpected_keys) > 0:
logger.warning(
f"Some weights of the PyTorch model were not used when "
f"initializing the TF 2.0 model {tf_model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {tf_model.__class__.__name__} from a PyTorch model trained on another task "
f"or with another architecture (e.g. initializing a TFBertForSequenceClassification model from a BertForPretraining model).\n"
f"- This IS NOT expected if you are initializing {tf_model.__class__.__name__} from a PyTorch model that you expect "
f"to be exactly identical (e.g. initializing a TFBertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
logger.warning(f"All PyTorch model weights were used when initializing {tf_model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some weights or buffers of the TF 2.0 model {tf_model.__class__.__name__} were not initialized from the PyTorch model "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
logger.warning(
f"All the weights of {tf_model.__class__.__name__} were initialized from the PyTorch model.\n"
f"If your task is similar to the task the model of the ckeckpoint was trained on, "
f"you can already use {tf_model.__class__.__name__} for predictions without further training."
)
return tf_model
#####################
# TF 2.0 => PyTorch #
#####################
def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
"""Load TF 2.0 HDF5 checkpoint in a PyTorch model
We use HDF5 to easily do transfer learning
(see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357).
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
import transformers
logger.info("Loading TensorFlow weights from {}".format(tf_checkpoint_path))
# Instantiate and load the associated TF 2.0 model
tf_model_class_name = "TF" + pt_model.__class__.__name__ # Add "TF" at the beggining
tf_model_class = getattr(transformers, tf_model_class_name)
tf_model = tf_model_class(pt_model.config)
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
tf_model.load_weights(tf_checkpoint_path, by_name=True)
return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys)
def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False):
"""Load TF 2.0 model in a pytorch model"""
weights = tf_model.weights
return load_tf2_weights_in_pytorch_model(pt_model, weights, allow_missing_keys=allow_missing_keys)
def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False):
"""Load TF2.0 symbolic weights in a PyTorch model"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
new_pt_params_dict = {}
current_pt_params_dict = dict(pt_model.named_parameters())
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()):
start_prefix_to_remove = pt_model.base_model_prefix + "."
# Build a map from potential PyTorch weight names to TF 2.0 Variables
tf_weights_map = {}
for tf_weight in tf_weights:
pt_name, transpose = convert_tf_weight_name_to_pt_weight_name(
tf_weight.name, start_prefix_to_remove=start_prefix_to_remove
)
tf_weights_map[pt_name] = (tf_weight.numpy(), transpose)
all_tf_weights = set(list(tf_weights_map.keys()))
loaded_pt_weights_data_ptr = {}
missing_keys_pt = []
for pt_weight_name, pt_weight in current_pt_params_dict.items():
        # Handle PyTorch shared weights (not duplicated in TF 2.0)
if pt_weight.data_ptr() in loaded_pt_weights_data_ptr:
new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()]
continue
        # Find the associated numpy array in the TF 2.0 weights map
if pt_weight_name not in tf_weights_map:
if allow_missing_keys:
missing_keys_pt.append(pt_weight_name)
continue
raise AttributeError("{} not found in TF 2.0 model".format(pt_weight_name))
array, transpose = tf_weights_map[pt_weight_name]
if transpose:
array = numpy.transpose(array)
if len(pt_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(pt_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(pt_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (pt_weight.shape, array.shape)
raise e
# logger.warning("Initialize PyTorch weight {}".format(pt_weight_name))
new_pt_params_dict[pt_weight_name] = torch.from_numpy(array)
loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = torch.from_numpy(array)
all_tf_weights.discard(pt_weight_name)
missing_keys, unexpected_keys = pt_model.load_state_dict(new_pt_params_dict, strict=False)
missing_keys += missing_keys_pt
if len(unexpected_keys) > 0:
logger.warning(
f"Some weights of the TF 2.0 model were not used when "
f"initializing the PyTorch model {pt_model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {pt_model.__class__.__name__} from a TF 2.0 model trained on another task "
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a TFBertForPretraining model).\n"
f"- This IS NOT expected if you are initializing {pt_model.__class__.__name__} from a TF 2.0 model that you expect "
f"to be exactly identical (e.g. initializing a BertForSequenceClassification model from a TFBertForSequenceClassification model)."
)
else:
logger.warning(f"All TF 2.0 model weights were used when initializing {pt_model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the TF 2.0 model "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
logger.warning(
f"All the weights of {pt_model.__class__.__name__} were initialized from the TF 2.0 model.\n"
f"If your task is similar to the task the model of the ckeckpoint was trained on, "
f"you can already use {pt_model.__class__.__name__} for predictions without further training."
)
logger.info("Weights or buffers not loaded from TF 2.0 model: {}".format(all_tf_weights))
return pt_model
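# Hedged usage note (model and file names are illustrative): a PyTorch
# checkpoint is typically loaded into an already-instantiated matching TF 2.0
# architecture with
#
#   tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, "pytorch_model.bin")
#
# while the reverse direction goes through load_tf2_checkpoint_in_pytorch_model
# with an instantiated PyTorch model and an HDF5 weights file.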
|
# -----------------------------------
# import
# -----------------------------------
import os
import codecs
import re
from typing import Iterator, Any, List, Optional
# -----------------------------------
# define
# -----------------------------------
CUR_PATH = os.path.dirname(__file__)
# -----------------------------------
# function
# -----------------------------------
def list_to_file(fpath: str, lines: Iterator[Any]) -> None:
    # Write one item per line, coercing each item to str.
    with codecs.open(fpath, 'w', 'utf-8') as f:
        for line in lines:
            f.write(str(line) + '\n')
def list_from_file(fpath: str) -> List[str]:
with codecs.open(fpath, 'r', 'utf-8') as f:
lines = f.read().split()
return lines
def key_sort_by_num(x: str) -> List[int]:
re_list = re.findall(r"[0-9]+", x)
re_list = list(map(int, re_list))
return re_list
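# Hedged example (file names are made up): key_sort_by_num yields a natural,
# numeric ordering, which list_from_dir below relies on when sorting listings.
#
# >>> sorted(["img10.png", "img2.png", "img1.png"], key=key_sort_by_num)
# ['img1.png', 'img2.png', 'img10.png']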
def list_from_dir(dir_path: str, target_ext: Optional[str] = None) -> List[str]:
    ret_list = []
    fnames = os.listdir(dir_path)
    fnames = sorted(fnames, key=key_sort_by_num)
    for fname in fnames:
        if target_ext is None:
            path = os.path.join(dir_path, fname)
            ret_list.append(path)
        else:
            _, ext = os.path.splitext(fname)
            if ext.lower() in target_ext:
                path = os.path.join(dir_path, fname)
                ret_list.append(path)
    return ret_list
# -----------------------------------
# main
# -----------------------------------
if __name__ == '__main__':
pass
|
from functools import reduce
def maybe(func):
def inner(*args):
for arg in args:
if isinstance(arg, Exception):
return arg
try:
return func(*args)
except Exception as e:
return e
return inner
def repeat(func, until):
def inner(*args):
result = func(*args)
if until(result):
return result
return inner(*args)
return inner
pipe = lambda *funcs: lambda arg: reduce(lambda acc, func: func(acc), funcs, arg)
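# Hedged sketch showing how the helpers above compose (values are
# illustrative): `maybe` turns raised exceptions into return values, `repeat`
# retries until the predicate holds, and `pipe` chains unary callables left
# to right.
if __name__ == "__main__":
    safe_div = maybe(lambda a, b: a / b)
    assert safe_div(10, 2) == 5.0
    assert isinstance(safe_div(10, 0), ZeroDivisionError)
    add_one_then_double = pipe(lambda x: x + 1, lambda x: x * 2)
    assert add_one_then_double(3) == 8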
|
import pathlib
import os
import dotenv
from aiogram import Bot, Dispatcher
from aiogram.contrib.fsm_storage.files import PickleStorage
from config.logger import logger_init
from .handlers import start_handler
# Load dotenv
dotenv.load_dotenv()
# Configure logging.
# Four available logging levels: INFO, DEBUG, WARNING, ERROR
logger_init("INFO")
async def main():
"""Main function"""
# Initialize bot and dispatcher
bot = Bot(token=os.getenv("API_TOKEN"))
try:
storage = PickleStorage(pathlib.Path("db"))
dp = Dispatcher(bot, storage=storage)
# start_handler register
dp.register_message_handler(
start_handler, commands={"start"}, state="*")
# text_handler register
# dp.register_message_handler(
# text_handler, content_types="text")
await dp.start_polling()
finally:
await bot.close()
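# Hedged entry-point sketch: the module above defines `main()` but never runs
# it. With aiogram's asyncio-based API, one common way to start the bot is:
if __name__ == "__main__":
    import asyncio
    asyncio.run(main())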
|
# -*- coding: utf-8 -*-
import numpy as np
import logging
from scipy.stats import normaltest
class Diagnostic(object):
def __init__(self, parent):
self.parent = parent
self._logger = logging.getLogger("chainconsumer")
def gelman_rubin(self, chain=None, threshold=0.05):
r""" Runs the Gelman Rubin diagnostic on the supplied chains.
Parameters
----------
chain : int|str, optional
Which chain to run the diagnostic on. By default, this is `None`,
which will run the diagnostic on all chains. You can also
            supply an integer (the chain index) or a string for the chain
name (if you set one).
threshold : float, optional
The maximum deviation permitted from 1 for the final value
:math:`\hat{R}`
Returns
-------
        bool
            Whether or not the chains pass the test
Notes
-----
I follow PyMC in calculating the Gelman-Rubin statistic, where,
having :math:`m` chains of length :math:`n`, we compute
.. math::
B = \frac{n}{m-1} \sum_{j=1}^{m} \left(\bar{\theta}_{.j} - \bar{\theta}_{..}\right)^2
W = \frac{1}{m} \sum_{j=1}^{m} \left[ \frac{1}{n-1} \sum_{i=1}^{n} \left( \theta_{ij} - \bar{\theta_{.j}}\right)^2 \right]
where :math:`\theta` represents each model parameter. We then compute
        :math:`\hat{V} = \frac{n-1}{n}W + \frac{1}{n}B`, and have our convergence ratio
:math:`\hat{R} = \sqrt{\frac{\hat{V}}{W}}`. We check that for all parameters,
this ratio deviates from unity by less than the supplied threshold.
"""
if chain is None:
return np.all([self.gelman_rubin(k, threshold=threshold) for k in range(len(self.parent.chains))])
index = self.parent._get_chain(chain)
assert len(index) == 1, "Please specify only one chain, have %d chains" % len(index)
chain = self.parent.chains[index[0]]
num_walkers = chain.walkers
parameters = chain.parameters
name = chain.name
data = chain.chain
chains = np.split(data, num_walkers)
assert num_walkers > 1, "Cannot run Gelman-Rubin statistic with only one walker"
m = 1.0 * len(chains)
n = 1.0 * chains[0].shape[0]
all_mean = np.mean(data, axis=0)
chain_means = np.array([np.mean(c, axis=0) for c in chains])
chain_var = np.array([np.var(c, axis=0, ddof=1) for c in chains])
b = n / (m - 1) * ((chain_means - all_mean) ** 2).sum(axis=0)
w = (1 / m) * chain_var.sum(axis=0)
var = (n - 1) * w / n + b / n
v = var + b / (n * m)
R = np.sqrt(v / w)
passed = np.abs(R - 1) < threshold
print("Gelman-Rubin Statistic values for chain %s" % name)
for p, v, pas in zip(parameters, R, passed):
param = "Param %d" % p if isinstance(p, int) else p
print("%s: %7.5f (%s)" % (param, v, "Passed" if pas else "Failed"))
return np.all(passed)
def geweke(self, chain=None, first=0.1, last=0.5, threshold=0.05):
""" Runs the Geweke diagnostic on the supplied chains.
Parameters
----------
chain : int|str, optional
Which chain to run the diagnostic on. By default, this is `None`,
which will run the diagnostic on all chains. You can also
            supply an integer (the chain index) or a string for the chain
name (if you set one).
first : float, optional
The amount of the start of the chain to use
last : float, optional
The end amount of the chain to use
threshold : float, optional
The p-value to use when testing for normality.
Returns
-------
        bool
            Whether or not the chains pass the test
"""
if chain is None:
return np.all([self.geweke(k, threshold=threshold) for k in range(len(self.parent.chains))])
index = self.parent._get_chain(chain)
assert len(index) == 1, "Please specify only one chain, have %d chains" % len(index)
chain = self.parent.chains[index[0]]
num_walkers = chain.walkers
assert num_walkers is not None and num_walkers > 0, "You need to specify the number of walkers to use the Geweke diagnostic."
name = chain.name
data = chain.chain
chains = np.split(data, num_walkers)
n = 1.0 * chains[0].shape[0]
n_start = int(np.floor(first * n))
n_end = int(np.floor((1 - last) * n))
mean_start = np.array([np.mean(c[:n_start, i]) for c in chains for i in range(c.shape[1])])
var_start = np.array([self._spec(c[:n_start, i]) / c[:n_start, i].size for c in chains for i in range(c.shape[1])])
mean_end = np.array([np.mean(c[n_end:, i]) for c in chains for i in range(c.shape[1])])
var_end = np.array([self._spec(c[n_end:, i]) / c[n_end:, i].size for c in chains for i in range(c.shape[1])])
zs = (mean_start - mean_end) / (np.sqrt(var_start + var_end))
_, pvalue = normaltest(zs)
print("Gweke Statistic for chain %s has p-value %e" % (name, pvalue))
return pvalue > threshold
# Method of estimating spectral density following PyMC.
# See https://github.com/pymc-devs/pymc/blob/master/pymc/diagnostics.py
def _spec(self, x, order=2):
from statsmodels.regression.linear_model import yule_walker
beta, sigma = yule_walker(x, order)
return sigma ** 2 / (1.0 - np.sum(beta)) ** 2
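# Hedged sketch, independent of the class above: the Gelman-Rubin quantities
# from the gelman_rubin() docstring computed directly on synthetic chains.
# Shapes and values are illustrative only.
if __name__ == "__main__":
    np.random.seed(0)
    m, n = 4, 1000  # number of chains (walkers) and steps per chain
    chains = [np.random.normal(size=(n, 2)) for _ in range(m)]
    data = np.concatenate(chains)
    all_mean = data.mean(axis=0)
    chain_means = np.array([c.mean(axis=0) for c in chains])
    chain_var = np.array([c.var(axis=0, ddof=1) for c in chains])
    B = n / (m - 1) * ((chain_means - all_mean) ** 2).sum(axis=0)
    W = chain_var.mean(axis=0)
    V = (n - 1) / n * W + B / n
    R_hat = np.sqrt((V + B / (n * m)) / W)
    print("R_hat per parameter:", R_hat)  # values close to 1 indicate convergence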
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base TFDecorator class and utility functions for working with decorators.
There are two ways to create decorators that TensorFlow can introspect into.
This is important for documentation generation purposes, so that function
signatures aren't obscured by the (*args, **kwds) signature that decorators
often provide.
1. Call `tf_decorator.make_decorator` on your wrapper function. If your
decorator is stateless, or can capture all of the variables it needs to work
with through lexical closure, this is the simplest option. Create your wrapper
function as usual, but instead of returning it, return
`tf_decorator.make_decorator(your_wrapper)`. This will attach some decorator
introspection metadata onto your wrapper and return it.
Example:
def print_hello_before_calling(target):
def wrapper(*args, **kwargs):
print('hello')
return target(*args, **kwargs)
return tf_decorator.make_decorator(wrapper)
2. Derive from TFDecorator. If your decorator needs to be stateful, you can
implement it in terms of a TFDecorator. Store whatever state you need in your
derived class, and implement the `__call__` method to do your work before
calling into your target. You can retrieve the target via
`super(MyDecoratorClass, self).decorated_target`, and call it with whatever
parameters it needs.
Example:
class CallCounter(tf_decorator.TFDecorator):
def __init__(self, target):
super(CallCounter, self).__init__('count_calls', target)
self.call_count = 0
def __call__(self, *args, **kwargs):
self.call_count += 1
return super(CallCounter, self).decorated_target(*args, **kwargs)
def count_calls(target):
return CallCounter(target)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools as _functools
import traceback as _traceback
def make_decorator(target,
decorator_func,
decorator_name=None,
decorator_doc='',
decorator_argspec=None):
"""Make a decorator from a wrapper and a target.
Args:
target: The final callable to be wrapped.
decorator_func: The wrapper function.
decorator_name: The name of the decorator. If `None`, the name of the
function calling make_decorator.
decorator_doc: Documentation specific to this application of
`decorator_func` to `target`.
decorator_argspec: The new callable signature of this decorator.
Returns:
The `decorator_func` argument with new metadata attached.
"""
if decorator_name is None:
frame = _traceback.extract_stack(limit=2)[0]
# frame name is tuple[2] in python2, and object.name in python3
decorator_name = getattr(frame, 'name', frame[2]) # Caller's name
decorator = TFDecorator(decorator_name, target, decorator_doc,
decorator_argspec)
setattr(decorator_func, '_tf_decorator', decorator)
decorator_func.__name__ = target.__name__
decorator_func.__module__ = target.__module__
decorator_func.__doc__ = decorator.__doc__
decorator_func.__wrapped__ = target
return decorator_func
def unwrap(maybe_tf_decorator):
"""Unwraps an object into a list of TFDecorators and a final target.
Args:
maybe_tf_decorator: Any callable object.
Returns:
    A tuple whose first element is a list of TFDecorator-derived objects that
were applied to the final callable target, and whose second element is the
final undecorated callable target. If the `maybe_tf_decorator` parameter is
not decorated by any TFDecorators, the first tuple element will be an empty
list. The `TFDecorator` list is ordered from outermost to innermost
decorators.
"""
decorators = []
cur = maybe_tf_decorator
while True:
if isinstance(cur, TFDecorator):
decorators.append(cur)
elif hasattr(cur, '_tf_decorator'):
decorators.append(getattr(cur, '_tf_decorator'))
else:
break
cur = decorators[-1].decorated_target
return decorators, cur
class TFDecorator(object):
"""Base class for all TensorFlow decorators.
TFDecorator captures and exposes the wrapped target, and provides details
about the current decorator.
"""
def __init__(self,
decorator_name,
target,
decorator_doc='',
decorator_argspec=None):
self._decorated_target = target
self._decorator_name = decorator_name
self._decorator_doc = decorator_doc
self._decorator_argspec = decorator_argspec
self.__name__ = target.__name__
if self._decorator_doc:
self.__doc__ = self._decorator_doc
elif target.__doc__:
self.__doc__ = target.__doc__
else:
self.__doc__ = ''
def __get__(self, obj, objtype):
return _functools.partial(self.__call__, obj)
def __call__(self, *args, **kwargs):
return self._decorated_target(*args, **kwargs)
@property
def decorated_target(self):
return self._decorated_target
@property
def decorator_name(self):
return self._decorator_name
@property
def decorator_doc(self):
return self._decorator_doc
@property
def decorator_argspec(self):
return self._decorator_argspec
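# Hedged sketch, mirroring the first example in the module docstring above:
# make_decorator attaches introspection metadata to a wrapper, and unwrap
# recovers the original target. The wrapped function is illustrative.
if __name__ == '__main__':
    def _log_calls(target):
        def wrapper(*args, **kwargs):
            print('calling', target.__name__)
            return target(*args, **kwargs)
        return make_decorator(target, wrapper)

    @_log_calls
    def _add(a, b):
        return a + b

    _decorators, _original = unwrap(_add)
    assert _original(1, 2) == 3 and len(_decorators) == 1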
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('picmodels', '0054_providerlocation_state_province'),
]
operations = [
migrations.CreateModel(
name='Education',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('school', models.CharField(max_length=100)),
('major', models.CharField(max_length=100)),
('degree_type', models.CharField(null=True, blank=True, max_length=100, default='Not Available', choices=[('undergraduate', 'undergraduate'), ('graduate', 'graduate'), ('bachelors', 'bachelors'), ('masters', 'masters'), ('Not Available', 'Not Available')])),
('start_date', models.DateField(null=True, blank=True)),
('end_date', models.DateField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('company', models.CharField(max_length=200)),
('description', models.TextField(null=True, blank=True)),
('start_date', models.DateField(null=True, blank=True)),
('end_date', models.DateField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Resume',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('profile_description', models.TextField(null=True, blank=True)),
],
),
migrations.RemoveField(
model_name='picconsumer',
name='healthcare_networks_used',
),
migrations.RemoveField(
model_name='picconsumerbackup',
name='healthcare_networks_used',
),
migrations.AddField(
model_name='navigators',
name='address',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.SET_NULL, null=True, to='picmodels.Address'),
),
migrations.AddField(
model_name='navigators',
name='healthcare_locations_worked',
field=models.ManyToManyField(blank=True, to='picmodels.ProviderLocation', related_name='navigators_working_here'),
),
migrations.AddField(
model_name='navigators',
name='healthcare_service_expertises',
field=models.ManyToManyField(blank=True, to='picmodels.ProviderLocation', related_name='navigators_with_expertise'),
),
migrations.AddField(
model_name='navigators',
name='insurance_carrier_specialties',
field=models.ManyToManyField(blank=True, to='picmodels.HealthcareCarrier'),
),
migrations.AddField(
model_name='navigators',
name='phone',
field=models.CharField(null=True, blank=True, max_length=1000),
),
migrations.AddField(
model_name='navigators',
name='reported_region',
field=models.CharField(null=True, blank=True, max_length=1000),
),
migrations.AddField(
model_name='navigators',
name='video_link',
field=models.TextField(null=True, validators=[django.core.validators.URLValidator()], blank=True),
),
migrations.AddField(
model_name='picconsumer',
name='healthcare_locations_used',
field=models.ManyToManyField(blank=True, to='picmodels.ProviderLocation'),
),
migrations.AddField(
model_name='picconsumerbackup',
name='healthcare_locations_used',
field=models.ManyToManyField(blank=True, to='picmodels.ProviderLocation'),
),
migrations.AddField(
model_name='resume',
name='navigator',
field=models.ForeignKey(to='picmodels.Navigators'),
),
migrations.AddField(
model_name='job',
name='Resume',
field=models.ForeignKey(to='picmodels.Resume'),
),
migrations.AddField(
model_name='education',
name='Resume',
field=models.ForeignKey(to='picmodels.Resume'),
),
]
|
"""
This module defines various classes that can serve as the `output` to an interface. Each class must inherit from
`OutputComponent`, and each class must define a path to its template. All of the subclasses of `OutputComponent` are
automatically added to a registry, which allows them to be easily referenced in other parts of the code.
"""
from posixpath import basename
from gradio.component import Component
import numpy as np
import json
from gradio import processing_utils
import operator
from numbers import Number
import warnings
import tempfile
import os
import pandas as pd
import PIL
from types import ModuleType
from ffmpy import FFmpeg
import requests
class OutputComponent(Component):
"""
Output Component. All output components subclass this.
"""
def postprocess(self, y):
"""
Any postprocessing needed to be performed on function output.
"""
return y
def deserialize(self, x):
"""
        Convert the serialized output of a call() to the interface (e.g. a base64 representation) into a human-readable version of the output (the path of an image, etc.)
"""
return x
class Textbox(OutputComponent):
'''
Component creates a textbox to render output text or number.
Output type: Union[str, float, int]
Demos: hello_world, sentence_builder
'''
def __init__(self, type="auto", label=None):
'''
Parameters:
type (str): Type of value to be passed to component. "str" expects a string, "number" expects a float value, "auto" detects return type.
label (str): component name in interface.
'''
self.type = type
super().__init__(label)
def get_template_context(self):
return {
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"text": {"type": "str"},
"textbox": {"type": "str"},
"number": {"type": "number"},
}
def postprocess(self, y):
"""
Parameters:
y (str): text output
Returns:
(Union[str, number]): output value
"""
if self.type == "str" or self.type == "auto":
return str(y)
elif self.type == "number":
return y
else:
raise ValueError("Unknown type: " + self.type +
". Please choose from: 'str', 'number'")
class Label(OutputComponent):
'''
Component outputs a classification label, along with confidence scores of top categories if provided. Confidence scores are represented as a dictionary mapping labels to scores between 0 and 1.
Output type: Union[Dict[str, float], str, int, float]
Demos: image_classifier, main_note, titanic_survival
'''
CONFIDENCES_KEY = "confidences"
def __init__(self, num_top_classes=None, type="auto", label=None):
'''
Parameters:
num_top_classes (int): number of most confident classes to show.
            type (str): Type of value to be passed to component. "value" expects a single output label, "confidences" expects a dictionary mapping labels to confidence scores, "auto" detects return type.
label (str): component name in interface.
'''
self.num_top_classes = num_top_classes
self.type = type
super().__init__(label)
def postprocess(self, y):
"""
Parameters:
y (Dict[str, float]): dictionary mapping label to confidence value
Returns:
(Dict[label: str, confidences: List[Dict[label: str, confidence: number]]]): Object with key 'label' representing primary label, and key 'confidences' representing a list of label-confidence pairs
"""
if self.type == "label" or (self.type == "auto" and (isinstance(y, str) or isinstance(y, Number))):
return {"label": str(y)}
elif self.type == "confidences" or (self.type == "auto" and isinstance(y, dict)):
sorted_pred = sorted(
y.items(),
key=operator.itemgetter(1),
reverse=True
)
if self.num_top_classes is not None:
sorted_pred = sorted_pred[:self.num_top_classes]
return {
"label": sorted_pred[0][0],
"confidences": [
{
"label": pred[0],
"confidence": pred[1]
} for pred in sorted_pred
]
}
else:
raise ValueError("The `Label` output interface expects one of: a string label, or an int label, a "
"float label, or a dictionary whose keys are labels and values are confidences.")
def deserialize(self, y):
# 5 cases: (1): {'label': 'lion'}, {'label': 'lion', 'confidences':...}, {'lion': 0.46, ...}, 'lion', '0.46'
if self.type == "label" or (self.type == "auto" and (isinstance(y, str) or isinstance(y, int) or isinstance(y, float) or ('label' in y and not('confidences' in y.keys())))):
if isinstance(y, str) or isinstance(y, int) or isinstance(y, float):
return y
else:
return y['label']
elif self.type == "confidences" or self.type == "auto":
if ('confidences' in y.keys()) and isinstance(y['confidences'], list):
return {k['label']:k['confidence'] for k in y['confidences']}
else:
return y
raise ValueError("Unable to deserialize output: {}".format(y))
@classmethod
def get_shortcut_implementations(cls):
return {
"label": {},
}
def save_flagged(self, dir, label, data, encryption_key):
"""
Returns: (Union[str, Dict[str, number]]): Either a string representing the main category label, or a dictionary with category keys mapping to confidence levels.
"""
if "confidences" in data:
return json.dumps({example["label"]: example["confidence"] for example in data["confidences"]})
else:
return data["label"]
def restore_flagged(self, data):
try:
data = json.loads(data)
return data
except:
return data
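# Hedged doctest-style sketch (values are illustrative) of how Label.postprocess
# shapes a confidence dictionary into the {'label', 'confidences'} structure
# described in its docstring:
#
# >>> Label(num_top_classes=2).postprocess({"cat": 0.7, "dog": 0.2, "bird": 0.1})
# {'label': 'cat', 'confidences': [{'label': 'cat', 'confidence': 0.7},
#                                  {'label': 'dog', 'confidence': 0.2}]}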
class Image(OutputComponent):
'''
Component displays an output image.
Output type: Union[numpy.array, PIL.Image, str, matplotlib.pyplot, Tuple[Union[numpy.array, PIL.Image, str], List[Tuple[str, float, float, float, float]]]]
Demos: image_mod, webcam
'''
def __init__(self, type="auto", plot=False, label=None):
'''
Parameters:
type (str): Type of value to be passed to component. "numpy" expects a numpy array with shape (width, height, 3), "pil" expects a PIL image object, "file" expects a file path to the saved image or a remote URL, "plot" expects a matplotlib.pyplot object, "auto" detects return type.
plot (bool): DEPRECATED. Whether to expect a plot to be returned by the function.
label (str): component name in interface.
'''
if plot:
warnings.warn(
"The 'plot' parameter has been deprecated. Set parameter 'type' to 'plot' instead.", DeprecationWarning)
self.type = "plot"
else:
self.type = type
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {
"image": {},
"plot": {"type": "plot"},
"pil": {"type": "pil"}
}
def postprocess(self, y):
"""
Parameters:
y (Union[numpy.array, PIL.Image, str, matplotlib.pyplot, Tuple[Union[numpy.array, PIL.Image, str], List[Tuple[str, float, float, float, float]]]]): image in specified format
Returns:
(str): base64 url data
"""
if self.type == "auto":
if isinstance(y, np.ndarray):
dtype = "numpy"
elif isinstance(y, PIL.Image.Image):
dtype = "pil"
elif isinstance(y, str):
dtype = "file"
elif isinstance(y, ModuleType):
dtype = "plot"
else:
raise ValueError(
"Unknown type. Please choose from: 'numpy', 'pil', 'file', 'plot'.")
else:
dtype = self.type
if dtype in ["numpy", "pil"]:
if dtype == "pil":
y = np.array(y)
out_y = processing_utils.encode_array_to_base64(y)
elif dtype == "file":
out_y = processing_utils.encode_url_or_file_to_base64(y)
elif dtype == "plot":
out_y = processing_utils.encode_plot_to_base64(y)
else:
raise ValueError("Unknown type: " + dtype +
". Please choose from: 'numpy', 'pil', 'file', 'plot'.")
return out_y
def deserialize(self, x):
y = processing_utils.decode_base64_to_file(x).name
return y
def save_flagged(self, dir, label, data, encryption_key):
"""
Returns: (str) path to image file
"""
return self.save_flagged_file(dir, label, data, encryption_key)
class Video(OutputComponent):
'''
Used for video output.
Output type: filepath
Demos: video_flip
'''
def __init__(self, type=None, label=None):
'''
Parameters:
type (str): Type of video format to be passed to component, such as 'avi' or 'mp4'. Use 'mp4' to ensure browser playability. If set to None, video will keep returned format.
label (str): component name in interface.
'''
self.type = type
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {
"video": {},
"playable_video": {"type": "mp4"}
}
def postprocess(self, y):
"""
Parameters:
y (str): path to video
Returns:
(str): base64 url data
"""
returned_format = y.split(".")[-1].lower()
if self.type is not None and returned_format != self.type:
output_file_name = y[0: y.rindex(
".") + 1] + self.type
ff = FFmpeg(
inputs={y: None},
outputs={output_file_name: None}
)
ff.run()
y = output_file_name
return {
"name": os.path.basename(y),
"data": processing_utils.encode_file_to_base64(y, type="video")
}
def deserialize(self, x):
return processing_utils.decode_base64_to_file(x).name
def save_flagged(self, dir, label, data, encryption_key):
"""
        Returns: (str) path to video file
"""
return self.save_flagged_file(dir, label, data['data'], encryption_key)
class KeyValues(OutputComponent):
'''
Component displays a table representing values for multiple fields.
Output type: Union[Dict, List[Tuple[str, Union[str, int, float]]]]
Demos: text_analysis
'''
def __init__(self, label=None):
'''
Parameters:
label (str): component name in interface.
'''
super().__init__(label)
def postprocess(self, y):
"""
Parameters:
y (Union[Dict, List[Tuple[str, Union[str, int, float]]]]): dictionary or tuple list representing key value pairs
Returns:
(List[Tuple[str, Union[str, number]]]): list of key value pairs
"""
if isinstance(y, dict):
return list(y.items())
elif isinstance(y, list):
return y
else:
raise ValueError("The `KeyValues` output interface expects an output that is a dictionary whose keys are "
"labels and values are corresponding values.")
@classmethod
def get_shortcut_implementations(cls):
return {
"key_values": {},
}
def save_flagged(self, dir, label, data, encryption_key):
return json.dumps(data)
def restore_flagged(self, data):
return json.loads(data)
class HighlightedText(OutputComponent):
'''
Component creates text that contains spans that are highlighted by category or numerical value.
    Output is represented as a list of tuples, where the first element of each tuple is the span of text and the second element is its category or value.
Output type: List[Tuple[str, Union[float, str]]]
Demos: diff_texts, text_analysis
'''
def __init__(self, color_map=None, label=None):
'''
Parameters:
color_map (Dict[str, str]): Map between category and respective colors
label (str): component name in interface.
'''
self.color_map = color_map
super().__init__(label)
def get_template_context(self):
return {
"color_map": self.color_map,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"highlight": {},
}
def postprocess(self, y):
"""
Parameters:
        y (List[Tuple[str, Union[str, float]]]): list of (text span, category or value) pairs
        Returns:
        (List[Tuple[str, Union[str, number]]]): list of (text span, category or value) pairs
"""
return y
def save_flagged(self, dir, label, data, encryption_key):
return json.dumps(data)
def restore_flagged(self, data):
return json.loads(data)
class Audio(OutputComponent):
'''
Creates an audio player that plays the output audio.
Output type: Union[Tuple[int, numpy.array], str]
Demos: generate_tone, reverse_audio
'''
def __init__(self, type="auto", label=None):
'''
Parameters:
type (str): Type of value to be passed to component. "numpy" returns a 2-set tuple with an integer sample_rate and the data numpy.array of shape (samples, 2), "file" returns a temporary file path to the saved wav audio file, "auto" detects return type.
label (str): component name in interface.
'''
self.type = type
super().__init__(label)
def get_template_context(self):
return {
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"audio": {},
}
def postprocess(self, y):
"""
Parameters:
y (Union[Tuple[int, numpy.array], str]): audio data in requested format
Returns:
(str): base64 url data
"""
if self.type in ["numpy", "file", "auto"]:
if self.type == "numpy" or (self.type == "auto" and isinstance(y, tuple)):
sample_rate, data = y
file = tempfile.NamedTemporaryFile(prefix="sample", suffix=".wav", delete=False)
processing_utils.audio_to_file(sample_rate, data, file.name)
y = file.name
return processing_utils.encode_url_or_file_to_base64(y, type="audio", ext="wav")
else:
raise ValueError("Unknown type: " + self.type +
". Please choose from: 'numpy', 'file'.")
def deserialize(self, x):
return processing_utils.decode_base64_to_file(x).name
def save_flagged(self, dir, label, data, encryption_key):
"""
Returns: (str) path to audio file
"""
return self.save_flagged_file(dir, label, data, encryption_key)
class JSON(OutputComponent):
'''
Used for JSON output. Expects a JSON string or a Python object that is JSON serializable.
Output type: Union[str, Any]
Demos: zip_to_json
'''
def __init__(self, label=None):
'''
Parameters:
label (str): component name in interface.
'''
super().__init__(label)
def postprocess(self, y):
"""
Parameters:
y (Union[Dict, List, str]): JSON output
Returns:
(Union[Dict, List]): JSON output
"""
        if isinstance(y, str):
            # Parse JSON strings so the returned value is always a Python object
            return json.loads(y)
else:
return y
@classmethod
def get_shortcut_implementations(cls):
return {
"json": {},
}
def save_flagged(self, dir, label, data, encryption_key):
return json.dumps(data)
def restore_flagged(self, data):
return json.loads(data)
class HTML(OutputComponent):
'''
Used for HTML output. Expects an HTML valid string.
Output type: str
Demos: text_analysis
'''
def __init__(self, label=None):
'''
Parameters:
label (str): component name in interface.
'''
super().__init__(label)
def postprocess(self, x):
"""
Parameters:
y (str): HTML output
Returns:
(str): HTML output
"""
return x
@classmethod
def get_shortcut_implementations(cls):
return {
"html": {},
}
class File(OutputComponent):
'''
Used for file output.
Output type: Union[file-like, str]
Demos: zip_two_files
'''
def __init__(self, label=None):
'''
Parameters:
label (str): component name in interface.
'''
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {
"file": {},
}
def postprocess(self, y):
"""
Parameters:
y (str): file path
Returns:
(Dict[name: str, size: number, data: str]): JSON object with key 'name' for filename, 'data' for base64 url, and 'size' for filesize in bytes
"""
return {
"name": os.path.basename(y),
"size": os.path.getsize(y),
"data": processing_utils.encode_file_to_base64(y, header=False)
}
def save_flagged(self, dir, label, data, encryption_key):
"""
        Returns: (str) path to file
"""
return self.save_flagged_file(dir, label, data["data"], encryption_key)
class Dataframe(OutputComponent):
"""
Component displays 2D output through a spreadsheet interface.
Output type: Union[pandas.DataFrame, numpy.array, List[Union[str, float]], List[List[Union[str, float]]]]
Demos: filter_records, matrix_transpose, fraud_detector
"""
def __init__(self, headers=None, max_rows=20, max_cols=None, overflow_row_behaviour="paginate", type="auto", label=None):
'''
Parameters:
headers (List[str]): Header names to dataframe. Only applicable if type is "numpy" or "array".
max_rows (int): Maximum number of rows to display at once. Set to None for infinite.
max_cols (int): Maximum number of columns to display at once. Set to None for infinite.
overflow_row_behaviour (str): If set to "paginate", will create pages for overflow rows. If set to "show_ends", will show initial and final rows and truncate middle rows.
type (str): Type of value to be passed to component. "pandas" for pandas dataframe, "numpy" for numpy array, or "array" for Python array, "auto" detects return type.
label (str): component name in interface.
'''
self.headers = headers
self.max_rows = max_rows
self.max_cols = max_cols
self.overflow_row_behaviour = overflow_row_behaviour
self.type = type
super().__init__(label)
def get_template_context(self):
return {
"headers": self.headers,
"max_rows": self.max_rows,
"max_cols": self.max_cols,
"overflow_row_behaviour": self.overflow_row_behaviour,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"dataframe": {},
"numpy": {"type": "numpy"},
"matrix": {"type": "array"},
"list": {"type": "array"},
}
def postprocess(self, y):
"""
Parameters:
y (Union[pandas.DataFrame, numpy.array, List[Union[str, float]], List[List[Union[str, float]]]]): dataframe in given format
Returns:
(Dict[headers: List[str], data: List[List[Union[str, number]]]]): JSON object with key 'headers' for list of header names, 'data' for 2D array of string or numeric data
"""
if self.type == "auto":
if isinstance(y, pd.core.frame.DataFrame):
dtype = "pandas"
elif isinstance(y, np.ndarray):
dtype = "numpy"
elif isinstance(y, list):
dtype = "array"
else:
dtype = self.type
if dtype == "pandas":
return {"headers": list(y.columns), "data": y.values.tolist()}
elif dtype in ("numpy", "array"):
if dtype == "numpy":
y = y.tolist()
if len(y) == 0 or not isinstance(y[0], list):
y = [y]
return {"data": y}
else:
raise ValueError("Unknown type: " + self.type +
". Please choose from: 'pandas', 'numpy', 'array'.")
def save_flagged(self, dir, label, data, encryption_key):
"""
Returns: (List[List[Union[str, float]]]) 2D array
"""
return json.dumps(data["data"])
def restore_flagged(self, data):
return json.loads(data)
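# Illustrative usage sketch (not part of the original module): with the default type="auto",
# Dataframe.postprocess infers the payload layout from the value it receives.
def _dataframe_postprocess_example():
    df_out = Dataframe(label="table")
    # A list of lists passes through under the 'data' key.
    assert df_out.postprocess([[1, 2], [3, 4]]) == {"data": [[1, 2], [3, 4]]}
    # A flat list is wrapped into a single row.
    assert df_out.postprocess([1, 2, 3]) == {"data": [[1, 2, 3]]}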
class Carousel(OutputComponent):
"""
Component displays a set of output components that can be scrolled through.
Output type: List[List[Any]]
Demos: disease_report
"""
def __init__(self, components, label=None):
'''
Parameters:
components (Union[List[OutputComponent], OutputComponent]): Classes of component(s) that will be scrolled through.
label (str): component name in interface.
'''
if not isinstance(components, list):
components = [components]
self.components = [get_output_instance(
component) for component in components]
super().__init__(label)
def get_template_context(self):
return {
"components": [component.get_template_context() for component in self.components],
**super().get_template_context()
}
def postprocess(self, y):
"""
Parameters:
y (List[List[Any]]): carousel output
Returns:
(List[List[Any]]): 2D array, where each sublist represents one set of outputs or 'slide' in the carousel
"""
if isinstance(y, list):
if len(y) != 0 and not isinstance(y[0], list):
y = [[z] for z in y]
output = []
for row in y:
output_row = []
for i, cell in enumerate(row):
output_row.append(self.components[i].postprocess(cell))
output.append(output_row)
return output
else:
raise ValueError(
"Unknown type. Please provide a list for the Carousel.")
def save_flagged(self, dir, label, data, encryption_key):
return json.dumps([
[
component.save_flagged(
dir, f"{label}_{j}", data[i][j], encryption_key)
for j, component in enumerate(self.components)
] for i, sample in enumerate(data)])
def get_output_instance(iface):
if isinstance(iface, str):
shortcut = OutputComponent.get_all_shortcut_implementations()[iface]
return shortcut[0](**shortcut[1])
elif isinstance(iface, dict): # a dict with `name` as the output component type and other keys as parameters
name = iface.pop('name')
for component in OutputComponent.__subclasses__():
if component.__name__.lower() == name:
break
else:
raise ValueError("No such OutputComponent: {}".format(name))
return component(**iface)
elif isinstance(iface, OutputComponent):
return iface
else:
raise ValueError(
"Output interface must be of type `str` or `dict` or"
"`OutputComponent` but is {}".format(iface)
)
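# Illustrative usage sketch (not part of the original module): the three argument forms
# accepted by get_output_instance, matching the branches above.
def _get_output_instance_example():
    by_shortcut = get_output_instance("json")                              # shortcut string
    by_dict = get_output_instance({"name": "dataframe", "type": "array"})  # dict: 'name' plus constructor kwargs
    by_instance = get_output_instance(HTML(label="report"))                # already-constructed component
    return by_shortcut, by_dict, by_instance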
class Timeseries(OutputComponent):
"""
Component accepts pandas.DataFrame.
Output type: pandas.DataFrame
Demos: fraud_detector
"""
def __init__(self, x=None, y=None, label=None):
"""
Parameters:
x (str): Column name of x (time) series. None if csv has no headers, in which case first column is x series.
y (Union[str, List[str]]): Column name of y series, or list of column names if multiple series. None if csv has no headers, in which case every column after first is a y series.
label (str): component name in interface.
"""
self.x = x
if isinstance(y, str):
y = [y]
self.y = y
super().__init__(label)
def get_template_context(self):
return {
"x": self.x,
"y": self.y,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"timeseries": {},
}
def postprocess(self, y):
"""
Parameters:
y (pandas.DataFrame): timeseries data
Returns:
(Dict[headers: List[str], data: List[List[Union[str, number]]]]): JSON object with key 'headers' for list of header names, 'data' for 2D array of string or numeric data
"""
return {
"headers": y.columns.values.tolist(),
"data": y.values.tolist()
}
def save_flagged(self, dir, label, data, encryption_key):
"""
Returns: (List[List[Union[str, float]]]) 2D array
"""
return json.dumps(data)
def restore_flagged(self, data):
return json.loads(data)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import next
import json
import logging
import re
import sys
from collections import OrderedDict
from django.http import Http404
from django.utils.html import escape
from django.views.decorators.http import require_POST
from desktop.lib.django_util import JsonResponse
from desktop.lib.i18n import force_unicode, smart_str
from metadata.catalog.base import get_api
from metadata.catalog.navigator_client import CatalogApiException, CatalogEntityDoesNotExistException, CatalogAuthException
from metadata.conf import has_catalog, CATALOG, has_catalog_file_search, NAVIGATOR
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
class MetadataApiException(Exception):
pass
def error_handler(view_fn):
def decorator(*args, **kwargs):
status = 500
response = {
'message': ''
}
try:
if has_catalog(args[0].user):
return view_fn(*args, **kwargs)
else:
raise MetadataApiException('Catalog API is not configured.')
except Http404 as e:
raise e
except CatalogEntityDoesNotExistException as e:
response['message'] = e.message
status = 404
except CatalogAuthException as e:
response['message'] = force_unicode(e.message)
status = 403
except CatalogApiException as e:
try:
response['message'] = json.loads(e.message)
except Exception:
response['message'] = force_unicode(e.message)
except Exception as e:
message = force_unicode(e)
response['message'] = message
LOG.exception(message)
return JsonResponse(response, status=status)
return decorator
@error_handler
def search_entities_interactive(request):
"""
For search autocomplete.
"""
interface = request.POST.get('interface', CATALOG.INTERFACE.get())
query_s = json.loads(request.POST.get('query_s', ''))
prefix = request.POST.get('prefix')
offset = request.POST.get('offset', 0)
limit = int(request.POST.get('limit', 25))
field_facets = json.loads(request.POST.get('field_facets') or '[]')
sources = json.loads(request.POST.get('sources') or '[]')
api = get_api(request=request, interface=interface)
if sources and not has_catalog_file_search(request.user):
sources = ['sql']
response = api.search_entities_interactive(
query_s=query_s,
limit=limit,
offset=offset,
facetFields=field_facets,
facetPrefix=prefix,
facetRanges=None,
firstClassEntitiesOnly=None,
sources=sources
)
if response.get('facets'): # Remove empty facets
for fname, fvalues in list(response['facets'].items()):
      # Should be a CATALOG option at some point for hiding tables with no access / asking for access.
if interface == 'navigator' and NAVIGATOR.APPLY_SENTRY_PERMISSIONS.get():
fvalues = []
else:
fvalues = sorted([(k, v) for k, v in list(fvalues.items()) if v > 0], key=lambda n: n[1], reverse=True)
response['facets'][fname] = OrderedDict(fvalues)
if ':' in query_s and not response['facets'][fname]:
del response['facets'][fname]
_augment_highlighting(query_s, response.get('results'))
response['status'] = 0
return JsonResponse(response)
@error_handler
def search_entities(request):
"""
For displaying results.
"""
interface = request.POST.get('interface', CATALOG.INTERFACE.get())
query_s = json.loads(request.POST.get('query_s', ''))
query_s = smart_str(query_s)
offset = request.POST.get('offset', 0)
limit = int(request.POST.get('limit', 100))
raw_query = request.POST.get('raw_query', False)
sources = json.loads(request.POST.get('sources') or '[]')
if sources and not has_catalog_file_search(request.user):
sources = ['sql']
query_s = query_s.strip() or '*'
api = get_api(request=request, interface=interface)
entities = api.search_entities(query_s, limit=limit, offset=offset, raw_query=raw_query, sources=sources)
if not raw_query:
_augment_highlighting(query_s, entities)
response = {
'entities': entities,
'count': len(entities),
'offset': offset,
'limit': limit,
'query_s': query_s,
'status': 0
}
return JsonResponse(response)
def _augment_highlighting(query_s, records):
fs = {}
ts = []
for term in query_s.split():
if ':' in term:
fname, fval = term.split(':', 1)
if fval and fval.strip('*'):
fs[fname] = fval.strip('*')
else:
if term.strip('*'):
ts.append(term.strip('*'))
for record in records:
name = record.get('originalName', '') or ''
record['hue_description'] = ''
record['hue_name'] = record.get('parentPath', '') if record.get('parentPath') else ''
if record.get('parentPath') is None:
record['parentPath'] = ''
if record['hue_name'] and record.get('sourceType', '') != 'S3':
record['hue_name'] = (record['hue_name'].replace('/', '.') + '.').lstrip('.')
record['originalName'] = record['hue_name'] + name # Inserted when selected in autocomplete, full path
record['selectionName'] = name # Use when hovering / selecting a search result
for term in ts:
name = _highlight(term, name)
if record.get('tags'):
_highlight_tags(record, term)
for fname, fval in fs.items(): # e.g. owner:<em>hu</em>e
if record.get(fname, ''):
if fname == 'tags':
_highlight_tags(record, fval)
else:
record['hue_description'] += ' %s:%s' % (fname, _highlight(fval, record[fname]))
originalDescription = record.get('originalDescription', '')
if not record['hue_description'] and originalDescription:
record['hue_description'] = _highlight(term, originalDescription)
record['hue_name'] += name
    record['hue_name'] = escape(record['hue_name']).replace('&lt;em&gt;', '<em>').replace('&lt;/em&gt;', '</em>')
    record['hue_description'] = escape(record['hue_description']).replace('&lt;em&gt;', '<em>').replace('&lt;/em&gt;', '</em>')
def _highlight(pattern, string):
pattern = re.escape(pattern)
return re.compile('(%s)' % pattern, re.IGNORECASE).sub('<em>\\1</em>', string, count=1)
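# Worked example (illustrative, not part of the original module): _highlight wraps only the
# first case-insensitive occurrence of the pattern in <em> tags and keeps the original casing.
def _highlight_example():
  assert _highlight('cust', 'Customers and customers') == '<em>Cust</em>omers and customers'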
def _highlight_tags(record, term):
for tag in record['tags']:
if re.match(term, tag):
record['hue_description'] += ' tags:%s' % _highlight(term, tag)
@error_handler
def list_tags(request):
interface = request.POST.get('interface', CATALOG.INTERFACE.get())
prefix = request.POST.get('prefix')
offset = request.POST.get('offset', 0)
limit = request.POST.get('limit', 25)
api = get_api(request=request, interface=interface)
data = api.search_entities_interactive(facetFields=['tags'], facetPrefix=prefix, limit=limit, offset=offset)
response = {
'tags': data['facets']['tags'],
'status': 0
}
return JsonResponse(response)
@error_handler
def find_entity(request):
response = {'status': -1}
interface = request.GET.get('interface', CATALOG.INTERFACE.get())
entity_type = request.GET.get('type', '')
database = request.GET.get('database', '')
table = request.GET.get('table', '')
name = request.GET.get('name', '')
path = request.GET.get('path', '')
api = get_api(request=request, interface=interface)
if not entity_type:
raise MetadataApiException("find_entity requires a type value, e.g. - 'database', 'table', 'file'")
if entity_type.lower() == 'database':
if not name:
raise MetadataApiException('get_database requires name param')
response['entity'] = api.get_database(name)
elif entity_type.lower() == 'table' or entity_type.lower() == 'view':
if not database or not name:
raise MetadataApiException('get_table requires database and name param')
is_view = entity_type.lower() == 'view'
response['entity'] = api.get_table(database, name, is_view=is_view)
elif entity_type.lower() == 'field':
if not database or not table or not name:
raise MetadataApiException('get_field requires database, table, and name params')
response['entity'] = api.get_field(database, table, name)
elif entity_type.lower() == 'directory':
if not path:
raise MetadataApiException('get_directory requires path param')
response['entity'] = api.get_directory(path)
elif entity_type.lower() == 'file':
if not path:
raise MetadataApiException('get_file requires path param')
response['entity'] = api.get_file(path)
else:
raise MetadataApiException("type %s is unrecognized" % entity_type)
# Prevent nulls later
if 'tags' in response['entity'] and not response['entity']['tags']:
response['entity']['tags'] = []
response['status'] = 0
return JsonResponse(response)
@error_handler
def suggest(request):
response = {'status': -1}
interface = request.POST.get('interface', CATALOG.INTERFACE.get())
prefix = request.POST.get('prefix')
api = get_api(request=request, interface=interface)
suggest = api.suggest(prefix)
response['suggest'] = suggest
response['status'] = 0
return JsonResponse(response)
@error_handler
def get_entity(request):
response = {'status': -1}
interface = request.GET.get('interface', CATALOG.INTERFACE.get())
entity_id = request.GET.get('id')
api = get_api(request=request, interface=interface)
if not entity_id:
raise MetadataApiException("get_entity requires an 'id' parameter")
entity = api.get_entity(entity_id)
response['entity'] = entity
response['status'] = 0
return JsonResponse(response)
@require_POST
@error_handler
def add_tags(request):
interface = request.POST.get('interface', CATALOG.INTERFACE.get())
entity_id = json.loads(request.POST.get('id', '""'))
tags = json.loads(request.POST.get('tags', "[]"))
api = get_api(request=request, interface=interface)
is_allowed = request.user.has_hue_permission(action='write', app='metadata')
request.audit = {
'allowed': is_allowed,
'operation': '%s_ADD_TAG' % interface.upper(),
'operationText': 'Adding tags %s to entity %s' % (tags, entity_id)
}
if not is_allowed:
raise Exception("The user does not have proper Hue permissions to add %s tags." % interface.title())
if not entity_id:
raise Exception("Missing required parameter 'id' in add_tags API.")
if not tags:
raise Exception("Missing required parameter 'tags' in add_tags API.")
return JsonResponse(api.add_tags(entity_id, tags))
@require_POST
@error_handler
def delete_tags(request):
interface = request.POST.get('interface', CATALOG.INTERFACE.get())
entity_id = json.loads(request.POST.get('id', '""'))
tags = json.loads(request.POST.get('tags', '[]'))
api = get_api(request=request, interface=interface)
is_allowed = request.user.has_hue_permission(action='write', app='metadata')
request.audit = {
'allowed': is_allowed,
'operation': '%s_DELETE_TAG' % interface.upper(),
    'operationText': 'Removing tags %s from entity %s' % (tags, entity_id)
}
if not is_allowed:
raise Exception("The user does not have proper Hue permissions to delete %s tags." % interface.title())
if not entity_id:
raise Exception("Missing required parameter 'id' in delete_tags API.")
if not tags:
raise Exception("Missing required parameter 'tags' in delete_tags API.")
return JsonResponse(api.delete_tags(entity_id, tags))
@require_POST
@error_handler
def update_properties(request):
interface = request.POST.get('interface', CATALOG.INTERFACE.get())
entity_id = json.loads(request.POST.get('id', '""'))
properties = json.loads(request.POST.get('properties', '{}')) # Entity properties
modified_custom_metadata = json.loads(request.POST.get('modifiedCustomMetadata', '{}')) # Aka "Custom Metadata"
deleted_custom_metadata_keys = json.loads(request.POST.get('deletedCustomMetadataKeys', '[]'))
api = get_api(request=request, interface=interface)
is_allowed = request.user.has_hue_permission(action='write', app='metadata')
request.audit = {
'allowed': is_allowed,
'operation': '%s_UPDATE_PROPERTIES' % interface.upper(),
'operationText': 'Updating custom metadata %s, deleted custom metadata keys %s and properties %s of entity %s' % (modified_custom_metadata, deleted_custom_metadata_keys, properties, entity_id)
}
if not entity_id:
# TODO: raise HueApiException(message="Missing required parameter 'id' for update_properties", source="Hue")
# source so the user knows which service that failed right away, in UI: "[source] responded with error: [message]"
raise Exception("Missing required parameter 'id' in update_properties API.")
if not is_allowed:
# TODO: HueAuthException?
raise Exception("The user does not have proper Hue permissions to update %s properties." % interface.title())
return JsonResponse(api.update_properties(entity_id, properties, modified_custom_metadata, deleted_custom_metadata_keys))
@require_POST
@error_handler
def delete_metadata_properties(request):
response = {'status': -1}
interface = request.POST.get('interface', CATALOG.INTERFACE.get())
entity_id = json.loads(request.POST.get('id', '""'))
keys = json.loads(request.POST.get('keys', '[]'))
api = get_api(request=request, interface=interface)
is_allowed = request.user.has_hue_permission(action='write', app='metadata')
request.audit = {
'allowed': is_allowed,
'operation': '%s_DELETE_METADATA_PROPERTIES' % interface.upper(),
'operationText': 'Deleting metadata %s of entity %s' % (keys, entity_id)
}
if not entity_id or not keys or not isinstance(keys, list):
response['error'] = _("update_properties requires an 'id' parameter and 'keys' parameter that is a non-empty list")
else:
response['entity'] = api.delete_metadata_properties(entity_id, keys)
response['status'] = 0
return JsonResponse(response)
@error_handler
def get_lineage(request):
response = {'status': -1, 'inputs': [], 'source_query': '', 'target_queries': [], 'targets': []}
interface = request.GET.get('interface', CATALOG.INTERFACE.get())
entity_id = request.GET.get('id')
api = get_api(request=request, interface=interface)
if not entity_id:
raise MetadataApiException("get_lineage requires an 'id' parameter")
lineage = api.get_lineage(entity_id)
entity_name = api.get_entity(entity_id)['originalName'].upper()
response['id'] = entity_id
# TODO: This is a cheat way to do to this for demo using filtering but we should really traverse relationships
parent_operation = next((entity for entity in lineage['entities'] if entity.get('outputs', []) == [entity_name]), None)
if parent_operation:
response['inputs'] = [input.lower() for input in parent_operation['inputs']]
response['source_query'] = parent_operation.get('queryText', '')
children = [entity for entity in lineage['entities'] if entity.get('inputs') is not None and entity_name in entity.get('inputs')]
if children is not None:
response['target_queries'] = [child['queryText'] for child in children if child.get('queryText') is not None]
outputs = [child['outputs'] for child in children if child.get('outputs') is not None]
response['targets'] = [target.lower() for output in outputs for target in output]
response['status'] = 0
return JsonResponse(response)
@error_handler
def create_namespace(request):
interface = request.POST.get('interface', CATALOG.INTERFACE.get())
namespace = request.POST.get('namespace')
description = request.POST.get('description')
api = get_api(request=request, interface=interface)
request.audit = {
'allowed': request.user.has_hue_permission(action='write', app='metadata'),
'operation': '%s_CREATE_NAMESPACE' % interface.upper(),
'operationText': 'Creating namespace %s' % namespace
}
namespace = api.create_namespace(namespace=namespace, description=description)
return JsonResponse(namespace)
@error_handler
def get_namespace(request):
interface = request.POST.get('interface', CATALOG.INTERFACE.get())
namespace = request.POST.get('namespace')
api = get_api(request=request, interface=interface)
namespace = api.get_namespace(namespace)
return JsonResponse(namespace)
@error_handler
def create_namespace_property(request):
"""
{
"name" : "relatedEntities",
"displayName" : "Related objects",
"creator" : "admin",
"description" : "My desc",,
"multiValued" : true,
"maxLength" : 50,
"pattern" : ".*",
"enumValues" : null,
"type" : "TEXT",
"createdDate" : "2018-04-02T22:36:19.001Z"
}
"""
interface = request.POST.get('interface', CATALOG.INTERFACE.get())
namespace = request.POST.get('namespace')
properties = json.loads(request.POST.get('properties', '{}'))
api = get_api(request=request, interface=interface)
namespace = api.create_namespace_property(namespace, properties)
return JsonResponse(namespace)
@error_handler
def map_namespace_property(request):
"""
{
namespace: "huecatalog",
name: "relatedEntities"
}
"""
interface = request.POST.get('interface', CATALOG.INTERFACE.get())
clazz = request.POST.get('class')
properties = json.loads(request.POST.get('properties', '[]'))
api = get_api(request=request, interface=interface)
namespace = api.map_namespace_property(clazz=clazz, properties=properties)
return JsonResponse(namespace)
@error_handler
def get_model_properties_mapping(request):
interface = request.POST.get('interface', CATALOG.INTERFACE.get())
api = get_api(request=request, interface=interface)
namespace = api.get_model_properties_mapping()
return JsonResponse(namespace)
|
__author__ = 'andrewfowler'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable-msg=C0103
################################################################################
# Copyright (c) 2006-2017 Franz Inc.
# All rights reserved. This program and the accompanying materials are
# made available under the terms of the MIT License which accompanies
# this distribution, and is available at http://opensource.org/licenses/MIT
################################################################################
from __future__ import absolute_import
from __future__ import unicode_literals
from future.builtins import next, object
from past.builtins import unicode
from ..model import Statement, Value
try:
import franz.openrdf.query.pandas_support as pandas
has_pandas = True
except ImportError:
has_pandas = False
class RepositoryResult(object):
"""An iterable collection of statements.
A RepositoryResult is a result collection of objects (for example
:class:`org.openrdf.model.Statement`,
:class:`org.openrdf.model.Namespace`, or
:class:`org.openrdf.model.Resource` objects) that can be iterated
over. It keeps an open connection to the backend for lazy
retrieval of individual results. Additionally it has some utility
methods to fetch all results and add them to a collection.
By default, a RepositoryResult is not necessarily a (mathematical)
set: it may contain duplicate objects. Duplicate filtering can be
enabled using :meth:`enableDuplicateFilter`, but this should not
be used lightly as the filtering mechanism is potentially
memory-intensive.
A RepositoryResult needs to be closed using :meth:`close` after use
to free up any resources (open connections, read locks, etc.) it
has on the underlying repository.
"""
def __init__(self, string_tuples, subjectFilter=None, tripleIDs=False):
self.string_tuples = string_tuples
self.cursor = 0
self.nonDuplicateSet = None
#self.limit = limit
self.subjectFilter = subjectFilter
self.triple_ids = tripleIDs
def _createStatement(self, string_tuple):
"""
Allocate a Statement and fill it in from 'string_tuple'.
"""
return Statement(*string_tuple)
def __iter__(self): return self
def close(self):
"""
Shut down the iterator to be sure the resources are freed up.
It is safe to call this method multiple times.
"""
pass
def __next__(self):
"""
Return the next Statement in the answer, if there is one.
Otherwise raise StopIteration exception.
:return: The next statement.
:raises StopIteration: If there are no more statements.
"""
if self.nonDuplicateSet is not None:
try:
savedNonDuplicateSet = self.nonDuplicateSet
self.nonDuplicateSet = None
while (True):
stmt = next(self)
if not stmt in savedNonDuplicateSet:
savedNonDuplicateSet.add(stmt)
return stmt
finally:
self.nonDuplicateSet = savedNonDuplicateSet
# elif self.limit and self.cursor >= self.limit:
# raise StopIteration
elif self.cursor < len(self.string_tuples):
stringTuple = self.string_tuples[self.cursor]
if self.triple_ids:
stringTuple = RepositoryResult.normalize_quint(stringTuple)
self.cursor += 1
if self.subjectFilter and not stringTuple[0] == self.subjectFilter:
return next(self)
            return self._createStatement(stringTuple)
else:
raise StopIteration
def enableDuplicateFilter(self):
"""
Switch on duplicate filtering while iterating over objects.
The RepositoryResult will keep track of the previously returned objects in a set
and on calling next() will ignore any objects that already occur in this set.
Caution: use of this filtering mechanism is potentially memory-intensive.
"""
self.nonDuplicateSet = set([])
def asList(self):
"""
Returns a list containing all objects of this RepositoryResult in
order of iteration.
The RepositoryResult is fully consumed and automatically closed by this operation.
:return: List of statements.
:rtype: list[Statement]
"""
result = []
self.addTo(result)
return result
def addTo(self, collection):
"""
Add all objects of this RepositoryResult to the supplied collection.
The RepositoryResult is fully consumed and automatically closed by this
operation.
:param collection: The collection to add the results to.
It can be a list or a set.
:type collection: set|list
"""
isList = isinstance(collection, list)
for stmt in self:
if isList: collection.append(stmt)
else: collection.add(stmt)
def __len__(self):
return len(self.string_tuples)
# Python-future breaks truth testing - length is not checked...
def __bool__(self):
return len(self) > 0
def rowCount(self):
"""
Get the number of statements in this result object.
:return: The number of results in this iterator.
"""
return len(self)
@staticmethod
def normalize_quint(stringTuple):
st = stringTuple
return (st[1], st[2], st[3], None if len(st) == 4 else st[4], unicode(st[0]))
@staticmethod
def normalize_quad(stringTuple):
st = stringTuple
if len(st) == 3:
return (st[0], st[1], st[2], None)
return st
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
del exc_type, exc_val, exc_tb
self.close()
def __del__(self):
self.close()
def toPandas(self, include_graph=True):
if not has_pandas:
raise Exception('Pandas not installed.')
return pandas.rows_to_pandas(
self,
['s', 'p', 'o', 'g'] if include_graph else False)
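# Illustrative usage sketch (not part of the original module): consuming a RepositoryResult
# built directly from raw string tuples. The triples below are assumptions chosen only to
# exercise iteration, duplicate filtering and asList() as documented above.
def _repository_result_example():
    raw_tuples = [
        ('<ex://alice>', '<ex://knows>', '<ex://bob>'),
        ('<ex://alice>', '<ex://knows>', '<ex://bob>'),  # deliberate duplicate
    ]
    with RepositoryResult(raw_tuples) as result:
        result.enableDuplicateFilter()   # the second, identical statement is skipped
        statements = result.asList()     # fully consumes the iterator
    return statements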
|
import mysql.connector
from model.group import Group
from model.contact import Contact
class DbFixture:
    # all contacts
sel_all_cont = "select distinct a.id, a.firstname, a.middlename, a.lastname, a.nickname, a.address, " \
"a.email, a.email2, a.email3, a.home, a.mobile, a.work, a.phone2 " \
"from addressbook as a where a.deprecated = '0000-00-00 00:00:0'"
    # all contacts that belong to at least one group
sel_cont_in_groups = "select distinct a.id, a.firstname, a.middlename, a.lastname, a.nickname, a.address, " \
"a.email, a.email2, a.email3, a.home, a.mobile, a.work, a.phone2 " \
"from addressbook as a inner join address_in_groups on a.id = address_in_groups.id " \
"where a.deprecated = '0000-00-00 00:00:0'"
    # ids of contacts that belong to at least one group
sel_id_cont_in_groups = "select distinct a.id " \
"from addressbook as a inner join address_in_groups on a.id = address_in_groups.id " \
"where a.deprecated = '0000-00-00 00:00:0'"
    # ids of the groups for the contact with id = %s
sel_id_groups_with_contacts = "select group_id from address_in_groups " \
"where id = %s"
    # ids of contacts that do not belong to any group
sel_cont_not_in_groups = sel_all_cont + " and a.id not in (" + sel_id_cont_in_groups + ")"
def __init__(self, host, name, user, password):
self.host = host
self.name = name
self.user = user
self.password = password
self.connection = mysql.connector.connect(host=host, database=name, user=user, password=password)
self.connection.autocommit = True
def get_group_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
for row in cursor.fetchall():
(id, name, header, footer) = row
list.append(Group(id=str(id), name=name, header=header, footer=footer))
finally:
cursor.close()
return list
def get_address_in_group_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("select id, group_id from address_in_groups")
for row in cursor.fetchall():
(contact_id, group_id) = row
list.append([str(contact_id), str(group_id)])
finally:
cursor.close()
return list
    # returns the list of contacts produced by an arbitrary query
def get_contact_list_from_sel(self,sel):
list = []
cursor = self.connection.cursor()
try:
cursor.execute(sel)
for row in cursor.fetchall():
(id, firstname, middlename, lastname, nickname, address, email, email2, email3, home, mobile, work, phone2) = row
list.append(Contact(id=str(id), firstname=firstname, middlename=middlename, lastname=lastname, nickname=nickname,
company_address=address, email1=email, email2=email2, email3=email3, home_phone=home, mobile_phone=mobile,
work_phone=work,home_phone2=phone2))
finally:
cursor.close()
return list
    # all contacts
def get_contact_list(self):
return self.get_contact_list_from_sel(self.sel_all_cont)
    # all contacts that belong to at least one group
def get_contact_list_in_group(self):
return self.get_contact_list_from_sel(self.sel_cont_in_groups)
    # all contacts that do not belong to any group
def get_contact_list_not_in_group(self):
return self.get_contact_list_from_sel(self.sel_cont_not_in_groups)
    # ids of the groups that the contact belongs to
def get_group_id_list_with_contact(self,id_contact):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("select group_id from address_in_groups where id = %s"%id_contact)
for row in cursor.fetchall():
#(group_id) = row
list.append(str(row[0]))
finally:
cursor.close()
return list
def destroy(self):
self.connection.close()
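# Illustrative usage sketch (not part of the original module): driving the fixture from a
# test. The connection parameters are assumptions for a local addressbook database.
def _db_fixture_example():
    db = DbFixture(host="127.0.0.1", name="addressbook", user="root", password="")
    try:
        groups = db.get_group_list()
        in_groups = db.get_contact_list_in_group()
        orphans = db.get_contact_list_not_in_group()
        return groups, in_groups, orphans
    finally:
        db.destroy()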
|
import pickle
from bitglitter.palettes.paletteobjects import DefaultPalette, TwentyFourBitPalette
from bitglitter.read.assembler import Assembler
class Config:
'''This is the master object that holds all session data.'''
def __init__(self):
self.colorHandler = PaletteHandler()
self.statsHandler = Statistics()
self.assembler = Assembler()
        self.assembler.clearPartialSaves() # Delete any old partial-save folder whenever a new config object is created.
self.saveSession()
# Reserved for next release, introducing presets
# presetDict = {}
def saveSession(self):
with open('config.pickle', 'wb') as pickleSaver:
pickle.dump(self, pickleSaver)
class Statistics:
    '''Read and write values are held in this object. Its attributes are changed through method calls.'''
def __init__(self):
self.blocksWrote = 0
self.framesWrote = 0
self.dataWrote = 0
self.blocksRead = 0
self.framesRead = 0
self.dataRead = 0
def __str__(self):
        '''This is used by outputStats() in configfunctions to output a nicely formatted text file showing usage
statistics.
'''
return('*' * 21 + '\nStatistics\n' + '*' * 21 + f'\n\nTotal Blocks Wrote: {self.blocksWrote}'
f'\nTotal Frames Wrote: {self.framesWrote}'
f'\nTotal Data Wrote: {int(self.dataWrote / 8)} B'
f'\n\nTotal Blocks Read: {self.blocksRead}'
f'\nTotal Frames Read: {self.framesRead}'
f'\nTotal Data Read: {int(self.dataRead / 8)} B')
def writeUpdate(self, blocks, frames, data):
self.blocksWrote += blocks
self.framesWrote += frames
self.dataWrote += data
def readUpdate(self, blocks, frames, data):
self.blocksRead += blocks
self.framesRead += frames
self.dataRead += data
def clearStats(self):
self.blocksWrote = 0
self.framesWrote = 0
self.dataWrote = 0
self.blocksRead = 0
self.framesRead = 0
self.dataRead = 0
class PaletteHandler:
    '''This handles all palettes, both default and custom. Note that the default palettes are created here as well.
    All functions in the palettefunctions module that deal with custom palettes interface with the
    customPaletteList and customPaletteNicknameList dictionaries in this object.
'''
def __init__(self):
self.defaultPaletteList = {'1' : DefaultPalette("1 bit default",
"Two colors, black and white. While it has the lowest density of one bit of data per "
"pixel, it has the highest reliability.", ((0,0,0), (255,255,255)), 441.67, 1),
'11' : DefaultPalette("1 bit alternate", "Uses cyan/magenta instead of white/black.",
((255, 0, 255), (0, 255, 255)), 360.12, 11),
'2' : DefaultPalette("2 bit default", "Four colors; black, red, green, blue.",
((0,0,0), (255,0,0), (0,255,0), (0,0,255)), 255, 2),
'22': DefaultPalette("2 bit alternate", "Four colors; black, magenta, cyan, yellow.",
((0, 0, 0), (255, 255, 0), (0, 255, 255), (255, 0, 255)), 255,
22),
'3' : DefaultPalette("3 bit default",
"Eight colors.", ((0,0,0), (255,0,0), (0,255,0), (0,0,255), (255,255,0), (0,255,255),
(255,0,255), (255,255,255)), 255, 3),
'4' : DefaultPalette("4 bit default", "Sixteen colors.", ((0,0,0), (128,128,128),
(192,192,192), (128,0,0), (255,0,0), (128,128,0), (255,255,0), (0,255,0), (0,128,128),
(0,128,0), (0,0,128), (0,0,255), (0,255,255), (128,0,128), (255,0,255), (255,255,255)),
109.12, 4),
'6' : DefaultPalette("6 bit default", "Sixty-four colors.", ((0,0,0), (0,0,85),
(0,0,170), (0,0,255), (0,85,0), (0,85,85), (0,85,170), (0,85,255), (0,170,0), (0,170,85),
(0,170,170), (0,170,255), (0,255,0), (0,255,85), (0,255,170), (0,255,255), (85,0,0),
(85,0,85), (85,0,170), (85,0,255), (85,85,0), (85,85,85), (85,85,170), (85,85,255),
(85,170,0), (85,170,85), (85,170,170), (85,170,255), (85,255,0), (85,255,85), (85,255,170),
(85,255,255), (170,0,0), (170,0,85), (170,0,170), (170,0,255), (170,85,0), (170,85,85),
(170,85,170), (170,85,255), (170,170,0), (170,170,85), (170,170,170), (170,170,255),
(170,255,0), (170,255,85), (170,255,170), (170,255,255), (255,0,0), (255,0,85), (255,0,170),
(255,0,255), (255,85,0), (255,85,85), (255,85,170), (255,85,255), (255,170,0), (255,170,85),
(255,170,170), (255,170,255), (255,255,0), (255,255,85), (255,255,170), (255,255,255)), 85,
6),
'24': TwentyFourBitPalette()
}
self.customPaletteList = {}
self.customPaletteNicknameList = {}
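# Illustrative usage sketch (not part of the original module): how the session objects above
# are typically driven. Instantiating Config() clears partial saves and pickles a fresh
# session; the counters are then advanced through the Statistics methods (data is in bits).
def _config_example():
    config = Config()
    config.statsHandler.writeUpdate(blocks=1200, frames=3, data=9600)
    config.statsHandler.readUpdate(blocks=400, frames=1, data=3200)
    config.saveSession()            # persists the updated counters to config.pickle
    print(config.statsHandler)      # formatted summary from Statistics.__str__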
|
from app import app
from app.selection import newsselector
from elasticsearch import Elasticsearch
from app import db
from flask import request, jsonify
from http import HTTPStatus
newsselector = newsselector()
'''Homepage that displays news articles in json format'''
@app.route('/', methods= ['GET', 'POST'])
@app.route('/homepage', methods= ['GET', 'POST'])
def select_news():
return newsselector.make_recommendations()
'''Random API'''
@app.route('/random', methods= ['GET', 'POST'])
def select_random_news():
return newsselector.make_random_recommendations()
#to add: Post to user database
'''Recent API'''
@app.route('/recent', methods= ['GET', 'POST'])
def select_recent_news():
return newsselector.make_recent_recommendations()
@app.route("/users", methods=['POST'])
def create_user():
"""
Create a new user. Request body should be a json with username, email, and password
"""
data = request.get_json(force=True)
if User.select().where(User.email == data['email']).exists():
return bad_request("User {} already exists".format(data['email']))
u = auth.create_user(username=data['username'], email=data['email'], password=data['password'])
return jsonify({"id": u.id, "email": u.email}), HTTPStatus.CREATED
|
import numpy as np
import matplotlib.pyplot as plt
microwave_positions = ['closer',
'closer_angled']
kettle_positions = ['top_right',
'bot_right',
'bot_right_angled',
'bot_left_angled']
cabinet_textures = ['wood1',
'wood2',
'metal1',
'metal2',
'marble1',
'tile1']
lighting_options = ['cast_left',
'cast_right',
'brighter',
'darker']
counter_textures = ['white_marble_tile2',
'tile1',
'wood1',
'wood2',
]
floor_textures = ['white_marble_tile',
'marble1',
'tile1',
'wood1',
'wood2',
'checker',
]
domain_parameters = [('change_camera', 2)]
microwave_idx = np.random.choice(5)
if microwave_idx < 4:
domain_parameters.append(('change_microwave', microwave_idx))
kettle_idx = np.random.choice(7)
if kettle_idx < 6:
domain_parameters.append(('change_kettle', kettle_idx))
microwave_position = np.random.choice(3)
if microwave_position < 2:
domain_parameters.append(('change_objects_layout', 'microwave', microwave_positions[microwave_position]))
kettle_position = np.random.choice(5)
if kettle_position < 4:
domain_parameters.append(('change_objects_layout', 'kettle', kettle_positions[kettle_position]))
hinge_texture = np.random.choice(7)
if hinge_texture < 6:
domain_parameters.append(('change_hinge_texture', cabinet_textures[hinge_texture]))
slide_texture = np.random.choice(7)
if slide_texture < 6:
domain_parameters.append(('change_slide_texture', cabinet_textures[slide_texture]))
#
#lighting_option = np.random.choice(5)
#if lighting_option < 4:
# domain_parameters.append(('change_lighting', lighting_options[lighting_option]))
counter_texture = np.random.choice(5)
if counter_texture < 4:
domain_parameters.append(('change_counter_texture', counter_textures[counter_texture]))
floor_texture = np.random.choice(7)
if floor_texture < 6:
domain_parameters.append(('change_floor_texture', floor_textures[floor_texture]))
# 4. Apply domain shifts to env
env.reset_domain_changes()
for p in domain_parameters:
fn = getattr(env, p[0])
fn(*p[1:])
env.reset(reload_model_xml=True)
obs = env.render(mode='rgb_array')
plt.imshow(obs)
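# Illustrative sketch (not part of the original script): the dispatch pattern used above,
# wrapped as a reusable helper. Each entry is (method_name, *args); the method is looked up
# on the environment and called with the remaining items. `env` is assumed to expose the
# change_* methods, reset_domain_changes(), reset(reload_model_xml=...) and render() used here.
def apply_domain_parameters(env, parameters):
    env.reset_domain_changes()
    for entry in parameters:
        getattr(env, entry[0])(*entry[1:])
    env.reset(reload_model_xml=True)
    return env.render(mode='rgb_array')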
|
# -*- coding: utf-8 -*-
from erpbrasil.febraban.entidades import Boleto
from erpbrasil.febraban.boleto.custom_property import CustomProperty
class BoletoCaixa(Boleto):
'''
    Builds the data needed to create a boleto (bank slip) for Caixa
    Econômica Federal.
'''
conta_cedente = CustomProperty('conta_cedente', 11)
'''
    This number has a fixed prefix:
    Carteira (portfolio) SR: 80, 81 or 82
    Carteira (portfolio) CR: 90 (confirm with the account manager which one to use)
'''
nosso_numero = CustomProperty('nosso_numero', 10)
def __init__(self):
super(BoletoCaixa, self).__init__()
self.codigo_banco = "104"
self.local_pagamento = "Preferencialmente nas Casas Lotéricas e \
Agências da Caixa"
self.logo_image = "logo_bancocaixa.jpg"
@property
def dv_nosso_numero(self):
resto2 = self.modulo11(self.nosso_numero.split('-')[0], 9, 1)
digito = 11 - resto2
if digito == 10 or digito == 11:
dv = 0
else:
dv = digito
return dv
@property
def campo_livre(self):
content = "%10s%4s%11s" % (self.nosso_numero,
self.agencia_cedente,
self.conta_cedente.split('-')[0])
return content
def format_nosso_numero(self):
return self.nosso_numero + '-' + str(self.dv_nosso_numero)
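# Illustrative usage sketch (not part of the original module): minimal use of BoletoCaixa.
# The field values are assumptions; agencia_cedente and modulo11 come from the Boleto base
# class, which is not shown here.
def _boleto_caixa_example():
    boleto = BoletoCaixa()
    boleto.agencia_cedente = '1565'
    boleto.conta_cedente = '87000000414'
    boleto.nosso_numero = '8019525086'
    print(boleto.format_nosso_numero())   # nosso_numero plus its check digit
    print(boleto.campo_livre)             # 25-character "campo livre" used in the barcode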
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
"""
This module is the requests implementation of Pipeline ABC
"""
from azure.core.pipeline import PipelineRequest, PipelineResponse
from .base import SansIOHTTPPolicy
class CustomHookPolicy(SansIOHTTPPolicy):
"""A simple policy that enable the given callback
with the response.
"""
def __init__(self, **kwargs): # pylint: disable=unused-argument
self._callback = None
def on_request(self, request): # type: ignore # pylint: disable=arguments-differ
# type: (PipelineRequest) -> None
self._callback = request.context.options.pop('raw_response_hook', None) # type: ignore
def on_response(self, request, response): # type: ignore # pylint: disable=arguments-differ
# type: (PipelineRequest, PipelineResponse) -> None
if self._callback:
self._callback(response)
request.context.options.update({'raw_response_hook': self._callback}) # type: ignore
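# Illustrative sketch (not part of the original module): the callback contract this policy
# enables. A 'raw_response_hook' callable passed in the request options is popped in
# on_request and invoked with the PipelineResponse in on_response. The client call shown in
# the trailing comment is a placeholder for any azure-core based client that forwards
# **kwargs into the pipeline.
def print_status(pipeline_response):
    # pipeline_response is an azure.core.pipeline.PipelineResponse
    print(pipeline_response.http_response.status_code)

# client.some_operation(raw_response_hook=print_status)  # hypothetical client call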
|
import pygame
import numpy as np
import math
import time
from gym.spaces.box import Box
import matplotlib.pyplot as plt
class Reacher:
def __init__(self, screen_size=1000, num_joints=2, link_lengths = [200, 140], ini_joint_angles=[0.1, 0.1], target_pos = [669,430], render=False, change_goal=False):
# Global variables
self.screen_size = screen_size # create a screen of screen_size * screen_size
self.num_joints = num_joints # not counting the initial one
self.link_lengths = link_lengths # length of the link between joints
self.ini_joint_angles = ini_joint_angles # initialized joint angle values
self.joint_angles = ini_joint_angles
self.num_actions = self.num_joints
        self.num_observations = 2*(self.num_actions+2) # 2-D coordinates of each joint (including the base) plus the target position
self.L = 8 # distance from target to get reward 2 for sparse reward
self.action_space=Box(-100,100, [self.num_actions])
self.observation_space=Box(-1000,1000, [2*(self.num_actions+2)])
self.target_pos=target_pos
self.render=render
if self.render == True:
self.screen = pygame.display.set_mode((self.screen_size, self.screen_size))
pygame.display.set_caption("Reacher")
else:
pass
self.is_running = 1
self.steps=0
self.max_episode_steps=500 # maximum steps of one episode
self.reset_cnt=0 # for counting
self.change_goal = change_goal # change the goal if True
self.change_goal_episodes=10 # episode interval of changing a target position
# Function to compute the transformation matrix between two frames
def compute_trans_mat(self, angle, length):
cos_theta = math.cos(math.radians(angle))
sin_theta = math.sin(math.radians(angle))
dx = -length * sin_theta
dy = length * cos_theta
T = np.array([[cos_theta, -sin_theta, dx], [sin_theta, cos_theta, dy], [0, 0, 1]])
return T
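    # Worked example (illustrative): for angle=90 degrees and length=10,
    #   cos=0, sin=1, dx=-10*1=-10, dy=10*0=0, so
    #   T = [[ 0, -1, -10],
    #        [ 1,  0,   0],
    #        [ 0,  0,   1]]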
# Function to draw the current state of the world
def draw_current_state(self, ):
T = np.zeros((self.num_joints, 3, 3)) # transition matrix
origin = np.zeros((self.num_joints, 3)) # transformed coordinates - 3 values
p = np.zeros((self.num_joints+1, 2)) # joint coordinates in world
p[0] = [0, 0] # initial joint coordinates
for i in range(self.num_joints):
T[i] = self.compute_trans_mat(self.joint_angles[i], self.link_lengths[i])
multiplier = np.array([0, 0, 1])
for j in range(i):
multiplier=np.dot(T[i-j], multiplier)
origin[i] = np.dot(T[0], multiplier)
p[i+1] = [origin[i][0], -1.*origin[i][1]] # the - is because the y-axis is opposite in world and image coordinates
int_coordinates = [[0 for i in range(2)] for j in range(self.num_joints+1)]
for i in range (self.num_joints+1):
int_coordinates[i][0] = int(0.5 * self.screen_size + p[i][0])
int_coordinates[i][1] = int(0.5 * self.screen_size + p[i][1])
if self.render == True:
self.screen.fill((0, 0, 0))
line_width=20 # origin 5
circle_size=20 # origin 10
for i in range (self.num_joints+1):
if i < self.num_joints:
pygame.draw.line(self.screen, (255, 255, 255), [int_coordinates[i][0], int_coordinates[i][1]], [int_coordinates[i+1][0], int_coordinates[i+1][1]], line_width) # draw link
pygame.draw.circle(self.screen, (0, 255, 0), [int_coordinates[i][0], int_coordinates[i][1]], circle_size) # draw joint
pygame.draw.circle(self.screen, (255, 255, 0), np.array(self.target_pos).astype(int), 2*circle_size) # draw target
# extra objects for disturbing:
# o1=[702, 230]
# o2=[794, 609]
# o3=[470, 234]
# o4=[270, 234]
# o5=[387, 534]
# pygame.draw.circle(self.screen, (155, 23, 208), o1, circle_size*3)
# pygame.draw.circle(self.screen, (255, 0, 182), o2, circle_size*2)
# pygame.draw.circle(self.screen, (155, 134, 0), o3, circle_size)
# pygame.draw.circle(self.screen, (25, 34, 190), o4, circle_size*2)
# pygame.draw.circle(self.screen, (215, 34, 129), o5, circle_size*3)
# Flip the display buffers to show the current rendering
pygame.display.flip()
# time.sleep(0.5)
''' screenshot the image '''
# pygame.image.save(self.screen, './screen.png')
array_screen = pygame.surfarray.array3d(self.screen) # 3d array pygame.surface (self.screen)
red_array_screen=pygame.surfarray.pixels_red(self.screen) # 2d array from red pixel of pygame.surface (self.screen)
downsampling_rate=5 # downsmaple the screen shot, origin 1000*1000*3
CHANNELS=['rgb', 'red'][1]
if CHANNELS == 'red': # 2d, need to expand 1 dim
downsampled_array_screen=np.expand_dims(red_array_screen[::downsampling_rate,::downsampling_rate,], axis=-1)
elif CHANNELS == 'rgb':
downsampled_array_screen=array_screen[::downsampling_rate,::downsampling_rate,]
# plt.imshow(array_screen[::downsampling_rate,::downsampling_rate,])
# plt.show()
else:
pass
return np.array(int_coordinates).reshape(-1), np.array([downsampled_array_screen])
def reset(self, screen_shot):
''' reset the environment '''
self.steps=0
self.joint_angles = np.array(self.ini_joint_angles)*180.0/np.pi
if self.render == True:
self.screen = pygame.display.set_mode((self.screen_size, self.screen_size))
pygame.display.set_caption("Reacher")
else:
pass
self.is_running = 1
if self.change_goal is True:
''' reset the target position for learning across tasks '''
self.reset_cnt+=1
if self.reset_cnt > self.change_goal_episodes:
self.reset_cnt=0
                range_pose=0.3 # allowed range for the goal position
target_pos=range_pose*np.random.rand(2) + [0.5,0.5]
self.target_pos=target_pos*self.screen_size
pos_set, screenshot=self.draw_current_state()
if screen_shot:
return screenshot
else:
return np.array(np.concatenate((pos_set,self.target_pos)))/self.screen_size
def step(self, action, sparse_reward, screen_shot):
# Get events and check if the user has closed the window
if self.render == True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.is_running = 0
break
else:
pass
# Change the joint angles (the increment is in degrees)
for i in range (self.num_joints):
self.joint_angles[i] += action[i]
pos_set, screenshot=self.draw_current_state()
distance2goal = np.sqrt((pos_set[-2]-self.target_pos[0])**2+(pos_set[-1]-self.target_pos[1])**2)
if sparse_reward:
if distance2goal < self.L:
reward = 20
else:
reward = -1
else: # dense reward
            ''' version 1: inverse '''
reward_0=100.0
reward = reward_0 / (np.sqrt((pos_set[-2]-self.target_pos[0])**2+(pos_set[-1]-self.target_pos[1])**2)+1)
''' version 2: negative '''
# reward = -np.sqrt((pos_set[-2]-self.target_pos[0])**2+(pos_set[-1]-self.target_pos[1])**2)
if screen_shot:
return screenshot, reward, 0, distance2goal
else:
return np.array(np.concatenate((pos_set,self.target_pos)))/self.screen_size, reward, 0, distance2goal
if __name__ == "__main__":
num_episodes=500
num_steps=20
action_range=20.0
NUM_JOINTS=4
LINK_LENGTH=[200, 140, 80, 50]
INI_JOING_ANGLES=[0.1, 0.1, 0.1, 0.1]
SPARSE_REWARD=False
SCREEN_SHOT=False
reacher=Reacher(render=True) # 2-joint reacher
# reacher=Reacher(screen_size=1000, num_joints=NUM_JOINTS, link_lengths = LINK_LENGTH, \
# ini_joint_angles=INI_JOING_ANGLES, target_pos = [669,430], render=True, change_goal=False)
epi=0
while epi<num_episodes:
print(epi)
epi+=1
step=0
reacher.reset(SCREEN_SHOT)
while step<num_steps:
step+=1
action=np.random.uniform(-action_range,action_range,size=NUM_JOINTS)
state, re, _, _ =reacher.step(action, SPARSE_REWARD, SCREEN_SHOT)
|
###
# Pickpocket list: area > person > item
# Add XP, Gold, Rep
###
import struct
import os
from manual.area_names import gen_area_names
from template_index import index
from handle_page import handle
from root_index import root_index
# wrapper function to convert byte string to regular string
def mystr(a_str):
return str(a_str, 'UTF-8').strip('\x00').replace('"', '\\"')
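# Worked examples (illustrative, not part of the original script): mystr() turns a
# fixed-width, NUL-padded byte field into a clean string and escapes double quotes.
def _mystr_example():
    assert mystr(b'SW1H01\x00\x00') == 'SW1H01'
    assert mystr(b'Bag "o" Holding\x00') == 'Bag \\"o\\" Holding'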
# Given an index, return a strings
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/tlk_v1.htm
def getname(idx, game):
if idx == -1:
return ""
assert idx >= 0, f"Invalid Name/String index: {idx}"
with open(f'{game}_files\\dialog.tlk', 'rb') as file:
text = file.read()
max_str = struct.unpack('i', text[0xa:0xa+4])[0]
if idx >= max_str:
return ""
abs_str_off = struct.unpack('i', text[0xe:0xe+4])[0]
data_off = 0x1a * idx + 0x12
str_off = struct.unpack('i', text[data_off+0x12:data_off+0x12+4])[0]
str_len = struct.unpack('i', text[data_off+0x16:data_off+0x16+4])[0]
return mystr(text[abs_str_off+str_off:abs_str_off+str_off+str_len])
# Return an array with difficulties for pickpocketing various equipment slots
def load_pickpocketting(game):
# https://baldursgate.fandom.com/wiki/Thief#Pick_Pockets
key_order = ['helmet', 'armour', 'shield', 'gauntlets', 'ring_left', 'ring_right', 'amulet', 'belt', 'boots',
'weapon1', 'weapon2', 'weapon3', 'weapon4', 'ammo1', 'ammo2', 'ammo3', 'ammo4', 'cloak', 'misc1', 'misc2', 'misc3',
'inv1', 'inv2', 'inv3', 'inv4', 'inv5', 'inv6', 'inv7', 'inv8', 'inv9', 'inv10', 'inv11', 'inv12', 'inv13', 'inv14', 'inv15', 'inv16']
vals = {}
with open(f'{game}_files\\sltsteal.2da', 'r') as file:
# skip headers
for i in range(3):
file.readline()
for line in file:
l_list = line.lower().strip().split()
vals[l_list[0]] = int(l_list[1])
return [vals[x] for x in key_order]
# Given a character name, return useful information
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/cre_v1.htm
def view_char(cre_file, item_list, game, dlg_store):
with open(cre_file, 'rb') as file:
try:
text = file.read()
finally:
file.close()
    # Check that this is a CRE file
assert (mystr(text[0x0:0x0+4]) == 'CRE '), f"Invalid File Type: '{mystr(text[0x0:0x0+4])}'"
version = mystr(text[0x4:0x4+4]).lower()
ret = {}
    # Check version since each one stores its fields differently
if version == 'v1.0':
name = 0x8
race = 0x272
status = 0x20
xp = 0x14
gold = 0x1c
item_count_off = 0x2c0
item_offset_off = 0x2bc
pick_off = 0x6a
equip_off = 0x2b8
enemy = 0x270
dlg = 0x2cc
else:
assert False, f"Invalid File Version: '{version}'"
ret['items'] = []
# get list of items, check item slots, assign difficulty
item_count = struct.unpack('i', text[item_count_off:item_count_off+4])[0]
f_race = struct.unpack('B', text[race:race+1])[0]
f_status = struct.unpack('I', text[status:status + 4])[0]
ret['name'] = getname(struct.unpack('i', text[name:name + 4])[0], game)
ret['xp'] = struct.unpack('i', text[xp:xp + 4])[0]
ret['gold'] = struct.unpack('i', text[gold:gold + 4])[0]
dlg_file = mystr(text[dlg:dlg + 8]).lower()
if dlg_file in dlg_store:
ret['stores'] = dlg_store[dlg_file]
if item_count and f_race != 146 and not bool(f_status & 0b111111000000): # We can't pickpocket dragons or dead things
if struct.unpack('B', text[enemy:enemy+1])[0] == 0xff: # Enemies are hostile and cannot be pickpocketed, to my knowledge
return ret
item_offset = struct.unpack('i', text[item_offset_off:item_offset_off+4])[0]
items = []
for idx in range(item_count):
t_off = idx * 0x14 + item_offset
items.append((mystr(text[t_off:t_off + 8]).lower(), bool(struct.unpack('i', text[t_off + 0x10:t_off + 0x10 + 4])[0] & 0b1010), struct.unpack('h', text[t_off + 0xa:t_off + 0xa + 2])[0]))
pickpocket = struct.unpack('b', text[pick_off:pick_off + 1])[0]
equip_offset = struct.unpack('i', text[equip_off:equip_off + 4])[0]
pick_difficulty = load_pickpocketting(game)
equipped = struct.unpack('h', text[38 * 2 + equip_offset:38 * 2 + equip_offset + 2])[0]
if 0 <= equipped < 4:
pick_difficulty[9 + equipped] = 0
for idx, slot in enumerate(pick_difficulty):
if slot:
s_off = idx * 2 + equip_offset
item_idx = struct.unpack('h', text[s_off:s_off + 2])[0]
if item_count > item_idx >= 0:
item_itm = f"{items[item_idx][0]}"
if item_itm in item_list and item_list[item_itm]['drop'] and not items[item_idx][1]:
ret['items'].append({'type': item_list[item_itm]['type'], 'name': item_list[item_itm]['name'], 'price': item_list[item_itm]['price'], 'skill': pickpocket + pick_difficulty[idx]})
if items[item_idx][2] > 1 and item_list[item_itm]['type'] in ['Books & misc', 'Arrows', 'Potion', 'Scroll', 'Bullets', 'Darts', 'Bolts', 'Gold pieces', 'Gem', 'Wand', 'Containers/eye/broken armor', 'Books/Broken shields/bracelets', 'Familiars/Broken swords/earrings', 'Fur/pelt']:
ret['items'][-1]['quantity'] = items[item_idx][2]
return ret
# Given an item name, return useful information
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/itm_v1.htm
# TODO: Wand charge max (from first ability)
def view_item(itm_file, game):
with open(itm_file, 'rb') as file:
try:
text = file.read()
finally:
file.close()
    # Check that this is an ITM file
assert (mystr(text[0x0:0x0+4]) == 'ITM '), f"Invalid File Type: '{mystr(text[0x0:0x0+4])}'"
version = mystr(text[0x4:0x4+4]).lower()
ret = {}
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/itm_v1.htm#Header_ItemType
item_type = ['Books & misc', 'Amulet', 'Armor', 'Belt & Girdle', 'Boots', 'Arrows', 'Bracers & gauntlets', 'Headgear', 'Key', 'Potion', 'Ring', 'Scroll', 'Shield', 'Food', 'Bullets', 'Bow', 'Dagger',
'Mace & Club', 'Sling', 'Small sword', 'Large sword', 'Hammer', 'Morning star', 'Flail', 'Darts', 'Axe', 'Quarterstaff', 'Crossbow', 'Hand-to-hand weapon', 'Spear', 'Halberd', 'Bolts',
'Cloaks & Robes', 'Gold pieces', 'Gem', 'Wand', 'Containers/eye/broken armor', 'Books/Broken shields/bracelets', 'Familiars/Broken swords/earrings', 'Tattoos', 'Lenses', 'Bucklers/teeth',
'Candles', 'Unknown', 'Clubs (IWD)', 'Unknown', 'Unknown', 'Large Shields (IWD)', 'Unknown', 'Medium Shields (IWD)', 'Notes', 'Unknown', 'Unknown', 'Small Shields (IWD)', 'Unknown',
'Telescopes (IWD)', 'Drinks (IWD)', 'Great Swords (IWD)', 'Container', 'Fur/pelt', 'Leather Armor', 'Studded Leather Armor', 'Chain Mail', 'Splint Mail', 'Half Plate', 'Full Plate',
'Hide Armor', 'Robe', 'Unknown', 'Bastard Sword', 'Scarf', 'Food (IWD2)', 'Hat', 'Gauntlet', 'Eyeballs', 'Earrings', 'Teeth', 'Bracelets']
    # Check version since each one stores its fields differently
if version == 'v1 ':
name = 0xc
price = 0x34
flags = 0x18
itm_type = 0x1c
max_count = 0
else:
assert False, f"Invalid File Version: '{version}'"
ret['name'] = getname(struct.unpack('i', text[name:name + 4])[0], game)
ret['price'] = struct.unpack('i', text[price:price + 4])[0]
ret['drop'] = bool(struct.unpack('i', text[flags:flags + 4])[0] & 4)
ret['type'] = item_type[struct.unpack('h', text[itm_type:itm_type + 2])[0]]
return ret
# Given an area name return a list of actors
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/are_v1.htm
def view_area(are_file, cre_dict):
    with open(are_file, 'rb') as file:
        text = file.read()
    # Check that this is an ARE file
assert (mystr(text[0x0:0x0+4]) == 'AREA'), f"Invalid File Type: '{mystr(text[0x0:0x0+4])}'"
version = mystr(text[0x4:0x4+4]).lower()
ret = []
    # Check the version since each one lays out its fields differently
if version == 'v1.0':
actors_off = 0x54
actors_count = 0x58
else:
assert False, f"Invalid File Version: '{version}'"
t_actors = set() # Only include 1 instance of a character from each zone
for idx in range(struct.unpack('h', text[actors_count:actors_count + 2])[0]):
actor = struct.unpack('i', text[actors_off:actors_off + 4])[0] + idx * 0x110
actor_file = mystr(text[actor + 0x80:actor + 0x80 + 8]).lower()
if actor_file in cre_dict:
t_actors.add(actor_file)
for t_act in t_actors:
ret.append(cre_dict[t_act])
return ret, t_actors
# Given a store, return a list of items that can be stolen and how difficult they are
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/sto_v1.htm
def view_store(sto_file):
ret = {'items': [], 'difficult': 0}
    with open(sto_file, 'rb') as file:
        text = file.read()
    # Check that this is a STO file
assert (mystr(text[0x0:0x0+4]) == 'STOR'), f"Invalid File Type: '{mystr(text[0x0:0x0+4])}'"
version = mystr(text[0x4:0x4+4]).lower()
    # Check the version since each one lays out its fields differently
if version == 'v1.0':
steal = 0x10
diff = 0x20
item_offset = 0x34
item_count = 0x38
        # offsets within each item record
amount = 0x14
infinite = 0x18
stealable = 0x10
else:
assert False, f"Invalid File Version: '{version}'"
if struct.unpack('i', text[steal:steal + 4])[0] & 0b1000: # if you can steal
ret['skill'] = struct.unpack('H', text[diff:diff + 2])[0]
item_offset = struct.unpack('i', text[item_offset:item_offset + 4])[0]
for idx in range(struct.unpack('i', text[item_count:item_count + 4])[0]):
t_off = idx * 0x1c + item_offset
if not bool(struct.unpack('i', text[t_off + stealable:t_off + stealable + 4])[0] & 0b1010):
ret['items'].append((mystr(text[t_off:t_off + 8]).lower(), "Inf" if struct.unpack('i', text[t_off + infinite:t_off + infinite + 4])[0] else struct.unpack('i', text[t_off + amount:t_off + amount + 4])[0]))
return ret
# Given a dialog file, determine if it spawns a store you can steal from
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/dlg_v1.htm
def view_dlg(dlg_file, stores):
ret = []
    with open(dlg_file, 'rb') as file:
        text = file.read()
    # Check that this is a DLG file
assert (mystr(text[0x0:0x0+4]) == 'DLG '), f"Invalid File Type: '{mystr(text[0x0:0x0+4])}'"
version = mystr(text[0x4:0x4+4]).lower()
    # Check the version since each one lays out its fields differently
if version == 'v1.0':
action_offset = 0x28
action_count = 0x2c
else:
assert False, f"Invalid File Version: '{version}'"
item_offset = struct.unpack('i', text[action_offset:action_offset + 4])[0]
for idx in range(struct.unpack('i', text[action_count:action_count + 4])[0]):
t_off = idx * 0x8 + item_offset
str_off = struct.unpack('i', text[t_off:t_off + 4])[0]
the_str = mystr(text[str_off:str_off + struct.unpack('i', text[t_off + 0x4:t_off + 0x4 + 4])[0]])
if the_str.startswith('StartStore'):
t_str = the_str.split('\\"')[1].lower()
if t_str in stores and t_str not in ret:
ret.append(t_str)
return ret
# Given a decompiled script file, extract all spawned creatures
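# Illustrative sketch of the kind of line this matches (the creature resref is
# hypothetical); the parser below simply takes the first double-quoted token:
#   CreateCreature("ogre01",[1234.567],0)  ->  "ogre01"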
def view_bcs(baf_file, cre_dict):
with open(baf_file, 'r', errors='ignore') as f:
t_actors = set() # Only include 1 instance of a character from each zone
ret = []
for line in f:
if 'CreateCreature' in line:
cre = line.split('"')[1].lower()
if cre in cre_dict:
t_actors.add(cre)
for t_act in t_actors:
ret.append(cre_dict[t_act])
return ret, t_actors
# NOTES: sell value is 1/2 of an item's value. Items with charges are valued at value / max_count * current_count
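# Minimal sketch of the pricing note above (illustrative only; this helper is not
# used elsewhere in the script): charged items are pro-rated by remaining charges,
# then selling nets half of that value.
def _estimated_sell_value(base_price, current_count=None, max_count=None):
    value = base_price
    if max_count:  # item with charges: scale by remaining charges
        value = value * current_count / max_count
    return value / 2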
def walk_game(game, game_str):
area_lookup = gen_area_names(game)
# Areas -> actors -> items, so generate in reverse
# Create a dictionary with all valid items that can drop
items = {}
# Keep track of what can be stolen from stores
stores = {}
dlg_store = {}
# Create a list of all valid creatures that have valid items to pickpocket
cre_dict = {}
# Go through all areas and check all creatures for ones that can be pickpocketed
are_dict = {}
for r, d, f in os.walk(f"{game}_files"):
itm_files = []
cre_files = []
are_files = []
baf_files = []
sto_files = []
dlg_files = []
for f_temp in f:
f_temp = f_temp.lower()
if f_temp.endswith('.itm'):
itm_files.append(f_temp)
elif f_temp.endswith('.cre'):
cre_files.append(f_temp)
elif f_temp.endswith('.are'):
are_files.append(f_temp)
elif f_temp.endswith('.baf'):
baf_files.append(f_temp)
elif f_temp.endswith('.sto'):
sto_files.append(f_temp)
elif f_temp.endswith('.dlg'):
dlg_files.append(f_temp)
else:
print(f"Unexpected file: '{f_temp}'")
len_f = len(sto_files)
        tick = max(1, len_f // 40)  # avoid modulo-by-zero when there are fewer than 40 files
print(f"Reading {len_f} STO files.")
for c, file in enumerate(sto_files):
if not c % tick:
print(f"{game} STO: {c}/{len_f}")
item = view_store(os.path.join(r, file))
if item['items']:
stores[file[:-4].lower()] = item
len_f = len(dlg_files)
        tick = max(1, len_f // 40)
print(f"Reading {len_f} DLG files.")
for c, file in enumerate(dlg_files):
if not c % tick:
print(f"{game} DLG: {c}/{len_f}")
item = view_dlg(os.path.join(r, file), stores)
if item:
dlg_store[file[:-4].lower()] = item
len_f = len(itm_files)
        tick = max(1, len_f // 40)
print(f"Reading {len_f} ITM files.")
for c, file in enumerate(itm_files):
if not c % tick:
print(f"{game} ITM: {c}/{len_f}")
item = view_item(os.path.join(r, file), game)
# note items with no name
if not item['name']:
item['name'] = f"{file}{'' if file.startswith('rnd') else ' (TLK missing name)'}"
# remove EET items that appear to be script/difficulty related, ignore items with no proper name
if not (item['name'].startswith('dw#') and item['price'] == 0):
items[file[:-4].lower()] = item
len_f = len(cre_files)
        tick = max(1, len_f // 40)
print(f"Reading {len_f} CRE files.")
for c, file in enumerate(cre_files):
if not c % tick:
print(f"{game} CRE: {c}/{len_f}")
person = view_char(os.path.join(r, file), items, game, dlg_store)
if person['items'] or 'stores' in person:
if not person['name']:
person['name'] = file[:-4]
cre_dict[file.lower()[:-4]] = person
npc_list = set(cre_dict.keys())
len_f = len(are_files)
        tick = max(1, len_f // 40)
print(f"Reading {len_f} ARE files.")
for c, file in enumerate(are_files):
if not c % tick:
print(f"{game} ARE: {c}/{len_f}")
area, npcs = view_area(os.path.join(r, file), cre_dict)
if area:
npc_list -= npcs
area_key = file[:-4].lower()
are_dict[f"{area_key} - {area_lookup[area_key]}" if area_key in area_lookup else area_key] = area
len_f = len(baf_files)
        tick = max(1, len_f // 40)
print(f"Reading {len_f} BAF files.")
for c, file in enumerate(baf_files):
if not c % tick:
                print(f"{game} BAF: {c}/{len_f}")
area, npcs = view_bcs(os.path.join(r, file), cre_dict)
if area:
npc_list -= npcs
area_key = file[:-4].lower()
are_dict[f"{area_key} (Spawned) - {area_lookup[area_key]}" if area_key in area_lookup else f"{area_key} (Spawned)"] = area
if npc_list:
are_dict['unknown'] = []
for npc in npc_list:
cre_dict[npc]['name'] += f" ({npc})"
                are_dict['unknown'].append(cre_dict[npc])
# Store possible values that can appear in various columns to toggle those rows
areas = set()
item_types = {}
buf = ['data = [', '\t["Area", "NPC", "XP", "Gold Carried", "Pickpocket Skill", "Item Price (base)", "Item Type", "Item"],']
for are in sorted(are_dict):
for cre in sorted(are_dict[are], key=lambda i: i['name']):
for itm in sorted(cre['items'], key=lambda i: i['price'], reverse=True):
areas.add(are)
if itm["type"] not in item_types:
item_types[itm["type"]] = set()
item_types[itm["type"]].add(itm["name"])
buf.append(f'\t["{are}", "{cre["name"]}", {cre["xp"]}, {cre["gold"]}, {itm["skill"]}, {itm["price"]}, "{itm["type"]}", "{itm["name"] + " (" + str(itm["quantity"]) + ")" if "quantity" in itm else itm["name"]}", "{itm["type"]}_{itm["name"]}"],')
if 'stores' in cre:
for sto in sorted(cre['stores'], reverse=True):
areas.add(are)
for itm_id, itm_count in sorted(stores[sto]['items'], key=lambda i: i[0]):
# ignore items that don't have a proper identified name
if itm_id not in items:
continue
itm = items[itm_id]
if itm["type"] not in item_types:
item_types[itm["type"]] = set()
item_types[itm["type"]].add(itm["name"])
buf.append(f'\t["{are}", "{cre["name"]} (Store)", {cre["xp"]}, {cre["gold"]}, {stores[sto]["skill"]}, {itm["price"]}, "{itm["type"]}", "{itm["name"]} ({itm_count})", "{itm["type"]}_{itm["name"]}"],')
buf.append(']\n')
with open(f'docs/{game}_table_data.py', 'w', encoding="utf-8") as f:
f.write('\n'.join(buf))
buf = [f'gamestr = "{game_str}"', 'headers = ["Area", "NPC", "XP", "Gold Carried", "Pickpocket Skill", "Item Price (base)", "Item Type", "Item"]\n', 'areas = [']
for a in sorted(areas):
buf.append(f'\t"{a}",')
buf.append(']\n')
buf.append('types = {')
for a in sorted(item_types):
buf.append(f'\t"{a}": [')
for b in sorted(item_types[a]):
buf.append(f'\t\t"{b}",')
buf.append('\t],')
buf.append('}\n')
with open(f'docs/{game}_config_data.py', 'w', encoding="utf-8") as f:
f.write('\n'.join(buf))
with open(f'docs/{game}.html', 'w', encoding="utf-8") as f:
f.write(index.format(game, game_str))
with open(f'docs/{game}_handle_page.py', 'w', encoding="utf-8") as f:
f.write(handle.format(game))
def main():
vals = [
('iwdee', 'Icewind Dale EE 2.6.6.0'),
('bgee', 'Baldur\'s Gate EE 2.6.6.0'),
('bg2ee', 'Baldur\'s Gate 2 EE 2.6.6.0'),
('custom_iwdee', 'Icewind Dale EE 2.6.5.0 + BetterHOF + CDTWEAKS'),
('custom_bgeet', 'Baldur\'s Gate EET 2.6.5.0 + SCS'),
]
for game, game_str in vals:
walk_game(game, game_str)
    with open('docs/index.html', 'w', encoding="utf-8") as f:
f.write(root_index.format('</p><p>'.join([f'<a href="{x[0]}.html" target="_blank">{x[1]}</a>' for x in vals])))
if __name__ == '__main__':
main()
|
# Import all the models, so that Base has them before being
# imported by Alembic
from .chat import Chat
from .db import db
from .user import User
__all__ = ("db", "Chat", "User")
|
"""Test adapter-specific config options."""
from pprint import pprint
from tests.integration.base import DBTIntegrationTest, use_profile
import textwrap
import yaml
class TestBigqueryAdapterSpecific(DBTIntegrationTest):
@property
def schema(self):
return "bigquery_test"
@property
def models(self):
return "adapter-specific-models"
@property
def profile_config(self):
return self.bigquery_profile()
@property
def project_config(self):
return yaml.safe_load(textwrap.dedent('''\
config-version: 2
models:
test:
materialized: table
expiring_table:
hours_to_expiration: 4
'''))
@use_profile('bigquery')
def test_bigquery_hours_to_expiration(self):
_, stdout = self.run_dbt_and_capture(['--debug', 'run'])
self.assertIn(
'expiration_timestamp=TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL '
'4 hour)', stdout)
|
__authors__ = ['Chick3nputer', 'Supersam654']
from itertools import islice, product
import string
import hashlib
import multiprocessing
from multiprocessing import Process
from random import shuffle
from sys import argv
chars = "0123456789abcdef"
def generate_strings(size):
alphabet = list(chars * size)
while True:
shuffle(alphabet)
for i in range(0, len(alphabet), size):
yield ''.join(alphabet[i: i + size])
def tsum(hexhash):
return sum(int(hexhash[i: i + 2], 16) for i in range(0, len(hexhash), 2))
def edit_distance(h1, h2):
xor = int(h1, 16) ^ int(h2, 16)
return bin(xor)[2:].count('1')
def work():
    # Start the thresholds away from the extremes (0 and 128) to avoid a lot of startup noise.
max_ones = 109
min_ones = 19
rand_length = 32
i = 0
for combo in generate_strings(rand_length):
i += 1
if i % 100000000 == 0:
            print("Processed %d hashes." % i)
clear = combo
        hashhex = hashlib.md5(clear.encode()).hexdigest()
ones_count = bin(int(hashhex, 16))[2:].count('1')
if ones_count > max_ones:
plain = hashhex + ':' + clear
max_ones = ones_count
            print("New BITMAX Hash Found %s = %s" % (plain, max_ones))
elif ones_count < min_ones:
plain = hashhex + ':' + clear
min_ones = ones_count
            print("New BITMIN Hash Found %s = %s" % (plain, min_ones))
if hashhex.startswith('ffffffffffffff'):
            print("New MAX Hash Found %s:%s" % (hashhex, clear))
elif hashhex.startswith('00000000000000'):
            print("New MIN Hash Found %s:%s" % (hashhex, clear))
tsumhex = tsum(hashhex)
if tsumhex < 190:
            print("New TMIN Hash Found %s:%s" % (hashhex, clear))
elif tsumhex > 3909:
            print("New TMAX Hash Found %s:%s" % (hashhex, clear))
base_distance = edit_distance(hashhex, '0123456789abcdeffedcba9876543210')
if base_distance < 20:
            print("New BASE Hash Found %s:%s" % (hashhex, clear))
fp_distance = edit_distance(clear, hashhex)
if fp_distance < 26:
            print("New FP Hash Found %s:%s" % (hashhex, clear))
if __name__ == '__main__':
count = multiprocessing.cpu_count()
for i in range(0, count):
p = Process(target=work)
p.start()
        print("Starting worker %s" % (i + 1))
|
# Copyright 2019, 2020, 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for Lale operators including individual operators, pipelines, and operator choice.
This module declares several functions for constructing individual
operators, pipelines, and operator choices.
- Functions `make_pipeline`_ and `Pipeline`_ compose linear sequential
pipelines, where each step has an edge to the next step. Instead of
these functions you can also use the `>>` combinator.
- Functions `make_union_no_concat`_ and `make_union`_ compose
pipelines that operate over the same data without edges between
their steps. Instead of these functions you can also use the `&`
combinator.
- Function `make_choice` creates an operator choice. Instead of this
function you can also use the `|` combinator.
- Function `make_pipeline_graph`_ creates a pipeline from
steps and edges, thus supporting any arbitrary acyclic directed
graph topology.
- Function `make_operator`_ creates an individual Lale operator from a
schema and an implementation class or object. This is called for each
of the operators in module lale.lib when it is being imported.
- Functions `get_available_operators`_, `get_available_estimators`_,
and `get_available_transformers`_ return lists of individual
operators previously registered by `make_operator`.
.. _make_operator: lale.operators.html#lale.operators.make_operator
.. _get_available_operators: lale.operators.html#lale.operators.get_available_operators
.. _get_available_estimators: lale.operators.html#lale.operators.get_available_estimators
.. _get_available_transformers: lale.operators.html#lale.operators.get_available_transformers
.. _make_pipeline_graph: lale.operators.html#lale.operators.make_pipeline_graph
.. _make_pipeline: lale.operators.html#lale.operators.make_pipeline
.. _Pipeline: lale.operators.html#lale.operators.Pipeline
.. _make_union_no_concat: lale.operators.html#lale.operators.make_union_no_concat
.. _make_union: lale.operators.html#lale.operators.make_union
.. _make_choice: lale.operators.html#lale.operators.make_choice
The root of the hierarchy is the abstract class Operator_, all other
Lale operators inherit from this class, either directly or indirectly.
- The abstract classes Operator_, PlannedOperator_,
TrainableOperator_, and TrainedOperator_ correspond to lifecycle
states.
- The concrete classes IndividualOp_, PlannedIndividualOp_,
TrainableIndividualOp_, and TrainedIndividualOp_ inherit from the
corresponding abstract operator classes and encapsulate
implementations of individual operators from machine-learning
libraries such as scikit-learn.
- The concrete classes BasePipeline_, PlannedPipeline_,
TrainablePipeline_, and TrainedPipeline_ inherit from the
corresponding abstract operator classes and represent directed
acyclic graphs of operators. The steps of a pipeline can be any
operators, including individual operators, other pipelines, or
operator choices, whose lifecycle state is at least that of the
pipeline.
- The concrete class OperatorChoice_ represents a planned operator
that offers a choice for automated algorithm selection. The steps of
a choice can be any planned operators, including individual
operators, pipelines, or other operator choices.
The following picture illustrates the core operator class hierarchy.
.. image:: ../../docs/img/operator_classes.png
:alt: operators class hierarchy
.. _BasePipeline: lale.operators.html#lale.operators.BasePipeline
.. _IndividualOp: lale.operators.html#lale.operators.IndividualOp
.. _Operator: lale.operators.html#lale.operators.Operator
.. _OperatorChoice: lale.operators.html#lale.operators.OperatorChoice
.. _PlannedIndividualOp: lale.operators.html#lale.operators.PlannedIndividualOp
.. _PlannedOperator: lale.operators.html#lale.operators.PlannedOperator
.. _PlannedPipeline: lale.operators.html#lale.operators.PlannedPipeline
.. _TrainableIndividualOp: lale.operators.html#lale.operators.TrainableIndividualOp
.. _TrainableOperator: lale.operators.html#lale.operators.TrainableOperator
.. _TrainablePipeline: lale.operators.html#lale.operators.TrainablePipeline
.. _TrainedIndividualOp: lale.operators.html#lale.operators.TrainedIndividualOp
.. _TrainedOperator: lale.operators.html#lale.operators.TrainedOperator
.. _TrainedPipeline: lale.operators.html#lale.operators.TrainedPipeline
scikit-learn compatibility:
---------------------------
Lale operators attempt to behave like reasonable scikit-learn operators when possible.
In particular, operators support:
- get_params to return the hyperparameter settings for an operator.
- set_params for updating them (in-place). This is only supported by TrainableIndividualOps and Pipelines.
  Note that while set_params is supported for compatibility,
  its use is not encouraged, since it mutates the operator in-place.
Instead, we recommend using with_params, a functional alternative that is supported by all
operators. It returns a new operator with updated parameters.
- sklearn.base.clone works for Lale operators, cloning them as expected.
Note that cloning a TrainedOperator will return a TrainableOperator, since
the cloned version does not have the result of training.
There are also some known differences (that we are not currently planning on changing):
- Lale operators do not inherit from any sklearn base class.
- The Operator class constructors do not explicitly declare their set of hyperparameters.
  However, they do implement get_params (just not using sklearn-style reflection).
There may also be other incompatibilities: our testing currently focuses on ensuring that clone works.
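For example, given some trainable Lale operator `op` (an illustrative sketch;
`C` stands in for whatever hyperparameter the operator actually has)::

    op2 = op.with_params(C=0.1)   # functional update, returns a new operator
    op.set_params(C=0.1)          # in-place update; supported but discouraged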
parameter path format:
^^^^^^^^^^^^^^^^^^^^^^
scikit-learn uses a simple addressing scheme to refer to nested hyperparameters: `name__param` refers to the
`param` hyperparameter nested under the `name` object.
Since lale supports richer structures, we conservatively extend this scheme as follows:
* `__` : separates nested components (as-in sklearn).
* `?` : is the discriminant (choice made) for a choice.
* `?` : is also a prefix for the nested parts of the chosen branch.
* `x@n` : In a pipeline, if multiple components have identical names,
everything but the first are suffixed with a number (starting with 1)
indicating which one we are talking about.
For example, given `(x >> y >> x)`, we would treat this much the same as
`(x >> y >> x@1)`.
* `$` : is used in the rare case that sklearn would expect the key of an object,
but we allow (and have) a non-object schema. In that case, $ is used as the key.
This should only happen at the top level, since nested occurrences should be removed.
* `#` : is a structure indicator, and the value should be one of 'list', 'tuple', or 'dict'.
* `n` : is used to represent the nth component in an array or tuple.
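For example (an illustrative sketch; the step names are hypothetical), in a pipeline
written as `(pca >> lr >> lr)`:
* `pca__n_components` : addresses hyperparameter `n_components` of the `pca` step.
* `lr@1__C` : addresses hyperparameter `C` of the second `lr` step.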
"""
import copy
import enum as enumeration
import importlib
import inspect
import itertools
import logging
import os
import shutil
import warnings
from abc import abstractmethod
from types import MappingProxyType
from typing import (
AbstractSet,
Any,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Set,
Text,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
import jsonschema
import pandas as pd
import sklearn.base
from sklearn.pipeline import if_delegate_has_method
import lale.datasets.data_schemas
import lale.helpers
import lale.json_operator
import lale.pretty_print
import lale.type_checking
from lale import schema2enums as enum_gen
from lale.helpers import (
are_hyperparameters_equal,
get_name_and_index,
is_numeric_structure,
make_degen_indexed_name,
make_indexed_name,
nest_HPparams,
partition_sklearn_choice_params,
partition_sklearn_params,
structure_type_name,
)
from lale.json_operator import JSON_TYPE
from lale.schemas import Schema
from lale.search.PGO import remove_defaults_dict
from lale.util.VisitorMeta import AbstractVisitorMeta
logger = logging.getLogger(__name__)
_LALE_SKL_PIPELINE = "lale.lib.sklearn.pipeline._PipelineImpl"
_combinators_docstrings = """
Methods
-------
step_1 >> step_2 -> PlannedPipeline
Pipe combinator, create two-step pipeline with edge from step_1 to step_2.
If step_1 is a pipeline, create edges from all of its sinks.
If step_2 is a pipeline, create edges to all of its sources.
Parameters
^^^^^^^^^^
step_1 : Operator
The origin of the edge(s).
step_2 : Operator
The destination of the edge(s).
Returns
^^^^^^^
BasePipeline
Pipeline with edge from step_1 to step_2.
step_1 & step_2 -> PlannedPipeline
And combinator, create two-step pipeline without an edge between step_1 and step_2.
Parameters
^^^^^^^^^^
step_1 : Operator
The first step.
step_2 : Operator
The second step.
Returns
^^^^^^^
BasePipeline
Pipeline without any additional edges beyond those already inside of step_1 or step_2.
step_1 | step_2 -> OperatorChoice
Or combinator, create operator choice between step_1 and step_2.
Parameters
^^^^^^^^^^
step_1 : Operator
The first step.
step_2 : Operator
The second step.
Returns
^^^^^^^
OperatorChoice
            Algorithmic choice between step_1 and step_2."""
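# Illustrative sketch of how these combinators read in user code (hedged: the
# operator names assume imports roughly like
# `from lale.lib.sklearn import PCA, LogisticRegression, KNeighborsClassifier`):
#
#   pipe   = PCA >> LogisticRegression                   # edge from PCA to LR
#   union  = PCA & LogisticRegression                    # same data, no edge between steps
#   choice = LogisticRegression | KNeighborsClassifier   # algorithm choice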
class Operator(metaclass=AbstractVisitorMeta):
"""Abstract base class for all Lale operators.
Pipelines and individual operators extend this."""
_name: str
def __and__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
return make_union_no_concat(self, other)
def __rand__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
return make_union_no_concat(other, self)
def __rshift__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
return make_pipeline(self, other)
def __rrshift__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
return make_pipeline(other, self)
def __or__(self, other: Union[Any, "Operator"]) -> "OperatorChoice":
return make_choice(self, other)
def __ror__(self, other: Union[Any, "Operator"]) -> "OperatorChoice":
return make_choice(other, self)
def name(self) -> str:
"""Get the name of this operator instance."""
return self._name
def _set_name(self, name: str):
"""Set the name of this operator instance."""
self._name = name
def class_name(self) -> str:
"""Fully qualified Python class name of this operator."""
cls = self.__class__
return cls.__module__ + "." + cls.__name__
@abstractmethod
def validate_schema(self, X, y=None):
"""Validate that X and y are valid with respect to the input schema of this operator.
Parameters
----------
X :
Features.
y :
Target class labels or None for unsupervised operators.
Raises
------
ValueError
If X or y are invalid as inputs."""
pass
@abstractmethod
def transform_schema(self, s_X) -> JSON_TYPE:
"""Return the output schema given the input schema.
Parameters
----------
s_X :
Input dataset or schema.
Returns
-------
JSON schema
Schema of the output data given the input data schema."""
pass
@abstractmethod
def input_schema_fit(self) -> JSON_TYPE:
"""Input schema for the fit method."""
pass
def to_json(self) -> JSON_TYPE:
"""Returns the JSON representation of the operator.
Returns
-------
JSON document
JSON representation that describes this operator and is valid with respect to lale.json_operator.SCHEMA.
"""
return lale.json_operator.to_json(self, call_depth=2)
@abstractmethod
def get_params(self, deep: bool = True) -> Dict[str, Any]:
"""For scikit-learn compatibility"""
pass
def visualize(self, ipython_display: bool = True):
"""Visualize the operator using graphviz (use in a notebook).
Parameters
----------
ipython_display : bool, default True
If True, proactively ask Jupyter to render the graph.
Otherwise, the graph will only be rendered when visualize()
was called in the last statement in a notebook cell.
Returns
-------
Digraph
Digraph object from the graphviz package.
"""
return lale.helpers.to_graphviz(self, ipython_display, call_depth=2)
def pretty_print(
self,
show_imports: bool = True,
combinators: bool = True,
customize_schema: bool = False,
astype: str = "lale",
ipython_display: Union[bool, str] = False,
):
"""Returns the Python source code representation of the operator.
Parameters
----------
show_imports : bool, default True
Whether to include import statements in the pretty-printed code.
combinators : bool, default True
If True, pretty-print with combinators (`>>`, `|`, `&`). Otherwise, pretty-print with functions (`make_pipeline`, `make_choice`, `make_union`) instead. Always False when astype is 'sklearn'.
customize_schema : bool, default False
If True, then individual operators whose schema differs from the lale.lib version of the operator will be printed with calls to `customize_schema` that reproduce this difference.
astype : union type, default 'lale'
- 'lale'
            Use `lale.operators.make_pipeline` and `lale.operators.make_union` when pretty-printing with functions.
- 'sklearn'
Set combinators to False and use `sklearn.pipeline.make_pipeline` and `sklearn.pipeline.make_union` for pretty-printed functions.
ipython_display : union type, default False
- False
Return the pretty-printed code as a plain old Python string.
- True:
Pretty-print in notebook cell output with syntax highlighting.
- 'input'
Create a new notebook cell with pretty-printed code as input.
Returns
-------
str or None
If called with ipython_display=False, return pretty-printed Python source code as a Python string.
"""
result = lale.pretty_print.to_string(
self, show_imports, combinators, customize_schema, astype, call_depth=2
)
if ipython_display is False:
return result
elif ipython_display == "input":
import IPython.core
ipython = IPython.core.getipython.get_ipython()
comment = "# generated by pretty_print(ipython_display='input') from previous cell\n"
ipython.set_next_input(comment + result, replace=False)
else:
assert ipython_display in [True, "output"]
import IPython.display
markdown = IPython.display.Markdown(f"```python\n{result}\n```")
return IPython.display.display(markdown)
@abstractmethod
def _has_same_impl(self, other: "Operator") -> bool:
"""Checks if the type of the operator implementations are compatible"""
pass
@abstractmethod
def is_supervised(self) -> bool:
"""Checks if this operator needs labeled data for learning.
Returns
-------
bool
True if the fit method requires a y argument.
"""
pass
@abstractmethod
    def is_classifier(self) -> bool:
        """Checks if this operator is a classifier.
Returns
-------
bool
True if the classifier tag is set.
"""
pass
def is_frozen_trainable(self) -> bool:
"""Return true if all hyperparameters are bound, in other words,
search spaces contain no free hyperparameters to be tuned.
"""
return False
def is_frozen_trained(self) -> bool:
"""Return true if all learnable coefficients are bound, in other
words, there are no free parameters to be learned by fit.
"""
return False
@property
def _final_individual_op(self) -> Optional["IndividualOp"]:
return None
@property
def _final_estimator(self) -> Any:
op: Optional[IndividualOp] = self._final_individual_op
model = None
if op is not None:
# if fit was called, we want to use trained result
            # even if the code uses the original operator
# since sklearn assumes that fit mutates the operator
if hasattr(op, "_trained"):
tr_op: Any = op._trained
assert isinstance(tr_op, TrainedIndividualOp)
op = tr_op
if hasattr(op, "_impl"):
impl = op._impl_instance()
if hasattr(impl, "_wrapped_model"):
model = impl._wrapped_model
else:
model = impl
return "passthrough" if model is None else model
@property
def classes_(self):
return self._final_estimator.classes_
@property
def n_classes_(self):
return self._final_estimator.n_classes_
@property
def _get_tags(self):
return self._final_estimator._get_tags
@property
def coef_(self):
return self._final_estimator.coef_
@property
def feature_importances_(self):
return self._final_estimator.feature_importances_
def get_param_ranges(self) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Returns two dictionaries, ranges and cat_idx, for hyperparameters.
The ranges dictionary has two kinds of entries. Entries for
numeric and Boolean hyperparameters are tuples of the form
(min, max, default). Entries for categorical hyperparameters
are lists of their values.
The cat_idx dictionary has (min, max, default) entries of indices
into the corresponding list of values.
Warning: ignores side constraints and unions."""
op: Optional[IndividualOp] = self._final_individual_op
if op is None:
raise ValueError("This pipeline does not end with an individual operator")
else:
return op.get_param_ranges()
def get_param_dist(self, size=10) -> Dict[str, List[Any]]:
"""Returns a dictionary for discretized hyperparameters.
Each entry is a list of values. For continuous hyperparameters,
it returns up to `size` uniformly distributed values.
Warning: ignores side constraints, unions, and distributions."""
op: Optional[IndividualOp] = self._final_individual_op
if op is None:
raise ValueError("This pipeline does not end with an individual operator")
else:
return op.get_param_dist(size=size)
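    # Illustrative sketch (hypothetical operator and values) of the shapes described
    # by the two methods above:
    #   ranges, cat_idx = op.get_param_ranges()
    #     ranges  -> {"C": (0.01, 100.0, 1.0), "solver": ["lbfgs", "saga"]}
    #     cat_idx -> {"solver": (0, 1, 0)}
    #   op.get_param_dist(size=5)
    #     -> {"C": [0.01, 0.1, 1.0, 10.0, 100.0], "solver": ["lbfgs", "saga"]}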
# should this be abstract? what do we do for grammars?
def get_defaults(self) -> Mapping[str, Any]:
return {}
def clone(self) -> "Operator":
"""Return a copy of this operator, with the same hyper-parameters but without training data
This behaves the same as calling sklearn.base.clone(self)
"""
from sklearn.base import clone
cp = clone(self)
return cp
def with_params(self, **impl_params) -> "Operator":
"""This implements a functional version of set_params
which returns a new operator instead of modifying the original
"""
return self._with_params(False, **impl_params)
@abstractmethod
def _with_params(self, try_mutate: bool, **impl_params) -> "Operator":
"""
This method updates the parameters of the operator.
If try_mutate is set, it will attempt to update the operator in place,
although this may not always be possible
"""
pass
def to_lale(self):
"""This is a deprecated method for backward compatibility and will be removed soon"""
warnings.warn(
"Operator.to_lale exists for backwards compatibility with make_sklearn_compat and will be removed soon",
DeprecationWarning,
)
return self
def __getattr__(self, name: str) -> Any:
predict_methods = [
"get_pipeline",
"summary",
"transform",
"predict",
"predict_proba",
"decision_function",
"score",
"score_samples",
"predict_log_proba",
]
if name in predict_methods:
if isinstance(self, TrainedIndividualOp) or (
isinstance(self, TrainableIndividualOp) and hasattr(self, "_trained")
):
raise AttributeError(
f"The underlying operator implementation class does not define {name}"
)
elif isinstance(self, TrainableIndividualOp) and not hasattr(
self, "_trained"
):
raise AttributeError(
f"{self.name()} is not trained. Note that in lale, the result of fit is a new trained operator that should be used with {name}."
)
elif isinstance(self, PlannedOperator) and not isinstance(
self, TrainableOperator
):
pass # as the plannedOperators are handled in a separate block next
else:
raise AttributeError(
f"Calling {name} on a {type(self)} is deprecated. It needs to be trained by calling fit. Note that in lale, the result of fit is a new TrainedOperator that should be used with {name}."
)
if name == "fit" or name in predict_methods:
def get_error_msg(op, i):
if isinstance(op, OperatorChoice):
error_msg = f"""[A.{i}] Please remove the operator choice `|` from `{op.name()}` and keep only one of those operators.\n"""
elif isinstance(op, PlannedIndividualOp) and not isinstance(
op, TrainableIndividualOp
):
error_msg = f"[A.{i}] Please use `{op.name()}()` instead of `{op.name()}.`\n"
else:
return ""
return error_msg
def add_error_msg_for_predict_methods(op, error_msg):
if name in [
"get_pipeline",
"summary",
"transform",
"predict",
"predict_proba",
"decision_function",
"score",
"score_samples",
"predict_log_proba",
]:
error_msg = (
error_msg
                    + """\nAfter applying the suggested fixes, the operator might need to be trained by calling fit."""
)
return error_msg
# This method is called only when `name` is not found on the object, so
# we don't need to account for the case when self is trainable or trained.
if isinstance(self, PlannedIndividualOp):
error_msg = f"""Please use `{self.name()}()` instead of `{self.name()}` to make it trainable.
Alternatively, you could use `auto_configure(X, y, Hyperopt, max_evals=5)` on the operator to use Hyperopt for
`max_evals` iterations for hyperparameter tuning. `Hyperopt` can be imported as `from lale.lib.lale import Hyperopt`."""
error_msg = add_error_msg_for_predict_methods(self, error_msg)
raise AttributeError(error_msg)
elif isinstance(self, PlannedPipeline) or isinstance(self, OperatorChoice):
error_msg = f"""The pipeline is not trainable, which means you can not call {name} on it.\n
Suggested fixes:\nFix [A]: You can make the following changes in the pipeline in order to make it trainable:\n"""
i = 1
if isinstance(self, PlannedPipeline):
for step in self.steps():
step_err = get_error_msg(step, i)
if step_err != "":
error_msg = error_msg + step_err
i += 1
elif isinstance(self, OperatorChoice):
error_msg = error_msg + get_error_msg(self, i)
error_msg = (
error_msg
+ """\nFix [B]: Alternatively, you could use `auto_configure(X, y, Hyperopt, max_evals=5)` on the pipeline
to use Hyperopt for `max_evals` iterations for hyperparameter tuning. `Hyperopt` can be imported as `from lale.lib.lale import Hyperopt`."""
)
error_msg = add_error_msg_for_predict_methods(self, error_msg)
raise AttributeError(error_msg)
raise AttributeError()
Operator.__doc__ = cast(str, Operator.__doc__) + "\n" + _combinators_docstrings
class PlannedOperator(Operator):
"""Abstract class for Lale operators in the planned lifecycle state."""
def auto_configure(
self, X, y=None, optimizer=None, cv=None, scoring=None, **kwargs
) -> "TrainedOperator":
"""
Perform combined algorithm selection and hyperparameter tuning on this planned operator.
Parameters
----------
X:
Features that conform to the X property of input_schema_fit.
y: optional
Labels that conform to the y property of input_schema_fit.
Default is None.
optimizer:
lale.lib.lale.Hyperopt or lale.lib.lale.GridSearchCV
default is None.
cv:
cross-validation option that is valid for the optimizer.
Default is None, which will use the optimizer's default value.
scoring:
scoring option that is valid for the optimizer.
Default is None, which will use the optimizer's default value.
kwargs:
Other keyword arguments to be passed to the optimizer.
Returns
-------
        TrainedOperator
Best operator discovered by the optimizer.
"""
if optimizer is None:
raise ValueError("Please provide a valid optimizer for auto_configure.")
if kwargs is None:
kwargs = {}
if cv is not None:
kwargs["cv"] = cv
if scoring is not None:
kwargs["scoring"] = scoring
optimizer_obj = optimizer(estimator=self, **kwargs)
trained = optimizer_obj.fit(X, y)
return trained.get_pipeline()
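    # Illustrative usage sketch (hedged; the variable names are hypothetical and the
    # Hyperopt import mirrors the hints given in error messages elsewhere in this module):
    #   from lale.lib.lale import Hyperopt
    #   trained = planned.auto_configure(train_X, train_y, optimizer=Hyperopt, max_evals=5)
    #   predictions = trained.predict(test_X)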
PlannedOperator.__doc__ = (
cast(str, PlannedOperator.__doc__) + "\n" + _combinators_docstrings
)
class TrainableOperator(PlannedOperator):
"""Abstract class for Lale operators in the trainable lifecycle state."""
@overload
def __and__(self, other: "TrainedOperator") -> "TrainablePipeline":
...
@overload
def __and__(self, other: "TrainableOperator") -> "TrainablePipeline":
...
@overload
def __and__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
...
def __and__(self, other): # type: ignore
return make_union_no_concat(self, other)
@overload
def __rshift__(self, other: "TrainedOperator") -> "TrainablePipeline":
...
@overload
def __rshift__(self, other: "TrainableOperator") -> "TrainablePipeline":
...
@overload
def __rshift__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
...
def __rshift__(self, other): # type: ignore
return make_pipeline(self, other)
@abstractmethod
def fit(self, X, y=None, **fit_params) -> "TrainedOperator":
"""Train the learnable coefficients of this operator, if any.
Return a trained version of this operator. If this operator
has free learnable coefficients, bind them to values that fit
the data according to the operator's algorithm. Do nothing if
the operator implementation lacks a `fit` method or if the
operator has been marked as `is_frozen_trained`.
Parameters
----------
X:
Features that conform to the X property of input_schema_fit.
y: optional
Labels that conform to the y property of input_schema_fit.
Default is None.
fit_params: Dictionary, optional
A dictionary of keyword parameters to be used during training.
Returns
-------
TrainedOperator
            A new copy of this operator that is the same except that its
learnable coefficients are bound to their trained values.
"""
pass
@abstractmethod
def freeze_trainable(self) -> "TrainableOperator":
"""Return a copy of the trainable parts of this operator that is the same except
that all hyperparameters are bound and none are free to be tuned.
If there is an operator choice, it is kept as is.
"""
pass
@abstractmethod
def is_transformer(self) -> bool:
"""Checks if the operator is a transformer"""
pass
TrainableOperator.__doc__ = (
cast(str, TrainableOperator.__doc__) + "\n" + _combinators_docstrings
)
class TrainedOperator(TrainableOperator):
"""Abstract class for Lale operators in the trained lifecycle state."""
@overload
def __and__(self, other: "TrainedOperator") -> "TrainedPipeline":
...
@overload
def __and__(self, other: "TrainableOperator") -> "TrainablePipeline":
...
@overload
def __and__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
...
def __and__(self, other): # type: ignore
return make_union_no_concat(self, other)
@overload
def __rshift__(self, other: "TrainedOperator") -> "TrainedPipeline":
...
@overload
def __rshift__(self, other: "TrainableOperator") -> "TrainablePipeline":
...
@overload
def __rshift__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
...
def __rshift__(self, other): # type: ignore
return make_pipeline(self, other)
@abstractmethod
def transform(self, X, y=None) -> Any:
"""Transform the data.
Parameters
----------
X :
Features; see input_transform schema of the operator.
Returns
-------
result :
Transformed features; see output_transform schema of the operator.
"""
pass
@abstractmethod
def _predict(self, X) -> Any:
pass
@abstractmethod
def predict(self, X, **predict_params) -> Any:
"""Make predictions.
Parameters
----------
X :
Features; see input_predict schema of the operator.
Returns
-------
result :
Predictions; see output_predict schema of the operator.
"""
pass
@abstractmethod
def predict_proba(self, X):
"""Probability estimates for all classes.
Parameters
----------
X :
Features; see input_predict_proba schema of the operator.
Returns
-------
result :
Probabilities; see output_predict_proba schema of the operator.
"""
pass
@abstractmethod
def decision_function(self, X):
"""Confidence scores for all classes.
Parameters
----------
X :
Features; see input_decision_function schema of the operator.
Returns
-------
result :
Confidences; see output_decision_function schema of the operator.
"""
pass
@abstractmethod
def score_samples(self, X):
"""Scores for each sample in X. The type of scores depends on the operator.
Parameters
----------
X :
Features.
Returns
-------
result :
scores per sample.
"""
pass
@abstractmethod
def score(self, X, y, **score_params):
"""Performance evaluation with a default metric.
Parameters
----------
X :
Features.
y:
Ground truth labels.
score_params:
Any additional parameters expected by the score function of
the underlying operator.
Returns
-------
score :
performance metric value
"""
pass
@abstractmethod
def predict_log_proba(self, X):
"""Predicted class log-probabilities for X.
Parameters
----------
X :
Features.
Returns
-------
result :
Class log probabilities.
"""
pass
@abstractmethod
def freeze_trained(self) -> "TrainedOperator":
"""Return a copy of this trainable operator that is the same except
that all learnable coefficients are bound and thus fit is a no-op.
"""
pass
TrainedOperator.__doc__ = (
cast(str, TrainedOperator.__doc__) + "\n" + _combinators_docstrings
)
_schema_derived_attributes = ["_enum_attributes", "_hyperparam_defaults"]
class _DictionaryObjectForEnum:
_d: Dict[str, enumeration.Enum]
def __init__(self, d: Dict[str, enumeration.Enum]):
self._d = d
def __contains__(self, key: str) -> bool:
return key in self._d
# This method in fact always return an enumeration
# however, the values of the enumeration are not known, which causes
# the type checker to complain about a common (and desired) idiom
# such as, e.g. LogisticRegression.enum.solver.saga
# so we weaken the type to Any for pragmatic reasons
def __getattr__(self, key: str) -> Any: # enumeration.Enum:
if key in self._d:
return self._d[key]
else:
raise AttributeError("No enumeration found for hyper-parameter: " + key)
# This method in fact always return an enumeration
# however, the values of the enumeration are not known, which causes
# the type checker to complain about a common (and desired) idiom
# such as, e.g. LogisticRegression.enum.solver.saga
# so we weaken the type to Any for pragmatic reasons
def __getitem__(self, key: str) -> Any: # enumeration.Enum:
if key in self._d:
return self._d[key]
else:
raise KeyError("No enumeration found for hyper-parameter: " + key)
class _WithoutGetParams(object):
"""This is a wrapper class whose job is to *NOT* have a get_params method,
causing sklearn clone to call deepcopy on it (and its contents).
This is currently used, for example, to wrap the impl class instance
returned by an individual operator's get_params (since the class itself may have
a get_params method defined, causing problems if this wrapper is not used).
"""
@classmethod
def unwrap(cls, obj):
while isinstance(obj, _WithoutGetParams):
obj = obj.klass
return obj
@classmethod
def wrap(cls, obj):
if isinstance(obj, _WithoutGetParams):
return obj
else:
return _WithoutGetParams(obj)
klass: type
def __init__(self, klass: type):
self.klass = klass
class IndividualOp(Operator):
"""
This is a concrete class that can instantiate a new individual
operator and provide access to its metadata.
The enum property can be used to access enumerations for hyper-parameters,
auto-generated from the operator's schema.
For example, `LinearRegression.enum.solver.saga`
As a short-hand, if the hyper-parameter name does not conflict with
any fields of this class, the auto-generated enums can also be accessed
directly.
For example, `LinearRegression.solver.saga`"""
_impl: Any
_impl_class_: Union[type, _WithoutGetParams]
_hyperparams: Optional[Dict[str, Any]]
_frozen_hyperparams: Optional[List[str]]
# this attribute may not be defined
_hyperparam_defaults: Mapping[str, Any]
def __init__(
self,
_lale_name: str,
_lale_impl,
_lale_schemas,
_lale_frozen_hyperparameters=None,
**hp,
) -> None:
"""Create a new IndividualOp.
Parameters
----------
name : String
Name of the operator.
impl :
An instance of operator implementation class. This is a class that
contains fit, predict/transform methods implementing an underlying
algorithm.
schemas : dict
This is a dictionary of json schemas for the operator.
"""
self._name = _lale_name
self._enum_attributes = None
if _lale_schemas:
self._schemas = _lale_schemas
else:
self._schemas = lale.type_checking.get_default_schema(_lale_impl)
# if we are given a class instance, we need to preserve it
# so that get_params can return the same exact one that we got
# this is important for scikit-learn's clone to work correctly
unwrapped = _WithoutGetParams.unwrap(_lale_impl)
self._impl = unwrapped
if inspect.isclass(unwrapped):
self._impl_class_ = _lale_impl
else:
self._impl_class_ = unwrapped.__class__
self._frozen_hyperparams = _lale_frozen_hyperparameters
self._hyperparams = hp
def _is_instantiated(self):
return not inspect.isclass(self._impl)
def _check_schemas(self):
from lale.settings import disable_hyperparams_schema_validation
if disable_hyperparams_schema_validation:
return
lale.type_checking.validate_is_schema(self._schemas)
from lale.pretty_print import json_to_string
assert (
self.has_tag("transformer") == self.is_transformer()
), f"{self.class_name()}: {json_to_string(self._schemas)}"
assert self.has_tag("estimator") == self.has_method(
"predict"
), f"{self.class_name()}: {json_to_string(self._schemas)}"
if self.has_tag("classifier") or self.has_tag("regressor"):
assert self.has_tag(
"estimator"
), f"{self.class_name()}: {json_to_string(self._schemas)}"
# Add enums from the hyperparameter schema to the object as fields
# so that their usage looks like LogisticRegression.penalty.l1
# enum_gen.addSchemaEnumsAsFields(self, self.hyperparam_schema())
_enum_attributes: Optional[_DictionaryObjectForEnum]
@classmethod
def _add_nested_params(cls, output: Dict[str, Any], k: str, v: Any):
nested_params = cls._get_nested_params(v)
if nested_params:
output.update(nest_HPparams(k, nested_params))
@classmethod
def _get_nested_params(cls, v: Any) -> Optional[Dict[str, Any]]:
# TODO: design question. This seems like the right thing,
# but sklearn does not currently do this, as is apparent with,
# e.g VotingClassifier
# if isinstance(v, list) or isinstance(v, tuple):
# output: Dict[str, Any] = {}
# for i, elem in enumerate(v):
# nested = cls._get_nested_params(elem)
# if nested:
# output.update(nest_HPparams(str(i)), nested)
# return output
# elif isinstance(v, dict):
# output: Dict[str, Any] = {}
# for sub_k, sub_v in v.items():
# nested = cls._get_nested_params(sub_v)
# if nested:
# output.update(nest_HPparams(sub_k), nested)
# return output
# else:
try:
return v.get_params(deep=True)
except AttributeError:
return None
def _get_params_all(self, deep: bool = False) -> Dict[str, Any]:
output: Dict[str, Any] = {}
hps = self.hyperparams_all()
if hps is not None:
output.update(hps)
defaults = self.get_defaults()
for k in defaults.keys():
if k not in output:
output[k] = defaults[k]
if deep:
deep_stuff: Dict[str, Any] = {}
for k, v in output.items():
self._add_nested_params(deep_stuff, k, v)
output.update(deep_stuff)
return output
def get_params(self, deep: bool = True) -> Dict[str, Any]:
"""Get parameters for this operator.
This method follows scikit-learn's convention that all operators
have a constructor which takes a list of keyword arguments.
This is not required for operator impls which do not desire
scikit-compatibility.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this operator state wrapper and
its impl object
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out: Dict[str, Any] = dict()
out["_lale_name"] = self._name
out["_lale_schemas"] = self._schemas
out["_lale_impl"] = _WithoutGetParams.wrap(self._wrapped_impl_class())
        # we need to wrap the class object, since the class object
        # has a get_params method (the instance method), which causes problems for
        # sklearn clone
if self._is_instantiated():
impl = self._impl_instance()
if hasattr(impl, "get_params"):
out.update(impl.get_params(deep=deep))
elif hasattr(impl, "_wrapped_model") and hasattr(
impl._wrapped_model, "get_params"
):
out.update(impl._wrapped_model.get_params(deep=deep))
else:
out.update(self._get_params_all(deep=deep))
else:
out.update(self._get_params_all(deep=deep))
if self.frozen_hyperparams() is not None:
out["_lale_frozen_hyperparameters"] = self.frozen_hyperparams()
return out
def _with_params(self, try_mutate: bool, **impl_params) -> "IndividualOp":
main_params, partitioned_sub_params = partition_sklearn_params(impl_params)
hyper = self.hyperparams()
# we set the sub params first
for sub_key, sub_params in partitioned_sub_params.items():
with_structured_params(try_mutate, sub_key, sub_params, hyper)
# we have now updated any nested operators
# (if this is a higher order operator)
# and can work on the main operator
all_params = {**hyper, **main_params}
filtered_impl_params = _fixup_hyperparams_dict(all_params)
        # These are used by lale. Since they are returned by get_params,
        # they may show up here (if the user calls get_params, changes
        # a value, and then calls set_params), so we remove them here.
filtered_impl_params.pop("_lale_name", None)
filtered_impl_params.pop("_lale_impl", None)
filtered_impl_params.pop("_lale_schemas", None)
filtered_impl_params.pop("_lale_frozen_hyperparameters", None)
return self._with_op_params(try_mutate, **filtered_impl_params)
def _with_op_params(
self, try_mutate: bool, **impl_params
) -> "TrainableIndividualOp":
# for an individual (and planned individual) operator,
# we don't mutate the operator itself even if try_mutate is True
res = self._configure(**impl_params)
return res
# we have different views on the hyperparameters
    def hyperparams_all(self) -> Optional[Dict[str, Any]]:
        """These are the hyperparameters that are currently set.
        Some of them may not have been set explicitly
        (e.g. if this is a clone of an operator,
        some of these may be defaults).
        To get the hyperparameters that were actually set,
        use :meth:`hyperparams`.
"""
return getattr(self, "_hyperparams", None)
def frozen_hyperparams(self) -> Optional[List[str]]:
return getattr(self, "_frozen_hyperparams", None)
def _hyperparams_helper(self) -> Optional[Dict[str, Any]]:
actuals = self.hyperparams_all()
if actuals is None:
return None
frozen_params = self.frozen_hyperparams()
if frozen_params is None:
return None
params = {k: actuals[k] for k in frozen_params}
return params
def hyperparams(self) -> Dict[str, Any]:
params = self._hyperparams_helper()
if params is None:
return {}
else:
return params
def reduced_hyperparams(self):
actuals = self._hyperparams_helper()
if actuals is None:
return None
defaults = self.get_defaults()
actuals_minus_defaults = {
k: actuals[k]
for k in actuals
if k not in defaults
or not are_hyperparameters_equal(actuals[k], defaults[k])
}
if not hasattr(self, "_hyperparam_positionals"):
sig = inspect.signature(self._impl_class().__init__)
positionals = {
name: defaults[name]
for name, param in sig.parameters.items()
if name != "self"
and param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
and param.default == inspect.Parameter.empty
}
self._hyperparam_positionals = positionals
result = {**self._hyperparam_positionals, **actuals_minus_defaults}
return result
def _configure(self, *args, **kwargs) -> "TrainableIndividualOp":
class_ = self._impl_class()
hyperparams = {}
for arg in args:
k, v = self._enum_to_strings(arg)
hyperparams[k] = v
for k, v in _fixup_hyperparams_dict(kwargs).items():
if k in hyperparams:
raise ValueError("Duplicate argument {}.".format(k))
v = lale.helpers.val_wrapper.unwrap(v)
if isinstance(v, enumeration.Enum):
k2, v2 = self._enum_to_strings(v)
if k != k2:
raise ValueError(
"Invalid keyword {} for argument {}.".format(k2, v2)
)
else:
v2 = v
hyperparams[k] = v2
frozen_hyperparams = list(hyperparams.keys())
# using params_all instead of hyperparams to ensure the construction is consistent with schema
trainable_to_get_params = TrainableIndividualOp(
_lale_name=self.name(),
_lale_impl=class_,
_lale_schemas=self._schemas,
_lale_frozen_hyperparameters=frozen_hyperparams,
**hyperparams,
)
# TODO: improve this code
params_all = trainable_to_get_params._get_params_all()
self._validate_hyperparams(
hyperparams, params_all, self.hyperparam_schema(), class_
)
# TODO: delay creating the impl here
if len(params_all) == 0:
impl = class_()
else:
impl = class_(**params_all)
if self._should_configure_trained(impl):
result: TrainableIndividualOp = TrainedIndividualOp(
_lale_name=self.name(),
_lale_impl=impl,
_lale_schemas=self._schemas,
_lale_frozen_hyperparameters=frozen_hyperparams,
_lale_trained=True,
**hyperparams,
)
else:
result = TrainableIndividualOp(
_lale_name=self.name(),
_lale_impl=impl,
_lale_schemas=self._schemas,
_lale_frozen_hyperparameters=frozen_hyperparams,
**hyperparams,
)
return result
@property
def enum(self) -> _DictionaryObjectForEnum:
ea = getattr(self, "_enum_attributes", None)
if ea is None:
nea = enum_gen.schemaToPythonEnums(self.hyperparam_schema())
doe = _DictionaryObjectForEnum(nea)
self._enum_attributes = doe
return doe
else:
return ea
def _invalidate_enum_attributes(self) -> None:
for k in _schema_derived_attributes:
try:
delattr(self, k)
except AttributeError:
pass
def __getattr__(self, name: str) -> Any:
if name in _schema_derived_attributes or name in ["__setstate__", "_schemas"]:
raise AttributeError
if name == "_estimator_type":
if self.is_classifier():
return "classifier" # satisfy sklearn.base.is_classifier(op)
elif self.is_regressor():
return "regressor" # satisfy sklearn.base.is_regressor(op)
return super().__getattr__(name)
def __getstate__(self):
state = self.__dict__.copy()
# Remove entries that can't be pickled
for k in _schema_derived_attributes:
state.pop(k, None)
return state
def get_schema(self, schema_kind: str) -> Dict[str, Any]:
"""Return a schema of the operator.
Parameters
----------
schema_kind : string, 'hyperparams' or 'input_fit' or 'input_transform' or 'input_predict' or 'input_predict_proba' or 'input_decision_function' or 'output_transform' or 'output_predict' or 'output_predict_proba' or 'output_decision_function'
Type of the schema to be returned.
Returns
-------
dict
The python object containing the json schema of the operator.
For all the schemas currently present, this would be a dictionary.
"""
props = self._schemas["properties"]
assert (
schema_kind in props
), f"missing schema {schema_kind} for operator {self.name()} with class {self.class_name()}"
result = props[schema_kind]
return result
def has_schema(self, schema_kind: str) -> bool:
"""Return true if the operator has the schema kind.
Parameters
----------
schema_kind : string, 'hyperparams' or 'input_fit' or 'input_transform' or 'input_predict' or 'input_predict_proba' or 'input_decision_function' or 'output_transform' or 'output_predict' or 'output_predict_proba' or 'output_decision_function' or 'input_score_samples' or 'output_score_samples'
Type of the schema to be returned.
Returns
-------
True if the json schema is present, False otherwise.
"""
props = self._schemas["properties"]
return schema_kind in props
def documentation_url(self):
if "documentation_url" in self._schemas:
return self._schemas["documentation_url"]
return None
def get_tags(self) -> Dict[str, List[str]]:
"""Return the tags of an operator.
Returns
-------
        dict
            A dictionary of tag categories, each with a list of tags describing the operator.
"""
return self._schemas.get("tags", {})
def has_tag(self, tag: str) -> bool:
"""Check the presence of a tag for an operator.
Parameters
----------
tag : string
Returns
-------
boolean
Flag indicating the presence or absence of the given tag
in this operator's schemas.
"""
tags = [t for ll in self.get_tags().values() for t in ll]
return tag in tags
def input_schema_fit(self) -> JSON_TYPE:
"""Input schema for the fit method."""
return self.get_schema("input_fit")
def input_schema_transform(self) -> JSON_TYPE:
"""Input schema for the transform method."""
return self.get_schema("input_transform")
def input_schema_predict(self) -> JSON_TYPE:
"""Input schema for the predict method."""
return self.get_schema("input_predict")
def input_schema_predict_proba(self) -> JSON_TYPE:
"""Input schema for the predict_proba method."""
return self.get_schema("input_predict_proba")
def input_schema_predict_log_proba(self) -> JSON_TYPE:
"""Input schema for the predict_log_proba method.
We assume that it is the same as the predict_proba method if none has been defined explicitly."""
if self.has_schema("input_predict_log_proba"):
return self.get_schema("input_predict_log_proba")
else:
return self.get_schema("input_predict_proba")
def input_schema_decision_function(self) -> JSON_TYPE:
"""Input schema for the decision_function method."""
return self.get_schema("input_decision_function")
def input_schema_score_samples(self) -> JSON_TYPE:
"""Input schema for the score_samples method.
We assume that it is the same as the predict method if none has been defined explicitly."""
if self.has_schema("input_score_samples"):
return self.get_schema("input_score_samples")
else:
return self.get_schema("input_predict")
def output_schema_transform(self) -> JSON_TYPE:
"""Oputput schema for the transform method."""
return self.get_schema("output_transform")
def output_schema_predict(self) -> JSON_TYPE:
"""Output schema for the predict method."""
return self.get_schema("output_predict")
def output_schema_predict_proba(self) -> JSON_TYPE:
"""Output schema for the predict_proba method."""
return self.get_schema("output_predict_proba")
def output_schema_decision_function(self) -> JSON_TYPE:
"""Output schema for the decision_function method."""
return self.get_schema("output_decision_function")
def output_schema_score_samples(self) -> JSON_TYPE:
"""Output schema for the score_samples method.
We assume that it is the same as the predict method if none has been defined explicitly."""
if self.has_schema("output_score_samples"):
return self.get_schema("output_score_samples")
else:
return self.get_schema("output_predict")
def output_schema_predict_log_proba(self) -> JSON_TYPE:
"""Output schema for the predict_log_proba method.
We assume that it is the same as the predict_proba method if none has been defined explicitly."""
if self.has_schema("output_predict_log_proba"):
return self.get_schema("output_predict_log_proba")
else:
return self.get_schema("output_predict_proba")
def hyperparam_schema(self, name: Optional[str] = None) -> JSON_TYPE:
"""Returns the hyperparameter schema for the operator.
Parameters
----------
name : string, optional
Name of the hyperparameter.
Returns
-------
dict
Full hyperparameter schema for this operator or part of the schema
corresponding to the hyperparameter given by parameter `name`.
"""
hp_schema = self.get_schema("hyperparams")
if name is None:
return hp_schema
else:
params = next(iter(hp_schema.get("allOf", [])))
return params.get("properties", {}).get(name)
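# For example (hyperparameter name is illustrative), hyperparam_schema("alpha")
# returns just the sub-schema stored under "properties" of the first "allOf"
# element, whereas hyperparam_schema() returns the complete hyperparameter schema.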
def get_defaults(self) -> Mapping[str, Any]:
"""Returns the default values of hyperparameters for the operator.
Returns
-------
dict
A dictionary with names of the hyperparameters as keys and
their default values as values.
"""
if not hasattr(self, "_hyperparam_defaults"):
schema = self.hyperparam_schema()
props_container: Dict[str, Any] = next(iter(schema.get("allOf", [])), {})
props: Dict[str, Any] = props_container.get("properties", {})
# since we want to share this, we don't want callers
# to modify the returned dictionary, thereby modifying the defaults
defaults: MappingProxyType[str, Any] = MappingProxyType(
{k: props[k].get("default") for k in props.keys()}
)
self._hyperparam_defaults = defaults
return self._hyperparam_defaults
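# The returned mapping is a read-only MappingProxyType and is cached on the
# instance; callers who need a mutable copy can use dict(op.get_defaults()).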
def get_param_ranges(self) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Returns two dictionaries, ranges and cat_idx, for hyperparameters.
The ranges dictionary has two kinds of entries. Entries for
numeric and Boolean hyperparameters are tuples of the form
(min, max, default). Entries for categorical hyperparameters
are lists of their values.
The cat_idx dictionary has (min, max, default) entries of indices
into the corresponding list of values.
Warning: ignores side constraints and unions."""
hyperparam_obj = next(iter(self.hyperparam_schema().get("allOf", [])))
original = hyperparam_obj.get("properties")
def is_relevant(hp, s):
if "relevantToOptimizer" in hyperparam_obj:
return hp in hyperparam_obj["relevantToOptimizer"]
return True
relevant = {hp: s for hp, s in original.items() if is_relevant(hp, s)}
def pick_one_type(schema):
if "anyOf" in schema:
def by_type(typ):
for s in schema["anyOf"]:
if "type" in s and s["type"] == typ:
if ("forOptimizer" not in s) or s["forOptimizer"]:
return s
return None
s = None
for typ in ["number", "integer", "string"]:
s = by_type(typ)
if s:
return s
if s is None:
for s in schema["anyOf"]:
if "enum" in s:
if ("forOptimizer" not in s) or s["forOptimizer"]:
return s
return schema["anyOf"][0]
return schema
unityped = {hp: pick_one_type(relevant[hp]) for hp in relevant}
def add_default(schema):
if "type" in schema:
minimum, maximum = 0.0, 1.0
if "minimumForOptimizer" in schema:
minimum = schema["minimumForOptimizer"]
elif "minimum" in schema:
minimum = schema["minimum"]
if "maximumForOptimizer" in schema:
maximum = schema["maximumForOptimizer"]
elif "maximum" in schema:
maximum = schema["maximum"]
result = {**schema}
if schema["type"] in ["number", "integer"]:
if "default" not in schema:
schema["default"] = None
if "minimumForOptimizer" not in schema:
result["minimumForOptimizer"] = minimum
if "maximumForOptimizer" not in schema:
result["maximumForOptimizer"] = maximum
return result
elif "enum" in schema:
if "default" in schema:
return schema
return {"default": schema["enum"][0], **schema}
return schema
defaulted = {hp: add_default(unityped[hp]) for hp in unityped}
def get_range(hp, schema):
if "enum" in schema:
default = schema["default"]
non_default = [v for v in schema["enum"] if v != default]
return [*non_default, default]
elif schema["type"] == "boolean":
return (False, True, schema["default"])
else:
def get(schema, key):
return schema[key] if key in schema else None
keys = ["minimumForOptimizer", "maximumForOptimizer", "default"]
return tuple([get(schema, key) for key in keys])
def get_cat_idx(schema):
if "enum" not in schema:
return None
return (0, len(schema["enum"]) - 1, len(schema["enum"]) - 1)
autoai_ranges = {hp: get_range(hp, s) for hp, s in defaulted.items()}
if "min_samples_split" in autoai_ranges and "min_samples_leaf" in autoai_ranges:
if self._name not in (
"_GradientBoostingRegressorImpl",
"_GradientBoostingClassifierImpl",
"_ExtraTreesClassifierImpl",
):
autoai_ranges["min_samples_leaf"] = (1, 5, 1)
autoai_ranges["min_samples_split"] = (2, 5, 2)
autoai_cat_idx = {
hp: get_cat_idx(s) for hp, s in defaulted.items() if "enum" in s
}
return autoai_ranges, autoai_cat_idx
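# Illustrative output shape (hyperparameter names and values are hypothetical):
#   ranges  == {"alpha": (1e-10, 1.0, 1e-4), "solver": ["sgd", "adam", "lbfgs"]}
#   cat_idx == {"solver": (0, 2, 2)}
# i.e. numeric/Boolean entries are (min, max, default) tuples and categorical
# entries are value lists, with cat_idx giving index ranges into those lists.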
def get_param_dist(self, size=10) -> Dict[str, List[Any]]:
"""Returns a dictionary for discretized hyperparameters.
Each entry is a list of values. For continuous hyperparameters,
it returns up to `size` uniformly distributed values.
Warning: ignores side constraints, unions, and distributions."""
autoai_ranges, autoai_cat_idx = self.get_param_ranges()
def one_dist(key: str) -> List[Any]:
one_range = autoai_ranges[key]
if isinstance(one_range, tuple):
minimum, maximum, default = one_range
if minimum is None:
dist = [default]
elif isinstance(minimum, bool):
if minimum == maximum:
dist = [minimum]
else:
dist = [minimum, maximum]
elif isinstance(minimum, int) and isinstance(maximum, int):
step = float(maximum - minimum) / (size - 1)
fdist = [minimum + i * step for i in range(size)]
dist = list(set([round(f) for f in fdist]))
dist.sort()
elif isinstance(minimum, (int, float)):
# just in case the minimum or maximum is exclusive
epsilon = (maximum - minimum) / (100 * size)
minimum += epsilon
maximum -= epsilon
step = (maximum - minimum) / (size - 1)
dist = [minimum + i * step for i in range(size)]
else:
assert False, f"key {key}, one_range {one_range}"
else:
dist = [*one_range]
return dist
autoai_dists = {k: one_dist(k) for k in autoai_ranges.keys()}
return autoai_dists
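# Illustrative result for size=5 (hyperparameter name and range are hypothetical):
#   {"max_depth": [2, 6, 11, 16, 20]}
# Integer ranges are rounded and de-duplicated, so a list may hold fewer than
# `size` entries; continuous ranges are nudged inward by a small epsilon in
# case the stated bounds are exclusive.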
def _enum_to_strings(self, arg: "enumeration.Enum") -> Tuple[str, Any]:
"""[summary]
Parameters
----------
arg : [type]
[description]
Raises
------
ValueError
[description]
Returns
-------
[type]
[description]
"""
if not isinstance(arg, enumeration.Enum):
raise ValueError("Missing keyword on argument {}.".format(arg))
return arg.__class__.__name__, arg.value
def _wrapped_impl_class(self):
if not hasattr(self, "_impl_class_"):
if inspect.isclass(self._impl):
self._impl_class_ = self._impl
else:
self._impl_class_ = self._impl.__class__
return self._impl_class_
def _impl_class(self):
return _WithoutGetParams.unwrap(self._wrapped_impl_class())
def _impl_instance(self):
if not self._is_instantiated():
defaults = self.get_defaults()
all_hps = self.hyperparams_all()
if all_hps:
hyperparams = {**defaults, **all_hps}
else:
hyperparams = defaults
class_ = self._impl_class()
try:
instance = class_(
**hyperparams
) # defaults merged with any explicitly set hyperparams
except TypeError as e:
logger.debug(
f"Constructor for {class_.__module__}.{class_.__name__} "
f"threw exception {e}"
)
# TODO: Is this really a reasonable fallback?
instance = class_.__new__() # type:ignore
self._impl = instance
return self._impl
@property
def impl(self):
"""Returns the underlying impl. This can be used to access additional
field and methods not exposed by Lale. If only the type of the
impl is needed, please use self.impl_class instead, as it can be more efficient.
"""
return self._impl_instance()
@property
def impl_class(self) -> type:
"""Returns the class of the underlying impl. This should return the same thing
as self.impl.__class__, but can be more efficient.
"""
return self._impl_class()
# This allows the user, for example, to check isinstance(LR().fit(...), LR)
def __instancecheck__(self, other):
if isinstance(other, IndividualOp):
return issubclass(other.impl_class, self.impl_class)
else:
return False
def class_name(self) -> str:
module = None
if self._impl is not None:
module = self._impl.__module__
if module is None or module == str.__class__.__module__: # type: ignore
class_name = self.name()
else:
class_name = module + "." + self._impl_class().__name__
return class_name
def __str__(self) -> str:
return self.name()
# sklearn calls __repr__ instead of __str__
def __repr__(self):
name = self.name()
return name
def _has_same_impl(self, other: Operator) -> bool:
"""Checks if the type of the operator implementations are compatible"""
if not isinstance(other, IndividualOp):
return False
return self._impl_class() == other._impl_class()
def customize_schema(
self,
schemas: Optional[Schema] = None,
relevantToOptimizer: Optional[List[str]] = None,
constraint: Union[Schema, JSON_TYPE, None] = None,
tags: Optional[Dict] = None,
**kwargs: Union[Schema, JSON_TYPE, None],
) -> "IndividualOp":
return customize_schema(
self, schemas, relevantToOptimizer, constraint, tags, **kwargs
)
def _propose_fixed_hyperparams(
self, key_candidates, hp_all, hp_schema, max_depth=2
):
defaults = self.get_defaults()
explicit_defaults: Dict[str, Any] = {k: defaults[k] for k in key_candidates}
found: bool = False
for depth in range(0, max_depth):
if found:
return None
candidate_replacements: Any = list(
itertools.combinations(explicit_defaults.items(), depth + 1)
)
for replacements in candidate_replacements:
new_values = dict(replacements)
fixed_hp = {**hp_all, **new_values}
try:
lale.type_checking.validate_schema(fixed_hp, hp_schema)
found = True
yield new_values
except jsonschema.ValidationError:
pass
MAX_FIX_DEPTH: int = 2
MAX_FIX_SUGGESTIONS: int = 3
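# _validate_hyperparams below reports schema violations with actionable
# messages: it first tries dropping unknown keys, then searches (up to
# MAX_FIX_DEPTH replacements at a time, yielding at most MAX_FIX_SUGGESTIONS
# candidates via _propose_fixed_hyperparams) for small sets of explicit
# hyperparameters that, when reset to their defaults, make the configuration
# validate again.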
def _validate_hyperparams(self, hp_explicit, hp_all, hp_schema, class_):
from lale.settings import disable_hyperparams_schema_validation
if disable_hyperparams_schema_validation:
return
try:
lale.type_checking.validate_schema(hp_all, hp_schema)
except jsonschema.ValidationError as e_orig:
e = e_orig if e_orig.parent is None else e_orig.parent
lale.type_checking.validate_is_schema(e.schema)
schema = lale.pretty_print.to_string(e.schema)
defaults = self.get_defaults()
extra_keys = [k for k in hp_explicit.keys() if k not in defaults]
trimmed_valid: bool = False
if extra_keys:
trimmed_hp_all = {
k: v for k, v in hp_all.items() if k not in extra_keys
}
trimmed_hp_explicit_keys = {
k for k in hp_explicit.keys() if k not in extra_keys
}
remove_recommendation = (
"unknown key"
+ ("s" if len(extra_keys) > 1 else "")
+ " "
+ ", ".join(("'" + k + "'" for k in extra_keys))
)
try:
lale.type_checking.validate_schema(trimmed_hp_all, hp_schema)
trimmed_valid = True
except jsonschema.ValidationError:
pass
else:
trimmed_hp_all = hp_all
trimmed_hp_explicit_keys = hp_explicit.keys()
remove_recommendation = ""
proposed_fix: str = ""
if trimmed_valid and remove_recommendation:
proposed_fix = "To fix, please remove " + remove_recommendation + "\n"
else:
find_fixed_hyperparam_iter = self._propose_fixed_hyperparams(
trimmed_hp_explicit_keys,
trimmed_hp_all,
hp_schema,
max_depth=self.MAX_FIX_DEPTH,
)
fix_suggestions: List[Dict[str, Any]] = list(
itertools.islice(
find_fixed_hyperparam_iter, self.MAX_FIX_SUGGESTIONS
)
)
if fix_suggestions:
from lale.pretty_print import hyperparams_to_string
if remove_recommendation:
remove_recommendation = (
"remove " + remove_recommendation + " and "
)
proposed_fix = "Some possible fixes include:\n" + "".join(
(
"- "
+ remove_recommendation
+ "set "
+ hyperparams_to_string(d)
+ "\n"
for d in fix_suggestions
)
)
if [*e.schema_path][:3] == ["allOf", 0, "properties"]:
arg = e.schema_path[3]
reason = f"invalid value {arg}={e.instance}"
schema_path = f"argument {arg}"
elif [*e.schema_path][:3] == ["allOf", 0, "additionalProperties"]:
pref, suff = "Additional properties are not allowed (", ")"
assert e.message.startswith(pref) and e.message.endswith(suff)
reason = "argument " + e.message[len(pref) : -len(suff)]
schema_path = "arguments and their defaults"
schema = self.get_defaults()
elif e.schema_path[0] == "allOf" and int(e.schema_path[1]) != 0:
assert e.schema_path[2] == "anyOf"
descr = e.schema["description"]
if descr.endswith("."):
descr = descr[:-1]
reason = f"constraint {descr[0].lower()}{descr[1:]}"
schema_path = "failing constraint"
if self.documentation_url() is not None:
schema = f"{self.documentation_url()}#constraint-{e.schema_path[1]}"
else:
reason = e.message
schema_path = e.schema_path
msg = (
f"Invalid configuration for {self.name()}("
+ f"{lale.pretty_print.hyperparams_to_string(hp_explicit if hp_explicit else {})}) "
+ f"due to {reason}.\n"
+ proposed_fix
+ f"Schema of {schema_path}: {schema}\n"
+ f"Invalid value: {e.instance}"
)
raise jsonschema.ValidationError(msg)
user_validator = getattr(class_, "validate_hyperparams", None)
if user_validator:
user_validator(**hp_all)
def validate_schema(self, X, y=None):
if self.has_method("fit"):
X = self._validate_input_schema("X", X, "fit")
method = "transform" if self.is_transformer() else "predict"
self._validate_input_schema("X", X, method)
if self.is_supervised(default_if_missing=False):
if y is None:
raise ValueError(f"{self.name()}.fit() y cannot be None")
else:
if self.has_method("fit"):
y = self._validate_input_schema("y", y, "fit")
self._validate_input_schema("y", y, method)
def _validate_input_schema(self, arg_name: str, arg, method: str):
from lale.settings import disable_data_schema_validation
if disable_data_schema_validation:
return arg
if not lale.helpers.is_empty_dict(arg):
if method == "fit" or method == "partial_fit":
schema = self.input_schema_fit()
elif method == "transform":
schema = self.input_schema_transform()
elif method == "predict":
schema = self.input_schema_predict()
elif method == "predict_proba":
schema = self.input_schema_predict_proba()
elif method == "predict_log_proba":
schema = self.input_schema_predict_log_proba()
elif method == "decision_function":
schema = self.input_schema_decision_function()
elif method == "score_samples":
schema = self.input_schema_score_samples()
else:
raise ValueError(f"Unexpected method argument: {method}")
if "properties" in schema and arg_name in schema["properties"]:
arg = lale.datasets.data_schemas.add_schema(arg)
try:
sup: JSON_TYPE = schema["properties"][arg_name]
lale.type_checking.validate_schema_or_subschema(arg, sup)
except lale.type_checking.SubschemaError as e:
sub_str: str = lale.pretty_print.json_to_string(e.sub)
sup_str: str = lale.pretty_print.json_to_string(e.sup)
raise ValueError(
f"{self.name()}.{method}() invalid {arg_name}, the schema of the actual data is not a subschema of the expected schema of the argument.\nactual_schema = {sub_str}\nexpected_schema = {sup_str}"
)
except Exception as e:
exception_type = f"{type(e).__module__}.{type(e).__name__}"
raise ValueError(
f"{self.name()}.{method}() invalid {arg_name}: {exception_type}: {e}"
) from None
return arg
def _validate_output_schema(self, result, method):
from lale.settings import disable_data_schema_validation
if disable_data_schema_validation:
return result
if method == "transform":
schema = self.output_schema_transform()
elif method == "predict":
schema = self.output_schema_predict()
elif method == "predict_proba":
schema = self.output_schema_predict_proba()
elif method == "predict_log_proba":
schema = self.output_schema_predict_log_proba()
elif method == "decision_function":
schema = self.output_schema_decision_function()
elif method == "score_samples":
schema = self.output_schema_score_samples()
else:
raise ValueError(f"Unexpected method argument: {method}")
result = lale.datasets.data_schemas.add_schema(result)
try:
lale.type_checking.validate_schema_or_subschema(result, schema)
except Exception as e:
print(f"{self.name()}.{method}() invalid result: {e}")
raise ValueError(f"{self.name()}.{method}() invalid result: {e}") from e
return result
def transform_schema(self, s_X) -> JSON_TYPE:
from lale.settings import disable_data_schema_validation
if disable_data_schema_validation:
return {}
elif self.is_transformer():
return self.output_schema_transform()
elif self.has_method("predict_proba"):
return self.output_schema_predict_proba()
elif self.has_method("decision_function"):
return self.output_schema_decision_function()
else:
return self.output_schema_predict()
def is_supervised(self, default_if_missing=True) -> bool:
if self.has_method("fit"):
schema_fit = self.input_schema_fit()
return lale.type_checking.is_subschema(schema_fit, _is_supervised_schema)
return default_if_missing
def is_classifier(self) -> bool:
return self.has_tag("classifier")
def is_regressor(self) -> bool:
return self.has_tag("regressor")
def has_method(self, method_name: str) -> bool:
return hasattr(self._impl, method_name)
def is_transformer(self) -> bool:
"""Checks if the operator is a transformer"""
return self.has_method("transform")
@property
def _final_individual_op(self) -> Optional["IndividualOp"]:
return self
_is_supervised_schema = {"type": "object", "required": ["y"]}
class PlannedIndividualOp(IndividualOp, PlannedOperator):
"""
This is a concrete class that returns a trainable individual
operator through its __call__ method. A configure method can use
an optimizer and return the best hyperparameter combination.
"""
_hyperparams: Optional[Dict[str, Any]]
def __init__(
self,
_lale_name: str,
_lale_impl,
_lale_schemas,
_lale_frozen_hyperparameters=None,
_lale_trained=False,
**hp,
) -> None:
super(PlannedIndividualOp, self).__init__(
_lale_name, _lale_impl, _lale_schemas, _lale_frozen_hyperparameters, **hp
)
def _should_configure_trained(self, impl):
# TODO: may also want to do this for other higher-order operators
if self.class_name() != _LALE_SKL_PIPELINE:
return False
return isinstance(impl._pipeline, TrainedPipeline)
# give it a more precise type: if the input is an individual op, the output is as well
def auto_configure(
self, X, y=None, optimizer=None, cv=None, scoring=None, **kwargs
) -> "TrainedIndividualOp":
trained = super().auto_configure(
X, y=y, optimizer=optimizer, cv=cv, scoring=scoring, **kwargs
)
assert isinstance(trained, TrainedIndividualOp)
return trained
def __call__(self, *args, **kwargs) -> "TrainableIndividualOp":
return self._configure(*args, **kwargs)
def _hyperparam_schema_with_hyperparams(self, data_schema={}):
def fix_hyperparams(schema):
hyperparams = self.hyperparams()
if not hyperparams:
return schema
props = {k: {"enum": [v]} for k, v in hyperparams.items()}
obj = {"type": "object", "properties": props}
obj["relevantToOptimizer"] = list(hyperparams.keys())
obj["required"] = list(hyperparams.keys())
top = {"allOf": [schema, obj]}
return top
s_1 = self.hyperparam_schema()
s_2 = fix_hyperparams(s_1)
s_3 = lale.type_checking.replace_data_constraints(s_2, data_schema)
return s_3
def freeze_trainable(self) -> "TrainableIndividualOp":
return self._configure().freeze_trainable()
def free_hyperparams(self):
hyperparam_schema = self.hyperparam_schema()
if (
"allOf" in hyperparam_schema
and "relevantToOptimizer" in hyperparam_schema["allOf"][0]
):
to_bind = hyperparam_schema["allOf"][0]["relevantToOptimizer"]
else:
to_bind = []
bound = self.frozen_hyperparams()
if bound is None:
return set(to_bind)
else:
return set(to_bind) - set(bound)
def is_frozen_trainable(self) -> bool:
free = self.free_hyperparams()
return len(free) == 0
def customize_schema(
self,
schemas: Optional[Schema] = None,
relevantToOptimizer: Optional[List[str]] = None,
constraint: Union[Schema, JSON_TYPE, None] = None,
tags: Optional[Dict] = None,
**kwargs: Union[Schema, JSON_TYPE, None],
) -> "PlannedIndividualOp":
return customize_schema(
self, schemas, relevantToOptimizer, constraint, tags, **kwargs
)
def _mutation_warning(method_name: str) -> str:
msg = str(
"The `{}` method is deprecated on a trainable "
"operator, because the learned coefficients could be "
"accidentally overwritten by retraining. Call `{}` "
"on the trained operator returned by `fit` instead."
)
return msg.format(method_name, method_name)
class TrainableIndividualOp(PlannedIndividualOp, TrainableOperator):
def __init__(
self,
_lale_name,
_lale_impl,
_lale_schemas,
_lale_frozen_hyperparameters=None,
**hp,
):
super(TrainableIndividualOp, self).__init__(
_lale_name, _lale_impl, _lale_schemas, _lale_frozen_hyperparameters, **hp
)
def set_params(self, **impl_params):
"""This implements the set_params, as per the scikit-learn convention,
extended as documented in the module docstring"""
return self._with_params(True, **impl_params)
def _with_op_params(
self, try_mutate, **impl_params: Dict[str, Any]
) -> "TrainableIndividualOp":
if not try_mutate:
return super()._with_op_params(try_mutate, **impl_params)
hps = self.hyperparams_all()
if hps is not None:
hyperparams = {**hps, **impl_params}
else:
hyperparams = impl_params
frozen = self.frozen_hyperparams()
self._hyperparams = hyperparams
if frozen:
frozen.extend((k for k in impl_params.keys() if k not in frozen))
else:
self._frozen_hyperparams = list(impl_params.keys())
if self._is_instantiated():
# if we already have an instance impl, we need to update it
impl = self._impl
if hasattr(impl, "set_params"):
new_impl = impl.set_params(**hyperparams)
self._impl = new_impl
self._impl_class_ = new_impl.__class__
elif hasattr(impl, "_wrapped_model") and hasattr(
impl._wrapped_model, "set_params"
):
impl._wrapped_model.set_params(**hyperparams)
else:
hyper_d = {**self.get_defaults(), **hyperparams}
self._impl = self._impl_class()(**hyper_d)
return self
def _clone_impl(self):
impl_instance = self._impl_instance()
if hasattr(impl_instance, "get_params"):
result = sklearn.base.clone(impl_instance)
else:
try:
result = copy.deepcopy(impl_instance)
except Exception:
impl_class = self._impl_class()
params_all = self._get_params_all()
result = impl_class(**params_all)
return result
def _trained_hyperparams(self, trained_impl) -> Optional[Dict[str, Any]]:
hp = self.hyperparams()
if not hp:
return None
# TODO: may also want to do this for other higher-order operators
if self.class_name() != _LALE_SKL_PIPELINE:
return hp
names_list = [name for name, op in hp["steps"]]
steps_list = trained_impl._pipeline.steps()
trained_steps = list(zip(names_list, steps_list))
result = {**hp, "steps": trained_steps}
return result
def _validate_hyperparam_data_constraints(self, X, y=None):
from lale.settings import disable_hyperparams_schema_validation
if disable_hyperparams_schema_validation:
return True
hp_schema = self.hyperparam_schema()
if not hasattr(self, "__has_data_constraints"):
has_dc = lale.type_checking.has_data_constraints(hp_schema)
self.__has_data_constraints = has_dc
if self.__has_data_constraints:
hp_explicit = self.hyperparams()
hp_all = self._get_params_all()
data_schema = lale.helpers.fold_schema(X, y)
hp_schema_2 = lale.type_checking.replace_data_constraints(
hp_schema, data_schema
)
self._validate_hyperparams(
hp_explicit, hp_all, hp_schema_2, self.impl_class
)
def fit(self, X, y=None, **fit_params) -> "TrainedIndividualOp":
# logger.info("%s enter fit %s", time.asctime(), self.name())
X = self._validate_input_schema("X", X, "fit")
y = self._validate_input_schema("y", y, "fit")
self._validate_hyperparam_data_constraints(X, y)
filtered_fit_params = _fixup_hyperparams_dict(fit_params)
if isinstance(self, TrainedIndividualOp):
trainable_impl = self.impl
else:
trainable_impl = self._clone_impl()
if filtered_fit_params is None:
trained_impl = trainable_impl.fit(X, y)
else:
trained_impl = trainable_impl.fit(X, y, **filtered_fit_params)
# if the trainable's fit method returns None, assume that
# the trainable should be used as the trained impl as well
if trained_impl is None:
trained_impl = trainable_impl
hps = self._trained_hyperparams(trained_impl)
frozen: Optional[List[str]] = list(hps.keys()) if hps is not None else None
if hps is None:
hps = {}
result = TrainedIndividualOp(
self.name(),
trained_impl,
self._schemas,
_lale_trained=True,
_lale_frozen_hyperparameters=frozen,
**hps,
)
if not isinstance(self, TrainedIndividualOp):
self._trained = result
# logger.info("%s exit fit %s", time.asctime(), self.name())
return result
def partial_fit(self, X, y=None, **fit_params) -> "TrainedIndividualOp":
if not self.has_method("partial_fit"):
raise AttributeError(f"{self.name()} has no partial_fit implemented.")
X = self._validate_input_schema("X", X, "partial_fit")
y = self._validate_input_schema("y", y, "partial_fit")
self._validate_hyperparam_data_constraints(X, y)
filtered_fit_params = _fixup_hyperparams_dict(fit_params)
if isinstance(self, TrainedIndividualOp):
trainable_impl = self.impl
else:
trainable_impl = self._clone_impl()
if filtered_fit_params is None:
trained_impl = trainable_impl.partial_fit(X, y)
else:
trained_impl = trainable_impl.partial_fit(X, y, **filtered_fit_params)
if trained_impl is None:
trained_impl = trainable_impl
hps = self.hyperparams_all()
if hps is None:
hps = {}
result = TrainedIndividualOp(
self.name(),
trained_impl,
self._schemas,
_lale_trained=True,
_lale_frozen_hyperparameters=self.frozen_hyperparams(),
**hps,
)
if not isinstance(self, TrainedIndividualOp):
self._trained = result
return result
def freeze_trained(self) -> "TrainedIndividualOp":
"""
.. deprecated:: 0.0.0
The `freeze_trained` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `freeze_trained`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("freeze_trained"), DeprecationWarning)
try:
return self._trained.freeze_trained()
except AttributeError:
raise ValueError("Must call `fit` before `freeze_trained`.")
def __repr__(self):
name = self.name()
hps = self.reduced_hyperparams()
hyp_string: str
if hps is None:
hyp_string = ""
else:
hyp_string = lale.pretty_print.hyperparams_to_string(hps)
return name + "(" + hyp_string + ")"
@if_delegate_has_method(delegate="_impl")
def get_pipeline(
self, pipeline_name=None, astype="lale"
) -> Optional[TrainableOperator]:
"""
.. deprecated:: 0.0.0
The `get_pipeline` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `get_pipeline`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("get_pipeline"), DeprecationWarning)
try:
return self._trained.get_pipeline(pipeline_name, astype)
except AttributeError:
raise ValueError("Must call `fit` before `get_pipeline`.")
@if_delegate_has_method(delegate="_impl")
def summary(self) -> pd.DataFrame:
"""
.. deprecated:: 0.0.0
The `summary` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `summary`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("summary"), DeprecationWarning)
try:
return self._trained.summary()
except AttributeError:
raise ValueError("Must call `fit` before `summary`.")
@if_delegate_has_method(delegate="_impl")
def transform(self, X, y=None) -> Any:
"""
.. deprecated:: 0.0.0
The `transform` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `transform`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("transform"), DeprecationWarning)
try:
return self._trained.transform(X, y)
except AttributeError:
raise ValueError("Must call `fit` before `transform`.")
@if_delegate_has_method(delegate="_impl")
def predict(self, X=None, **predict_params) -> Any:
"""
.. deprecated:: 0.0.0
The `predict` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `predict`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("predict"), DeprecationWarning)
try:
return self._trained.predict(X)
except AttributeError:
raise ValueError("Must call `fit` before `predict`.")
@if_delegate_has_method(delegate="_impl")
def predict_proba(self, X=None):
"""
.. deprecated:: 0.0.0
The `predict_proba` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `predict_proba`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("predict_proba"), DeprecationWarning)
try:
return self._trained.predict_proba(X)
except AttributeError:
raise ValueError("Must call `fit` before `predict_proba`.")
@if_delegate_has_method(delegate="_impl")
def decision_function(self, X=None):
"""
.. deprecated:: 0.0.0
The `decision_function` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `decision_function`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("decision_function"), DeprecationWarning)
try:
return self._trained.decision_function(X)
except AttributeError:
raise ValueError("Must call `fit` before `decision_function`.")
@if_delegate_has_method(delegate="_impl")
def score(self, X, y, **score_params) -> Any:
"""
.. deprecated:: 0.0.0
The `score` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `score`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("score"), DeprecationWarning)
try:
if score_params is None:
return self._trained.score(X, y)
else:
return self._trained.score(X, y, **score_params)
except AttributeError:
raise ValueError("Must call `fit` before `score`.")
@if_delegate_has_method(delegate="_impl")
def score_samples(self, X=None):
"""
.. deprecated:: 0.0.0
The `score_samples` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `score_samples`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("score_samples"), DeprecationWarning)
try:
return self._trained.score_samples(X)
except AttributeError:
raise ValueError("Must call `fit` before `score_samples`.")
@if_delegate_has_method(delegate="_impl")
def predict_log_proba(self, X=None):
"""
.. deprecated:: 0.0.0
The `predict_log_proba` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `predict_log_proba`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("predict_log_proba"), DeprecationWarning)
try:
return self._trained.predict_log_proba(X)
except AttributeError:
raise ValueError("Must call `fit` before `predict_log_proba`.")
def free_hyperparams(self) -> Set[str]:
hyperparam_schema = self.hyperparam_schema()
to_bind: List[str]
if (
"allOf" in hyperparam_schema
and "relevantToOptimizer" in hyperparam_schema["allOf"][0]
):
to_bind = hyperparam_schema["allOf"][0]["relevantToOptimizer"]
else:
to_bind = []
bound = self.frozen_hyperparams()
if bound is None:
return set(to_bind)
else:
return set(to_bind) - set(bound)
def _freeze_trainable_bindings(self) -> Dict[str, Any]:
old_bindings = self.hyperparams_all()
if old_bindings is None:
old_bindings = {}
free = self.free_hyperparams()
defaults: Mapping[str, Any] = self.get_defaults()
new_bindings: Dict[str, Any] = {name: defaults[name] for name in free}
bindings: Dict[str, Any] = {**old_bindings, **new_bindings}
return bindings
def freeze_trainable(self) -> "TrainableIndividualOp":
bindings = self._freeze_trainable_bindings()
result = self._configure(**bindings)
assert result.is_frozen_trainable(), str(result.free_hyperparams())
return result
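# Hedged usage sketch (operator and hyperparameter names are hypothetical):
#   op = MyClassifier(solver="sgd")      # other optimizer-relevant hyperparameters stay free
#   frozen = op.freeze_trainable()       # binds every remaining free hyperparameter to its default
#   assert frozen.is_frozen_trainable()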
def transform_schema(self, s_X):
from lale.settings import disable_data_schema_validation
if disable_data_schema_validation:
return {}
if self.has_method("transform_schema"):
try:
return self._impl_instance().transform_schema(s_X)
except BaseException as e:
raise ValueError(
f"unexpected error in {self.name()}.transform_schema({lale.pretty_print.to_string(s_X)}"
) from e
else:
return super(TrainableIndividualOp, self).transform_schema(s_X)
def input_schema_fit(self) -> JSON_TYPE:
if self.has_method("input_schema_fit"):
return self._impl_instance().input_schema_fit()
else:
return super(TrainableIndividualOp, self).input_schema_fit()
def customize_schema(
self,
schemas: Optional[Schema] = None,
relevantToOptimizer: Optional[List[str]] = None,
constraint: Union[Schema, JSON_TYPE, None] = None,
tags: Optional[Dict] = None,
**kwargs: Union[Schema, JSON_TYPE, None],
) -> "TrainableIndividualOp":
return customize_schema(
self, schemas, relevantToOptimizer, constraint, tags, **kwargs
)
class TrainedIndividualOp(TrainableIndividualOp, TrainedOperator):
_frozen_trained: bool
def __new__(cls, *args, _lale_trained=False, _lale_impl=None, **kwargs):
if (
"_lale_name" not in kwargs
or _lale_trained
or (_lale_impl is not None and not hasattr(_lale_impl, "fit"))
):
obj = super(TrainedIndividualOp, cls).__new__(TrainedIndividualOp)
return obj
else:
# unless _lale_trained=True, we actually want to return a Trainable
obj = super(TrainedIndividualOp, cls).__new__(TrainableIndividualOp)
# apparently Python does not call __init__ if the type returned is not the
# expected type
obj.__init__(*args, **kwargs)
return obj
def __init__(
self,
_lale_name,
_lale_impl,
_lale_schemas,
_lale_frozen_hyperparameters=None,
_lale_trained=False,
**hp,
):
super(TrainedIndividualOp, self).__init__(
_lale_name, _lale_impl, _lale_schemas, _lale_frozen_hyperparameters, **hp
)
self._frozen_trained = not self.has_method("fit")
def __call__(self, *args, **kwargs) -> "TrainedIndividualOp":
filtered_kwargs_params = _fixup_hyperparams_dict(kwargs)
trainable = self._configure(*args, **filtered_kwargs_params)
hps = trainable.hyperparams_all()
if hps is None:
hps = {}
instance = TrainedIndividualOp(
trainable._name,
trainable._impl,
trainable._schemas,
_lale_trained=True,
_lale_frozen_hyperparameters=trainable.frozen_hyperparams(),
**hps,
)
return instance
def fit(self, X, y=None, **fit_params) -> "TrainedIndividualOp":
if self.has_method("fit") and not self.is_frozen_trained():
filtered_fit_params = _fixup_hyperparams_dict(fit_params)
return super(TrainedIndividualOp, self).fit(X, y, **filtered_fit_params)
else:
return self
@if_delegate_has_method(delegate="_impl")
def transform(self, X, y=None) -> Any:
"""Transform the data.
Parameters
----------
X :
Features; see input_transform schema of the operator.
Returns
-------
result :
Transformed features; see output_transform schema of the operator.
"""
# logger.info("%s enter transform %s", time.asctime(), self.name())
X = self._validate_input_schema("X", X, "transform")
if "y" in [
required_property.lower()
for required_property in self.input_schema_transform().get("required", [])
]:
y = self._validate_input_schema("y", y, "transform")
raw_result = self._impl_instance().transform(X, y)
else:
raw_result = self._impl_instance().transform(X)
result = self._validate_output_schema(raw_result, "transform")
# logger.info("%s exit transform %s", time.asctime(), self.name())
return result
def _predict(self, X, **predict_params):
X = self._validate_input_schema("X", X, "predict")
raw_result = self._impl_instance().predict(X, **predict_params)
result = self._validate_output_schema(raw_result, "predict")
return result
@if_delegate_has_method(delegate="_impl")
def predict(self, X=None, **predict_params) -> Any:
"""Make predictions.
Parameters
----------
X :
Features; see input_predict schema of the operator.
Returns
-------
result :
Predictions; see output_predict schema of the operator.
"""
# logger.info("%s enter predict %s", time.asctime(), self.name())
result = self._predict(X, **predict_params)
# logger.info("%s exit predict %s", time.asctime(), self.name())
if isinstance(result, lale.datasets.data_schemas.NDArrayWithSchema):
return lale.datasets.data_schemas.strip_schema(
result
) # otherwise scorers return zero-dim array
return result
@if_delegate_has_method(delegate="_impl")
def predict_proba(self, X=None):
"""Probability estimates for all classes.
Parameters
----------
X :
Features; see input_predict_proba schema of the operator.
Returns
-------
result :
Probabilities; see output_predict_proba schema of the operator.
"""
# logger.info("%s enter predict_proba %s", time.asctime(), self.name())
X = self._validate_input_schema("X", X, "predict_proba")
raw_result = self._impl_instance().predict_proba(X)
result = self._validate_output_schema(raw_result, "predict_proba")
# logger.info("%s exit predict_proba %s", time.asctime(), self.name())
return result
@if_delegate_has_method(delegate="_impl")
def decision_function(self, X=None):
"""Confidence scores for all classes.
Parameters
----------
X :
Features; see input_decision_function schema of the operator.
Returns
-------
result :
Confidences; see output_decision_function schema of the operator.
"""
# logger.info("%s enter decision_function %s", time.asctime(), self.name())
X = self._validate_input_schema("X", X, "decision_function")
raw_result = self._impl_instance().decision_function(X)
result = self._validate_output_schema(raw_result, "decision_function")
# logger.info("%s exit decision_function %s", time.asctime(), self.name())
return result
@if_delegate_has_method(delegate="_impl")
def score(self, X, y, **score_params) -> Any:
"""Performance evaluation with a default metric.
Parameters
----------
X :
Features.
y:
Ground truth labels.
score_params:
Any additional parameters expected by the score function of
the underlying operator.
Returns
-------
score :
performance metric value
"""
# Use the input schema of predict as in most cases it applies to score as well.
X = self._validate_input_schema("X", X, "predict")
if score_params is None:
result = self._impl_instance().score(X, y)
else:
result = self._impl_instance().score(X, y, **score_params)
# We skip output validation for score for now
return result
@if_delegate_has_method(delegate="_impl")
def score_samples(self, X=None):
"""Scores for each sample in X. The type of scores depends on the operator.
Parameters
----------
X :
Features.
Returns
-------
result :
scores per sample.
"""
X = self._validate_input_schema("X", X, "score_samples")
raw_result = self._impl_instance().score_samples(X)
result = self._validate_output_schema(raw_result, "score_samples")
return result
@if_delegate_has_method(delegate="_impl")
def predict_log_proba(self, X=None):
"""Predicted class log-probabilities for X.
Parameters
----------
X :
Features.
Returns
-------
result :
Class log probabilities.
"""
X = self._validate_input_schema("X", X, "predict_log_proba")
raw_result = self._impl_instance().predict_log_proba(X)
result = self._validate_output_schema(raw_result, "predict_log_proba")
return result
def freeze_trainable(self) -> "TrainedIndividualOp":
result = copy.deepcopy(self)
new_bindings = self._freeze_trainable_bindings()
result._hyperparams = new_bindings
result._frozen_hyperparams = list(new_bindings)
assert result.is_frozen_trainable(), str(result.free_hyperparams())
assert isinstance(result, TrainedIndividualOp)
return result
def is_frozen_trained(self) -> bool:
return self._frozen_trained
def freeze_trained(self) -> "TrainedIndividualOp":
if self.is_frozen_trained():
return self
result = copy.deepcopy(self)
result._frozen_trained = True
assert result.is_frozen_trained()
return result
@overload
def get_pipeline(
self, pipeline_name: None = None, astype: str = "lale"
) -> Optional[TrainedOperator]:
...
@overload
def get_pipeline(
self, pipeline_name: str, astype: str = "lale"
) -> Optional[TrainableOperator]:
...
@if_delegate_has_method(delegate="_impl")
def get_pipeline(self, pipeline_name=None, astype="lale"):
result = self._impl_instance().get_pipeline(pipeline_name, astype)
return result
@if_delegate_has_method(delegate="_impl")
def summary(self) -> pd.DataFrame:
return self._impl_instance().summary()
def customize_schema(
self,
schemas: Optional[Schema] = None,
relevantToOptimizer: Optional[List[str]] = None,
constraint: Union[Schema, JSON_TYPE, None] = None,
tags: Optional[Dict] = None,
**kwargs: Union[Schema, JSON_TYPE, None],
) -> "TrainedIndividualOp":
return customize_schema(
self, schemas, relevantToOptimizer, constraint, tags, **kwargs
)
_all_available_operators: List[PlannedOperator] = []
def wrap_operator(impl) -> Operator:
if isinstance(impl, Operator):
return impl
else:
return make_operator(impl)
# variant of make_operator for impls that are already trained (don't have a fit method)
def make_pretrained_operator(
impl, schemas=None, name: Optional[str] = None
) -> TrainedIndividualOp:
x = make_operator(impl, schemas, name)
assert isinstance(x, TrainedIndividualOp)
return x
def get_op_from_lale_lib(impl_class) -> Optional[IndividualOp]:
assert inspect.isclass(impl_class)
assert not issubclass(impl_class, Operator)
assert hasattr(impl_class, "predict") or hasattr(impl_class, "transform")
if impl_class.__module__.startswith("lale.lib"):
assert impl_class.__name__.endswith("Impl"), impl_class.__name__
assert impl_class.__name__.startswith("_"), impl_class.__name__
module = importlib.import_module(impl_class.__module__)
class_name = impl_class.__name__[1 : -len("Impl")]
result = getattr(module, class_name)
else:
try:
module_name = impl_class.__module__.split(".")[0]
module = importlib.import_module("lale.lib." + module_name)
result = getattr(module, impl_class.__name__)
except (ModuleNotFoundError, AttributeError):
try:
module = importlib.import_module("lale.lib.autogen")
result = getattr(module, impl_class.__name__)
except (ModuleNotFoundError, AttributeError):
result = None
if result is not None:
result._check_schemas()
return result
def get_lib_schemas(impl_class) -> Optional[JSON_TYPE]:
operator = get_op_from_lale_lib(impl_class)
return None if operator is None else operator._schemas
def make_operator(
impl, schemas=None, name: Optional[str] = None
) -> PlannedIndividualOp:
if name is None:
name = lale.helpers.assignee_name(level=2)
if name is None:
if inspect.isclass(impl):
n: str = impl.__name__
if n.startswith("_"):
n = n[1:]
if n.endswith("Impl"):
n = n[: -len("Impl")]
name = n
else:
name = "Unknown"
if schemas is None:
if isinstance(impl, IndividualOp):
schemas = impl._schemas
elif inspect.isclass(impl):
schemas = get_lib_schemas(impl)
else:
schemas = get_lib_schemas(impl.__class__)
if inspect.isclass(impl):
if hasattr(impl, "fit"):
operatorObj = PlannedIndividualOp(
name, impl, schemas, _lale_frozen_hyperparameters=None
)
else:
operatorObj = TrainedIndividualOp(
name,
impl,
schemas,
_lale_trained=True,
_lale_frozen_hyperparameters=None,
)
else:
hps: Dict[str, Any] = {}
frozen: Optional[List[str]] = None
if hasattr(impl, "get_params"):
hps = impl.get_params(deep=False)
frozen = list(hps.keys())
if hasattr(impl, "fit"):
operatorObj = TrainableIndividualOp(
name, impl, schemas, _lale_frozen_hyperparameters=frozen, **hps
)
else:
operatorObj = TrainedIndividualOp(
name,
impl,
schemas,
_lale_trained=True,
_lale_frozen_hyperparameters=frozen,
**hps,
)
operatorObj._check_schemas()
_all_available_operators.append(operatorObj)
return operatorObj
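# Sketch of the dispatch above: wrapping a class that has a fit method yields a
# PlannedIndividualOp, a class without fit yields a TrainedIndividualOp, and
# wrapping an already-constructed instance yields a TrainableIndividualOp (or a
# TrainedIndividualOp if the instance has no fit), with any get_params() results
# recorded as frozen hyperparameters.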
def get_available_operators(
tag: str, more_tags: AbstractSet[str] = None
) -> List[PlannedOperator]:
singleton = set([tag])
tags = singleton if (more_tags is None) else singleton.union(more_tags)
def filter(op):
tags_dict = op.get_tags()
if tags_dict is None:
return False
tags_set = {tag for prefix in tags_dict for tag in tags_dict[prefix]}
return tags.issubset(tags_set)
return [op for op in _all_available_operators if filter(op)]
def get_available_estimators(tags: AbstractSet[str] = None) -> List[PlannedOperator]:
return get_available_operators("estimator", tags)
def get_available_transformers(tags: AbstractSet[str] = None) -> List[PlannedOperator]:
return get_available_operators("transformer", tags)
OpType = TypeVar("OpType", bound=Operator, covariant=True)
class BasePipeline(Operator, Generic[OpType]):
"""
This is a concrete class that can instantiate a new pipeline operator and provide access to its meta data.
"""
_steps: List[OpType]
_preds: Dict[OpType, List[OpType]]
_cached_preds: Optional[Dict[int, List[int]]]
_name: str
def _steps_to_indices(self) -> Dict[OpType, int]:
return dict([(op, i) for (i, op) in enumerate(self._steps)])
def _preds_to_indices(self) -> Dict[int, List[int]]:
step_map = self._steps_to_indices()
return {
step_map[k]: ([step_map[v] for v in vs]) for (k, vs) in self._preds.items()
}
def _get_preds_indices(self) -> Dict[int, List[int]]:
p: Dict[int, List[int]]
if self._cached_preds is None:
p = self._preds_to_indices()
self._cached_preds = p
else:
p = self._cached_preds
return p
@property
def _estimator_type(self):
estimator = self._final_individual_op
if estimator is not None:
return estimator._estimator_type
@classmethod
def _indices_to_preds(
cls, _steps: List[OpType], _pred_indices: Dict[int, List[int]]
) -> Dict[OpType, List[OpType]]:
return {
_steps[k]: ([_steps[v] for v in vs]) for (k, vs) in _pred_indices.items()
}
def get_params(self, deep: bool = True) -> Dict[str, Any]:
out: Dict[str, Any] = {}
out["steps"] = self._steps
out["_lale_preds"] = self._get_preds_indices()
indices: Dict[str, int] = {}
def make_indexed(name: str) -> str:
idx = 0
if name in indices:
idx = indices[name] + 1
indices[name] = idx
else:
indices[name] = 0
return make_indexed_name(name, idx)
if deep:
for op in self._steps:
name = make_indexed(op.name())
nested_params = op.get_params(deep=deep)
if nested_params:
out.update(nest_HPparams(name, nested_params))
return out
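# When deep=True, hyperparameters of each step are flattened into this dict
# under per-step prefixes; steps that share a name get distinct indexed names
# (the exact key format is whatever make_indexed_name and nest_HPparams
# produce), which is what set_params later uses to route values back to steps.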
def set_params(self, **impl_params):
"""This implements the set_params, as per the scikit-learn convention,
extended as documented in the module docstring"""
return self._with_params(True, **impl_params)
def _with_params(self, try_mutate: bool, **impl_params) -> "BasePipeline[OpType]":
steps = self.steps()
main_params, partitioned_sub_params = partition_sklearn_params(impl_params)
assert not main_params, f"Unexpected non-nested arguments {main_params}"
found_names: Dict[str, int] = {}
step_map: Dict[OpType, OpType] = {}
for s in steps:
name = s.name()
name_index = 0
params: Dict[str, Any] = {}
if name in found_names:
name_index = found_names[name] + 1
found_names[name] = name_index
uname = make_indexed_name(name, name_index)
if uname in partitioned_sub_params:
params = partitioned_sub_params[uname]
else:
found_names[name] = 0
uname = make_degen_indexed_name(name, 0)
if uname in partitioned_sub_params:
params = partitioned_sub_params[uname]
assert name not in partitioned_sub_params
elif name in partitioned_sub_params:
params = partitioned_sub_params[name]
new_s = s._with_params(try_mutate, **params)
if s != new_s:
# getting this to statically type check would be very complicated
# if even possible
step_map[s] = new_s # type: ignore
# make sure that no parameters were passed in for operations
# that are not actually part of this pipeline
for k in partitioned_sub_params.keys():
n, i = get_name_and_index(k)
assert n in found_names and i <= found_names[n]
if try_mutate:
if step_map:
self._subst_steps(step_map)
pipeline_graph_class = _pipeline_graph_class(self.steps())
self.__class__ = pipeline_graph_class # type: ignore
return self
else:
needs_copy = False
if step_map:
needs_copy = True
else:
pipeline_graph_class = _pipeline_graph_class(self.steps())
if pipeline_graph_class != self.__class__:
needs_copy = True
if needs_copy:
# it may be better practice to change the steps/edges ahead of time
# and then create the correct class
op_copy = make_pipeline_graph(self.steps(), self.edges(), ordered=True) # type: ignore
op_copy._subst_steps(step_map)
pipeline_graph_class = _pipeline_graph_class(op_copy.steps())
op_copy.__class__ = pipeline_graph_class # type: ignore
return op_copy
else:
return self
def __init__(
self,
steps: List[OpType],
edges: Optional[Iterable[Tuple[OpType, OpType]]] = None,
_lale_preds: Optional[
Union[Dict[int, List[int]], Dict[OpType, List[OpType]]]
] = None,
ordered: bool = False,
) -> None:
self._name = "pipeline_" + str(id(self))
self._preds = {}
for step in steps:
assert isinstance(step, Operator)
if _lale_preds is not None:
# this is a special case that is meant for use with cloning
# if preds is set, we assume that it is ordered as well
assert edges is None
self._steps = steps
if _lale_preds:
# TODO: improve typing situation
if isinstance(list(_lale_preds.keys())[0], int):
self._preds = self._indices_to_preds(steps, _lale_preds) # type: ignore
self._cached_preds = _lale_preds # type: ignore
else:
self._preds = _lale_preds # type: ignore
self._cached_preds = None # type: ignore
else:
self._cached_preds = _lale_preds # type: ignore
return
self._cached_preds = None
if edges is None:
# Which means this is a linear pipeline. TODO: Test extensively with clone and get_params
# This constructor is mostly called due to cloning. Make sure the objects are kept the same.
self.__constructor_for_cloning(steps)
else:
self._steps = []
for step in steps:
if step in self._steps:
raise ValueError(
"Same instance of {} already exists in the pipeline. "
"This is not allowed.".format(step.name())
)
if isinstance(step, BasePipeline):
# PIPELINE_TYPE_INVARIANT_NOTE
# we use tstep (typed step) here to help pyright
# with some added information we have:
# Since the step is an OpType, if it is a pipeline,
# then its steps must all be at least OpType as well
# this invariant is not expressible in the type system due to
# the open world assumption, but is intended to hold
tstep: BasePipeline[OpType] = step
# Flatten out the steps and edges
self._steps.extend(tstep.steps())
# from step's edges, find out all the source and sink nodes
source_nodes = [
dst
for dst in tstep.steps()
if (step._preds[dst] is None or step._preds[dst] == [])
]
sink_nodes = tstep._find_sink_nodes()
# Now replace the edges to and from the inner pipeline with edges to and from its source and sink nodes, respectively
new_edges = tstep.edges()
# list comprehension at the cost of iterating edges thrice
new_edges.extend(
[
(node, edge[1])
for edge in edges
if edge[0] == tstep
for node in sink_nodes
]
)
new_edges.extend(
[
(edge[0], node)
for edge in edges
if edge[1] == tstep
for node in source_nodes
]
)
new_edges.extend(
[
edge
for edge in edges
if (edge[1] != tstep and edge[0] != tstep)
]
)
edges = new_edges
else:
self._steps.append(step)
self._preds = {step: [] for step in self._steps}
for (src, dst) in edges:
self._preds[dst].append(src)
if not ordered:
self.__sort_topologically()
assert self.__is_in_topological_order()
def __constructor_for_cloning(self, steps: List[OpType]):
edges: List[Tuple[OpType, OpType]] = []
prev_op: Optional[OpType] = None
# This is due to scikit-learn's base clone method, which needs the same list object
self._steps = steps
prev_leaves: List[OpType]
curr_roots: List[OpType]
for curr_op in self._steps:
if isinstance(prev_op, BasePipeline):
# using tprev_op as per PIPELINE_TYPE_INVARIANT_NOTE above
tprev_op: BasePipeline[OpType] = prev_op
prev_leaves = tprev_op._find_sink_nodes()
else:
prev_leaves = [] if prev_op is None else [prev_op]
prev_op = curr_op
if isinstance(curr_op, BasePipeline):
# using tcurr_op as per PIPELINE_TYPE_INVARIANT_NOTE above
tcurr_op: BasePipeline[OpType] = curr_op
curr_roots = tcurr_op._find_source_nodes()
self._steps.extend(tcurr_op.steps())
edges.extend(tcurr_op.edges())
else:
curr_roots = [curr_op]
edges.extend([(src, tgt) for src in prev_leaves for tgt in curr_roots])
seen_steps: List[OpType] = []
for step in self._steps:
if step in seen_steps:
raise ValueError(
"Same instance of {} already exists in the pipeline. "
"This is not allowed.".format(step.name())
)
seen_steps.append(step)
self._preds = {step: [] for step in self._steps}
for (src, dst) in edges:
self._preds[dst].append(src)
# Since this case is only allowed for linear pipelines, it is always
# expected to be in topological order
assert self.__is_in_topological_order()
def edges(self) -> List[Tuple[OpType, OpType]]:
return [(src, dst) for dst in self._steps for src in self._preds[dst]]
def __is_in_topological_order(self) -> bool:
seen: Dict[OpType, bool] = {}
for operator in self._steps:
for pred in self._preds[operator]:
if pred not in seen:
return False
seen[operator] = True
return True
def steps(self) -> List[OpType]:
return self._steps
def _subst_steps(self, m: Dict[OpType, OpType]) -> None:
if m:
# for i, s in enumerate(self._steps):
# self._steps[i] = m.get(s,s)
self._steps = [m.get(s, s) for s in self._steps]
self._preds = {
m.get(k, k): [m.get(s, s) for s in v] for k, v in self._preds.items()
}
def __sort_topologically(self) -> None:
class state(enumeration.Enum):
TODO = (enumeration.auto(),)
DOING = (enumeration.auto(),)
DONE = enumeration.auto()
states: Dict[OpType, state] = {op: state.TODO for op in self._steps}
result: List[OpType] = []
# Since OpType is covariant, this is disallowed by mypy for safety
# in this case it is safe, since while the value of result will be written
# into _steps, all the values in result came from _steps originally
def dfs(operator: OpType) -> None: # type: ignore
if states[operator] is state.DONE:
return
if states[operator] is state.DOING:
raise ValueError("Cycle detected.")
states[operator] = state.DOING
for pred in self._preds[operator]:
dfs(pred)
states[operator] = state.DONE
result.append(operator)
for operator in self._steps:
if states[operator] is state.TODO:
dfs(operator)
self._steps = result
def _has_same_impl(self, other: Operator) -> bool:
"""Checks if the type of the operator imnplementations are compatible"""
if not isinstance(other, BasePipeline):
return False
my_steps = self.steps()
other_steps = other.steps()
if len(my_steps) != len(other_steps):
return False
for (m, o) in zip(my_steps, other_steps):
if not m._has_same_impl(o):
return False
return True
def _find_sink_nodes(self) -> List[OpType]:
is_sink = {s: True for s in self.steps()}
for src, _ in self.edges():
is_sink[src] = False
result = [s for s in self.steps() if is_sink[s]]
return result
def _find_source_nodes(self) -> List[OpType]:
is_source = {s: True for s in self.steps()}
for _, dst in self.edges():
is_source[dst] = False
result = [s for s in self.steps() if is_source[s]]
return result
def _validate_or_transform_schema(self, X, y=None, validate=True):
def combine_schemas(schemas):
n_datasets = len(schemas)
if n_datasets == 1:
result = schemas[0]
else:
result = {
"type": "array",
"minItems": n_datasets,
"maxItems": n_datasets,
"items": [lale.datasets.data_schemas.to_schema(i) for i in schemas],
}
return result
outputs = {}
for operator in self._steps:
preds = self._preds[operator]
if len(preds) == 0:
inputs = X
else:
inputs = combine_schemas([outputs[pred] for pred in preds])
if validate:
operator.validate_schema(X=inputs, y=y)
output = operator.transform_schema(inputs)
outputs[operator] = output
if not validate:
sinks = self._find_sink_nodes()
pipeline_outputs = [outputs[sink] for sink in sinks]
return combine_schemas(pipeline_outputs)
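# The traversal above threads schemas through the pipeline graph: a step with
# no predecessors sees the pipeline input, other steps see the combined output
# schemas of their predecessors, and in the transform (non-validate) case the
# combined sink-node schemas become the pipeline's output schema.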
def validate_schema(self, X, y=None):
self._validate_or_transform_schema(X, y, validate=True)
def transform_schema(self, s_X):
from lale.settings import disable_data_schema_validation
if disable_data_schema_validation:
return {}
else:
return self._validate_or_transform_schema(s_X, validate=False)
def input_schema_fit(self) -> JSON_TYPE:
sources = self._find_source_nodes()
pipeline_inputs = [source.input_schema_fit() for source in sources]
result = lale.type_checking.join_schemas(*pipeline_inputs)
return result
def is_supervised(self) -> bool:
s = self.steps()
if len(s) == 0:
return False
return self.steps()[-1].is_supervised()
def remove_last(self, inplace: bool = False) -> "BasePipeline[OpType]":
sink_nodes = self._find_sink_nodes()
if len(sink_nodes) > 1:
raise ValueError(
"This pipeline has more than 1 sink nodes, can not remove last step meaningfully."
)
elif not inplace:
modified_pipeline = copy.deepcopy(self)
old_clf = modified_pipeline._steps[-1]
modified_pipeline._steps.remove(old_clf)
del modified_pipeline._preds[old_clf]
return modified_pipeline
else:
old_clf = self._steps[-1]
self._steps.remove(old_clf)
del self._preds[old_clf]
return self
def get_last(self) -> Optional[OpType]:
sink_nodes = self._find_sink_nodes()
if len(sink_nodes) > 1:
return None
else:
old_clf = self._steps[-1]
return old_clf
def export_to_sklearn_pipeline(self):
from sklearn.pipeline import FeatureUnion, make_pipeline
from lale.lib.lale.concat_features import ConcatFeatures
from lale.lib.lale.no_op import NoOp
from lale.lib.lale.relational import Relational
def convert_nested_objects(node):
for element in dir(node): # Looking at only 1 level for now.
try:
value = getattr(node, element)
if isinstance(value, IndividualOp):
if isinstance(
value._impl_instance(), sklearn.base.BaseEstimator
):
setattr(node, element, value._impl_instance())
if hasattr(value._impl_instance(), "_wrapped_model"):
# node is a higher order operator
setattr(
node, element, value._impl_instance()._wrapped_model
)
stripped = lale.datasets.data_schemas.strip_schema(value)
if value is stripped:
continue
setattr(node, element, stripped)
except BaseException:
# This is an optional processing, so if there is any exception, continue.
# For example, some scikit-learn classes will fail at getattr because they have
# that property defined conditionally.
pass
def create_pipeline_from_sink_node(sink_node):
# Ensure that the pipeline is either linear or has a "union followed by concat" construct
# Translate the "union followed by concat" constructs to "featureUnion"
# Inspect the node and convert any data with schema objects to original data types
if isinstance(sink_node, OperatorChoice):
raise ValueError(
"A pipeline that has an OperatorChoice can not be converted to "
" a scikit-learn pipeline:{}".format(self.to_json())
)
if sink_node._impl_class() == Relational._impl_class():
return None
convert_nested_objects(sink_node._impl)
if sink_node._impl_class() == ConcatFeatures._impl_class():
list_of_transformers = []
for pred in self._preds[sink_node]:
pred_transformer = create_pipeline_from_sink_node(pred)
list_of_transformers.append(
(
pred.name() + "_" + str(id(pred)),
make_pipeline(*pred_transformer)
if isinstance(pred_transformer, list)
else pred_transformer,
)
)
return FeatureUnion(list_of_transformers)
else:
preds = self._preds[sink_node]
if preds is not None and len(preds) > 1:
raise ValueError(
"A pipeline graph that has operators other than ConcatFeatures with "
"multiple incoming edges is not a valid scikit-learn pipeline:{}".format(
self.to_json()
)
)
else:
if hasattr(sink_node._impl_instance(), "_wrapped_model"):
sklearn_op = sink_node._impl_instance()._wrapped_model
convert_nested_objects(
sklearn_op
) # This case needs one more level of conversion
else:
sklearn_op = sink_node._impl_instance()
sklearn_op = copy.deepcopy(sklearn_op)
if preds is None or len(preds) == 0:
return sklearn_op
else:
output_pipeline_steps = []
previous_sklearn_op = create_pipeline_from_sink_node(preds[0])
if previous_sklearn_op is not None and not isinstance(
previous_sklearn_op, NoOp._impl_class()
):
if isinstance(previous_sklearn_op, list):
output_pipeline_steps = previous_sklearn_op
else:
output_pipeline_steps.append(previous_sklearn_op)
if not isinstance(
sklearn_op, NoOp._impl_class()
): # Append the current op only if not NoOp
output_pipeline_steps.append(sklearn_op)
return output_pipeline_steps
sklearn_steps_list = []
# Finding the sink node so that we can do a backward traversal
sink_nodes = self._find_sink_nodes()
# For a trained pipeline that is scikit compatible, there should be only one sink node
if len(sink_nodes) != 1:
raise ValueError(
"A pipeline graph that ends with more than one estimator is not a"
" valid scikit-learn pipeline:{}".format(self.to_json())
)
else:
sklearn_steps_list = create_pipeline_from_sink_node(sink_nodes[0])
# not checking for isinstance(sklearn_steps_list, NoOp) here as there is no valid sklearn pipeline with just one NoOp.
try:
sklearn_pipeline = (
make_pipeline(*sklearn_steps_list)
if isinstance(sklearn_steps_list, list)
else make_pipeline(sklearn_steps_list)
)
except TypeError:
raise TypeError(
"Error creating a scikit-learn pipeline, most likely because the steps are not scikit compatible."
)
return sklearn_pipeline
def is_classifier(self) -> bool:
sink_nodes = self._find_sink_nodes()
for op in sink_nodes:
if not op.is_classifier():
return False
return True
def get_defaults(self) -> Dict[str, Any]:
defaults_list: Iterable[Dict[str, Any]] = (
lale.helpers.nest_HPparams(s.name(), s.get_defaults()) for s in self.steps()
)
# TODO: could this just be dict(defaults_list)
defaults: Dict[str, Any] = {}
for d in defaults_list:
defaults.update(d)
return defaults
@property
def _final_individual_op(self) -> Optional["IndividualOp"]:
op = self.get_last()
if op is None:
return None
else:
return op._final_individual_op
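# Illustrative sketch (not part of the library): the depth-first topological
# sort that BasePipeline.__sort_topologically performs above, restated over a
# plain predecessor mapping so the algorithm can be read in isolation.
def _example_topological_sort(preds):
    """Return the keys of preds ordered so that every predecessor of a step
    appears before that step, e.g. preds = {"scale": [], "pca": ["scale"],
    "clf": ["pca"]} yields ["scale", "pca", "clf"]."""
    TODO, DOING, DONE = "todo", "doing", "done"
    states = {step: TODO for step in preds}
    result = []
    def dfs(step):
        if states[step] == DONE:
            return
        if states[step] == DOING:
            raise ValueError("Cycle detected.")
        states[step] = DOING
        for pred in preds[step]:
            dfs(pred)
        states[step] = DONE
        result.append(step)
    for step in preds:
        if states[step] == TODO:
            dfs(step)
    return result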
PlannedOpType = TypeVar("PlannedOpType", bound=PlannedOperator, covariant=True)
class PlannedPipeline(BasePipeline[PlannedOpType], PlannedOperator):
def __init__(
self,
steps: List[PlannedOpType],
edges: Optional[Iterable[Tuple[PlannedOpType, PlannedOpType]]] = None,
_lale_preds: Optional[Dict[int, List[int]]] = None,
ordered: bool = False,
) -> None:
super(PlannedPipeline, self).__init__(
steps, edges=edges, _lale_preds=_lale_preds, ordered=ordered
)
# give it a more precise type: if the input is a pipeline, the output is as well
def auto_configure(
self, X, y=None, optimizer=None, cv=None, scoring=None, **kwargs
) -> "TrainedPipeline":
trained = super().auto_configure(
X, y=y, optimizer=optimizer, cv=cv, scoring=scoring, **kwargs
)
assert isinstance(trained, TrainedPipeline)
return trained
def remove_last(self, inplace: bool = False) -> "PlannedPipeline[PlannedOpType]":
pipe = super().remove_last(inplace=inplace)
assert isinstance(pipe, PlannedPipeline)
return pipe
def is_frozen_trainable(self) -> bool:
return all([step.is_frozen_trainable() for step in self.steps()])
def is_frozen_trained(self) -> bool:
return all([step.is_frozen_trained() for step in self.steps()])
TrainableOpType = TypeVar(
"TrainableOpType", bound=TrainableIndividualOp, covariant=True
)
class TrainablePipeline(PlannedPipeline[TrainableOpType], TrainableOperator):
def __init__(
self,
steps: List[TrainableOpType],
edges: Optional[Iterable[Tuple[TrainableOpType, TrainableOpType]]] = None,
_lale_preds: Optional[Dict[int, List[int]]] = None,
ordered: bool = False,
_lale_trained=False,
) -> None:
super(TrainablePipeline, self).__init__(
steps, edges=edges, _lale_preds=_lale_preds, ordered=ordered
)
def remove_last(
self, inplace: bool = False
) -> "TrainablePipeline[TrainableOpType]":
pipe = super().remove_last(inplace=inplace)
assert isinstance(pipe, TrainablePipeline)
return pipe
def fit(self, X, y=None, **fit_params) -> "TrainedPipeline[TrainedIndividualOp]":
# filtered_fit_params = _fixup_hyperparams_dict(fit_params)
X = lale.datasets.data_schemas.add_schema(X)
y = lale.datasets.data_schemas.add_schema(y)
self.validate_schema(X, y)
trained_steps: List[TrainedIndividualOp] = []
outputs: Dict[Operator, Any] = {}
meta_outputs: Dict[Operator, Any] = {}
edges: List[Tuple[TrainableOpType, TrainableOpType]] = self.edges()
trained_map: Dict[TrainableOpType, TrainedIndividualOp] = {}
sink_nodes = self._find_sink_nodes()
for operator in self._steps:
preds = self._preds[operator]
if len(preds) == 0:
inputs = [X]
meta_data_inputs: Dict[Operator, Any] = {}
else:
inputs = [outputs[pred] for pred in preds]
# we create meta_data_inputs as a dictionary with metadata from all previous steps
# Note that if multiple previous steps generate the same key, it will retain only one of those.
meta_data_inputs = {
key: meta_outputs[pred][key]
for pred in preds
if meta_outputs[pred] is not None
for key in meta_outputs[pred]
}
trainable = operator
if len(inputs) == 1:
inputs = inputs[0]
if operator.has_method("set_meta_data"):
operator._impl_instance().set_meta_data(meta_data_inputs)
meta_output: Dict[Operator, Any] = {}
trained: TrainedOperator
if isinstance(
inputs, tuple
): # This is the case for transformers which return X and y, such as resamplers.
inputs, y = inputs
if trainable.is_supervised():
trained = trainable.fit(X=inputs, y=y)
else:
trained = trainable.fit(X=inputs)
trained_map[operator] = trained
trained_steps.append(trained)
if (
trainable not in sink_nodes
): # There is no need to transform/predict on the last node during fit
if trained.is_transformer():
output = trained.transform(X=inputs, y=y)
if trained.has_method("get_transform_meta_output"):
meta_output = (
trained._impl_instance().get_transform_meta_output()
)
else:
if trainable in sink_nodes:
output = trained._predict(
X=inputs
) # We don't support y for predict yet as there is no compelling case
else:
# This is ok because trainable pipelines steps
# must only be individual operators
if trained.has_method("predict_proba"): # type: ignore
output = trained.predict_proba(X=inputs)
elif trained.has_method("decision_function"): # type: ignore
output = trained.decision_function(X=inputs)
else:
output = trained._predict(X=inputs)
if trained.has_method("get_predict_meta_output"):
meta_output = trained._impl_instance().get_predict_meta_output()
outputs[operator] = output
meta_output_so_far = {
key: meta_outputs[pred][key]
for pred in preds
if meta_outputs[pred] is not None
for key in meta_outputs[pred]
}
meta_output_so_far.update(
meta_output
) # So newest gets preference in case of collisions
meta_outputs[operator] = meta_output_so_far
trained_edges = [(trained_map[x], trained_map[y]) for (x, y) in edges]
result: TrainedPipeline[TrainedIndividualOp] = TrainedPipeline(
trained_steps, trained_edges, ordered=True, _lale_trained=True
)
self._trained = result
return result
def transform(self, X, y=None) -> Any:
"""
.. deprecated:: 0.0.0
The `transform` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `transform`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("transform"), DeprecationWarning)
try:
            return self._trained.transform(X, y=y)
except AttributeError:
raise ValueError("Must call `fit` before `transform`.")
def predict(self, X, **predict_params) -> Any:
"""
.. deprecated:: 0.0.0
The `predict` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `predict`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("predict"), DeprecationWarning)
try:
return self._trained.predict(X, **predict_params)
except AttributeError:
raise ValueError("Must call `fit` before `predict`.")
def predict_proba(self, X):
"""
.. deprecated:: 0.0.0
The `predict_proba` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `predict_proba`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("predict_proba"), DeprecationWarning)
try:
return self._trained.predict_proba(X)
except AttributeError:
raise ValueError("Must call `fit` before `predict_proba`.")
def decision_function(self, X):
"""
.. deprecated:: 0.0.0
The `decision_function` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `decision_function`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("decision_function"), DeprecationWarning)
try:
return self._trained.decision_function(X)
except AttributeError:
raise ValueError("Must call `fit` before `decision_function`.")
def score(self, X, y, **score_params):
"""
.. deprecated:: 0.0.0
The `score` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `score`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("score"), DeprecationWarning)
try:
return self._trained.score(X, y, **score_params)
except AttributeError:
raise ValueError("Must call `fit` before `score`.")
def score_samples(self, X=None):
"""
.. deprecated:: 0.0.0
The `score_samples` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `score_samples`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("score_samples"), DeprecationWarning)
try:
return self._trained.score_samples(X)
except AttributeError:
raise ValueError("Must call `fit` before `score_samples`.")
def predict_log_proba(self, X):
"""
.. deprecated:: 0.0.0
The `predict_log_proba` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `predict_log_proba`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("predict_log_proba"), DeprecationWarning)
try:
return self._trained.predict_log_proba(X)
except AttributeError:
raise ValueError("Must call `fit` before `predict_log_proba`.")
def freeze_trainable(self) -> "TrainablePipeline":
frozen_steps: List[TrainableOperator] = []
frozen_map: Dict[Operator, Operator] = {}
for liquid in self._steps:
frozen = liquid.freeze_trainable()
frozen_map[liquid] = frozen
frozen_steps.append(frozen)
frozen_edges = [(frozen_map[x], frozen_map[y]) for x, y in self.edges()]
result = cast(
TrainablePipeline,
make_pipeline_graph(frozen_steps, frozen_edges, ordered=True),
)
assert result.is_frozen_trainable()
return result
def fit_with_batches(
self, X, y=None, serialize=True, num_epochs_batching=None
) -> "TrainedPipeline[TrainedIndividualOp]":
"""[summary]
Parameters
----------
X :
[description]
y : [type], optional
For a supervised pipeline, this is an array with the unique class labels
in the entire dataset, by default None
Returns
-------
[type]
[description]
"""
trained_steps: List[TrainedIndividualOp] = []
outputs: Dict[Operator, Any] = {}
edges: List[Tuple[TrainableOpType, TrainableOpType]] = self.edges()
trained_map: Dict[TrainableOpType, TrainedIndividualOp] = {}
serialization_out_dir: Text = ""
if serialize:
serialization_out_dir = os.path.join(
os.path.dirname(__file__), "temp_serialized"
)
if not os.path.exists(serialization_out_dir):
os.mkdir(serialization_out_dir)
sink_nodes = self._find_sink_nodes()
operator_idx = 0
for operator in self._steps:
preds = self._preds[operator]
if len(preds) == 0:
inputs = [X]
else:
inputs = [
outputs[pred][0]
if isinstance(outputs[pred], tuple)
else outputs[pred]
for pred in preds
]
trainable = operator
if len(inputs) == 1:
inputs = inputs[0]
trained: Optional[TrainedIndividualOp] = None
if trainable.has_method("partial_fit"):
try:
num_epochs = trainable._impl_instance().num_epochs
except AttributeError:
if num_epochs_batching is None:
warnings.warn(
"Operator {} does not have num_epochs and none given to Batching operator, using 1 as a default".format(
trainable.name()
)
)
num_epochs = 1
else:
num_epochs = num_epochs_batching
assert num_epochs >= 0
else:
raise AttributeError(
"All operators to be trained with batching need to implement partial_fit. {} doesn't.".format(
operator.name()
)
)
inputs_for_transform: Any = inputs
for epoch in range(num_epochs):
for _, batch_data in enumerate(
inputs
): # batching_transformer will output only one obj
if isinstance(batch_data, tuple):
batch_X, batch_y = batch_data
elif isinstance(batch_data, list):
batch_X = batch_data[0]
batch_y = batch_data[1]
else:
batch_X = batch_data
batch_y = None
if trainable.is_supervised():
try:
trained = trainable.partial_fit(batch_X, batch_y, classes=y)
except TypeError:
trained = trainable.partial_fit(batch_X, batch_y)
else:
trained = trainable.partial_fit(batch_X)
assert trained is not None
trained = TrainedIndividualOp(
trained.name(),
trained._impl,
trained._schemas,
None,
_lale_trained=True,
)
trained_map[operator] = trained
trained_steps.append(trained)
output = None
for batch_idx, batch_data in enumerate(
inputs_for_transform
): # batching_transformer will output only one obj
if isinstance(batch_data, tuple):
batch_X, batch_y = batch_data
elif isinstance(batch_data, list):
batch_X = batch_data[0]
batch_y = batch_data[1]
else:
batch_X = batch_data
batch_y = None
if trained.is_transformer():
batch_output = trained.transform(batch_X, batch_y)
else:
if trainable in sink_nodes:
batch_output = trained._predict(
X=batch_X
) # We don't support y for predict yet as there is no compelling case
else:
# This is ok because trainable pipelines steps
# must only be individual operators
if trained.has_method("predict_proba"): # type: ignore
batch_output = trained.predict_proba(X=batch_X)
elif trained.has_method("decision_function"): # type: ignore
batch_output = trained.decision_function(X=batch_X)
else:
batch_output = trained._predict(X=batch_X)
if isinstance(batch_output, tuple):
batch_out_X, batch_out_y = batch_output
else:
batch_out_X = batch_output
batch_out_y = None
if serialize:
output = lale.helpers.write_batch_output_to_file(
output,
os.path.join(
serialization_out_dir,
"fit_with_batches" + str(operator_idx) + ".hdf5",
),
len(inputs_for_transform.dataset),
batch_idx,
batch_X,
batch_y,
batch_out_X,
batch_out_y,
)
else:
if batch_out_y is None:
output = lale.helpers.append_batch(
output, (batch_output, batch_y)
)
else:
output = lale.helpers.append_batch(output, batch_output)
if serialize:
output.close() # type: ignore
output = lale.helpers.create_data_loader(
os.path.join(
serialization_out_dir,
"fit_with_batches" + str(operator_idx) + ".hdf5",
),
batch_size=inputs_for_transform.batch_size,
)
else:
if isinstance(output, tuple):
output = lale.helpers.create_data_loader(
X=output[0],
y=output[1],
batch_size=inputs_for_transform.batch_size,
)
else:
output = lale.helpers.create_data_loader(
X=output, y=None, batch_size=inputs_for_transform.batch_size
)
outputs[operator] = output
operator_idx += 1
if serialize:
shutil.rmtree(serialization_out_dir)
trained_edges = [(trained_map[x], trained_map[y]) for (x, y) in edges]
trained_steps2: Any = trained_steps
result: TrainedPipeline[TrainedIndividualOp] = TrainedPipeline(
trained_steps2, trained_edges, ordered=True, _lale_trained=True
)
self._trained = result
return result
def is_transformer(self) -> bool:
"""Checks if the operator is a transformer"""
sink_nodes = self._find_sink_nodes()
all_transformers = [
True if operator.has_method("transform") else False
for operator in sink_nodes
]
return all(all_transformers)
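# Illustrative sketch (not part of the library): the usage pattern that the
# deprecation warnings on TrainablePipeline.predict/transform above point to.
# Instead of predicting with the trainable pipeline, call fit and use the
# TrainedPipeline it returns. The operators PCA and LogisticRegression and the
# X/y arguments are assumptions standing in for any lale operators and data.
def _example_fit_then_predict(X_train, y_train, X_test):
    from lale.lib.sklearn import PCA, LogisticRegression  # assumed import path
    trainable = make_pipeline(PCA(), LogisticRegression())
    trained = trainable.fit(X_train, y_train)  # returns a TrainedPipeline
    return trained.predict(X_test)  # no mutation warning; coefficients are fixed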
TrainedOpType = TypeVar("TrainedOpType", bound=TrainedIndividualOp, covariant=True)
class TrainedPipeline(TrainablePipeline[TrainedOpType], TrainedOperator):
def __new__(cls, *args, _lale_trained=False, **kwargs):
if "steps" not in kwargs or _lale_trained:
obj = super(TrainedPipeline, cls).__new__(TrainedPipeline)
return obj
else:
# unless _lale_trained=True, we actually want to return a Trainable
obj = super(TrainedPipeline, cls).__new__(TrainablePipeline)
            # apparently python does not call __init__ if the type returned is not the
# expected type
obj.__init__(*args, **kwargs)
return obj
def __init__(
self,
steps: List[TrainedOpType],
edges: Optional[List[Tuple[TrainedOpType, TrainedOpType]]] = None,
_lale_preds: Optional[Dict[int, List[int]]] = None,
ordered: bool = False,
_lale_trained=False,
) -> None:
super(TrainedPipeline, self).__init__(
steps, edges=edges, _lale_preds=_lale_preds, ordered=ordered
)
def remove_last(self, inplace: bool = False) -> "TrainedPipeline[TrainedOpType]":
pipe = super().remove_last(inplace)
assert isinstance(pipe, TrainedPipeline)
return pipe
def _predict(self, X, y=None, **predict_params):
return self._predict_based_on_type(
"predict", "_predict", X, y, **predict_params
)
def predict(self, X, **predict_params) -> Any:
result = self._predict(X, **predict_params)
if isinstance(result, lale.datasets.data_schemas.NDArrayWithSchema):
return lale.datasets.data_schemas.strip_schema(
result
) # otherwise scorers return zero-dim array
return result
def transform(self, X, y=None) -> Any:
# TODO: What does a transform on a pipeline mean, if the last step is not a transformer
# can it be just the output of predict of the last step?
# If this implementation changes, check to make sure that the implementation of
# self.is_transformer is kept in sync with the new assumptions.
return self._predict_based_on_type("transform", "transform", X, y)
def _predict_based_on_type(
self, impl_method_name, operator_method_name, X=None, y=None, **kwargs
):
outputs = {}
meta_outputs = {}
sink_nodes = self._find_sink_nodes()
for operator in self._steps:
preds = self._preds[operator]
if len(preds) == 0:
inputs = [X]
meta_data_inputs = {}
else:
inputs = [
outputs[pred][0]
if isinstance(outputs[pred], tuple)
else outputs[pred]
for pred in preds
]
# we create meta_data_inputs as a dictionary with metadata from all previous steps
# Note that if multiple previous steps generate the same key, it will retain only one of those.
meta_data_inputs = {
key: meta_outputs[pred][key]
for pred in preds
if meta_outputs[pred] is not None
for key in meta_outputs[pred]
}
if len(inputs) == 1:
inputs = inputs[0]
if operator.has_method("set_meta_data"):
operator._impl_instance().set_meta_data(meta_data_inputs)
meta_output = {}
if operator in sink_nodes:
if operator.has_method(
impl_method_name
): # Since this is pipeline's predict, we should invoke predict from sink nodes
method_to_call_on_operator = getattr(operator, operator_method_name)
if operator_method_name == "score":
output = method_to_call_on_operator(X=inputs, y=y, **kwargs)
else:
output = method_to_call_on_operator(X=inputs, **kwargs)
else:
raise AttributeError(
"The sink node of the pipeline does not support",
operator_method_name,
)
elif operator.is_transformer():
output = operator.transform(X=inputs, y=y)
if hasattr(operator._impl, "get_transform_meta_output"):
meta_output = operator._impl_instance().get_transform_meta_output()
elif operator.has_method(
"predict_proba"
): # For estimator as a transformer, use predict_proba if available
output = operator.predict_proba(X=inputs)
elif operator.has_method(
"decision_function"
): # For estimator as a transformer, use decision_function if available
output = operator.decision_function(X=inputs)
else:
output = operator._predict(X=inputs)
if operator.has_method("get_predict_meta_output"):
meta_output = operator._impl_instance().get_predict_meta_output()
outputs[operator] = output
meta_output_so_far = {
key: meta_outputs[pred][key]
for pred in preds
if meta_outputs[pred] is not None
for key in meta_outputs[pred]
}
meta_output_so_far.update(
meta_output
) # So newest gets preference in case of collisions
meta_outputs[operator] = meta_output_so_far
result = outputs[self._steps[-1]]
return result
def predict_proba(self, X):
"""Probability estimates for all classes.
Parameters
----------
X :
Features; see input_predict_proba schema of the operator.
Returns
-------
result :
Probabilities; see output_predict_proba schema of the operator.
"""
return self._predict_based_on_type("predict_proba", "predict_proba", X)
def decision_function(self, X):
"""Confidence scores for all classes.
Parameters
----------
X :
Features; see input_decision_function schema of the operator.
Returns
-------
result :
Confidences; see output_decision_function schema of the operator.
"""
return self._predict_based_on_type("decision_function", "decision_function", X)
def score(self, X, y, **score_params):
"""Performance evaluation with a default metric based on the final estimator.
Parameters
----------
X :
Features.
y:
Ground truth labels.
score_params:
Any additional parameters expected by the score function of
the final estimator. These will be ignored for now.
Returns
-------
score :
Performance metric value.
"""
return self._predict_based_on_type("score", "score", X, y)
def score_samples(self, X=None):
"""Scores for each sample in X. There type of scores is based on the last operator in the pipeline.
Parameters
----------
X :
Features.
Returns
-------
result :
Scores per sample.
"""
return self._predict_based_on_type("score_samples", "score_samples", X)
def predict_log_proba(self, X):
"""Predicted class log-probabilities for X.
Parameters
----------
X :
Features.
Returns
-------
result :
Class log probabilities.
"""
return self._predict_based_on_type("predict_log_proba", "predict_log_proba", X)
def transform_with_batches(self, X, y=None, serialize=True):
"""[summary]
Parameters
----------
X : [type]
[description]
y : [type], optional
by default None
Returns
-------
[type]
[description]
"""
outputs = {}
serialization_out_dir: Text = ""
if serialize:
serialization_out_dir = os.path.join(
os.path.dirname(__file__), "temp_serialized"
)
if not os.path.exists(serialization_out_dir):
os.mkdir(serialization_out_dir)
sink_nodes = self._find_sink_nodes()
operator_idx = 0
inputs: Any
for operator in self._steps:
preds = self._preds[operator]
if len(preds) == 0:
inputs = [X]
else:
inputs = [
outputs[pred][0]
if isinstance(outputs[pred], tuple)
else outputs[pred]
for pred in preds
]
if len(inputs) == 1:
inputs = inputs[0]
trained = operator
output = None
for batch_idx, batch_data in enumerate(
inputs
): # batching_transformer will output only one obj
                if isinstance(batch_data, tuple):
batch_X, batch_y = batch_data
else:
batch_X = batch_data
batch_y = None
if trained.is_transformer():
batch_output = trained.transform(batch_X, batch_y)
else:
if trained in sink_nodes:
batch_output = trained._predict(
X=batch_X
) # We don't support y for predict yet as there is no compelling case
else:
# This is ok because trainable pipelines steps
# must only be individual operators
if trained.has_method("predict_proba"): # type: ignore
batch_output = trained.predict_proba(X=batch_X)
elif trained.has_method("decision_function"): # type: ignore
batch_output = trained.decision_function(X=batch_X)
else:
batch_output = trained._predict(X=batch_X)
if isinstance(batch_output, tuple):
batch_out_X, batch_out_y = batch_output
else:
batch_out_X = batch_output
batch_out_y = None
if serialize:
output = lale.helpers.write_batch_output_to_file(
output,
os.path.join(
serialization_out_dir,
"fit_with_batches" + str(operator_idx) + ".hdf5",
),
len(inputs.dataset),
batch_idx,
batch_X,
batch_y,
batch_out_X,
batch_out_y,
)
else:
if batch_out_y is not None:
output = lale.helpers.append_batch(
output, (batch_output, batch_out_y)
)
else:
output = lale.helpers.append_batch(output, batch_output)
if serialize:
output.close() # type: ignore
output = lale.helpers.create_data_loader(
os.path.join(
serialization_out_dir,
"fit_with_batches" + str(operator_idx) + ".hdf5",
),
batch_size=inputs.batch_size,
)
else:
if isinstance(output, tuple):
output = lale.helpers.create_data_loader(
X=output[0], y=output[1], batch_size=inputs.batch_size
)
else:
output = lale.helpers.create_data_loader(
X=output, y=None, batch_size=inputs.batch_size
)
outputs[operator] = output
operator_idx += 1
return_data = outputs[self._steps[-1]].dataset.get_data()
if serialize:
shutil.rmtree(serialization_out_dir)
return return_data
def freeze_trainable(self) -> "TrainedPipeline":
result = super(TrainedPipeline, self).freeze_trainable()
return cast(TrainedPipeline, result)
def freeze_trained(self) -> "TrainedPipeline":
frozen_steps = []
frozen_map = {}
for liquid in self._steps:
frozen = liquid.freeze_trained()
frozen_map[liquid] = frozen
frozen_steps.append(frozen)
frozen_edges = [(frozen_map[x], frozen_map[y]) for x, y in self.edges()]
result = TrainedPipeline(
frozen_steps, frozen_edges, ordered=True, _lale_trained=True
)
assert result.is_frozen_trained()
return result
OperatorChoiceType = TypeVar("OperatorChoiceType", bound=Operator, covariant=True)
class OperatorChoice(PlannedOperator, Generic[OperatorChoiceType]):
_name: str
_steps: List[OperatorChoiceType]
def get_params(self, deep: bool = True) -> Dict[str, Any]:
out: Dict[str, Any] = {}
out["steps"] = self._steps
out["name"] = self._name
indices: Dict[str, int] = {}
def make_indexed(name: str) -> str:
idx = 0
if name in indices:
idx = indices[name] + 1
indices[name] = idx
else:
indices[name] = 0
return make_indexed_name(name, idx)
if deep:
for op in self._steps:
name = make_indexed(op.name())
nested_params = op.get_params(deep=deep)
if nested_params:
out.update(nest_HPparams(name, nested_params))
return out
def set_params(self, **impl_params):
"""This implements the set_params, as per the scikit-learn convention,
extended as documented in the module docstring"""
return self._with_params(True, **impl_params)
# TODO: enhance to support setting params of a choice without picking a choice
# TODO: also, enhance to support mutating it in place?
def _with_params(self, try_mutate: bool, **impl_params) -> Operator:
"""
This method updates the parameters of the operator.
        If try_mutate is set, it will attempt to update the operator in place,
        although this may not always be possible.
"""
choices = self.steps()
choice_index: int
choice_params: Dict[str, Any]
if len(choices) == 1:
choice_index = 0
chosen_params = impl_params
else:
(choice_index, chosen_params) = partition_sklearn_choice_params(impl_params)
assert 0 <= choice_index and choice_index < len(choices)
choice: Operator = choices[choice_index]
new_step = choice._with_params(try_mutate, **chosen_params)
# in the functional case
# we remove the OperatorChoice, replacing it with the branch that was taken
# TODO: in the mutating case, we could update this choice
return new_step
def __init__(self, steps, name: Optional[str] = None) -> None:
if name is None or name == "":
name = lale.helpers.assignee_name(level=2)
if name is None or name == "":
name = "OperatorChoice"
self._name = name
self._steps = steps
def steps(self) -> List[OperatorChoiceType]:
return self._steps
def fit(self, X, y=None, **fit_params):
if len(self.steps()) == 1:
s = self.steps()[0]
if s is not None:
f = getattr(s, "fit", None)
if f is not None:
return f(X, y, **fit_params)
else:
self.__getattr__("fit")
def _has_same_impl(self, other: Operator) -> bool:
"""Checks if the type of the operator imnplementations are compatible"""
if not isinstance(other, OperatorChoice):
return False
my_steps = self.steps()
other_steps = other.steps()
if len(my_steps) != len(other_steps):
return False
for (m, o) in zip(my_steps, other_steps):
if not m._has_same_impl(o):
return False
return True
def is_supervised(self) -> bool:
s = self.steps()
if len(s) == 0:
return False
return self.steps()[-1].is_supervised()
def validate_schema(self, X, y=None):
for step in self.steps():
step.validate_schema(X, y)
def transform_schema(self, s_X):
from lale.settings import disable_data_schema_validation
if disable_data_schema_validation:
return {}
else:
transformed_schemas = [st.transform_schema(s_X) for st in self.steps()]
result = lale.type_checking.join_schemas(*transformed_schemas)
return result
def input_schema_fit(self) -> JSON_TYPE:
pipeline_inputs = [s.input_schema_fit() for s in self.steps()]
result = lale.type_checking.join_schemas(*pipeline_inputs)
return result
def is_frozen_trainable(self) -> bool:
return all([step.is_frozen_trainable() for step in self.steps()])
def is_classifier(self) -> bool:
for op in self.steps():
if not op.is_classifier():
return False
return True
def get_defaults(self) -> Mapping[str, Any]:
defaults_list: Iterable[Mapping[str, Any]] = (
s.get_defaults() for s in self.steps()
)
defaults: Dict[str, Any] = {}
for d in defaults_list:
defaults.update(d)
return defaults
class _PipelineFactory:
def __init__(self):
pass
def __call__(self, steps: List[Any]):
warnings.warn(
"lale.operators.Pipeline is deprecated, use sklearn.pipeline.Pipeline or lale.lib.sklearn.Pipeline instead",
DeprecationWarning,
)
for i in range(len(steps)):
op = steps[i]
if isinstance(op, tuple):
assert isinstance(op[1], Operator)
op[1]._set_name(op[0])
steps[i] = op[1]
return make_pipeline(*steps)
Pipeline = _PipelineFactory()
def _pipeline_graph_class(steps) -> Type[PlannedPipeline]:
isTrainable: bool = True
isTrained: bool = True
for operator in steps:
if not isinstance(operator, TrainedOperator):
isTrained = False # Even if a single step is not trained, the pipeline can't be used for predict/transform
# without training it first
if isinstance(operator, OperatorChoice) or not isinstance(
operator, TrainableOperator
):
isTrainable = False
if isTrained:
return TrainedPipeline
elif isTrainable:
return TrainablePipeline
else:
return PlannedPipeline
@overload
def make_pipeline_graph(
steps: List[TrainedOperator],
edges: List[Tuple[Operator, Operator]],
ordered: bool = False,
) -> TrainedPipeline:
...
@overload
def make_pipeline_graph(
steps: List[TrainableOperator],
edges: List[Tuple[Operator, Operator]],
ordered: bool = False,
) -> TrainablePipeline:
...
@overload
def make_pipeline_graph(
steps: List[Operator],
edges: List[Tuple[Operator, Operator]],
ordered: bool = False,
) -> PlannedPipeline:
...
def make_pipeline_graph(steps, edges, ordered=False) -> PlannedPipeline:
"""
    Decide an appropriate type for a new pipeline based on the state of the steps,
    create a new pipeline of that type, and return it.
#TODO: If multiple independently trained components are composed together in a pipeline,
should it be of type TrainedPipeline?
Currently, it will be TrainablePipeline, i.e. it will be forced to train it again.
"""
pipeline_class = _pipeline_graph_class(steps)
if pipeline_class is TrainedPipeline:
return TrainedPipeline(steps, edges, ordered=ordered, _lale_trained=True)
else:
return pipeline_class(steps, edges, ordered=ordered)
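# Illustrative sketch (not part of the library): building a non-linear pipeline
# graph directly from steps and edges. ConcatFeatures is imported the same way
# make_union does below; PCA, Nystroem, and LogisticRegression are assumed to
# be available from lale.lib.sklearn. The shape is two preprocessors feeding
# ConcatFeatures, followed by a classifier.
def _example_pipeline_graph():
    from lale.lib.lale import ConcatFeatures
    from lale.lib.sklearn import PCA, Nystroem, LogisticRegression  # assumed
    pca, nys, cat, clf = PCA(), Nystroem(), ConcatFeatures(), LogisticRegression()
    steps = [pca, nys, cat, clf]  # already in topological order
    edges = [(pca, cat), (nys, cat), (cat, clf)]
    return make_pipeline_graph(steps, edges, ordered=True)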
@overload
def make_pipeline(*orig_steps: TrainedOperator) -> TrainedPipeline:
...
@overload
def make_pipeline(*orig_steps: TrainableOperator) -> TrainablePipeline:
...
@overload
def make_pipeline(*orig_steps: Union[Operator, Any]) -> PlannedPipeline:
...
def make_pipeline(*orig_steps):
steps: List[Operator] = []
edges: List[Tuple[Operator, Operator]] = []
prev_op: Optional[Operator] = None
for curr_op in orig_steps:
if isinstance(prev_op, BasePipeline):
prev_leaves: List[Operator] = prev_op._find_sink_nodes()
else:
prev_leaves = [] if prev_op is None else [prev_op]
if isinstance(curr_op, BasePipeline):
curr_roots: List[Operator] = curr_op._find_source_nodes()
steps.extend(curr_op.steps())
edges.extend(curr_op.edges())
else:
if not isinstance(curr_op, Operator):
curr_op = make_operator(curr_op, name=curr_op.__class__.__name__)
curr_roots = [curr_op]
steps.append(curr_op)
edges.extend([(src, tgt) for src in prev_leaves for tgt in curr_roots])
prev_op = curr_op
return make_pipeline_graph(steps, edges, ordered=True)
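# Illustrative sketch (not part of the library): make_pipeline also accepts raw
# scikit-learn estimators and wraps them via make_operator, as the isinstance
# check above shows. PCA and LogisticRegression here are the plain scikit-learn
# classes, not lale operators.
def _example_make_pipeline_wraps_sklearn():
    from sklearn.decomposition import PCA
    from sklearn.linear_model import LogisticRegression
    return make_pipeline(PCA(), LogisticRegression())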
@overload
def make_union_no_concat(*orig_steps: TrainedOperator) -> TrainedPipeline:
...
@overload
def make_union_no_concat(*orig_steps: TrainableOperator) -> TrainablePipeline:
...
@overload
def make_union_no_concat(*orig_steps: Union[Operator, Any]) -> PlannedPipeline:
...
def make_union_no_concat(*orig_steps): # type: ignore
steps, edges = [], []
for curr_op in orig_steps:
if isinstance(curr_op, BasePipeline):
steps.extend(curr_op._steps)
edges.extend(curr_op.edges())
else:
if not isinstance(curr_op, Operator):
curr_op = make_operator(curr_op, name=curr_op.__class__.__name__)
steps.append(curr_op)
return make_pipeline_graph(steps, edges, ordered=True)
@overload
def make_union(*orig_steps: TrainedOperator) -> TrainedPipeline:
...
@overload
def make_union(*orig_steps: TrainableOperator) -> TrainablePipeline:
...
@overload
def make_union(*orig_steps: Union[Operator, Any]) -> PlannedPipeline:
...
def make_union(*orig_steps): # type: ignore
from lale.lib.lale import ConcatFeatures
return make_union_no_concat(*orig_steps) >> ConcatFeatures()
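# Illustrative sketch (not part of the library): make_union places its
# arguments side by side with no edges between them and pipes every branch
# into a ConcatFeatures step. PCA and Nystroem are assumed lale operators.
def _example_union():
    from lale.lib.sklearn import PCA, Nystroem  # assumed import path
    return make_union(PCA(), Nystroem())  # both branches feed ConcatFeatures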
def make_choice(
*orig_steps: Union[Operator, Any], name: Optional[str] = None
) -> OperatorChoice:
if name is None:
name = ""
name_: str = name # to make mypy happy
steps: List[Operator] = []
for operator in orig_steps:
if isinstance(operator, OperatorChoice):
steps.extend(operator.steps())
else:
if not isinstance(operator, Operator):
operator = make_operator(operator, name=operator.__class__.__name__)
steps.append(operator)
name_ = name_ + " | " + operator.name()
return OperatorChoice(steps, name_[3:])
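# Illustrative sketch (not part of the library): make_choice builds an
# OperatorChoice whose name joins the alternatives with " | "; a search tool
# later picks one branch. The two classifiers are assumed lale operators.
def _example_choice():
    from lale.lib.sklearn import LogisticRegression, RandomForestClassifier  # assumed
    choice = make_choice(LogisticRegression(), RandomForestClassifier())
    return choice.name()  # e.g. "LogisticRegression | RandomForestClassifier"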
def _fixup_hyperparams_dict(d):
d1 = remove_defaults_dict(d)
d2 = {k: lale.helpers.val_wrapper.unwrap(v) for k, v in d1.items()}
return d2
CustomizeOpType = TypeVar("CustomizeOpType", bound=IndividualOp)
def customize_schema(
op: CustomizeOpType,
schemas: Optional[Schema] = None,
relevantToOptimizer: Optional[List[str]] = None,
constraint: Union[Schema, JSON_TYPE, None] = None,
tags: Optional[Dict] = None,
**kwargs: Union[Schema, JSON_TYPE, None],
) -> CustomizeOpType:
"""Return a new operator with a customized schema
Parameters
----------
schemas : Schema
A dictionary of json schemas for the operator. Override the entire schema and ignore other arguments
input : Schema
(or `input_*`) override the input schema for method `*`.
`input_*` must be an existing method (already defined in the schema for lale operators, existing method for external operators)
output : Schema
(or `output_*`) override the output schema for method `*`.
`output_*` must be an existing method (already defined in the schema for lale operators, existing method for external operators)
constraint : Schema
Add a constraint in JSON schema format.
relevantToOptimizer : String list
        Update the set of parameters that will be optimized.
param : Schema
Override the schema of the hyperparameter.
`param` must be an existing parameter (already defined in the schema for lale operators, __init__ parameter for external operators)
tags : Dict
Override the tags of the operator.
Returns
-------
IndividualOp
Copy of the operator with a customized schema
"""
    # TODO: why are we doing a deepcopy here?
op = copy.deepcopy(op)
methods = ["fit", "transform", "predict", "predict_proba", "decision_function"]
# explicitly enable the hyperparams schema check because it is important
from lale.settings import (
disable_hyperparams_schema_validation,
set_disable_hyperparams_schema_validation,
)
existing_disable_hyperparams_schema_validation = (
disable_hyperparams_schema_validation
)
set_disable_hyperparams_schema_validation(False)
if schemas is not None:
schemas.schema["$schema"] = "http://json-schema.org/draft-04/schema#"
lale.type_checking.validate_is_schema(schemas.schema)
op._schemas = schemas.schema
else:
if relevantToOptimizer is not None:
assert isinstance(relevantToOptimizer, list)
op._schemas["properties"]["hyperparams"]["allOf"][0][
"relevantToOptimizer"
] = relevantToOptimizer
if constraint is not None:
if isinstance(constraint, Schema):
constraint = constraint.schema
op._schemas["properties"]["hyperparams"]["allOf"].append(constraint)
if tags is not None:
assert isinstance(tags, dict)
op._schemas["tags"] = tags
for arg in kwargs:
value = kwargs[arg]
if isinstance(value, Schema):
value = value.schema
if value is not None:
lale.type_checking.validate_is_schema(value)
if arg in [p + n for p in ["input_", "output_"] for n in methods]:
# multiple input types (e.g., fit, predict)
assert value is not None
lale.type_checking.validate_method(op, arg)
op._schemas["properties"][arg] = value
elif value is None:
scm = op._schemas["properties"]["hyperparams"]["allOf"][0]
scm["required"] = [k for k in scm["required"] if k != arg]
scm["relevantToOptimizer"] = [
k for k in scm["relevantToOptimizer"] if k != arg
]
scm["properties"] = {
k: scm["properties"][k] for k in scm["properties"] if k != arg
}
else:
op._schemas["properties"]["hyperparams"]["allOf"][0]["properties"][
arg
] = value
# since the schema has changed, we need to invalidate any
# cached enum attributes
op._invalidate_enum_attributes()
set_disable_hyperparams_schema_validation(
existing_disable_hyperparams_schema_validation
)
# we also need to prune the hyperparameter, if any, removing defaults (which may have changed)
op._hyperparams = op.hyperparams()
return op
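# Illustrative sketch (not part of the library): narrowing an operator's
# hyperparameter schema. KNeighborsClassifier and its n_neighbors parameter
# are assumptions; any existing parameter of a lale operator works the same
# way, and the keyword value may be a plain JSON schema dict or a Schema.
def _example_customize_schema():
    from lale.lib.sklearn import KNeighborsClassifier  # assumed import path
    return customize_schema(
        KNeighborsClassifier,
        relevantToOptimizer=["n_neighbors"],
        n_neighbors={"type": "integer", "minimum": 1, "maximum": 50, "default": 5},
    )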
CloneOpType = TypeVar("CloneOpType", bound=Operator)
def clone_op(op: CloneOpType, name: str = None) -> CloneOpType:
"""Clone any operator."""
from sklearn.base import clone
nop = clone(op)
if name:
nop._set_name(name)
return nop
def with_structured_params(
try_mutate: bool, k, params: Dict[str, Any], hyper_parent
) -> None:
# need to handle the different encoding schemes used
if params is None:
return
if structure_type_name in params:
# this is a structured type
structure_type = params[structure_type_name]
type_params, sub_params = partition_sklearn_params(params)
hyper = None
if isinstance(hyper_parent, dict):
hyper = hyper_parent.get(k, None)
elif isinstance(hyper_parent, list) and k < len(hyper_parent):
hyper = hyper_parent[k]
if hyper is None:
hyper = {}
elif isinstance(hyper, tuple):
# to make it mutable
hyper = list(hyper)
del type_params[structure_type_name]
actual_key: Union[str, int]
for elem_key, elem_value in type_params.items():
if elem_value is not None:
if not isinstance(hyper, dict):
assert is_numeric_structure(structure_type)
actual_key = int(elem_key)
# we may need to extend the array
try:
hyper[actual_key] = elem_value
except IndexError:
assert 0 <= actual_key
hyper.extend((actual_key - len(hyper)) * [None])
hyper.append(elem_value)
else:
actual_key = elem_key
hyper[actual_key] = elem_value
for elem_key, elem_params in sub_params.items():
if not isinstance(hyper, dict):
assert is_numeric_structure(structure_type)
actual_key = int(elem_key)
else:
actual_key = elem_key
with_structured_params(try_mutate, actual_key, elem_params, hyper)
if isinstance(hyper, dict) and is_numeric_structure(structure_type):
max_key = max(map(int, hyper.keys()))
            hyper = [hyper.get(str(x), None) for x in range(max_key + 1)]
if structure_type == "tuple":
hyper = tuple(hyper)
hyper_parent[k] = hyper
else:
# if it is not a structured parameter
# then it must be a nested higher order operator
sub_op = hyper_parent[k]
if isinstance(sub_op, list):
if len(sub_op) == 1:
sub_op = sub_op[0]
else:
(disc, chosen_params) = partition_sklearn_choice_params(params)
assert 0 <= disc and disc < len(sub_op)
sub_op = sub_op[disc]
params = chosen_params
trainable_sub_op = sub_op._with_params(try_mutate, **params)
hyper_parent[k] = trainable_sub_op
|
# settings.py
import os
from os.path import join, dirname
from dotenv import load_dotenv
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
CONSUMER_KEY = os.environ.get("CONSUMER_KEY")
CONSUMER_SECRET = os.environ.get("CONSUMER_SECRET")
ACCESS_TOKEN = os.environ.get("ACCESS_TOKEN")
ACCESS_SECRET = os.environ.get("ACCESS_SECRET")
BROKER_URL = os.environ.get("BROKER_URL")
|
from __future__ import absolute_import
from pants.build_graph.target import Target
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
class YoyoTarget(Target):
def __init__(self,
db_string=None,
prod_db_envvar='POSTGRES_URL',
payload=None, **kwargs):
payload = payload or Payload()
payload.add_fields({
"db_string": PrimitiveField(db_string),
"prod_db_envvar": PrimitiveField(prod_db_envvar)
})
super(YoyoTarget, self).__init__(payload=payload, **kwargs)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayEcoEduKtSchoolinfoModifyModel import AlipayEcoEduKtSchoolinfoModifyModel
class AlipayEcoEduKtSchoolinfoModifyRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayEcoEduKtSchoolinfoModifyModel):
self._biz_content = value
else:
self._biz_content = AlipayEcoEduKtSchoolinfoModifyModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.eco.edu.kt.schoolinfo.modify'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
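# Illustrative usage sketch (not part of the SDK; values are placeholders):
# build a request without a business model and inspect the parameters that
# would be assembled for the gateway call.
def _example_build_request():
    request = AlipayEcoEduKtSchoolinfoModifyRequest()
    request.notify_url = "https://example.com/alipay/notify"  # placeholder URL
    request.add_other_text_param("trace_id", "demo-123")  # placeholder value
    return request.get_params()  # includes method, version, notify_url, trace_id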
|
from numpy.random import random
from bokeh.io import curdoc
from bokeh.plotting import figure
from bokeh.layouts import column, widgetbox
from bokeh.models import Button, ColumnDataSource
from bokeh.server.server import Server
"""
create and run a demo bokeh app on a cloud server
"""
def run(doc):
fig = figure(title='random data', width=400, height=200, tools='pan,box_zoom,reset,save')
source = ColumnDataSource(data={'x': [], 'y': []})
fig.line('x', 'y', source=source)
def click(n=100):
source.data = {'x': range(n), 'y': random(n)}
button = Button(label='update', button_type='success')
button.on_click(click)
layout = column(widgetbox(button), fig)
doc.add_root(layout)
click()
# configure and run bokeh server
kws = {'port': 5100, 'prefix': '/bokeh', 'allow_websocket_origin': ['165.227.26.215']}
server = Server(run, **kws)
server.start()
if __name__ == '__main__':
server.io_loop.add_callback(server.show, '/')
server.io_loop.start()
|
#
# Copyright (c) 2019-2020 Google LLC. All Rights Reserved.
# Copyright (c) 2016-2018 Nest Labs Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Description:
# This file effects a Weave Data Language (WDL) validator that
# validates and enforces the nullable constraint.
#
"""Validate that only valid fields are nullable."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gwv import schema
from gwv import validator
class NullableValidator(validator.VisitorValidator):
"""Validate that only valid fields are nullable."""
def visit_Field(self, field):
if field.is_nullable:
if field.is_map:
self.add_failure('Maps cannot be nullable', field)
elif field.is_array:
self.add_failure('Arrays cannot be nullable', field)
elif field.data_type == schema.Field.DataType.ENUM:
self.add_failure('Enums cannot be nullable', field)
process = NullableValidator.process
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import unittest
from unittest.mock import MagicMock, patch
from ... import commands, configuration_monitor, project_files_monitor
from ...analysis_directory import AnalysisDirectory
from ...commands import restart
from .command_test import mock_arguments, mock_configuration
class RestartTest(unittest.TestCase):
@patch("{}.ProjectFilesMonitor".format(project_files_monitor.__name__))
@patch.object(restart, "Stop")
@patch.object(configuration_monitor.ConfigurationMonitor, "daemonize")
def test_restart(
self, _daemonize, commands_Stop, _daemonize_project_files_monitor
) -> None:
state = MagicMock()
state.running = ["."]
original_directory = "/original/directory"
arguments = mock_arguments()
arguments.terminal = False
configuration = mock_configuration()
analysis_directory = AnalysisDirectory(".")
with patch.object(restart, "Stop") as commands_Stop, patch.object(
restart, "Start"
) as commands_Start, patch.object(
restart, "Incremental"
) as commands_Incremental:
commands.Restart(
arguments, original_directory, configuration, analysis_directory
)._run()
commands_Stop.assert_called_with(
arguments, original_directory, configuration, analysis_directory
)
commands_Incremental.assert_called_with(
arguments, original_directory, configuration, analysis_directory
)
commands_Start.assert_not_called()
|
# -*- coding: utf-8 -*-
"""
flask.app
~~~~~~~~~
This module implements the central WSGI application object.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from threading import Lock
from datetime import timedelta
from itertools import chain
from functools import update_wrapper
from werkzeug.datastructures import ImmutableDict
from werkzeug.routing import Map, Rule, RequestRedirect, BuildError
from werkzeug.exceptions import HTTPException, InternalServerError, \
MethodNotAllowed, BadRequest
from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \
locked_cached_property, _endpoint_from_view_func, find_package
from . import json
from .wrappers import Request, Response
from .config import ConfigAttribute, Config
from .ctx import RequestContext, AppContext, _AppCtxGlobals
from .globals import _request_ctx_stack, request, session, g
from .sessions import SecureCookieSessionInterface
from .module import blueprint_is_module
from .templating import DispatchingJinjaLoader, Environment, \
_default_template_ctx_processor
from .signals import request_started, request_finished, got_request_exception, \
request_tearing_down, appcontext_tearing_down
from ._compat import reraise, string_types, text_type, integer_types
# a lock used for logger initialization
_logger_lock = Lock()
def _make_timedelta(value):
if not isinstance(value, timedelta):
return timedelta(seconds=value)
return value
def setupmethod(f):
"""Wraps a method so that it performs a check in debug mode if the
first request was already handled.
"""
def wrapper_func(self, *args, **kwargs):
if self.debug and self._got_first_request:
raise AssertionError('A setup function was called after the '
'first request was handled. This usually indicates a bug '
'in the application where a module was not imported '
'and decorators or other functionality was called too late.\n'
'To fix this make sure to import all your view modules, '
'database models and everything related at a central place '
'before the application starts serving requests.')
return f(self, *args, **kwargs)
return update_wrapper(wrapper_func, f)
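# Illustrative sketch (not part of Flask): setupmethod wraps registration
# helpers so that calling them after the first request has been handled fails
# loudly in debug mode. The _DemoApp class below is a hypothetical stand-in
# exercising just that check.
def _example_setupmethod_guard():
    class _DemoApp(object):
        debug = True
        _got_first_request = True
        @setupmethod
        def register_something(self):
            pass
    try:
        _DemoApp().register_something()
        return None
    except AssertionError as exc:
        return str(exc)  # the message explains that setup must happen earlier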
class Flask(_PackageBoundObject):
"""The flask object implements a WSGI application and acts as the central
object. It is passed the name of the module or package of the
application. Once it is created it will act as a central registry for
the view functions, the URL rules, template configuration and much more.
The name of the package is used to resolve resources from inside the
package or the folder the module is contained in depending on if the
package parameter resolves to an actual python package (a folder with
an `__init__.py` file inside) or a standard module (just a `.py` file).
For more information about resource loading, see :func:`open_resource`.
Usually you create a :class:`Flask` instance in your main module or
in the `__init__.py` file of your package like this::
from flask import Flask
app = Flask(__name__)
.. admonition:: About the First Parameter
The idea of the first parameter is to give Flask an idea what
belongs to your application. This name is used to find resources
on the file system, can be used by extensions to improve debugging
information and a lot more.
So it's important what you provide there. If you are using a single
module, `__name__` is always the correct value. If you however are
using a package, it's usually recommended to hardcode the name of
your package there.
For example if your application is defined in `yourapplication/app.py`
you should create it with one of the two versions below::
app = Flask('yourapplication')
app = Flask(__name__.split('.')[0])
Why is that? The application will work even with `__name__`, thanks
to how resources are looked up. However it will make debugging more
painful. Certain extensions can make assumptions based on the
import name of your application. For example the Flask-SQLAlchemy
extension will look for the code in your application that triggered
an SQL query in debug mode. If the import name is not properly set
up, that debugging information is lost. (For example it would only
pick up SQL queries in `yourapplication.app` and not
`yourapplication.views.frontend`)
.. versionadded:: 0.7
The `static_url_path`, `static_folder`, and `template_folder`
parameters were added.
.. versionadded:: 0.8
The `instance_path` and `instance_relative_config` parameters were
added.
:param import_name: the name of the application package
:param static_url_path: can be used to specify a different path for the
static files on the web. Defaults to the name
of the `static_folder` folder.
:param static_folder: the folder with static files that should be served
at `static_url_path`. Defaults to the ``'static'``
folder in the root path of the application.
:param template_folder: the folder that contains the templates that should
be used by the application. Defaults to
``'templates'`` folder in the root path of the
application.
:param instance_path: An alternative instance path for the application.
By default the folder ``'instance'`` next to the
package or module is assumed to be the instance
path.
:param instance_relative_config: if set to `True` relative filenames
for loading the config are assumed to
be relative to the instance path instead
of the application root.
"""
#: The class that is used for request objects. See :class:`~flask.Request`
#: for more information.
request_class = Request
#: The class that is used for response objects. See
#: :class:`~flask.Response` for more information.
response_class = Response
#: The class that is used for the :data:`~flask.g` instance.
#:
#: Example use cases for a custom class:
#:
#: 1. Store arbitrary attributes on flask.g.
#: 2. Add a property for lazy per-request database connectors.
#: 3. Return None instead of AttributeError on expected attributes.
#: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g.
#:
#: In Flask 0.9 this property was called `request_globals_class` but it
#: was changed in 0.10 to :attr:`app_ctx_globals_class` because the
#: flask.g object is not application context scoped.
#:
#: .. versionadded:: 0.10
app_ctx_globals_class = _AppCtxGlobals
# Backwards compatibility support
def _get_request_globals_class(self):
return self.app_ctx_globals_class
def _set_request_globals_class(self, value):
from warnings import warn
warn(DeprecationWarning('request_globals_class attribute is now '
'called app_ctx_globals_class'))
self.app_ctx_globals_class = value
request_globals_class = property(_get_request_globals_class,
_set_request_globals_class)
del _get_request_globals_class, _set_request_globals_class
#: The debug flag. Set this to `True` to enable debugging of the
#: application. In debug mode the debugger will kick in when an unhandled
#: exception occurs and the integrated server will automatically reload
#: the application if changes in the code are detected.
#:
#: This attribute can also be configured from the config with the `DEBUG`
#: configuration key. Defaults to `False`.
debug = ConfigAttribute('DEBUG')
#: The testing flag. Set this to `True` to enable the test mode of
#: Flask extensions (and in the future probably also Flask itself).
#: For example this might activate unittest helpers that have an
#: additional runtime cost which should not be enabled by default.
#:
#: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the
#: default it's implicitly enabled.
#:
#: This attribute can also be configured from the config with the
#: `TESTING` configuration key. Defaults to `False`.
testing = ConfigAttribute('TESTING')
#: If a secret key is set, cryptographic components can use this to
#: sign cookies and other things. Set this to a complex random value
#: when you want to use the secure cookie for instance.
#:
#: This attribute can also be configured from the config with the
#: `SECRET_KEY` configuration key. Defaults to `None`.
secret_key = ConfigAttribute('SECRET_KEY')
#: The secure cookie uses this for the name of the session cookie.
#:
#: This attribute can also be configured from the config with the
#: `SESSION_COOKIE_NAME` configuration key. Defaults to ``'session'``
session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME')
#: A :class:`~datetime.timedelta` which is used to set the expiration
#: date of a permanent session. The default is 31 days which makes a
#: permanent session survive for roughly one month.
#:
#: This attribute can also be configured from the config with the
#: `PERMANENT_SESSION_LIFETIME` configuration key. Defaults to
#: ``timedelta(days=31)``
permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME',
get_converter=_make_timedelta)
#: Enable this if you want to use the X-Sendfile feature. Keep in
#: mind that the server has to support this. This only affects files
#: sent with the :func:`send_file` method.
#:
#: .. versionadded:: 0.2
#:
#: This attribute can also be configured from the config with the
#: `USE_X_SENDFILE` configuration key. Defaults to `False`.
use_x_sendfile = ConfigAttribute('USE_X_SENDFILE')
#: The name of the logger to use. By default the logger name is the
#: package name passed to the constructor.
#:
#: .. versionadded:: 0.4
logger_name = ConfigAttribute('LOGGER_NAME')
#: Whether to enable the deprecated module support. This is active by default
#: in 0.7 but will be changed to `False` in 0.8. With Flask 1.0 modules
#: will be removed in favor of Blueprints.
enable_modules = True
#: The logging format used for the debug logger. This is only used when
#: the application is in debug mode, otherwise the attached logging
#: handler does the formatting.
#:
#: .. versionadded:: 0.3
debug_log_format = (
'-' * 80 + '\n' +
'%(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\n' +
'%(message)s\n' +
'-' * 80
)
#: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`.
#:
#: .. versionadded:: 0.10
json_encoder = json.JSONEncoder
#: The JSON decoder class to use. Defaults to :class:`~flask.json.JSONDecoder`.
#:
#: .. versionadded:: 0.10
json_decoder = json.JSONDecoder
#: Options that are passed directly to the Jinja2 environment.
jinja_options = ImmutableDict(
extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_']
)
#: Default configuration parameters.
default_config = ImmutableDict({
'DEBUG': False,
'TESTING': False,
'PROPAGATE_EXCEPTIONS': None,
'PRESERVE_CONTEXT_ON_EXCEPTION': None,
'SECRET_KEY': None,
'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
'USE_X_SENDFILE': False,
'LOGGER_NAME': None,
'SERVER_NAME': None,
'APPLICATION_ROOT': None,
'SESSION_COOKIE_NAME': 'session',
'SESSION_COOKIE_DOMAIN': None,
'SESSION_COOKIE_PATH': None,
'SESSION_COOKIE_HTTPONLY': True,
'SESSION_COOKIE_SECURE': False,
'SESSION_REFRESH_EACH_REQUEST': True,
'MAX_CONTENT_LENGTH': None,
'SEND_FILE_MAX_AGE_DEFAULT': 12 * 60 * 60, # 12 hours
'TRAP_BAD_REQUEST_ERRORS': False,
'TRAP_HTTP_EXCEPTIONS': False,
'PREFERRED_URL_SCHEME': 'http',
'JSON_AS_ASCII': True,
'JSON_SORT_KEYS': True,
'JSONIFY_PRETTYPRINT_REGULAR': True,
})
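# A minimal override sketch (hypothetical values, not part of this class):
# the documented configuration keys above are normally adjusted on the
# application's config mapping during setup, e.g.
#
#     app = Flask(__name__)
#     app.config.update(
#         SECRET_KEY='change-me',
#         MAX_CONTENT_LENGTH=16 * 1024 * 1024,   # cap request bodies at 16 MB
#         JSON_SORT_KEYS=False,
#     )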
#: The rule object to use for URL rules created. This is used by
#: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
#:
#: .. versionadded:: 0.7
url_rule_class = Rule
#: the test client that is used when `test_client` is called.
#:
#: .. versionadded:: 0.7
test_client_class = None
#: the session interface to use. By default an instance of
#: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
#:
#: .. versionadded:: 0.8
session_interface = SecureCookieSessionInterface()
def __init__(self, import_name, static_path=None, static_url_path=None,
static_folder='static', template_folder='templates',
instance_path=None, instance_relative_config=False):
_PackageBoundObject.__init__(self, import_name,
template_folder=template_folder)
if static_path is not None:
from warnings import warn
warn(DeprecationWarning('static_path is now called '
'static_url_path'), stacklevel=2)
static_url_path = static_path
if static_url_path is not None:
self.static_url_path = static_url_path
if static_folder is not None:
self.static_folder = static_folder
if instance_path is None:
instance_path = self.auto_find_instance_path()
elif not os.path.isabs(instance_path):
raise ValueError('If an instance path is provided it must be '
'absolute. A relative path was given instead.')
#: Holds the path to the instance folder.
#:
#: .. versionadded:: 0.8
self.instance_path = instance_path
#: The configuration dictionary as :class:`Config`. This behaves
#: exactly like a regular dictionary but supports additional methods
#: to load a config from files.
self.config = self.make_config(instance_relative_config)
# Prepare the deferred setup of the logger.
self._logger = None
self.logger_name = self.import_name
#: A dictionary of all view functions registered. The keys will
#: be function names which are also used to generate URLs and
#: the values are the function objects themselves.
#: To register a view function, use the :meth:`route` decorator.
self.view_functions = {}
# support for the now deprecated `error_handlers` attribute. The
# :attr:`error_handler_spec` shall be used now.
self._error_handlers = {}
#: A dictionary of all registered error handlers. The key is `None`
#: for error handlers active on the application, otherwise the key is
#: the name of the blueprint. Each key points to another dictionary
#: where the key is the status code of the http exception. The
#: special key `None` points to a list of tuples where the first item
#: is the class for the instance check and the second the error handler
#: function.
#:
#: To register an error handler, use the :meth:`errorhandler`
#: decorator.
self.error_handler_spec = {None: self._error_handlers}
#: A list of functions that are called when :meth:`url_for` raises a
#: :exc:`~werkzeug.routing.BuildError`. Each function registered here
#: is called with `error`, `endpoint` and `values`. If a function
#: returns `None` or raises a `BuildError` the next function is
#: tried.
#:
#: .. versionadded:: 0.9
self.url_build_error_handlers = []
#: A dictionary with lists of functions that should be called at the
#: beginning of the request. The key of the dictionary is the name of
#: the blueprint this function is active for, `None` for all requests.
#: This can for example be used to open database connections or
#: get hold of the currently logged in user. To register a
#: function here, use the :meth:`before_request` decorator.
self.before_request_funcs = {}
#: A lists of functions that should be called at the beginning of the
#: first request to this instance. To register a function here, use
#: the :meth:`before_first_request` decorator.
#:
#: .. versionadded:: 0.8
self.before_first_request_funcs = []
#: A dictionary with lists of functions that should be called after
#: each request. The key of the dictionary is the name of the blueprint
#: this function is active for, `None` for all requests. This can for
#: example be used to open database connections or get hold of the
#: currently logged in user. To register a function here, use the
#: :meth:`after_request` decorator.
self.after_request_funcs = {}
#: A dictionary with lists of functions that are called after
#: each request, even if an exception has occurred. The key of the
#: dictionary is the name of the blueprint this function is active for,
#: `None` for all requests. These functions are not allowed to modify
#: the request, and their return values are ignored. If an exception
#: occurred while processing the request, it gets passed to each
#: teardown_request function. To register a function here, use the
#: :meth:`teardown_request` decorator.
#:
#: .. versionadded:: 0.7
self.teardown_request_funcs = {}
#: A list of functions that are called when the application context
#: is destroyed. Since the application context is also torn down
#: if the request ends this is the place to store code that disconnects
#: from databases.
#:
#: .. versionadded:: 0.9
self.teardown_appcontext_funcs = []
#: A dictionary with lists of functions that can be used as URL value
#: preprocessors. Whenever a URL is matched, these functions are called
#: to modify the dictionary of URL values in place before those values
#: are passed to the view function. The key `None` here is used for
#: application wide callbacks, otherwise the key is the name of the
#: blueprint.
#:
#: .. versionadded:: 0.7
self.url_value_preprocessors = {}
#: A dictionary with lists of functions that can be used as URL default
#: callbacks. Whenever a URL is built, these functions are called to
#: modify the dictionary of values in place. The key `None` here is used
#: for application wide callbacks, otherwise the key is the name of the
#: blueprint. For each :meth:`url_value_preprocessor` registered, one
#: should also provide a :meth:`url_defaults` function that automatically
#: adds back the parameters that were removed that way.
#:
#: .. versionadded:: 0.7
self.url_default_functions = {}
#: A dictionary with list of functions that are called without argument
#: to populate the template context. The key of the dictionary is the
#: name of the blueprint this function is active for, `None` for all
#: requests. Each returns a dictionary that the template context is
#: updated with. To register a function here, use the
#: :meth:`context_processor` decorator.
self.template_context_processors = {
None: [_default_template_ctx_processor]
}
#: all the attached blueprints in a dictionary by name. Blueprints
#: can be attached multiple times so this dictionary does not tell
#: you how often they got attached.
#:
#: .. versionadded:: 0.7
self.blueprints = {}
#: a place where extensions can store application specific state. For
#: example this is where an extension could store database engines and
#: similar things. For backwards compatibility extensions should register
#: themselves like this::
#:
#: if not hasattr(app, 'extensions'):
#: app.extensions = {}
#: app.extensions['extensionname'] = SomeObject()
#:
#: The key must match the name of the extension module. For example in
#: case of a "Flask-Foo" extension in `flask_foo`, the key would be
#: ``'foo'``.
#:
#: .. versionadded:: 0.7
self.extensions = {}
#: The :class:`~werkzeug.routing.Map` for this instance. You can use
#: this to change the routing converters after the class was created
#: but before any routes are connected. Example::
#:
#: from werkzeug.routing import BaseConverter
#:
#: class ListConverter(BaseConverter):
#: def to_python(self, value):
#: return value.split(',')
#: def to_url(self, values):
#: return ','.join(BaseConverter.to_url(value)
#: for value in values)
#:
#: app = Flask(__name__)
#: app.url_map.converters['list'] = ListConverter
self.url_map = Map()
# tracks internally if the application already handled at least one
# request.
self._got_first_request = False
self._before_request_lock = Lock()
# register the static folder for the application. Do that even
# if the folder does not exist. First of all it might be created
# while the server is running (usually happens during development)
# but also because google appengine stores static files somewhere
# else when mapped with the .yml file.
if self.has_static_folder:
self.add_url_rule(self.static_url_path + '/<path:filename>',
endpoint='static',
view_func=self.send_static_file)
def _get_error_handlers(self):
from warnings import warn
warn(DeprecationWarning('error_handlers is deprecated, use the '
'new error_handler_spec attribute instead.'), stacklevel=1)
return self._error_handlers
def _set_error_handlers(self, value):
self._error_handlers = value
self.error_handler_spec[None] = value
error_handlers = property(_get_error_handlers, _set_error_handlers)
del _get_error_handlers, _set_error_handlers
@locked_cached_property
def name(self):
"""The name of the application. This is usually the import name
with the difference that it's guessed from the run file if the
import name is `__main__`. This name is used as a display name when
Flask needs the name of the application. It can be set and overridden
to change the value.
.. versionadded:: 0.8
"""
if self.import_name == '__main__':
fn = getattr(sys.modules['__main__'], '__file__', None)
if fn is None:
return '__main__'
return os.path.splitext(os.path.basename(fn))[0]
return self.import_name
@property
def propagate_exceptions(self):
"""Returns the value of the `PROPAGATE_EXCEPTIONS` configuration
value in case it's set, otherwise a sensible default is returned.
.. versionadded:: 0.7
"""
rv = self.config['PROPAGATE_EXCEPTIONS']
if rv is not None:
return rv
return self.testing or self.debug
@property
def preserve_context_on_exception(self):
"""Returns the value of the `PRESERVE_CONTEXT_ON_EXCEPTION`
configuration value in case it's set, otherwise a sensible default
is returned.
.. versionadded:: 0.7
"""
rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION']
if rv is not None:
return rv
return self.debug
@property
def logger(self):
"""A :class:`logging.Logger` object for this application. The
default configuration is to log to stderr if the application is
in debug mode. This logger can be used to (surprise) log messages.
Here are some examples::
app.logger.debug('A value for debugging')
app.logger.warning('A warning occurred (%d apples)', 42)
app.logger.error('An error occurred')
.. versionadded:: 0.3
"""
if self._logger and self._logger.name == self.logger_name:
return self._logger
with _logger_lock:
if self._logger and self._logger.name == self.logger_name:
return self._logger
from flask.logging import create_logger
self._logger = rv = create_logger(self)
return rv
@locked_cached_property
def jinja_env(self):
"""The Jinja2 environment used to load templates."""
return self.create_jinja_environment()
@property
def got_first_request(self):
"""This attribute is set to `True` if the application started
handling the first request.
.. versionadded:: 0.8
"""
return self._got_first_request
def make_config(self, instance_relative=False):
"""Used to create the config attribute by the Flask constructor.
The `instance_relative` parameter is passed in from the constructor
of Flask (there named `instance_relative_config`) and indicates if
the config should be relative to the instance path or the root path
of the application.
.. versionadded:: 0.8
"""
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
return Config(root_path, self.default_config)
def auto_find_instance_path(self):
"""Tries to locate the instance path if it was not provided to the
constructor of the application class. It will basically calculate
the path to a folder named ``instance`` next to your main file or
the package.
.. versionadded:: 0.8
"""
prefix, package_path = find_package(self.import_name)
if prefix is None:
return os.path.join(package_path, 'instance')
return os.path.join(prefix, 'var', self.name + '-instance')
def open_instance_resource(self, resource, mode='rb'):
"""Opens a resource from the application's instance folder
(:attr:`instance_path`). Otherwise works like
:meth:`open_resource`. Instance resources can also be opened for
writing.
:param resource: the name of the resource. To access resources within
subfolders use forward slashes as separator.
:param mode: resource file opening mode, default is 'rb'.
"""
return open(os.path.join(self.instance_path, resource), mode)
def create_jinja_environment(self):
"""Creates the Jinja2 environment based on :attr:`jinja_options`
and :meth:`select_jinja_autoescape`. Since 0.7 this also adds
the Jinja2 globals and filters after initialization. Override
this function to customize the behavior.
.. versionadded:: 0.5
"""
options = dict(self.jinja_options)
if 'autoescape' not in options:
options['autoescape'] = self.select_jinja_autoescape
rv = Environment(self, **options)
rv.globals.update(
url_for=url_for,
get_flashed_messages=get_flashed_messages,
config=self.config,
# request, session and g are normally added with the
# context processor for efficiency reasons but for imported
# templates we also want the proxies in there.
request=request,
session=session,
g=g
)
rv.filters['tojson'] = json.tojson_filter
return rv
def create_global_jinja_loader(self):
"""Creates the loader for the Jinja2 environment. Can be used to
override just the loader while keeping the rest unchanged. It's
discouraged to override this function; instead one should override
the :meth:`jinja_loader` function.
The global loader dispatches between the loaders of the application
and the individual blueprints.
.. versionadded:: 0.7
"""
return DispatchingJinjaLoader(self)
def init_jinja_globals(self):
"""Deprecated. Used to initialize the Jinja2 globals.
.. versionadded:: 0.5
.. versionchanged:: 0.7
This method is deprecated with 0.7. Override
:meth:`create_jinja_environment` instead.
"""
def select_jinja_autoescape(self, filename):
"""Returns `True` if autoescaping should be active for the given
template name.
.. versionadded:: 0.5
"""
if filename is None:
return False
return filename.endswith(('.html', '.htm', '.xml', '.xhtml'))
def update_template_context(self, context):
"""Update the template context with some commonly used variables.
This injects request, session, config and g into the template
context as well as everything template context processors want
to inject. Note that as of Flask 0.6, the original values
in the context will not be overridden if a context processor
decides to return a value with the same key.
:param context: the context as a dictionary that is updated in place
to add extra variables.
"""
funcs = self.template_context_processors[None]
reqctx = _request_ctx_stack.top
if reqctx is not None:
bp = reqctx.request.blueprint
if bp is not None and bp in self.template_context_processors:
funcs = chain(funcs, self.template_context_processors[bp])
orig_ctx = context.copy()
for func in funcs:
context.update(func())
# make sure the original values win. This makes it possible to
# easier add new variables in context processors without breaking
# existing views.
context.update(orig_ctx)
def run(self, host=None, port=None, debug=None, **options):
"""Runs the application on a local development server. If the
:attr:`debug` flag is set the server will automatically reload
for code changes and show a debugger in case an exception happened.
If you want to run the application in debug mode, but disable the
code execution on the interactive debugger, you can pass
``use_evalex=False`` as parameter. This will keep the debugger's
traceback screen active, but disable code execution.
.. admonition:: Keep in Mind
Flask will suppress any server error with a generic error page
unless it is in debug mode. As such to enable just the
interactive debugger without the code reloading, you have to
invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
Setting ``use_debugger`` to `True` without being in debug mode
won't catch any exceptions because there won't be any to
catch.
.. versionchanged:: 0.10
The default port is now picked from the ``SERVER_NAME`` variable.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'``.
:param port: the port of the webserver. Defaults to ``5000`` or the
port defined in the ``SERVER_NAME`` config variable if
present.
:param debug: if given, enable or disable debug mode.
See :attr:`debug`.
:param options: the options to be forwarded to the underlying
Werkzeug server. See
:func:`werkzeug.serving.run_simple` for more
information.
"""
from werkzeug.serving import run_simple
if host is None:
host = '127.0.0.1'
if port is None:
server_name = self.config['SERVER_NAME']
if server_name and ':' in server_name:
port = int(server_name.rsplit(':', 1)[1])
else:
port = 5000
if debug is not None:
self.debug = bool(debug)
options.setdefault('use_reloader', self.debug)
options.setdefault('use_debugger', self.debug)
try:
run_simple(host, port, self, **options)
finally:
# reset the first request information if the development server
# resetted normally. This makes it possible to restart the server
# without reloader and that stuff from an interactive shell.
self._got_first_request = False
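# A minimal usage sketch (hypothetical setup code, not part of this class):
# with SERVER_NAME configured, run() picks the port from it as documented
# above, while the host still defaults to 127.0.0.1, e.g.
#
#     app.config['SERVER_NAME'] = 'localhost:8080'
#     app.run(debug=True)   # serves on 127.0.0.1:8080 with reloader and debugger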
def test_client(self, use_cookies=True):
"""Creates a test client for this application. For information
about unit testing head over to :ref:`testing`.
Note that if you are testing for assertions or exceptions in your
application code, you must set ``app.testing = True`` in order for the
exceptions to propagate to the test client. Otherwise, the exception
will be handled by the application (not visible to the test client) and
the only indication of an AssertionError or other exception will be a
500 status code response to the test client. See the :attr:`testing`
attribute. For example::
app.testing = True
client = app.test_client()
The test client can be used in a `with` block to defer the closing down
of the context until the end of the `with` block. This is useful if
you want to access the context locals for testing::
with app.test_client() as c:
rv = c.get('/?vodka=42')
assert request.args['vodka'] == '42'
See :class:`~flask.testing.FlaskClient` for more information.
.. versionchanged:: 0.4
added support for `with` block usage for the client.
.. versionadded:: 0.7
The `use_cookies` parameter was added as well as the ability
to override the client to be used by setting the
:attr:`test_client_class` attribute.
"""
cls = self.test_client_class
if cls is None:
from flask.testing import FlaskClient as cls
return cls(self, self.response_class, use_cookies=use_cookies)
def open_session(self, request):
"""Creates or opens a new session. Default implementation stores all
session data in a signed cookie. This requires that the
:attr:`secret_key` is set. Instead of overriding this method
we recommend replacing the :class:`session_interface`.
:param request: an instance of :attr:`request_class`.
"""
return self.session_interface.open_session(self, request)
def save_session(self, session, response):
"""Saves the session if it needs updates. For the default
implementation, check :meth:`open_session`. Instead of overriding this
method we recommend replacing the :class:`session_interface`.
:param session: the session to be saved (a
:class:`~werkzeug.contrib.securecookie.SecureCookie`
object)
:param response: an instance of :attr:`response_class`
"""
return self.session_interface.save_session(self, session, response)
def make_null_session(self):
"""Creates a new instance of a missing session. Instead of overriding
this method we recommend replacing the :class:`session_interface`.
.. versionadded:: 0.7
"""
return self.session_interface.make_null_session(self)
def register_module(self, module, **options):
"""Registers a module with this application. The keyword argument
of this function are the same as the ones for the constructor of the
:class:`Module` class and will override the values of the module if
provided.
.. versionchanged:: 0.7
The module system was deprecated in favor for the blueprint
system.
"""
assert blueprint_is_module(module), 'register_module requires ' \
'actual module objects. Please upgrade to blueprints though.'
if not self.enable_modules:
raise RuntimeError('Module support was disabled but code '
'attempted to register a module named %r' % module)
else:
from warnings import warn
warn(DeprecationWarning('Modules are deprecated. Upgrade to '
'using blueprints. Have a look into the documentation for '
'more information. If this module was registered by a '
'Flask-Extension upgrade the extension or contact the author '
'of that extension instead. (Registered %r)' % module),
stacklevel=2)
self.register_blueprint(module, **options)
@setupmethod
def register_blueprint(self, blueprint, **options):
"""Registers a blueprint on the application.
.. versionadded:: 0.7
"""
first_registration = False
if blueprint.name in self.blueprints:
assert self.blueprints[blueprint.name] is blueprint, \
'A blueprint\'s name collision occurred between %r and ' \
'%r. Both share the same name "%s". Blueprints that ' \
'are created on the fly need unique names.' % \
(blueprint, self.blueprints[blueprint.name], blueprint.name)
else:
self.blueprints[blueprint.name] = blueprint
first_registration = True
blueprint.register(self, options, first_registration)
@setupmethod
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Connects a URL rule. Works exactly like the :meth:`route`
decorator. If a view_func is provided it will be registered with the
endpoint.
Basically this example::
@app.route('/')
def index():
pass
Is equivalent to the following::
def index():
pass
app.add_url_rule('/', 'index', index)
If the view_func is not provided you will need to connect the endpoint
to a view function like so::
app.view_functions['index'] = index
Internally :meth:`route` invokes :meth:`add_url_rule` so if you want
to customize the behavior via subclassing you only need to change
this method.
For more information refer to :ref:`url-route-registrations`.
.. versionchanged:: 0.2
`view_func` parameter added.
.. versionchanged:: 0.6
`OPTIONS` is added automatically as method.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param view_func: the function to call when serving a request to the
provided endpoint
:param options: the options to be forwarded to the underlying
:class:`~werkzeug.routing.Rule` object. A notable difference
from Werkzeug is the handling of the method options: `methods`
is a list of methods this rule should be limited
to (`GET`, `POST` etc.). By default a rule
just listens for `GET` (and implicitly `HEAD`).
Starting with Flask 0.6, `OPTIONS` is implicitly
added and handled by the standard request handling.
"""
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
options['endpoint'] = endpoint
methods = options.pop('methods', None)
# if the methods are not given and the view_func object knows its
# methods we can use that instead. If neither exists, we go with
# a tuple of only `GET` as default.
if methods is None:
methods = getattr(view_func, 'methods', None) or ('GET',)
if isinstance(methods, string_types):
raise TypeError('Allowed methods have to be iterables of strings, '
'for example: @app.route(..., methods=["POST"])')
methods = set(methods)
# Methods that should always be added
required_methods = set(getattr(view_func, 'required_methods', ()))
# starting with Flask 0.8 the view_func object can disable and
# force-enable the automatic options handling.
provide_automatic_options = getattr(view_func,
'provide_automatic_options', None)
if provide_automatic_options is None:
if 'OPTIONS' not in methods:
provide_automatic_options = True
required_methods.add('OPTIONS')
else:
provide_automatic_options = False
# Add the required methods now.
methods |= required_methods
rule = self.url_rule_class(rule, methods=methods, **options)
rule.provide_automatic_options = provide_automatic_options
self.url_map.add(rule)
if view_func is not None:
old_func = self.view_functions.get(endpoint)
if old_func is not None and old_func != view_func:
raise AssertionError('View function mapping is overwriting an '
'existing endpoint function: %s' % endpoint)
self.view_functions[endpoint] = view_func
def route(self, rule, **options):
"""A decorator that is used to register a view function for a
given URL rule. This does the same thing as :meth:`add_url_rule`
but is intended for decorator usage::
@app.route('/')
def index():
return 'Hello World'
For more information refer to :ref:`url-route-registrations`.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param options: the options to be forwarded to the underlying
:class:`~werkzeug.routing.Rule` object. A notable difference
from Werkzeug is the handling of the method options: `methods`
is a list of methods this rule should be limited
to (`GET`, `POST` etc.). By default a rule
just listens for `GET` (and implicitly `HEAD`).
Starting with Flask 0.6, `OPTIONS` is implicitly
added and handled by the standard request handling.
"""
def decorator(f):
endpoint = options.pop('endpoint', None)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
@setupmethod
def endpoint(self, endpoint):
"""A decorator to register a function as an endpoint.
Example::
@app.endpoint('example.endpoint')
def example():
return "example"
:param endpoint: the name of the endpoint
"""
def decorator(f):
self.view_functions[endpoint] = f
return f
return decorator
@setupmethod
def errorhandler(self, code_or_exception):
"""A decorator that is used to register a function give a given
error code. Example::
@app.errorhandler(404)
def page_not_found(error):
return 'This page does not exist', 404
You can also register handlers for arbitrary exceptions::
@app.errorhandler(DatabaseError)
def special_exception_handler(error):
return 'Database connection failed', 500
You can also register a function as error handler without using
the :meth:`errorhandler` decorator. The following example is
equivalent to the one above::
def page_not_found(error):
return 'This page does not exist', 404
app.error_handler_spec[None][404] = page_not_found
Setting error handlers via assignments to :attr:`error_handler_spec`
however is discouraged as it requires fiddling with nested dictionaries
and the special case for arbitrary exception types.
The first `None` refers to the active blueprint. If the error
handler should be application wide `None` shall be used.
.. versionadded:: 0.7
One can now additionally also register custom exception types
that do not necessarily have to be a subclass of the
:class:`~werkzeug.exceptions.HTTPException` class.
:param code_or_exception: the code as integer for the handler, or
an arbitrary exception class
"""
def decorator(f):
self._register_error_handler(None, code_or_exception, f)
return f
return decorator
def register_error_handler(self, code_or_exception, f):
"""Alternative error attach function to the :meth:`errorhandler`
decorator that is more straightforward to use for non-decorator
usage.
.. versionadded:: 0.7
"""
self._register_error_handler(None, code_or_exception, f)
@setupmethod
def _register_error_handler(self, key, code_or_exception, f):
if isinstance(code_or_exception, HTTPException):
code_or_exception = code_or_exception.code
if isinstance(code_or_exception, integer_types):
assert code_or_exception != 500 or key is None, \
'It is currently not possible to register a 500 internal ' \
'server error on a per-blueprint level.'
self.error_handler_spec.setdefault(key, {})[code_or_exception] = f
else:
self.error_handler_spec.setdefault(key, {}).setdefault(None, []) \
.append((code_or_exception, f))
@setupmethod
def template_filter(self, name=None):
"""A decorator that is used to register custom template filter.
You can specify a name for the filter, otherwise the function
name will be used. Example::
@app.template_filter()
def reverse(s):
return s[::-1]
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_filter(f, name=name)
return f
return decorator
@setupmethod
def add_template_filter(self, f, name=None):
"""Register a custom template filter. Works exactly like the
:meth:`template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
self.jinja_env.filters[name or f.__name__] = f
@setupmethod
def template_test(self, name=None):
"""A decorator that is used to register custom template test.
You can specify a name for the test, otherwise the function
name will be used. Example::
@app.template_test()
def is_prime(n):
if n == 2:
return True
for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
if n % i == 0:
return False
return True
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_test(f, name=name)
return f
return decorator
@setupmethod
def add_template_test(self, f, name=None):
"""Register a custom template test. Works exactly like the
:meth:`template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
self.jinja_env.tests[name or f.__name__] = f
@setupmethod
def template_global(self, name=None):
"""A decorator that is used to register a custom template global function.
You can specify a name for the global function, otherwise the function
name will be used. Example::
@app.template_global()
def double(n):
return 2 * n
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_global(f, name=name)
return f
return decorator
@setupmethod
def add_template_global(self, f, name=None):
"""Register a custom template global function. Works exactly like the
:meth:`template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
self.jinja_env.globals[name or f.__name__] = f
@setupmethod
def before_request(self, f):
"""Registers a function to run before each request."""
self.before_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def before_first_request(self, f):
"""Registers a function to be run before the first request to this
instance of the application.
.. versionadded:: 0.8
"""
self.before_first_request_funcs.append(f)
return f
@setupmethod
def after_request(self, f):
"""Register a function to be run after each request. Your function
must take one parameter, a :attr:`response_class` object and return
a new response object or the same (see :meth:`process_response`).
As of Flask 0.7 this function might not be executed at the end of the
request in case an unhandled exception occurred.
"""
self.after_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def teardown_request(self, f):
"""Register a function to be run at the end of each request,
regardless of whether there was an exception or not. These functions
are executed when the request context is popped, even if no
actual request was performed.
Example::
ctx = app.test_request_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the request context moves from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
Generally teardown functions must take every necessary step to avoid
failing. If they do execute code that might fail, they have to
surround the execution of that code with try/except statements and
log any errors that occur.
When a teardown function is called because of an exception it will
be passed an error object.
.. admonition:: Debug Note
In debug mode Flask will not tear down a request on an exception
immediately. Instead it will keep it alive so that the interactive
debugger can still access it. This behavior can be controlled
by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable.
"""
self.teardown_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def teardown_appcontext(self, f):
"""Registers a function to be called when the application context
ends. These functions are typically also called when the request
context is popped.
Example::
ctx = app.app_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the app context moves from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
Since a request context typically also manages an application
context it would also be called when you pop a request context.
When a teardown function is called because of an exception it will
be passed an error object.
.. versionadded:: 0.9
"""
self.teardown_appcontext_funcs.append(f)
return f
@setupmethod
def context_processor(self, f):
"""Registers a template context processor function."""
self.template_context_processors[None].append(f)
return f
@setupmethod
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for all view
functions of the application. It's called before the view functions
are called and can modify the url values provided.
"""
self.url_value_preprocessors.setdefault(None, []).append(f)
return f
@setupmethod
def url_defaults(self, f):
"""Callback function for URL defaults for all view functions of the
application. It's called with the endpoint and values and should
update the values passed in place.
"""
self.url_default_functions.setdefault(None, []).append(f)
return f
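# A minimal pairing sketch (hypothetical 'lang_code' URL parameter, not part
# of this class): a URL value preprocessor pops the value from every matched
# URL, and a url_defaults callback injects it back whenever a URL is built,
# so views never have to pass it explicitly, e.g.
#
#     @app.url_value_preprocessor
#     def pull_lang_code(endpoint, values):
#         g.lang_code = (values or {}).pop('lang_code', None)
#
#     @app.url_defaults
#     def add_lang_code(endpoint, values):
#         values.setdefault('lang_code', g.lang_code)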
def handle_http_exception(self, e):
"""Handles an HTTP exception. By default this will invoke the
registered error handlers and fall back to returning the
exception as response.
.. versionadded:: 0.3
"""
handlers = self.error_handler_spec.get(request.blueprint)
# Proxy exceptions don't have error codes. We want to always return
# those unchanged as errors
if e.code is None:
return e
if handlers and e.code in handlers:
handler = handlers[e.code]
else:
handler = self.error_handler_spec[None].get(e.code)
if handler is None:
return e
return handler(e)
def trap_http_exception(self, e):
"""Checks if an HTTP exception should be trapped or not. By default
this will return `False` for all exceptions except for a bad request
key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to `True`. It
also returns `True` if ``TRAP_HTTP_EXCEPTIONS`` is set to `True`.
This is called for all HTTP exceptions raised by a view function.
If it returns `True` for any exception the error handler for this
exception is not called and it shows up as a regular exception in the
traceback. This is helpful for debugging implicitly raised HTTP
exceptions.
.. versionadded:: 0.8
"""
if self.config['TRAP_HTTP_EXCEPTIONS']:
return True
if self.config['TRAP_BAD_REQUEST_ERRORS']:
return isinstance(e, BadRequest)
return False
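# A minimal configuration sketch (hypothetical setup code): enabling
# TRAP_BAD_REQUEST_ERRORS makes failed request.form / request.args key
# lookups surface as tracebacks instead of generic 400 responses, e.g.
#
#     app.config['TRAP_BAD_REQUEST_ERRORS'] = True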
def handle_user_exception(self, e):
"""This method is called whenever an exception occurs that should be
handled. A special case are
:class:`~werkzeug.exception.HTTPException`\s which are forwarded by
this function to the :meth:`handle_http_exception` method. This
function will either return a response value or reraise the
exception with the same traceback.
.. versionadded:: 0.7
"""
exc_type, exc_value, tb = sys.exc_info()
assert exc_value is e
# ensure not to trash sys.exc_info() at that point in case someone
# wants the traceback preserved in handle_http_exception. Of course
# we cannot prevent users from trashing it themselves in a custom
# trap_http_exception method so that's their fault then.
if isinstance(e, HTTPException) and not self.trap_http_exception(e):
return self.handle_http_exception(e)
blueprint_handlers = ()
handlers = self.error_handler_spec.get(request.blueprint)
if handlers is not None:
blueprint_handlers = handlers.get(None, ())
app_handlers = self.error_handler_spec[None].get(None, ())
for typecheck, handler in chain(blueprint_handlers, app_handlers):
if isinstance(e, typecheck):
return handler(e)
reraise(exc_type, exc_value, tb)
def handle_exception(self, e):
"""Default exception handling that kicks in when an exception
occurs that is not caught. In debug mode the exception will
be re-raised immediately, otherwise it is logged and the handler
for a 500 internal server error is used. If no such handler
exists, a default 500 internal server error message is displayed.
.. versionadded:: 0.3
"""
exc_type, exc_value, tb = sys.exc_info()
got_request_exception.send(self, exception=e)
handler = self.error_handler_spec[None].get(500)
if self.propagate_exceptions:
# if we want to repropagate the exception, we can attempt to
# raise it with the whole traceback in case we can do that
# (the function was actually called from the except part)
# otherwise, we just raise the error again
if exc_value is e:
reraise(exc_type, exc_value, tb)
else:
raise e
self.log_exception((exc_type, exc_value, tb))
if handler is None:
return InternalServerError()
return handler(e)
def log_exception(self, exc_info):
"""Logs an exception. This is called by :meth:`handle_exception`
if debugging is disabled and right before the handler is called.
The default implementation logs the exception as error on the
:attr:`logger`.
.. versionadded:: 0.8
"""
self.logger.error('Exception on %s [%s]' % (
request.path,
request.method
), exc_info=exc_info)
def raise_routing_exception(self, request):
"""Exceptions that are recording during routing are reraised with
this method. During debug we are not reraising redirect requests
for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
a different error instead to help debug situations.
:internal:
"""
if not self.debug \
or not isinstance(request.routing_exception, RequestRedirect) \
or request.method in ('GET', 'HEAD', 'OPTIONS'):
raise request.routing_exception
from .debughelpers import FormDataRoutingRedirect
raise FormDataRoutingRedirect(request)
def dispatch_request(self):
"""Does the request dispatching. Matches the URL and returns the
return value of the view or error handler. This does not have to
be a response object. In order to convert the return value to a
proper response object, call :func:`make_response`.
.. versionchanged:: 0.7
This no longer does the exception handling, this code was
moved to the new :meth:`full_dispatch_request`.
"""
req = _request_ctx_stack.top.request
if req.routing_exception is not None:
self.raise_routing_exception(req)
rule = req.url_rule
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return self.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
return self.view_functions[rule.endpoint](**req.view_args)
def full_dispatch_request(self):
"""Dispatches the request and on top of that performs request
pre and postprocessing as well as HTTP exception catching and
error handling.
.. versionadded:: 0.7
"""
self.try_trigger_before_first_request_functions()
try:
request_started.send(self)
rv = self.preprocess_request()
if rv is None:
rv = self.dispatch_request()
except Exception as e:
rv = self.handle_user_exception(e)
response = self.make_response(rv)
response = self.process_response(response)
request_finished.send(self, response=response)
return response
def try_trigger_before_first_request_functions(self):
"""Called before each request and will ensure that it triggers
the :attr:`before_first_request_funcs` exactly once per
application instance (which usually means per process).
:internal:
"""
if self._got_first_request:
return
with self._before_request_lock:
if self._got_first_request:
return
for func in self.before_first_request_funcs:
func()
self._got_first_request = True
def make_default_options_response(self):
"""This method is called to create the default `OPTIONS` response.
This can be changed through subclassing to change the default
behavior of `OPTIONS` responses.
.. versionadded:: 0.7
"""
adapter = _request_ctx_stack.top.url_adapter
if hasattr(adapter, 'allowed_methods'):
methods = adapter.allowed_methods()
else:
# fallback for Werkzeug < 0.7
methods = []
try:
adapter.match(method='--')
except MethodNotAllowed as e:
methods = e.valid_methods
except HTTPException as e:
pass
rv = self.response_class()
rv.allow.update(methods)
return rv
def should_ignore_error(self, error):
"""This is called to figure out if an error should be ignored
or not as far as the teardown system is concerned. If this
function returns `True` then the teardown handlers will not be
passed the error.
.. versionadded:: 0.10
"""
return False
def make_response(self, rv):
"""Converts the return value from a view function to a real
response object that is an instance of :attr:`response_class`.
The following types are allowed for `rv`:
.. tabularcolumns:: |p{3.5cm}|p{9.5cm}|
======================= ===========================================
:attr:`response_class` the object is returned unchanged
:class:`str` a response object is created with the
string as body
:class:`unicode` a response object is created with the
string encoded to utf-8 as body
a WSGI function the function is called as WSGI application
and buffered as response object
:class:`tuple` A tuple in the form ``(response, status,
headers)`` or ``(response, headers)``
where `response` is any of the
types defined here, `status` is a string
or an integer and `headers` is a list or
a dictionary with header values.
======================= ===========================================
:param rv: the return value from the view function
.. versionchanged:: 0.9
Previously a tuple was interpreted as the arguments for the
response object.
"""
status_or_headers = headers = None
if isinstance(rv, tuple):
rv, status_or_headers, headers = rv + (None,) * (3 - len(rv))
if rv is None:
raise ValueError('View function did not return a response')
if isinstance(status_or_headers, (dict, list)):
headers, status_or_headers = status_or_headers, None
if not isinstance(rv, self.response_class):
# When we create a response object directly, we let the constructor
# set the headers and status. We do this because there can be
# some extra logic involved when creating these objects with
# specific values (like default content type selection).
if isinstance(rv, (text_type, bytes, bytearray)):
rv = self.response_class(rv, headers=headers, status=status_or_headers)
headers = status_or_headers = None
else:
rv = self.response_class.force_type(rv, request.environ)
if status_or_headers is not None:
if isinstance(status_or_headers, string_types):
rv.status = status_or_headers
else:
rv.status_code = status_or_headers
if headers:
rv.headers.extend(headers)
return rv
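# A minimal illustration (hypothetical view return values; the bodies are
# placeholders): the forms accepted by make_response above include
#
#     return 'plain string body'                     # response_class with the string as body
#     return 'created', 201, {'X-Custom': 'value'}   # (response, status, headers)
#     return 'ok', {'X-Custom': 'value'}             # (response, headers)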
def create_url_adapter(self, request):
"""Creates a URL adapter for the given request. The URL adapter
is created at a point where the request context is not yet set up
so the request is passed explicitly.
.. versionadded:: 0.6
.. versionchanged:: 0.9
This can now also be called without a request object when the
URL adapter is created for the application context.
"""
if request is not None:
return self.url_map.bind_to_environ(request.environ,
server_name=self.config['SERVER_NAME'])
# We need at the very least the server name to be set for this
# to work.
if self.config['SERVER_NAME'] is not None:
return self.url_map.bind(
self.config['SERVER_NAME'],
script_name=self.config['APPLICATION_ROOT'] or '/',
url_scheme=self.config['PREFERRED_URL_SCHEME'])
def inject_url_defaults(self, endpoint, values):
"""Injects the URL defaults for the given endpoint directly into
the values dictionary passed. This is used internally and
automatically called on URL building.
.. versionadded:: 0.7
"""
funcs = self.url_default_functions.get(None, ())
if '.' in endpoint:
bp = endpoint.rsplit('.', 1)[0]
funcs = chain(funcs, self.url_default_functions.get(bp, ()))
for func in funcs:
func(endpoint, values)
def handle_url_build_error(self, error, endpoint, values):
"""Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`.
"""
exc_type, exc_value, tb = sys.exc_info()
for handler in self.url_build_error_handlers:
try:
rv = handler(error, endpoint, values)
if rv is not None:
return rv
except BuildError as error:
pass
# At this point we want to reraise the exception. If the error is
# still the same one we can reraise it with the original traceback,
# otherwise we raise it from here.
if error is exc_value:
reraise(exc_type, exc_value, tb)
raise error
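# A minimal handler sketch (hypothetical CDN fallback, not part of this
# class): functions appended to url_build_error_handlers receive
# (error, endpoint, values) and may return a URL string to use instead of
# letting the BuildError propagate, e.g.
#
#     def external_url_handler(error, endpoint, values):
#         if endpoint == 'cdn_asset':
#             return 'https://cdn.example.com/' + values.get('filename', '')
#         return None  # fall through to the next handler or re-raise
#
#     app.url_build_error_handlers.append(external_url_handler)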
def preprocess_request(self):
"""Called before the actual request dispatching and will
call every function decorated with :meth:`before_request`.
If any of these functions returns a value it's handled as
if it was the return value from the view and further
request handling is stopped.
This also triggers the :meth:`url_value_preprocessor` functions before
the actual :meth:`before_request` functions are called.
"""
bp = _request_ctx_stack.top.request.blueprint
funcs = self.url_value_preprocessors.get(None, ())
if bp is not None and bp in self.url_value_preprocessors:
funcs = chain(funcs, self.url_value_preprocessors[bp])
for func in funcs:
func(request.endpoint, request.view_args)
funcs = self.before_request_funcs.get(None, ())
if bp is not None and bp in self.before_request_funcs:
funcs = chain(funcs, self.before_request_funcs[bp])
for func in funcs:
rv = func()
if rv is not None:
return rv
def process_response(self, response):
"""Can be overridden in order to modify the response object
before it's sent to the WSGI server. By default this will
call all the :meth:`after_request` decorated functions.
.. versionchanged:: 0.5
As of Flask 0.5 the functions registered for after request
execution are called in reverse order of registration.
:param response: a :attr:`response_class` object.
:return: a new response object or the same, has to be an
instance of :attr:`response_class`.
"""
ctx = _request_ctx_stack.top
bp = ctx.request.blueprint
funcs = ctx._after_request_functions
if bp is not None and bp in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
if None in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[None]))
for handler in funcs:
response = handler(response)
if not self.session_interface.is_null_session(ctx.session):
self.save_session(ctx.session, response)
return response
def do_teardown_request(self, exc=None):
"""Called after the actual request dispatching and will
call every function decorated with :meth:`teardown_request`. This is
not actually called by the :class:`Flask` object itself but is always
triggered when the request context is popped. That way we have
tighter control over certain resources in testing environments.
.. versionchanged:: 0.9
Added the `exc` argument. Previously this was always using the
current exception information.
"""
if exc is None:
exc = sys.exc_info()[1]
funcs = reversed(self.teardown_request_funcs.get(None, ()))
bp = _request_ctx_stack.top.request.blueprint
if bp is not None and bp in self.teardown_request_funcs:
funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
for func in funcs:
func(exc)
request_tearing_down.send(self, exc=exc)
def do_teardown_appcontext(self, exc=None):
"""Called when an application context is popped. This works pretty
much the same as :meth:`do_teardown_request` but for the application
context.
.. versionadded:: 0.9
"""
if exc is None:
exc = sys.exc_info()[1]
for func in reversed(self.teardown_appcontext_funcs):
func(exc)
appcontext_tearing_down.send(self, exc=exc)
def app_context(self):
"""Binds the application only. For as long as the application is bound
to the current context the :data:`flask.current_app` points to that
application. An application context is automatically created when a
request context is pushed if necessary.
Example usage::
with app.app_context():
...
.. versionadded:: 0.9
"""
return AppContext(self)
def request_context(self, environ):
"""Creates a :class:`~flask.ctx.RequestContext` from the given
environment and binds it to the current context. This must be used in
combination with the `with` statement because the request is only bound
to the current context for the duration of the `with` block.
Example usage::
with app.request_context(environ):
do_something_with(request)
The object returned can also be used without the `with` statement
which is useful for working in the shell. The example above is
doing exactly the same as this code::
ctx = app.request_context(environ)
ctx.push()
try:
do_something_with(request)
finally:
ctx.pop()
.. versionchanged:: 0.3
Added support for non-with statement usage and `with` statement
is now passed the ctx object.
:param environ: a WSGI environment
"""
return RequestContext(self, environ)
def test_request_context(self, *args, **kwargs):
"""Creates a WSGI environment from the given values (see
:class:`werkzeug.test.EnvironBuilder` for more information, this
function accepts the same arguments).
"""
from flask.testing import make_test_environ_builder
builder = make_test_environ_builder(self, *args, **kwargs)
try:
return self.request_context(builder.get_environ())
finally:
builder.close()
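# A minimal usage sketch (hypothetical path and query string): the context
# returned by test_request_context behaves like a pushed request context, e.g.
#
#     with app.test_request_context('/hello?name=flask'):
#         assert request.path == '/hello'
#         assert request.args['name'] == 'flask'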
def wsgi_app(self, environ, start_response):
"""The actual WSGI application. This is not implemented in
`__call__` so that middlewares can be applied without losing a
reference to the class. So instead of doing this::
app = MyMiddleware(app)
It's a better idea to do this instead::
app.wsgi_app = MyMiddleware(app.wsgi_app)
Then you still have the original application object around and
can continue to call methods on it.
.. versionchanged:: 0.7
The behavior of the before and after request callbacks was changed
under error conditions and a new callback was added that will
always execute at the end of the request, independent of whether an
error occurred or not. See :ref:`callbacks-and-errors`.
:param environ: a WSGI environment
:param start_response: a callable accepting a status code,
a list of headers and an optional
exception context to start the response
"""
ctx = self.request_context(environ)
ctx.push()
error = None
try:
try:
response = self.full_dispatch_request()
except Exception as e:
error = e
response = self.make_response(self.handle_exception(e))
return response(environ, start_response)
finally:
if self.should_ignore_error(error):
error = None
ctx.auto_pop(error)
@property
def modules(self):
from warnings import warn
warn(DeprecationWarning('Flask.modules is deprecated, use '
'Flask.blueprints instead'), stacklevel=2)
return self.blueprints
def __call__(self, environ, start_response):
"""Shortcut for :attr:`wsgi_app`."""
return self.wsgi_app(environ, start_response)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.name,
)
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'treasurehunt.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
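# A minimal usage sketch (assumed standard Django workflow, not part of this
# file): the script is invoked from the project root, e.g.
#
#     python manage.py migrate
#     python manage.py runserver 0.0.0.0:8000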
|
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.tree import DecisionTreeRegressor
from ngboost import NGBClassifier
from ngboost.distns import k_categorical
if __name__ == "__main__":
# An example where the base learner is also searched over (this is how you would vary tree depth):
X, Y = load_breast_cancer(return_X_y=True)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
b1 = DecisionTreeRegressor(criterion="friedman_mse", max_depth=2)
b2 = DecisionTreeRegressor(criterion="friedman_mse", max_depth=4)
b3 = Ridge(alpha=0.0)
param_grid = {
"n_estimators": [20, 50],
"minibatch_frac": [1.0, 0.5],
"Base": [b1, b2],
}
ngb = NGBClassifier(natural_gradient=True, verbose=False, Dist=k_categorical(2))
grid_search = GridSearchCV(ngb, param_grid=param_grid, cv=5)
grid_search.fit(X_train, Y_train)
print(grid_search.best_params_)
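# Hedged follow-up sketch (not part of the original example): score the refit best
# estimator on the held-out split created above. GridSearchCV.predict delegates to
# the refit best estimator; accuracy_score is used here purely for illustration.
from sklearn.metrics import accuracy_score
Y_test_pred = grid_search.predict(X_test)
print("Held-out accuracy:", accuracy_score(Y_test, Y_test_pred))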
|
'''
Train QSAR models
DESCRIPTION
This module holds functions for training QSAR models.
'''
# Imports
import pandas as pd
import numpy as np
import sklearn.preprocessing as skp
import sklearn.decomposition as skd
import sklearn.ensemble as ske
import sklearn.model_selection as skm
import sklearn.neural_network as skn
import sklearn.metrics as skmet
import imblearn.over_sampling as imbl_over
import imblearn.combine as imbl_comb
from ..Validation.appDom import ad_pdf_normal
from ..Model import c_knnra as knnra
# Functions
def plotPrediction(Y_Train,Y_Train_Pred,Y_Test,Y_pred):
'''
Plot measured vs. prediction.
'''
# Imports
import matplotlib.pyplot as plt
import matplotlib as mpl
# Plotting parameters
mpl.rcParams['font.size'] = 16
# Variables
if (len(Y_Test) != 0):
x_min = min([min(Y_Test),min(Y_pred),min(Y_Train),min(Y_Train_Pred)])
x_max = max([max(Y_Test),max(Y_pred),max(Y_Train),max(Y_Train_Pred)])
else:
x_min = min([min(Y_Train),min(Y_Train_Pred)])
x_max = max([max(Y_Train),max(Y_Train_Pred)])
x_min -= 0.05*(x_max-x_min)
x_max += 0.05*(x_max-x_min)
xVals = np.linspace(x_min,x_max,1000)
plt.scatter(Y_Train_Pred,Y_Train,label='Training',color="#003fa0")
if (len(Y_Test) != 0):
plt.scatter(Y_pred,Y_Test,label='Testing',color="#b42f21",marker='^')
plt.plot(xVals,xVals,lw=3,color='k')
plt.xlim([x_min,x_max])
plt.ylim([x_min,x_max])
plt.ylabel(r'Measured Log$_{10}$ RD50 (ppm)')
plt.xlabel(r'Predicted Log$_{10}$ RD50 (ppm)')
plt.legend(loc=0)
plt.tight_layout()
plt.savefig('Shifted_Regression.pdf',dpi=1000,format='pdf')
plt.show()
def model_test(TestDF,modelFit):
'''
Test data against a model fit.
INPUT
TestDF: (pandas Dataframe) Dataframe containing testing data.
modelFit: (model) Class containing the model which has already been fit.
OUTPUT
Y_Pred: (numpy array) Predicted values.
X_Test: (numpy array) Descriptor matrix used for prediction.
NOTES
- The modelFit variable must be a class containing a 'predict' function similar to ScikitLearn model classes.
'''
# Variables
TestDF_cpy = TestDF.copy()
# Prepare data for prediction
X_Test = (TestDF_cpy.iloc[:,1:]).values
# Predict
Y_Pred = modelFit.predict(X_Test)
return Y_Pred,X_Test
def model_nn_reg(TrainDF,TestDF):
'''
Train regression model using neural network.
INPUT
TrainDF: (pandas Data Frame) Training data.
TestDF: (pandas Data Frame) Testing data.
OUTPUT
Tuple containing Y_Train_Pred, Y_Train, Y_Pred, Y_Test and the fitted bagging regressor.
NOTES
Input dataframes should be structured such that the activity is located in the first column and descriptors/features in all remaining columns.
'''
# Variables
TrainDF_cpy = TrainDF.copy()
TestDF_cpy = TestDF.copy()
testBool = True
# Only use test sets with data
if ((TestDF.shape)[0] == 0):
testBool = False
# Get numpy arrays for the activity and descriptors
X_Train = (TrainDF_cpy.iloc[:,1:]).values
Y_Train = (TrainDF_cpy.iloc[:,0]).values
# Only initialize for test sets with data
if (testBool):
X_Test = (TestDF_cpy.iloc[:,1:]).values
Y_Test = (TestDF_cpy.iloc[:,0]).values
else:
X_Test = []
Y_Test = []
# Set hidden layer size (one hidden layer with one unit per feature)
numLayers = (X_Train.shape)[1]
# Initialize neural network
mlreg = skn.MLPRegressor(hidden_layer_sizes=(numLayers,),
alpha=0.0001,
batch_size='auto',
learning_rate='constant',
learning_rate_init=0.01,
solver='lbfgs')
bagReg = ske.BaggingRegressor(base_estimator=mlreg,
n_estimators=100,
n_jobs=7,
random_state=None,
warm_start=False,
bootstrap=True,
oob_score=True)
# Fitting
print("Fitting...")
bagReg.fit(X_Train,Y_Train)
Y_Train_Pred = bagReg.predict(X_Train)
score_train = skmet.r2_score(Y_Train,Y_Train_Pred)
print("Training: " + str(score_train))
if (testBool):
Y_Pred = bagReg.predict(X_Test)
score_test = skmet.r2_score(Y_Test,Y_Pred)
print("Testing: " + str(score_test))
else:
Y_Pred = []
# Plot training
oob_score = bagReg.oob_score_
print("OOB Score: " + str(oob_score))
# Plot the results
print('Plotting...')
plotPrediction(Y_Train,Y_Train_Pred,Y_Test,Y_Pred)
return Y_Train_Pred,Y_Train,Y_Pred,Y_Test,bagReg
def model_rf_reg(TrainDF,TestDF):
'''
Train regression model using random forest.
INPUT
TrainDF: (pandas Data Frame) Training data.
TestDF: (pandas Data Frame) Testing data.
OUTPUT
Tuple containing Y_Train_Pred, Y_Train, Y_Pred, Y_Test and the fitted random forest regressor.
NOTES
Input dataframes should be structured such that the activity is located in the first column and descriptors/features in all remaining columns.
'''
# Variables
TrainDF_cpy = TrainDF.copy()
TestDF_cpy = TestDF.copy()
testBool = True
# Only use test sets with data
if ((TestDF.shape)[0] == 0):
testBool = False
# Get numpy arrays for the activity and descriptors
X_Train = (TrainDF_cpy.iloc[:,1:]).values
Y_Train = (TrainDF_cpy.iloc[:,0]).values
# Only initialize for test sets with data
if (testBool):
X_Test = (TestDF_cpy.iloc[:,1:]).values
Y_Test = (TestDF_cpy.iloc[:,0]).values
else:
X_Test = []
Y_Test = []
# Modeling
reg_rf = ske.RandomForestRegressor(random_state=42,
n_estimators=1000,
max_features='auto',
min_samples_split=2,
bootstrap=True,
oob_score=True)
print(reg_rf)
# Fitting
print("Fitting...")
reg_rf.fit(X_Train,Y_Train)
Y_Train_Pred = reg_rf.predict(X_Train)
score_train = skmet.r2_score(Y_Train,Y_Train_Pred)
print("Training: " + str(score_train))
if (testBool):
Y_Pred = reg_rf.predict(X_Test)
score_test = skmet.r2_score(Y_Test,Y_Pred)
print("Testing: " + str(score_test))
else:
Y_Pred = []
# Plot training
oob_score = reg_rf.oob_score_
print("OOB Score: " + str(oob_score))
# Print importances
#cols = TrainDF_cpy.columns.values
#for index,importance in enumerate(rfreg.feature_importances_):
# print(cols[index+1] + ' : ' + str(importance))
# Plot the results
print('Plotting...')
plotPrediction(Y_Train,Y_Train_Pred,Y_Test,Y_Pred)
'''
feature_import = zip(TrainDF_cpy.columns.values[1:],reg_rf.feature_importances_)
feature_import = sorted(feature_import,key=lambda x:x[1])
feature_import = list(reversed(feature_import))
for val in feature_import:
print(val)
'''
return Y_Train_Pred,Y_Train,Y_Pred,Y_Test,reg_rf
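# Illustrative usage sketch for the regression trainers above (hedged, not part of
# the original module). Column names and values are placeholders; the activity must
# be the first column, as noted in the docstrings.
#
#   train_df = pd.DataFrame({'activity': [1.2, 0.8, 2.1],
#                            'desc1': [0.1, 0.4, 0.9],
#                            'desc2': [3.0, 1.5, 2.2]})
#   test_df = pd.DataFrame(columns=train_df.columns)  # an empty test set is allowed
#   Y_Train_Pred, Y_Train, Y_Pred, Y_Test, model = model_rf_reg(train_df, test_df)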
def model_rf_class(TrainDF,TestDF):
'''
Train classification model using random forest.
INPUT
TrainDF: (pandas Data Frame) Training data.
TestDF: (pandas Data Frame) Testing data.
OUTPUT
Tuple containing Y_Train_Pred, Y_Train, Y_Pred, Y_Test and the fitted random forest classifier.
NOTES
Input dataframes should be structured such that the activity is located in the first column and descriptors/features in all remaining columns.
'''
# Variables
TrainDF_cpy = TrainDF.copy()
TestDF_cpy = TestDF.copy()
testBool = True
# Only use test sets with data
if ((TestDF.shape)[0] == 0):
testBool = False
# Get numpy arrays for the activity and descriptors
X_Train = (TrainDF_cpy.iloc[:,1:]).values
Y_Train = (TrainDF_cpy.iloc[:,0]).values
# Only initialize for test sets with data
if (testBool):
X_Test = (TestDF_cpy.iloc[:,1:]).values
Y_Test = (TestDF_cpy.iloc[:,0]).values
else:
X_Test = []
Y_Test = []
# Set up model
class_RF = ske.RandomForestClassifier(random_state=42,
n_estimators=1000,
max_features='auto',
min_samples_split=2,
oob_score=True,
class_weight='balanced')
# Fitting
print("Fitting...")
class_RF.fit(X_Train,Y_Train)
Y_Train_Pred = class_RF.predict(X_Train)
print("Confusion Matrix - Training:")
print(set(Y_Train))
print(skmet.confusion_matrix(Y_Train,Y_Train_Pred))
if (testBool):
Y_Pred = class_RF.predict(X_Test)
print("Confusion Matrix - Testing:")
print(set(Y_Test))
print(skmet.confusion_matrix(Y_Test,Y_Pred))
else:
Y_Pred = []
# Output training statistics
print("OOB Score (Q^2): " + str(class_RF.oob_score_))
return Y_Train_Pred,Y_Train,Y_Pred,Y_Test,class_RF
def model_rf_class_DEBUG(inDF):
'''
Train classification model using random forest.
INPUT
inDF: (pandas Data Frame) Input dataframe should be structured such that the activity is located in the first column and descriptors/features in all remaining columns.
OUTPUT
outDF: (pandas Data Frame) Single column dataframe containing predicted values.
'''
# Variables
modelDF = inDF.copy()
#print(modelDF.max())
# Normalize descriptors
normDesc = skp.normalize(modelDF.iloc[:,1:])
# Dimensionality reduction
pca = skd.PCA(n_components=10)
#X = pca.fit_transform(normDesc)
X = normDesc
Y = (modelDF.iloc[:,0]).values
# Split into testing and training sets
X_Train, X_Test, Y_Train, Y_Test = skm.train_test_split(X,Y,
test_size=0.25,
random_state=42,
stratify=None)
# Check applicability domain
print('Checking applicability domain...')
TrainDF = pd.DataFrame(np.hstack((np.matrix(Y_Train).T,X_Train)))
TestDF = pd.DataFrame(np.hstack((np.matrix(Y_Test).T,X_Test)))
TestDF = ad_pdf_normal(TestDF,TrainDF)
X_Test = (TestDF.values)[:,1:]
Y_Test = (TestDF.values)[:,0]
# Modeling
rfclass = ske.RandomForestClassifier(random_state=42,
n_estimators=1000,
max_features='auto',
min_samples_split=2,
oob_score=True,
class_weight='balanced')
# Fitting
rfclass.fit(X_Train,Y_Train)
print("Fitting...")
#Y_Train_pred = skm.cross_val_predict(rfClass,X_Train,Y_Train,cv=100)
Y_pred = rfclass.predict(X_Test)
Y_Train_Pred = rfclass.predict(X_Train)
# Plot training
score_train = skmet.r2_score(Y_Train,Y_Train_Pred)
score_test = skmet.r2_score(Y_Test,Y_pred)
oob_score = rfclass.oob_score_
print("Training: " + str(score_train))
print("Testing: " + str(score_test))
print("OOB Score: " + str(oob_score))
# Compute confusion matrix
print("Confusion Matrix - Training:")
print(set(Y_Train))
print(skmet.confusion_matrix(Y_Train,Y_Train_Pred))
print("Confusion Matrix - Testing:")
print(set(Y_Test))
print(skmet.confusion_matrix(Y_Test,Y_pred))
# Print importances
#cols = modelDF.columns.values
#for index,importance in enumerate(rfreg.feature_importances_):
#print(cols[index+1] + ' : ' + str(importance))
#plotPrediction(Y_Train,Y_Train_Pred,Y_Test,Y_pred)
return modelDF
def train_nn_reg_DEBUG(inDF):
'''
Train regression model using a neural network.
INPUT
inDF: (pandas Data Frame) Input dataframe should be structured such that the activity is located in the first column and descriptors/features in all remaining columns.
OUTPUT
outDF: (pandas Data Frame) Single column dataframe containing predicted values.
'''
# Variables
modelDF = inDF.copy()
numLayers = 100
# Normalize descriptors
normDesc = skp.normalize(modelDF.iloc[:,1:])
# Set hidden layer size (one hidden layer with one unit per feature)
numLayers = (normDesc.shape)[1]
# Dimensionality reduction
pca = skd.PCA(n_components=10)
#X = pca.fit_transform(normDesc)
X = normDesc
Y = (modelDF.iloc[:,0]).values
# Split into testing and training sets
X_Train, X_Test, Y_Train, Y_Test = skm.train_test_split(X,Y,test_size=0.05,random_state=None)
# Initialize neural network
mlreg = skn.MLPRegressor(hidden_layer_sizes=(numLayers,),
alpha=0.0001,
batch_size='auto',
learning_rate='constant',
learning_rate_init=0.01,
solver='lbfgs')
bagReg = ske.BaggingRegressor(base_estimator=mlreg,
n_estimators=100,
n_jobs=7,
random_state=None,
warm_start=True,
bootstrap=True)
# Train
#mlreg.fit(X_Train,Y_Train)
bagReg.fit(X_Train,Y_Train)
# Test
Y_pred = bagReg.predict(X_Test)
Y_Train_Pred = bagReg.predict(X_Train)
#Y_pred = mlreg.predict(X_Test)
#Y_Train_Pred = mlreg.predict(X_Train)
# Plot training
score_train = skmet.r2_score(Y_Train,Y_Train_Pred)
score_test = skmet.r2_score(Y_Test,Y_pred)
print("Training: " + str(score_train))
print("Testing: " + str(score_test))
#print("Training: " + str(bagReg.score(X_Train,Y_Train)))
#print("Testing: " + str(bagReg.score(X_Test,Y_Test)))
plotPrediction(Y_Train,Y_Train_Pred,Y_Test,Y_pred)
return modelDF
def consensus_sampling_class(X_Train,Y_Train,X_Test,Y_Test,model,samplingList):
'''
Consensus sampling technique. The idea is to use multiple models to give a better prediction for classification.
INPUT
X_Train: (numpy array) Training features.
Y_Train: (numpy array) Training labels.
X_Test: (numpy array) Testing features.
Y_Test: (numpy array) Testing labels.
model: (scikitlearn model) Model to use for fitting.
samplingList: (list of imblearn sampling methods) Sampling methods to use.
OUTPUT
Y_Pred: (numpy array) Predicted testing labels.
cat_stat: (list of floats) Category statistics.
'''
# Import
import copy
# Variables
weightList = []
Y_Testing_Pred_List = []
Y_Train_C = copy.deepcopy(Y_Train)
Y_Test_C = copy.deepcopy(Y_Test)
Y_Pred = np.zeros(len(Y_Test))
numClasses = len(set(Y_Train))
weightMatrix = np.zeros((len(Y_Test),numClasses))
# Set lowest class to 0
lowClass = min(set(Y_Train_C))
for index in range(len(Y_Train_C)):
Y_Train_C[index] = Y_Train_C[index]-lowClass
for index in range(len(Y_Test_C)):
Y_Test_C[index] = Y_Test_C[index]-lowClass
print('Consensus Sampling...')
# Loop over sampling methods
for smplNum,sampleMethod in enumerate(samplingList):
print("Sampling Method: " + str(smplNum))
# Initialize weight array
weights = np.zeros(numClasses)
if (sampleMethod == ''):
X_Train_Over,Y_Train_Over = X_Train,Y_Train_C
else:
# Fit and sample data
X_Train_Over,Y_Train_Over = sampleMethod.fit_sample(X_Train,Y_Train_C)
# Training
model.fit(X_Train_Over,Y_Train_Over)
# Prediction
#Y_Train_Pred = model.predict(X_Train_Over)
Y_Test_Pred = model.predict(X_Test)
# Calculate confusion matrix
conMat_Test = skmet.confusion_matrix(Y_Test_C,Y_Test_Pred)
print(conMat_Test)
# Calculate weights
for index in range(numClasses):
# Correct classification
weights[index] += conMat_Test[index][index]/np.sum(conMat_Test[index])
# Add results to appropriate lists
weightList.append(weights)
Y_Testing_Pred_List.append(Y_Test_Pred)
# Determine category for each compound from consensus prediction
print('Determining Categories...')
for sampleNum in range(len(samplingList)):
# Loop over testing compounds
for compNum in range(len(Y_Test_C)):
classIdx = Y_Testing_Pred_List[sampleNum][compNum]
weightVal = weightList[sampleNum][classIdx]
weightMatrix[compNum,classIdx] += weightVal
print(weightMatrix)
# Find prediction from consensus
print('Find Prediction...')
for compNum in range(len(Y_Test_C)):
Y_Pred[compNum] = np.argmax(weightMatrix[compNum,:])
# Show confusion matrix
confMat_Pred = skmet.confusion_matrix(Y_Test_C,Y_Pred)
print('Confusion Matrix - Testing')
print(set(Y_Test_C))
print(confMat_Pred)
# Calculate statistics
TPR = []
accuracy = []
totalCmpds = 0
correctCmpds = 0
for classNum in range(numClasses):
correctCmpds += confMat_Pred[classNum,classNum]
totalCmpds += np.sum(confMat_Pred[classNum])
TPR.append(confMat_Pred[classNum,classNum]/np.sum(confMat_Pred[classNum]))
print("Accuracy: " + str(1.0*correctCmpds/totalCmpds))
print("TPR:")
print(TPR)
return Y_Pred
def ROC(X_Train, X_Test, Y_Train, Y_Test):
'''
Generate ROC diagram. This method relies on the automatic generation of weights to fill the parameter space.
'''
# Import
import matplotlib.pyplot as plt
# Variables
coordList = []
metric = lambda x: x
weights_0 = metric(np.linspace(0.0,1e-5,30))
weights_1 = 1-weights_0
dx = 0.00000001/10.0
#weights_0 = np.linspace(0,0.00000001,10)
#weights_0 = np.concatenate([weights_0, np.linspace(0.00000001+dx,0.0000001,10)])
#weights_0 = np.concatenate([weights_0, np.linspace(0.0000001+dx,0.000001,10)])
#weights_1 = 1-weights_0
weights = list(zip(weights_0,weights_1))
for index in range(len(weights)):
# Determine weight class
w = {0:weights[index][0],1:weights[index][1]}
print(w)
# Model
rfClass = ske.RandomForestClassifier(random_state=None,
n_estimators=1000,
max_features='auto',
min_samples_split=2,
oob_score=True,
class_weight=w,
n_jobs=7)
# Fit
rfClass.fit(X_Train,Y_Train)
# Prediction
Y_Pred = rfClass.predict(X_Test)
# Calculate confusion matrix
conMat_Test = skmet.confusion_matrix(Y_Test,Y_Pred)
# Calculate statistics
TPR = conMat_Test[1][1]/np.sum(conMat_Test[1])
FPR = conMat_Test[0][1]/np.sum(conMat_Test[0])
precision = conMat_Test[1][1]/(conMat_Test[0][1]+conMat_Test[1][1])
coordList.append((precision,TPR))
print((precision,TPR))
# Plot
xLine = np.linspace(0,1,1000)
coordList = np.asarray(coordList)
x,y = coordList.transpose()
#print(coordList)
#print('')
#print(x)
#print('')
#print(y)
#plt.plot(xLine,xLine,color='k')
plt.scatter(x,y)
plt.xlim([0,1])
plt.ylim([0,1])
plt.show()
def ModelTesting(inDF):
'''
Testing method.
!!!DEVELOPMENT ONLY!!!
'''
print('MODEL TESTING...')
# Variables
modelDF = inDF.copy()
#print(modelDF.max())
# Normalize descriptors
normDesc = skp.normalize(modelDF.iloc[:,1:])
# Dimensionality reduction
X = normDesc
Y = (modelDF.iloc[:,0]).values
# Split into testing and training sets
X_Train, X_Test, Y_Train, Y_Test = skm.train_test_split(X,Y,
test_size=0.25,
random_state=None,
stratify=None)
# ROC
#ROC(X_Train, X_Test, Y_Train, Y_Test)
# Modeling
rfclass = ske.RandomForestClassifier(random_state=None,
n_estimators=1000,
max_features='auto',
min_samples_split=2,
oob_score=True,
class_weight=None,
n_jobs=7)
# Set up sampling list
samplingList = []
# SMOTE oversampling
smote = imbl_over.SMOTE(random_state=None,
kind='borderline2',
k_neighbors=5,
m_neighbors=10,
n_jobs=7)
samplingList.append(smote)
smote = imbl_over.SMOTE(random_state=None,
kind='borderline1',
k_neighbors=5,
m_neighbors=10,
n_jobs=7)
samplingList.append(smote)
smote = imbl_over.SMOTE(random_state=None,
kind='regular',
k_neighbors=5,
m_neighbors=10,
n_jobs=7)
samplingList.append(smote)
# SMOTEENN
smote = imbl_over.SMOTE(random_state=None,
kind='borderline2',
k_neighbors=5,
m_neighbors=10,
n_jobs=7)
smoteenn = imbl_comb.SMOTEENN(smote=smote)
samplingList.append(smoteenn)
# SMOTE Tomek
smote = imbl_over.SMOTE(random_state=None,
kind='borderline2',
k_neighbors=5,
m_neighbors=10,
n_jobs=7)
smotetomek = imbl_comb.SMOTETomek(smote=smote)
samplingList.append(smotetomek)
# Consensus sampling
consensus_sampling_class(X_Train,Y_Train,X_Test,Y_Test,rfclass,samplingList)
def model_knnra_reg(TrainDF,TestDF,knn=3):
'''
Determine activities using k-nearest neighbors read across.
INPUT
TrainDF: (pandas Data Frame) Training data.
TestDF: (pandas Data Frame) Testing data.
knn: (int) Number of nearest neighbors to use.
OUTPUT
Y_Train: (numpy array) Training activity values.
Y_Pred: (numpy array) Predicted values for the test set (empty list if no test data).
Y_Test: (numpy array) Measured test values.
class_knnra: (knnRARegressor) Fitted k-nearest neighbors read across model.
NOTES
Input dataframes should be structured such that the activity is located in the first column and descriptors/features in all remaining columns.
REFERENCES
Willett, Peter, John M. Barnard, and Geoffrey M. Downs. "Chemical similarity searching." Journal of chemical information and computer sciences 38.6 (1998): 983-996.
'''
# Variables
TrainDF_cpy = TrainDF.copy()
TestDF_cpy = TestDF.copy()
testBool = True
# Only use test sets with data
if ((TestDF.shape)[0] == 0):
testBool = False
# Get numpy arrays for the activity and descriptors
X_Train = (TrainDF_cpy.iloc[:,1:]).values
Y_Train = (TrainDF_cpy.iloc[:,0]).values
# Only initialize for test sets with data
if (testBool):
X_Test = (TestDF_cpy.iloc[:,1:]).values
Y_Test = (TestDF_cpy.iloc[:,0]).values
else:
X_Test = []
Y_Test = []
# Set up model
class_knnra = knnra.knnRARegressor(knn=knn)
# Fitting
print("Fitting...")
class_knnra.fit(TrainDF_cpy)
if (testBool):
Y_Pred = class_knnra.predict(TestDF_cpy)
score_test = skmet.r2_score(Y_Test,Y_Pred)
print("Testing: " + str(score_test))
else:
Y_Pred = []
return Y_Train,Y_Pred,Y_Test,class_knnra
# Main
if (__name__ == '__main__'):
pass
|
# coding=utf-8
__author__ = 'stefano'
import logging
from django.core.management.base import BaseCommand
from django.db.transaction import set_autocommit, commit
from openaid.projects.models import Initiative, Project, Activity
# This one-time procedure gets data from projects and transfers it to Initiative.
# for the logic of this mapping look for "Mappatura Project - initiative" on Google drive
class Command(BaseCommand):
help = 'Fills Initiative selected fields with data from the most recent Project.' \
' Plus links Reports, Problems, Doc, Photos objs to Initiative'
logger = logging.getLogger('openaid')
# maps the field name between project (keys) and initiative (value)
field_map = {
'description_it': 'description_temp_it',
'description_en': 'description_temp_en',
'recipient': 'recipient_temp',
'outcome_it': 'outcome_temp_it',
'outcome_en': 'outcome_temp_en',
'beneficiaries_it': 'beneficiaries_temp_it',
'beneficiaries_en': 'beneficiaries_temp_en',
'beneficiaries_female': 'beneficiaries_female_temp',
'status': 'status_temp',
'is_suspended': 'is_suspended_temp',
'other_financiers_it': 'other_financiers_temp_it',
'other_financiers_en': 'other_financiers_temp_en',
'counterpart_authority_it': 'counterpart_authority_temp_it',
'counterpart_authority_en': 'counterpart_authority_temp_en',
'email': 'email_temp',
'location_it': 'location_temp_it',
'location_en': 'location_temp_en',
'sector': 'purpose_temp',
}
status_order = ['100','75','50','25','0','-']
def get_most_advanced_status(self, project_set):
# gets the most advanced status in the project set
for status in self.status_order:
if project_set.filter(status=status).count() > 0:
return status
return '-'
def update_fields(self, initiative):
project_set = Project.objects.filter(initiative=initiative).order_by('-last_update')
if project_set.count() == 0:
return initiative
# gets the project with the most recent last_update
project_last_update = project_set[0]
project_last_activity = project_set[0]
activity_set = Activity.objects.filter(project__initiative=initiative).order_by('-year')
# gets the project with the most recent connected activity
if activity_set.count() > 0:
project_last_activity_pk = Activity.objects.filter(project__initiative=initiative).order_by('-year').values_list('project',flat=True)[0]
project_last_activity = Project.objects.get(pk=project_last_activity_pk)
# loops on every field that has to be updated and updates if the conditions apply
for project_fieldname, initiative_fieldname in self.field_map.items():
if project_fieldname == 'sector':
field_value = getattr(project_last_activity, project_fieldname)
if field_value is not None:
# only consider sector value that are LEAF nodes, no children
if field_value.get_children().count() != 0:
self.logger.error("Initiative:{}. Cannot copy SECTOR VALUE: {} from Project, this Sector is not a leaf node! SKIP".format(initiative,field_value))
continue
if project_fieldname == 'recipient':
field_value = getattr(project_last_activity, project_fieldname)
elif project_fieldname == 'status':
field_value = self.get_most_advanced_status(project_set)
# STATUS: if the proj.status is == 100 => Almost completed
# translates the value to 90 for Almost completed in Initiative
# because in Initiative there is a status for "COMPLETED' which has value=100
if field_value == '100':
field_value = '90'
else:
field_value = getattr(project_last_update, project_fieldname)
if field_value is not None:
initiative.__setattr__(initiative_fieldname, field_value)
return initiative
def update_related_objects(self, initiative):
# updates documents and photo set getting the photos and docs from the projects
initiative.document_set = initiative.documents()
initiative.photo_set = initiative.photos()
# updates reports and problems with initiative link (project link will later be removed by migrations)
for r in initiative.reports():
r.initiative = initiative
r.save()
for prob in initiative.problems():
prob.initiative = initiative
prob.save()
return initiative
def handle(self, *args, **options):
verbosity = options['verbosity']
if verbosity == '0':
self.logger.setLevel(logging.ERROR)
elif verbosity == '1':
self.logger.setLevel(logging.WARNING)
elif verbosity == '2':
self.logger.setLevel(logging.INFO)
elif verbosity == '3':
self.logger.setLevel(logging.DEBUG)
set_autocommit(False)
self.logger.info(u"Start procedure")
for index, initiative in enumerate(Initiative.objects.all().order_by('code')):
self.logger.debug(u"Update Initiative:'{}'".format(initiative))
initiative = self.update_fields(initiative)
initiative = self.update_related_objects(initiative)
initiative.save()
# commits every N initiatives
if index % 500 == 0:
self.logger.info(u"Reached Initiative:'{}'".format(initiative))
commit()
# final commit
commit()
set_autocommit(True)
self.logger.info(u"Finished updating {} initiatives".format(Initiative.objects.all().count()))
|
# Copyright 2022 Deep Learning on Flink Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from typing import Callable
import tensorflow as tf
from dl_on_flink_tensorflow.tensorflow_context import TFContext
from dl_on_flink_tensorflow.tensorflow_on_flink_ops import FlinkStreamDataSet
logger = logging.getLogger(__file__)
class PrintLayer(tf.keras.layers.Layer):
def __init__(self, log_id, *xargs, **kwargs):
super().__init__(*xargs, **kwargs)
self.log_id = log_id
def call(self, inputs, **kwargs):
tf.print(self.log_id, inputs)
return inputs
def train(node_id, dataset_provider: Callable[[], tf.data.Dataset],
epochs=sys.maxsize):
model = tf.keras.Sequential([PrintLayer(node_id)])
model.compile()
model.fit(dataset_provider(), epochs=epochs)
def map_func(context):
context: TFContext = TFContext(context)
dataset: FlinkStreamDataSet = context.get_tfdataset_from_flink()
train(f"{context.get_node_type()}:{context.get_index()}", lambda: dataset,
epochs=20)
|
import sys
import os
import glob
root_path = os.path.join(os.getcwd(), 'kinect_leap_dataset', 'acquisitions')
p_id = ['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'P9', 'P10', 'P11', 'P12', 'P13', 'P14']
g_id = ['G1', 'G2', 'G3', 'G4', 'G5', 'G6', 'G7', 'G8', 'G9', 'G10']
dest_path = os.path.join(os.getcwd(), 'rgb_kinect_leap_dataset')
if not os.path.exists(dest_path):
os.makedirs(dest_path)
for p in p_id:
for g in g_id:
path = os.path.join(root_path, p, g)
src_image_names = glob.glob(os.path.join(path, '*rgb.png'))
for i in range(len(src_image_names)):
os.rename(src_image_names[i], os.path.join(dest_path, p + '_' + g + '_' + str(i) + '.png'))
|
from django.core.management.base import BaseCommand
from ...models import Deal
import requests
def make_records(deals_data):
for deal in deals_data:
Deal.objects.update_or_create(
dealID = deal['dealID'],
defaults = {
'title': deal['title'],
'storeID': deal['storeID'],
'salePrice': deal['salePrice'],
'normalPrice': deal['normalPrice'],
'savings': deal['savings'],
'steamRatingText': deal['steamRatingText'],
'releaseDate': deal['releaseDate'],
'dealRating': deal['dealRating'],
'thumb': deal['thumb']
}
)
API_ENDPOINT = 'https://www.cheapshark.com/api/1.0/deals?storeID=1,7,11&sortBy=recent&pageSize=16'
class Command(BaseCommand):
def handle(self, *args, **options):
try:
response = requests.get(API_ENDPOINT)
if response.status_code != 200:
# raise Exception("Cannot fetch deals from external api")
self.stdout.write(self.style.ERROR("Cannot fetch deals from external api."))
return
deals_data = response.json()
make_records(deals_data=deals_data)
self.stdout.write(self.style.SUCCESS("Successfully added new deals."))
return
except:
self.stdout.write(self.style.ERROR("An error occurred. Deals not saved."))
return
|
'''
BINARY TREE DISPLAY
Description: A utility to help visualize binary trees using ASCII text.
Author: Thanh Trung Nguyen
thanh.it1995 (at) gmail.com
License: 3-Clause BSD License
'''
from .parsingnode import ParsingNode
from .valueutil import ValueUtil
from .matrixbuffer import MatrixBuffer
from .displayparser import DisplayParser
#
#
class BinTreeDisplay:
'''
Binary tree display. A utility to help visualize binary trees using ASCII text.
'''
#
#
#################################################################
# METHODS (PUBLIC)
#################################################################
#
#
def __init__(self):
self._vutil = ValueUtil()
self._parser = DisplayParser(self._vutil)
self._buffer = None
self._margin_left = 0
self.config(struct_node=('key', 'left', 'right'))
#
#
def get(self, inp_root) -> str:
'''
Gets ASCII display string of tree. Output result can be configured by calling method "config".
Args:
inp_root: Input root of tree.
Returns:
String result. If inp_root is None, return an empty string.
'''
if inp_root is None:
return ''
self._process(inp_root)
assert self._buffer is not None
res = self._buffer.get_str()
self._buffer = None
return res
#
#
def get_lst_rows(self, inp_root) -> list:
'''
Gets ASCII display rows of tree. Output result can be configured by calling method "config".
Args:
inp_root: Input root of tree.
Returns:
List of rows. Each row is a string. If inp_root is None, return an empty list.
'''
if inp_root is None:
return []
self._process(inp_root)
assert self._buffer is not None
res = self._buffer.get_lst_rows()
self._buffer = None
return res
#
#
def config(self, struct_node: tuple = None, line_char: str = '-', line_brsp: int = 1, margin_left: int = 0, float_pre: int = 2):
'''
Configures settings.
Args:
struct_node: Structure information of input node. This is a tuple which comprises 3 elements:
(name_key, name_left_child, name_right_child)
line_char: Display character for the horizontal line connecting left-right branches.
line_brsp: Branch spacing value for the horizontal line connecting left-right branches.
margin_left: Left margin of output string result.
float_pre: Maximum precision of floating-point numbers when displayed.
'''
if struct_node is not None:
if type(struct_node) is not tuple or len(struct_node) != 3:
raise ValueError('Invalid argument: struct_node must be a tuple of 3 elements')
if type(margin_left) is not int or margin_left < 0:
raise ValueError('Invalid argument: margin_left must be a non-negative integer')
if type(line_char) is not str or len(line_char) != 1:
raise ValueError('Invalid argument: line_char must be a string of length 1')
if type(line_brsp) is not int or line_brsp < 1:
raise ValueError('Invalid argument: line_brsp must be a positive integer')
if type(float_pre) is not int or float_pre < 0:
raise ValueError('Invalid argument: float_pre must be a non-negative integer')
# Finish arguments validation
if struct_node is not None:
self._parser.config_struct_input_node(struct_node[0], struct_node[1], struct_node[2])
self._parser.config_line(line_char, line_brsp)
self._vutil.set_float_precision(float_pre)
self._margin_left = margin_left
#
#
#################################################################
# METHODS (PROTECTED)
#################################################################
#
#
def _process(self, inp_root):
'''
Backend function for "get" method.
'''
height_inp_root = self._parser.get_height(inp_root)
height = height_inp_root * 3 - 2
parsing_tree = self._parser.build_tree(inp_root)
self._buffer = MatrixBuffer(parsing_tree.width + self._margin_left, height)
self._fill_buffer(parsing_tree, 1, self._margin_left)
self._parser.destroy_tree(parsing_tree)
#
#
def _fill_buffer(self, node: ParsingNode, depth: int, margin_global: int):
if node is None:
return
margin_key = margin_global + node.margin_key
margin_left = margin_global + node.margin_left_child
margin_right = margin_global + node.margin_right_child
margin_global_right = margin_key + 1 + node.size_right_line
self._buffer.fill(margin_key, depth * 3 - 3, node.key)
if node.left is not None or node.right is not None:
self._buffer.fill(margin_key, depth * 3 - 2, '|')
if node.left is not None:
self._fill_line('left', node.left.key, depth * 3 - 1, margin_left, margin_key)
self._fill_buffer(node.left, depth + 1, margin_global)
if node.right is not None:
self._fill_line('right', node.right.key, depth * 3 - 1, margin_key, margin_right)
self._fill_buffer(node.right, depth + 1, margin_global_right)
#
#
def _fill_line(self, direction: str, child_key: str, y: int, margin_a: int, margin_b: int):
if direction not in ('left', 'right'):
raise ValueError('Invalid argument: direction')
if direction == 'left':
margin_a += len(child_key) - 1
self._buffer.fill_line(self._parser.line_char, y, margin_a, margin_b)
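#
#
# Illustrative usage sketch (hedged, not part of the original module). `Node` is a
# hypothetical caller-side class whose attribute names match the default
# struct_node configuration ('key', 'left', 'right').
#
#   class Node:
#       def __init__(self, key, left=None, right=None):
#           self.key, self.left, self.right = key, left, right
#
#   root = Node(1, Node(2), Node(3))
#   print(BinTreeDisplay().get(root))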
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for `gcloud network-management`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from googlecloudsdk.core import exceptions
class NetworkManagementError(exceptions.Error):
"""Top-level exception for all Network Management errors."""
class InvalidInputError(NetworkManagementError):
"""Exception for invalid input."""
def AppendLocationsGlobalToParent(unused_ref, unused_args, request):
"""Add locations/global to parent path, since it isn't automatically populated by apitools."""
request.parent += "/locations/global"
return request
def UpdateOperationRequestNameVariable(unused_ref, unused_args, request):
request.name += "/locations/global"
return request
def AddFieldToUpdateMask(field, patch_request):
"""Adds name of field to update mask."""
update_mask = patch_request.updateMask
if not update_mask:
patch_request.updateMask = field
elif field not in update_mask:
patch_request.updateMask = update_mask + "," + field
return patch_request
def ClearSingleEndpointAttr(patch_request, endpoint_type, endpoint_name):
"""Checks if given endpoint can be removed from Connectivity Test and removes it."""
test = patch_request.connectivityTest
endpoint = getattr(test, endpoint_type)
endpoint_fields = {
"instance", "ipAddress", "gkeMasterCluster", "cloudSqlInstance"
}
non_empty_endpoint_fields = 0
for field in endpoint_fields:
if getattr(endpoint, field, None):
non_empty_endpoint_fields += 1
if (non_empty_endpoint_fields > 1 or
not getattr(endpoint, endpoint_name, None)):
setattr(endpoint, endpoint_name, "")
setattr(test, endpoint_type, endpoint)
patch_request.connectivityTest = test
return AddFieldToUpdateMask(endpoint_type + "." + endpoint_name,
patch_request)
else:
raise InvalidInputError(
"Invalid Connectivity Test. At least one of --{endpoint_type}-instance, --{endpoint_type}-ip-address, --{endpoint_type}-gke_master_cluster or --{endpoint_type}-cloud_sql_instance must be specified."
.format(endpoint_type=endpoint_type))
def ClearEndpointAttrs(unused_ref, args, patch_request):
"""Handles clear_source_* and clear_destination_* flags."""
flags_and_endpoints = [
("clear_source_instance", "source", "instance"),
("clear_source_ip_address", "source", "ipAddress"),
("clear_source_gke_master_cluster", "source", "gkeMasterCluster"),
("clear_source_cloud_sql_instance", "source", "cloudSqlInstance"),
("clear_destination_instance", "destination", "instance"),
("clear_destination_ip_address", "destination", "ipAddress"),
("clear_destination_gke_master_cluster", "destination",
"gkeMasterCluster"),
("clear_destination_cloud_sql_instance", "destination",
"cloudSqlInstance"),
]
for flag, endpoint_type, endpoint_name in flags_and_endpoints:
if args.IsSpecified(flag):
patch_request = ClearSingleEndpointAttr(
patch_request,
endpoint_type,
endpoint_name,
)
return patch_request
def ClearSingleEndpointAttrBeta(patch_request, endpoint_type, endpoint_name):
"""Checks if given endpoint can be removed from Connectivity Test and removes it."""
test = patch_request.connectivityTest
endpoint = getattr(test, endpoint_type)
endpoint_fields = {
"instance", "ipAddress", "gkeMasterCluster", "cloudSqlInstance"
}
non_empty_endpoint_fields = 0
for field in endpoint_fields:
if getattr(endpoint, field, None):
non_empty_endpoint_fields += 1
if (non_empty_endpoint_fields > 1 or
not getattr(endpoint, endpoint_name, None)):
setattr(endpoint, endpoint_name, "")
setattr(test, endpoint_type, endpoint)
patch_request.connectivityTest = test
return AddFieldToUpdateMask(endpoint_type + "." + endpoint_name,
patch_request)
else:
raise InvalidInputError(
"Invalid Connectivity Test. At least one of --{endpoint_type}-instance, --{endpoint_type}-ip-address, --{endpoint_type}-gke_master_cluster or --{endpoint_type}-cloud_sql_instance must be specified."
.format(endpoint_type=endpoint_type))
def ClearEndpointAttrsBeta(unused_ref, args, patch_request):
"""Handles clear_source_* and clear_destination_* flags."""
flags_and_endpoints = [
("clear_source_instance", "source", "instance"),
("clear_source_ip_address", "source", "ipAddress"),
("clear_source_gke_master_cluster", "source", "gkeMasterCluster"),
("clear_source_cloud_sql_instance", "source", "cloudSqlInstance"),
("clear_destination_instance", "destination", "instance"),
("clear_destination_ip_address", "destination", "ipAddress"),
("clear_destination_gke_master_cluster", "destination",
"gkeMasterCluster"),
("clear_destination_cloud_sql_instance", "destination",
"cloudSqlInstance"),
]
for flag, endpoint_type, endpoint_name in flags_and_endpoints:
if args.IsSpecified(flag):
patch_request = ClearSingleEndpointAttrBeta(
patch_request,
endpoint_type,
endpoint_name,
)
return patch_request
def ValidateInstanceNames(unused_ref, args, request):
"""Checks if all provided instances are in valid format."""
flags = [
"source_instance",
"destination_instance",
]
instance_pattern = re.compile(
r"projects/(?:[a-z][a-z0-9-\.:]*[a-z0-9])/zones/[-\w]+/instances/[-\w]+"
)
for flag in flags:
if args.IsSpecified(flag):
instance = getattr(args, flag)
if not instance_pattern.match(instance):
raise InvalidInputError(
"Invalid value for flag {}: {}\n"
"Expected instance in the following format:\n"
" projects/my-project/zones/zone/instances/my-instance".format(
flag, instance))
return request
def ValidateNetworkURIs(unused_ref, args, request):
"""Checks if all provided networks are in valid format."""
flags = [
"source_network",
"destination_network",
]
network_pattern = re.compile(
r"projects/(?:[a-z][a-z0-9-\.:]*[a-z0-9])/global/networks/[-\w]+")
for flag in flags:
if args.IsSpecified(flag):
network = getattr(args, flag)
if not network_pattern.match(network):
raise InvalidInputError(
"Invalid value for flag {}: {}\n"
"Expected network in the following format:\n"
" projects/my-project/global/networks/my-network".format(
flag, network))
return request
def ValidateGKEMasterClustersURIs(unused_ref, args, request):
"""Checks if all provided GKE Master Clusters URIs are in correct format."""
flags = [
"source_gke_master_cluster",
"destination_gke_master_cluster",
]
instance_pattern = re.compile(
r"projects/(?:[a-z][a-z0-9-\.:]*[a-z0-9])/(zones|locations)/[-\w]+/clusters/[-\w]+"
)
for flag in flags:
if args.IsSpecified(flag):
cluster = getattr(args, flag)
if not instance_pattern.match(cluster):
raise InvalidInputError(
"Invalid value for flag {}: {}\n"
"Expected Google Kubernetes Engine master cluster in the following format:\n"
" projects/my-project/location/location/clusters/my-cluster"
.format(flag, cluster))
return request
def ValidateCloudSQLInstancesURIs(unused_ref, args, request):
"""Checks if all provided Cloud SQL Instances URIs are in correct format."""
flags = [
"source_cloud_sql_instance",
"destination_cloud_sql_instance",
]
instance_pattern = re.compile(
r"projects/(?:[a-z][a-z0-9-\.:]*[a-z0-9])/instances/[-\w]+"
)
for flag in flags:
if args.IsSpecified(flag):
instance = getattr(args, flag)
if not instance_pattern.match(instance):
raise InvalidInputError(
"Invalid value for flag {}: {}\n"
"Expected Cloud SQL instance in the following format:\n"
" projects/my-project/instances/my-instance".format(
flag, instance))
return request
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Graph runtime factory."""
import warnings
from tvm._ffi.base import string_types
from tvm._ffi.registry import get_global_func
from tvm.runtime import ndarray
class GraphRuntimeFactoryModule(object):
"""Graph runtime factory module.
This is a module of graph runtime factory
Parameters
----------
graph_json_str : str
The graph to be deployed in json format output by graph compiler.
The graph can contain operator(tvm_op) that points to the name of
PackedFunc in the libmod.
libmod : tvm.Module
The module of the corresponding function
libmod_name: str
The name of module
params : dict of str to NDArray
The parameters of module
"""
def __init__(self, graph_json_str, libmod, libmod_name, params):
assert isinstance(graph_json_str, string_types)
fcreate = get_global_func("tvm.graph_runtime_factory.create")
args = []
for k, v in params.items():
args.append(k)
args.append(ndarray.array(v))
self.module = fcreate(graph_json_str, libmod, libmod_name, *args)
self.graph_json = graph_json_str
self.lib = libmod
self.libmod_name = libmod_name
self.params = params
self.iter_cnt = 0
def export_library(self, file_name, fcompile=None, addons=None, **kwargs):
return self.module.export_library(file_name, fcompile, addons, **kwargs)
# Sometimes we want to get params explicitly.
# For example, we want to save its params value to
# an independent file.
def get_params(self):
return self.params
def get_json(self):
return self.graph_json
def get_lib(self):
return self.lib
def __getitem__(self, item):
return self.module.__getitem__(item)
def __iter__(self):
warnings.warn(
"legacy graph runtime behaviour of producing json / lib / params will be "
"removed in the next release ",
DeprecationWarning, 2)
return self
def __next__(self):
if self.iter_cnt > 2:
raise StopIteration
objs = [self.graph_json, self.lib, self.params]
obj = objs[self.iter_cnt]
self.iter_cnt += 1
return obj
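# Illustrative sketch (hedged, not part of the original module) of the legacy
# unpacking behaviour implemented by __iter__/__next__ above, assuming `factory` is
# an already-constructed GraphRuntimeFactoryModule; doing this triggers the
# DeprecationWarning defined above.
#
#   graph_json, lib, params = factory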
|
# -*- coding: utf-8 -*-
"""Utilities for `BandsData` nodes."""
def get_highest_occupied_band(bands, threshold=0.005):
"""Retun the index of the highest-occupied molecular orbital.
The expected structure of the bands node is the following:
* an array called `occupations`
* with 3 dimensions if spin polarized, otherwise 2 dimensions
* dimensions loop over: spin channel, kpoints, bands
.. note::
The threshold is used both as a limit below which a band is considered as unoccupied as well as a measure of
numerical noise in the occupancies. The first band to have an occupancy below the threshold at all kpoints is
marked as the LUMO. All subsequent bands are then expected to have an occupation that is less than twice the
threshold. The reason for the factor two is that in the worst case when the LUMO has an occupation exactly equal
to the threshold and a subsequent has twice that value, we can still consider that as not to break the
requirement of all bands above the LUMO to be empty. It can be considered as having the exact same value (equal
to the threshold) plus a numerical error also equal to the threshold.
:param bands: the `BandsData` node
:param threshold: raise a `ValueError` if the last band has an occupation above this threshold at any k-point
:raises ValueError: if `bands` is not a `BandsData` node
:raises ValueError: if `bands` does not contain the array `occupations`
:raises ValueError: if `occupations` array has an invalid shape
:raises ValueError: if any occupation above LUMO exceeds `2 * threshold`
:raises ValueError: if the last band has an occupation above the threshold
"""
from aiida.orm import BandsData
from numpy import shape
if not isinstance(bands, BandsData):
raise ValueError(f'bands should be a `{BandsData.__name__}` node')
try:
occupations = bands.get_array('occupations')
except KeyError as exception:
raise ValueError('BandsData does not contain an `occupations` array') from exception
lumo_indices = []
# For spin-polarized calculations the `occupations` array should have 3 dimensions, otherwise just 2.
if len(shape(occupations)) == 3:
spin_channels = occupations
elif len(shape(occupations)) == 2:
spin_channels = [occupations]
else:
raise ValueError('invalid shape for `occupations` array')
for l, spin_channel in enumerate(spin_channels): # pylint: disable=invalid-name
for k, kpoint in enumerate(spin_channel):
lumo_index = None
lumo_occupation = None
for n, occupation in enumerate(kpoint): # pylint: disable=invalid-name
if lumo_index is not None:
# If the occupation of this band exceeds twice that of the threshold, it is considered to be not
# empty and since it comes after the LUMO, we raise
if occupation > 2 * threshold:
warning_args = [occupation, n, lumo_occupation, lumo_index, l, k]
raise ValueError('Occupation of {} at n={} after lumo lkn<{},{},{}>'.format(*warning_args))
elif occupation < threshold:
lumo_index = n
lumo_occupation = occupation
lumo_indices.append(lumo_index)
else: # pylint: disable=useless-else-on-loop
if kpoint[-1] >= threshold:
warning_args = [kpoint[-1], l, k, len(kpoint)]
raise ValueError('Occupation of {} at last band lkn<{},{},{}>'.format(*warning_args))
# Note that the LUMO band indices are 0-indexed, so the actual band number is one higher, but the band number of the
# HOMO is one lower than that, which therefore corresponds exactly to the 0-indexed LUMO index
homo = max(lumo_indices)
return homo
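# Illustrative usage sketch (hedged, not part of the original module). `bands_node`
# is assumed to be a BandsData node carrying an `occupations` array, e.g. the output
# bands of a finished calculation; per the note above, the returned index equals the
# band number of the HOMO.
#
#   homo = get_highest_occupied_band(bands_node, threshold=0.005)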
|
import time
from discord.ext import commands
import os
import psutil
import platform
uname = platform.uname()
class Status(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(name="status", aliases=["stats", "dash", "dashboard", "übersicht", "performance", "stat"])
async def mc_command(self, ctx):
pid = os.getpid()
process = psutil.Process(pid)
process.create_time()
memoryuse = process.memory_full_info()
await ctx.send(f"```prolog\n"
f"Discord Stuff:\n"
f"Servers: {len(self.client.guilds)}\n"
f"Users: {len(set(self.client.get_all_members()))}\n"
"-------\n"
f"Bot Technical:\n"
f"RAM-Usage: {memoryuse.rss / 1024000} MB \n"
f'Running Since: {time.strftime("%d.%m.%Y %H:%M", time.localtime(process.create_time()))}\n'
f"Websocket Latency: {round(self.client.latency * 1000)}ms\n"
f"Shard Count: {len(list(self.client.shards))}\n"
"-------\n"
f"System:\n"
f"CPU-Usage: {psutil.cpu_percent()}%\n"
f"RAM-Usage : {psutil.virtual_memory()[2]}%\n"
f"OS: {uname.system} {uname.version}\n"
f"Systemarchitecture: {uname.machine}\n"
f"```")
def setup(client):
client.add_cog(Status(client))
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkelasticsearch.endpoint import endpoint_data
class CancelDeletionRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'elasticsearch', '2017-06-13', 'CancelDeletion','elasticsearch')
self.set_uri_pattern('/openapi/instances/[InstanceId]/actions/cancel-deletion')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_InstanceId(self):
return self.get_path_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_path_param('InstanceId',InstanceId)
def get_clientToken(self):
return self.get_query_params().get('clientToken')
def set_clientToken(self,clientToken):
self.add_query_param('clientToken',clientToken)
|
from django.contrib.auth import authenticate
from django.db import IntegrityError
from django.db.models import Q
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from account.models import User
from account.serializers import UserAccountSerializer
from account.task import send_welcome_mail
from middleware.response import JSONResponse
class RegisterUserProfileView(APIView):
def post(self, request):
""" this method is used to register user profile """
username = request.data['username']
email_id = request.data['email_id']
password = request.data['password']
profile_picture_url = request.data['profile_picture_url']
if 'first_name' in request.data:
first_name = request.data['first_name'].strip()
else:
first_name = None
if 'last_name' in request.data:
last_name = request.data['last_name'].strip()
else:
last_name = None
# check to see if user with the given email or username already exist
# or the combination is in RESERVED_USERNAME_EMAIL_LIST
if User.objects.filter(Q(username=username) | Q(email_id=email_id)).exists():
response = {
'message': 'Username or Email Already Taken',
'status': False,
'result': None
}
return JSONResponse(response, status=status.HTTP_409_CONFLICT)
User.objects.create_user(username=username, email_id=email_id, first_name=first_name,
last_name=last_name,
profile_picture_url=profile_picture_url, password=password)
send_welcome_mail.delay(username, email_id)
response = {
'message': 'user created successfully',
'status': True,
'result': None
}
return JSONResponse(response, status=status.HTTP_201_CREATED)
class LoginView(APIView):
def post(self, request):
username = request.data['username']
password = request.data['password']
user = authenticate(username=username, password=password)
if user:
result = UserAccountSerializer(instance=user).data
result['token'] = Token.objects.get_or_create(user=user)[0].key
response = {
'message': 'User Logged In Successfully',
'status': True,
'result': result
}
return JSONResponse(response)
else:
response = {
'message': 'Provided Credentials Are Wrong',
'status': False,
'result': None
}
return JSONResponse(response, status=status.HTTP_400_BAD_REQUEST)
class LogoutView(APIView):
permission_classes = (IsAuthenticated,)
def post(self, request):
user = request.user
Token.objects.filter(user=user).delete()
response = {
'message': 'User Logged Out Successfully',
'status': True,
'result': None
}
return JSONResponse(response)
class UserProfileView(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request):
user_id = request.query_params['user_id']
user = User.objects.filter(id=user_id).first()
if user:
result = UserAccountSerializer(instance=user).data
response = {
'message': 'User Details Fetched Successfully',
'status': True,
'result': result
}
return JSONResponse(response)
else:
response = {
'message': 'No User Exist With The Given Id',
'status': False,
'result': None
}
return JSONResponse(response, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
def post(self, request):
user_details = request.data['user_details']
user = request.user
try:
for attr, value in user_details.items():
setattr(user, attr, value)
user.save()
response = {
'message': 'User Details Updated Successfully',
'status': True,
'result': None
}
return JSONResponse(response)
except IntegrityError:
response = {
'message': 'Username or Email Already Taken',
'status': False,
'result': None
}
return JSONResponse(response, status=status.HTTP_409_CONFLICT)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=40
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=3
c.append(cirq.H.on(input_qubit[1])) # number=4
c.append(cirq.H.on(input_qubit[2])) # number=5
c.append(cirq.H.on(input_qubit[3])) # number=6
c.append(cirq.H.on(input_qubit[4])) # number=21
for i in range(2):
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[1])) # number=18
c.append(cirq.H.on(input_qubit[2])) # number=19
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.H.on(input_qubit[0])) # number=34
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=35
c.append(cirq.H.on(input_qubit[0])) # number=36
c.append(cirq.X.on(input_qubit[1])) # number=29
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=30
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=37
c.append(cirq.X.on(input_qubit[0])) # number=38
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=39
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=32
c.append(cirq.X.on(input_qubit[1])) # number=33
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=27
c.append(cirq.X.on(input_qubit[1])) # number=10
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=22
c.append(cirq.X.on(input_qubit[2])) # number=23
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=24
c.append(cirq.H.on(input_qubit[3])) # number=28
c.append(cirq.X.on(input_qubit[3])) # number=12
c.append(cirq.X.on(input_qubit[0])) # number=13
c.append(cirq.X.on(input_qubit[1])) # number=14
c.append(cirq.X.on(input_qubit[2])) # number=15
c.append(cirq.X.on(input_qubit[3])) # number=16
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 5
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq881.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Indicators")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Data import *
from QuantConnect.Algorithm import *
from QuantConnect.Indicators import *
from System.Collections.Generic import List
import decimal as d
### <summary>
### In this algorithm we demonstrate how to perform some technical analysis as
### part of your coarse fundamental universe selection
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="indicators" />
### <meta name="tag" content="universes" />
### <meta name="tag" content="coarse universes" />
class EmaCrossUniverseSelectionAlgorithm(QCAlgorithm):
def Initialize(self):
'''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must be initialized.'''
self.SetStartDate(2010,01,01) #Set Start Date
self.SetEndDate(2015,01,01) #Set End Date
self.SetCash(100000) #Set Strategy Cash
self.UniverseSettings.Resolution = Resolution.Daily
self.UniverseSettings.Leverage = 2
self.coarse_count = 10
self.averages = { };
# this add universe method accepts two parameters:
# - coarse selection function: accepts an IEnumerable<CoarseFundamental> and returns an IEnumerable<Symbol>
self.AddUniverse(self.CoarseSelectionFunction)
# sort the data by daily dollar volume and take the top 'NumberOfSymbols'
def CoarseSelectionFunction(self, coarse):
        # We are going to use a dictionary to refer to the object that will keep the moving averages
for cf in coarse:
if cf.Symbol not in self.averages:
self.averages[cf.Symbol] = SymbolData(cf.Symbol)
# Updates the SymbolData object with current EOD price
avg = self.averages[cf.Symbol]
avg.update(cf.EndTime, cf.Price)
# Filter the values of the dict: we only want up-trending securities
        values = list(filter(lambda x: x.is_uptrend, self.averages.values()))
# Sorts the values of the dict: we want those with greater difference between the moving averages
values.sort(key=lambda x: x.scale, reverse=True)
for x in values[:self.coarse_count]:
self.Log('symbol: ' + str(x.symbol.Value) + ' scale: ' + str(x.scale))
# we need to return only the symbol objects
return [ x.symbol for x in values[:self.coarse_count] ]
# this event fires whenever we have changes to our universe
def OnSecuritiesChanged(self, changes):
# liquidate removed securities
for security in changes.RemovedSecurities:
if security.Invested:
self.Liquidate(security.Symbol)
        # we want 10% allocation in each security in our universe
for security in changes.AddedSecurities:
self.SetHoldings(security.Symbol, 0.1)
class SymbolData(object):
def __init__(self, symbol):
self.symbol = symbol
self.tolerance = d.Decimal(1.01)
self.fast = ExponentialMovingAverage(100)
self.slow = ExponentialMovingAverage(300)
self.is_uptrend = False
self.scale = 0
def update(self, time, value):
if self.fast.Update(time, value) and self.slow.Update(time, value):
fast = self.fast.Current.Value
slow = self.slow.Current.Value
self.is_uptrend = fast > slow * self.tolerance
if self.is_uptrend:
self.scale = (fast - slow) / ((fast + slow) / d.Decimal(2.0))
|
import os
import torch
import shutil
from collections import OrderedDict
import logging
import numpy as np
def load_pretrained_models(model, pretrained_model, phase, ismax=True): # ismax means max best
if ismax:
best_value = -np.inf
else:
best_value = np.inf
epoch = -1
if pretrained_model:
if os.path.isfile(pretrained_model):
logging.info("===> Loading checkpoint '{}'".format(pretrained_model))
checkpoint = torch.load(pretrained_model)
try:
best_value = checkpoint['best_value']
if best_value == -np.inf or best_value == np.inf:
show_best_value = False
else:
show_best_value = True
            except KeyError:
                show_best_value = False
model_dict = model.state_dict()
ckpt_model_state_dict = checkpoint['state_dict']
            # rename ckpt keys to avoid mismatches caused by the multi-GPU 'module.' prefix
            is_model_multi_gpus = list(model_dict)[0].startswith('module.')
            is_ckpt_multi_gpus = list(ckpt_model_state_dict)[0].startswith('module.')
if not (is_model_multi_gpus == is_ckpt_multi_gpus):
temp_dict = OrderedDict()
for k, v in ckpt_model_state_dict.items():
if is_ckpt_multi_gpus:
name = k[7:] # remove 'module.'
else:
name = 'module.'+k # add 'module'
temp_dict[name] = v
# load params
ckpt_model_state_dict = temp_dict
model_dict.update(ckpt_model_state_dict)
            model.load_state_dict(model_dict)
if show_best_value:
logging.info("The pretrained_model is at checkpoint {}. \t "
"Best value: {}".format(checkpoint['epoch'], best_value))
else:
logging.info("The pretrained_model is at checkpoint {}.".format(checkpoint['epoch']))
if phase == 'train':
epoch = checkpoint['epoch']
else:
epoch = -1
else:
            raise FileNotFoundError("===> No checkpoint found at '{}'".format(pretrained_model))
else:
logging.info('===> No pre-trained model')
return model, best_value, epoch
def load_pretrained_optimizer(pretrained_model, optimizer, scheduler, lr, use_ckpt_lr=True):
if pretrained_model:
if os.path.isfile(pretrained_model):
checkpoint = torch.load(pretrained_model)
if 'optimizer_state_dict' in checkpoint.keys():
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
if 'scheduler_state_dict' in checkpoint.keys():
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
if use_ckpt_lr:
try:
lr = scheduler.get_lr()[0]
                    except Exception:
                        pass  # keep the lr passed in if the scheduler cannot report one
return optimizer, scheduler, lr
def save_checkpoint(state, is_best, save_path, postname):
    filename = '{}/{}_ckpt_miou{}_epoch{}_loss{}.pth'.format(
        save_path, postname, float(state['test_miou']), int(state['epoch']), float(state['test_loss']))
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, '{}/{}_model_best.pth'.format(save_path, postname))
def change_ckpt_dict(model, optimizer, scheduler, opt):
for _ in range(opt.epoch):
scheduler.step()
is_best = (opt.test_value < opt.best_value)
opt.best_value = min(opt.test_value, opt.best_value)
model_cpu = {k: v.cpu() for k, v in model.state_dict().items()}
# optim_cpu = {k: v.cpu() for k, v in optimizer.state_dict().items()}
save_checkpoint({
'epoch': opt.epoch,
'state_dict': model_cpu,
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict(),
'best_value': opt.best_value,
}, is_best, opt.save_path, opt.post)
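# Hedged usage sketch, added for illustration only -- not part of the original
# module. It assumes the caller already has a model, optimizer and scheduler,
# plus a checkpoint path; `demo_resume` and its arguments are hypothetical names.
def demo_resume(model, optimizer, scheduler, ckpt_path, lr=1e-3):
    """Resume training state from ckpt_path using the helpers above."""
    model, best_value, start_epoch = load_pretrained_models(model, ckpt_path, phase='train')
    optimizer, scheduler, lr = load_pretrained_optimizer(ckpt_path, optimizer, scheduler, lr)
    return model, optimizer, scheduler, lr, best_value, start_epoch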
|
__author__ = 'Casey Bajema'
import logging
from jcudc24ingesterapi import typed, APIDomainObject, ValidationError
from jcudc24ingesterapi.schemas.data_types import DataType
logger = logging.getLogger(__name__)
class TypedList(list):
def __init__(self, valid_type):
self.valid_type = valid_type
def append(self, item):
if not isinstance(item, self.valid_type):
raise TypeError, 'item is not of type %s' % self.valid_type
super(TypedList, self).append(item) #append the item to itself (the list)
class SchemaAttrDict(dict):
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
def __setitem__(self, key, value):
if key != value.name:
raise ValueError("The provided key and the fields name do not match")
# optional processing here
super(SchemaAttrDict, self).__setitem__(key, value)
def update(self, *args, **kwargs):
if args:
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
other = dict(args[0])
for key in other:
self[key] = other[key]
for key in kwargs:
self[key] = kwargs[key]
class Schema(APIDomainObject):
"""
Base class for all calibration schemas that provide a known type.
All calibration schemas that will be used need to be setup when creating the dataset.
Each calibration schema has a 1:1 relationship with each data_entry. This means that there can only
be 1 QualitySchema calibration for a specific data_entry but there may be a different calibration
(sub-classed from _CalibrationSchema) added to the same data_entry. Sending a duplicate calibration
will overwrite previous values.
"""
id = typed("_id", int)
version = typed("_version", int)
name = typed("_name", (str, unicode) )
repository_id = typed("_repository_id", (str))
def __init__(self, name=None):
self.name = name
self.__attrs = SchemaAttrDict()
self.__extends = TypedList(int)
def addAttr(self, data_type):
if not isinstance(data_type, DataType):
raise ValueError("Not a subclass of DataType")
self.attrs[data_type.name] = data_type
@property
def attrs(self):
return self.__attrs
@property
def extends(self):
return self.__extends
@extends.setter
def extends(self, values):
"""Check that the list is valid before replacing it"""
tmp = TypedList(int)
for v in values:
tmp.append(v)
self.__extends = tmp
def validate(self):
valid = []
if self.name == None:
valid.append(ValidationError("name", "Name must be set"))
return valid
class ConcreteSchema(object):
"""The concrete schema composites all the individual schemas into
a domain object.
"""
def __init__(self, schemas=None):
if schemas == None: schemas = []
self.__attrs = SchemaAttrDict()
# Add all the passed schemas to the concrete schema
for schema in schemas:
self.add(schema)
def add(self, schema):
"""Add all the attributes to the concrete schema's list"""
for attr in schema.attrs:
if attr in self.__attrs: raise ValueError("Duplicate attributes: " + attr)
self.__attrs[attr] = schema.attrs[attr]
@property
def attrs(self):
return self.__attrs
|
import ctypes
class Struct(ctypes.Structure):
"""
This class exists to add common python functionality to ctypes.Structure.
This includes:
Value equality via the `==` operator.
Showing contents in `repr(struct)`.
"""
def __eq__(self, other):
"""
Note: if your Struct contains pointers, pointer equality is used
as opposed to following the value at the pointer.
"""
if type(self) != type(other):
return False
for field_name, field_type in self._fields_:
if getattr(self, field_name) != getattr(other, field_name):
return False
return True
def __repr__(self):
fields_string = ', '.join(
f'{field_name}: {repr(getattr(self, field_name))}'
for field_name, field_type in self._fields_
)
return f'{self.__class__.__name__}<{fields_string}>'
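# Hedged usage sketch, added for illustration only -- not part of the original
# module. `Point` is a hypothetical Struct subclass used to show the value
# equality and repr behaviour described in the docstrings above.
class Point(Struct):
    _fields_ = [('x', ctypes.c_int), ('y', ctypes.c_int)]
if __name__ == '__main__':
    a, b = Point(1, 2), Point(1, 2)
    assert a == b                          # compared field by field, not by identity
    assert repr(a) == 'Point<x: 1, y: 2>'  # repr shows the field contents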
|
from django.contrib import admin
from .models import Image, Profile, Comments
# Register your models here.
admin.site.register(Image)
admin.site.register(Profile)
admin.site.register(Comments)
|
import os
import pytest
from locuspocus import Locus, Loci
from locuspocus.exceptions import MissingLocusError, StrandError
import minus80 as m80
"""
Unit tests for Loci
"""
NUM_GENES = 39656
def test_init(testRefGen):
    "the testRefGen fixture should build successfully"
    assert testRefGen is not None
def test_len(testRefGen):
"quick check to make sure that the refgen reports the correct number of features"
assert len(testRefGen) == NUM_GENES
def test_get_locus_by_LID(testRefGen):
"make sure that fetching a locus by its LID yields the same locus"
rand_locus = testRefGen.rand()
assert rand_locus == testRefGen._get_locus_by_LID(rand_locus._LID)
def test_get_locus_by_LID_missing(testRefGen):
"make sure that fetching a locus by its LID yields the same locus"
with pytest.raises(MissingLocusError):
testRefGen._get_locus_by_LID(-1)
def test_get_LID_missing(testRefGen):
"Make sure that fetching a locus by LID returns the same locus"
with pytest.raises(MissingLocusError):
assert testRefGen._get_LID(Locus("na", 1, 1))
def test_get_LID_from_name_missing(testRefGen):
"Make sure that fetching a locus by LID returns the same locus"
with pytest.raises(MissingLocusError):
assert testRefGen._get_LID("DoesNotExist")
def test_add_locus():
"add a locus to an empty refloci db and then retrieve it"
if m80.exists("Loci", "empty"):
m80.delete("Loci", "empty")
empty = Loci("empty")
assert len(empty) == 0
empty.add_locus(Locus("1", 1, 1, feature_type="gene", attrs={"foo": "bar"}))
assert len(empty) == 1
m80.delete("Loci", "empty")
def test_add_locus_with_attrs():
"add a locus to an empty refloci db and then retrieve it"
if m80.exists("Loci", "empty"):
m80.delete("Loci", "empty")
empty = Loci("empty")
assert len(empty) == 0
LID = empty.add_locus(Locus("1", 1, 1, feature_type="gene", attrs={"foo": "bar"}))
assert len(empty) == 1
l = empty._get_locus_by_LID(LID)
assert l["foo"] == "bar"
m80.delete("Loci", "empty")
def test_nuke_tables():
"add a locus to an empty refloci db and then retrieve it"
if m80.exists("Loci", "empty"):
m80.delete("Loci", "empty")
empty = Loci("empty")
assert len(empty) == 0
x = Locus("1", 1, 1, feature_type="gene", attrs={"foo": "bar"})
y = Locus("1", 2, 2, feature_type="exon", attrs={"baz": "bat"})
x.add_sublocus(y)
empty.add_locus(x)
assert len(empty) == 1
empty._nuke_tables()
assert len(empty) == 0
m80.delete("Loci", "empty")
def test_add_locus_with_subloci():
"add a locus to an empty refloci db and then retrieve it"
if m80.exists("Loci", "empty"):
m80.delete("Loci", "empty")
empty = Loci("empty")
assert len(empty) == 0
x = Locus("1", 1, 1, feature_type="gene", attrs={"foo": "bar"})
y = Locus("1", 2, 2, feature_type="exon", attrs={"baz": "bat"})
x.add_sublocus(y)
LID = empty.add_locus(x)
assert len(empty) == 1
l = empty._get_locus_by_LID(LID)
assert l["foo"] == "bar"
assert len(l.subloci) == 1
m80.delete("Loci", "empty")
def test_import_gff(testRefGen):
"test importing loci from a GFF file"
# as the testRefGen fixture is built from a GFF
# this will only pass if it is built
assert testRefGen
# def test_contains_true(testRefGen):
# 'get a random locus and then test it is in the Loci object'
# assert testRefGen.rand() in testRefGen
def test_contains_false(testRefGen):
assert ("NO" in testRefGen) is False
def test_get_item(testRefGen):
"""
Get a random locus and then
retrieve that locus again
by its id
"""
random_locus = testRefGen.rand()
assert random_locus == testRefGen[random_locus.name]
def test_iter(testRefGen):
"test that the iter interface works"
i = 0
for locus in testRefGen:
i += 1
assert i == NUM_GENES
def test_rand(testRefGen):
"test instance type"
assert isinstance(testRefGen.rand(), Locus)
def test_rand_length(testRefGen):
"test the length of rand with n specified"
assert len(testRefGen.rand(n=100)) == 100
def test_rand_distinct(testRefGen):
assert len(testRefGen.rand(2000, distinct=True)) == 2000
def test_rand_too_many(testRefGen):
    "requesting more loci than exist should raise a ValueError"
    with pytest.raises(ValueError):
        testRefGen.rand(100000)
def test_rand_no_autopop(testRefGen):
assert len(testRefGen.rand(1, autopop=False)) == 1
# The first 4 genes on chromosome 1
# 1 ensembl gene 4854 9652 . - . ID=GRMZM2G059865;Name=GRMZM2G059865;biotype=protein_coding
# 1 ensembl gene 9882 10387 . - . ID=GRMZM5G888250;Name=GRMZM5G888250;biotype=protein_coding
# 1 ensembl gene 109519 111769 . - . ID=GRMZM2G093344;Name=GRMZM2G093344;biotype=protein_coding
# 1 ensembl gene 136307 138929 . + . ID=GRMZM2G093399;Name=GRMZM2G093399;biotype=protein_coding
def test_within(testRefGen):
"simple within to get chromosomal segment"
assert len(list(testRefGen.within(Locus("1", 1, 139000), partial=False))) == 4
def test_within_bad_strand(testRefGen):
"simple within to get chromosomal segment"
with pytest.raises(StrandError):
assert (
len(
list(
testRefGen.within(Locus("1", 1, 139000, strand="="), partial=False)
)
)
== 4
)
def test_within_yields_nothing(testRefGen):
l = Locus("0", 1, 1, strand="+")
assert len(list(testRefGen.within(l, partial=False))) == 0
def test_within_partial_false(testRefGen):
"put the locus boundaries within gene [1] and [4] and exclude them with partial"
assert len(list(testRefGen.within(Locus("1", 6000, 137000), partial=False))) == 2
def test_within_partial_true(testRefGen):
"put the locus boundaries within gene [1] and [4] and exclude them with partial"
assert len(list(testRefGen.within(Locus("1", 6000, 137000), partial=True))) == 4
def test_within_same_strand(testRefGen):
"test fetching loci only on the same strand"
assert (
len(
list(
testRefGen.within(
Locus("1", 1, 139000, strand="+"), partial=True, same_strand=True
)
)
)
== 1
)
def test_within_same_strand_and_ignore_strand(testRefGen):
"test fetching loci only on the same strand"
with pytest.raises(ValueError):
list(
testRefGen.within(
Locus("1", 1, 139000, strand="+"), ignore_strand=True, same_strand=True
)
)
def test_within_same_strand_minus(testRefGen):
"test fetching loci only on the same strand"
assert (
len(
list(
testRefGen.within(
Locus("1", 1, 139000, strand="-"), partial=True, same_strand=True
)
)
)
== 3
)
def test_within_strand_order(testRefGen):
# should return the locus at the beginning of the chromosome
loci = list(testRefGen.within(Locus("1", 1, 139000, strand="+")))
assert loci[0].start == 4854
def test_within_strand_order_minus(testRefGen):
# should return fourth gene first
loci = list(testRefGen.within(Locus("1", 1, 139000, strand="-")))
assert loci[0].start == 136307
def test_within_error_on_both_same_strand_and_ignore_strand(testRefGen):
    with pytest.raises(ValueError):
        list(
            testRefGen.within(
                Locus("1", 1, 139000, strand="-"), ignore_strand=True, same_strand=True
            )
        )
def test_upstream_plus_strand(testRefGen):
# Below is GRMZM2G093399, but on the minus strand
x = Locus("1", 136307, 138929)
l = [x.name for x in testRefGen.upstream_loci(x, n=3)]
assert l[0] == "GRMZM2G093344"
assert l[1] == "GRMZM5G888250"
assert l[2] == "GRMZM2G059865"
def test_upstream_minus_strand(testRefGen):
x = testRefGen["GRMZM5G888250"]
l = [x.name for x in testRefGen.upstream_loci(x, n=2)]
assert l[0] == "GRMZM2G093344"
assert l[1] == "GRMZM2G093399"
def test_upstream_accepts_loci(testRefGen):
loci = [testRefGen["GRMZM2G093399"], testRefGen["GRMZM2G093399"]]
l1, l2 = map(list, testRefGen.upstream_loci(loci, n=2))
assert len(l1) == 2
assert len(l2) == 2
def test_upstream_same_strand(testRefGen):
x = testRefGen["GRMZM5G888250"]
for x in testRefGen.upstream_loci(x, n=5, same_strand=True):
assert x.strand == "-"
def test_upstream_limit_n(testRefGen):
g = testRefGen.upstream_loci(testRefGen["GRMZM2G093399"], n=2)
assert len(list(g)) == 2
def test_upstream_filter_same_strand(testRefGen):
g = testRefGen.upstream_loci(testRefGen["GRMZM2G093399"], n=3, same_strand=True)
assert len(list(g)) == 0
def test_downstream(testRefGen):
l = [x.name for x in testRefGen.downstream_loci(Locus("1", 4854, 9652), n=3)]
assert l[0] == "GRMZM5G888250"
assert l[1] == "GRMZM2G093344"
assert l[2] == "GRMZM2G093399"
def test_downstream_accepts_loci(testRefGen):
x = Locus("1", 4854, 9652)
loci = [x, x]
l1, l2 = map(list, testRefGen.downstream_loci(loci, n=2))
assert len(l1) == 2
assert len(l2) == 2
def test_downstream_limit_n(testRefGen):
l = [x.name for x in testRefGen.downstream_loci(Locus("1", 4854, 9652), n=1)]
assert l[0] == "GRMZM5G888250"
assert len(l) == 1
def test_downstream_same_strand_limit_n(testRefGen):
l = [
x.name
for x in testRefGen.downstream_loci(
Locus("1", 4854, 9652), n=1, same_strand=True
)
]
assert len(l) == 1
# This skips GRMZM5G888250 and GRMZM2G093344 since they are on the - strand
assert l[0] == "GRMZM2G093399"
def test_flank_loci_limited_n(testRefGen):
x = Locus("1", 10500, 10500)
up, down = testRefGen.flanking_loci(x, n=2)
assert len(list(up)) == 2
assert len(list(down)) == 2
def test_encompassing_loci(testRefGen):
x = Locus("1", 10000, 10000)
loci = list(testRefGen.encompassing_loci(x))
assert loci[0].name == "GRMZM5G888250"
def test_full_import_gff():
if m80.exists("Loci", "ZmSmall"):
m80.delete("Loci", "ZmSmall")
gff = os.path.expanduser(os.path.join("raw", "maize_small.gff"))
x = Loci("ZmSmall")
x.import_gff(gff)
m80.delete("Loci", "ZmSmall")
def test_import_gff_gzipped():
if m80.exists("Loci", "ZmSmall"):
m80.delete("Loci", "ZmSmall")
gff = os.path.expanduser(os.path.join("raw", "maize_small.gff.gz"))
x = Loci("ZmSmall")
x.import_gff(gff)
m80.delete("Loci", "ZmSmall")
|
# Generated by Django 3.2.10 on 2021-12-19 13:49
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail_references.blocks
class Migration(migrations.Migration):
dependencies = [
('example', '0002_blogpage_bib_reference'),
]
operations = [
migrations.AlterField(
model_name='blogpage',
name='bib_reference',
field=wagtail.core.fields.StreamField([('ref', wagtail_references.blocks.ReferenceChooserBlock(target_model='wagtail_references.reference')), ('richtext', wagtail.core.blocks.RichTextBlock())], blank=True),
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cherrypy
import os
import collections
import api
import monitorfrontend
import tablesfrontend
import sprocsfrontend
import indexesfrontend
import report
import performance
import hosts
import export
import hostsfrontend
import welcomefrontend
import datadb
import tplE
import yaml
from argparse import ArgumentParser
from cherrypy._cpdispatch import Dispatcher
class Healthcheck(object):
def default(self, *args, **kwargs):
return {}
default.exposed = True
class HostIdAndShortnameDispatcher(Dispatcher):
def __call__(self, path_info):
splits = path_info.split('/')
if len(splits) > 1 and splits[1]:
if splits[1].isdigit() or splits[1] in hosts.getAllHostUinamesSorted():
return Dispatcher.__call__(self, '/host' + path_info)
return Dispatcher.__call__(self, path_info.lower())
def main():
parser = ArgumentParser(description='PGObserver Frontend')
parser.add_argument('-c', '--config', help='Path to yaml config file with datastore connect details. See pgobserver_frontend.example.yaml for a sample file. \
Certain values can be overridden by ENV vars PGOBS_HOST, PGOBS_DBNAME, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]')
parser.add_argument('--s3-config-path', help='Path style S3 URL to a key that holds the config file. Or PGOBS_CONFIG_S3_BUCKET env. var',
metavar='https://s3-region.amazonaws.com/x/y/file.yaml',
default=os.getenv('PGOBS_CONFIG_S3_BUCKET'))
parser.add_argument('-p', '--port', help='Web server port. Overrides value from config file', type=int)
args = parser.parse_args()
settings = collections.defaultdict(dict)
if args.s3_config_path: # S3 has precedence if specified
import aws_s3_configreader
settings = aws_s3_configreader.get_config_as_dict_from_s3_file(args.s3_config_path)
elif args.config:
args.config = os.path.expanduser(args.config)
if not os.path.exists(args.config):
print 'WARNING. Config file {} not found! exiting...'.format(args.config)
return
print "trying to read config file from {}".format(args.config)
with open(args.config, 'rb') as fd:
            settings = yaml.safe_load(fd)
# Make env vars overwrite yaml file, to run via docker without changing config file
settings['database']['host'] = (os.getenv('PGOBS_HOST') or settings['database'].get('host'))
settings['database']['port'] = (os.getenv('PGOBS_PORT') or settings['database'].get('port') or 5432)
settings['database']['name'] = (os.getenv('PGOBS_DATABASE') or settings['database'].get('name'))
settings['database']['frontend_user'] = (os.getenv('PGOBS_USER') or settings['database'].get('frontend_user'))
settings['database']['frontend_password'] = (os.getenv('PGOBS_PASSWORD') or settings['database'].get('frontend_password'))
if not (settings['database'].get('host') and settings['database'].get('name') and settings['database'].get('frontend_user')):
print 'Mandatory datastore connect details missing!'
print 'Check --config input or environment variables: PGOBS_HOST, PGOBS_DATABASE, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]'
print ''
parser.print_help()
return
conn_string = ' '.join((
'dbname=' + settings['database']['name'],
'host=' + settings['database']['host'],
'user=' + settings['database']['frontend_user'],
'port=' + str(settings['database']['port']),
))
print 'Setting connection string to ... ' + conn_string
# finished print conn_string to the world, password can be added
conn_string = conn_string + ' password=' + settings['database']['frontend_password']
datadb.setConnectionString(conn_string)
current_dir = os.path.dirname(os.path.abspath(__file__))
conf = {
'global': {'server.socket_host': '0.0.0.0', 'server.socket_port': args.port or settings.get('frontend',
{}).get('port') or 8080},
'/': {'tools.staticdir.root': current_dir, 'request.dispatch': HostIdAndShortnameDispatcher()},
'/healthcheck': {'tools.sessions.on': False},
'/static': {'tools.staticdir.dir': 'static', 'tools.staticdir.on': True, 'tools.sessions.on': False},
'/manifest.info': {'tools.staticfile.on': True, 'tools.staticfile.filename': os.path.join(current_dir, '..',
'MANIFEST.MF'), 'tools.auth_basic.on': False, 'tools.sessions.on': False},
}
tplE.setup(settings) # setup of global variables and host data for usage in views
root = welcomefrontend.WelcomeFrontend()
root.host = monitorfrontend.MonitorFrontend()
root.report = report.Report()
root.export = export.Export()
root.perftables = performance.PerfTables()
root.perfapi = performance.PerfApi()
root.perfindexes = performance.PerfIndexes()
root.perfschemas = performance.PerfUnusedSchemas()
root.perflocks = performance.PerfLocksReport()
root.perfstatstatements = performance.PerfStatStatementsReport()
root.perfbloat = performance.PerfBloat()
root.sprocs = sprocsfrontend.SprocFrontend()
root.tables = tablesfrontend.TableFrontend()
root.indexes = indexesfrontend.IndexesFrontend()
root.hosts = hostsfrontend.HostsFrontend()
root.api = api.Root(root) # JSON api exposure, enabling integration with other monitoring tools
root.healthcheck = Healthcheck()
if settings.get('oauth', {}).get('enable_oauth', False):
print 'switching on oauth ...'
import oauth
root.oauth = oauth.Oauth(settings['oauth'])
cherrypy.config.update({'tools.oauthtool.on': True, 'tools.sessions.on': True,
'tools.sessions.timeout': settings['oauth'].get('session_timeout', 43200)})
cherrypy.quickstart(root, config=conf)
if __name__ == '__main__':
main()
|
import os
from .vendored import colorconv
import numpy as np
import vispy.color
_matplotlib_list_file = os.path.join(os.path.dirname(__file__),
'matplotlib_cmaps.txt')
with open(_matplotlib_list_file) as fin:
matplotlib_colormaps = [line.rstrip() for line in fin]
def _all_rgb():
"""Return all 256**3 valid rgb tuples."""
base = np.arange(256, dtype=np.uint8)
r, g, b = np.meshgrid(base, base, base, indexing='ij')
return np.stack((r, g, b), axis=-1).reshape((-1, 3))
# obtained with colorconv.rgb2luv(_all_rgb().reshape((-1, 256, 3)))
LUVMIN = np.array([0., -83.07790815, -134.09790293])
LUVMAX = np.array([100., 175.01447356, 107.39905336])
LUVRNG = LUVMAX - LUVMIN
# obtained with colorconv.rgb2lab(_all_rgb().reshape((-1, 256, 3)))
LABMIN = np.array([0., -86.18302974, -107.85730021])
LABMAX = np.array([100., 98.23305386, 94.47812228])
LABRNG = LABMAX - LABMIN
def _validate_rgb(colors, *, tolerance=0.):
"""Return the subset of colors that is in [0, 1] for all channels.
Parameters
----------
colors : array of float, shape (N, 3)
Input colors in RGB space.
Other Parameters
----------------
tolerance : float, optional
Values outside of the range by less than ``tolerance`` are allowed and
clipped to be within the range.
Returns
-------
filtered_colors : array of float, shape (M, 3), M <= N
The subset of colors that are in valid RGB space.
Examples
--------
>>> colors = np.array([[ 0. , 1., 1. ],
... [ 1.1, 0., -0.03],
... [ 1.2, 1., 0.5 ]])
>>> _validate_rgb(colors)
array([[0., 1., 1.]])
>>> _validate_rgb(colors, tolerance=0.15)
array([[0., 1., 1.],
[1., 0., 0.]])
"""
lo = 0 - tolerance
hi = 1 + tolerance
valid = np.all((colors > lo) & (colors < hi), axis=1)
filtered_colors = np.clip(colors[valid], 0, 1)
return filtered_colors
def _low_discrepancy(dim, n, seed=0.5):
"""Generate a 1d, 2d, or 3d low discrepancy sequence of coordinates.
Parameters
----------
dim : one of {1, 2, 3}
The dimensionality of the sequence.
n : int
How many points to generate.
seed : float or array of float, shape (dim,)
The seed from which to start the quasirandom sequence.
Returns
-------
pts : array of float, shape (n, dim)
The sampled points.
References
----------
..[1]: http://extremelearning.com.au/unreasonable-effectiveness-of-quasirandom-sequences/
"""
phi1 = 1.6180339887498948482
phi2 = 1.32471795724474602596
phi3 = 1.22074408460575947536
seed = np.broadcast_to(seed, (1, dim))
phi = np.array([phi1, phi2, phi3])
g = 1 / phi
n = np.reshape(np.arange(n), (n, 1))
pts = (seed + (n * g[:dim])) % 1
return pts
def _color_random(n, *, colorspace='lab', tolerance=0.0, seed=0.5):
"""Generate n random RGB colors uniformly from LAB or LUV space.
Parameters
----------
n : int
Number of colors to generate.
colorspace : str, one of {'lab', 'luv', 'rgb'}
The colorspace from which to get random colors.
tolerance : float
How much margin to allow for out-of-range RGB values (these are
clipped to be in-range).
seed : float or array of float, shape (3,)
Value from which to start the quasirandom sequence.
Returns
-------
rgb : array of float, shape (n, 3)
RGB colors chosen uniformly at random from given colorspace.
"""
factor = 6 # about 1/5 of random LUV tuples are inside the space
expand_factor = 2
rgb = np.zeros((0, 3))
while len(rgb) < n:
random = _low_discrepancy(3, n * factor, seed=seed)
if colorspace == 'luv':
raw_rgb = colorconv.luv2rgb(random * LUVRNG + LUVMIN)
elif colorspace == 'rgb':
raw_rgb = random
else: # 'lab' by default
raw_rgb = colorconv.lab2rgb(random * LABRNG + LABMIN)
rgb = _validate_rgb(raw_rgb, tolerance=tolerance)
factor *= expand_factor
return rgb[:n]
def label_colormap(labels, seed=0.5, max_label=None):
"""Produce a colormap suitable for use with a given label set.
Parameters
----------
labels : array of int
A set of labels or label image.
seed : float or array of float, length 3
The seed for the low discrepancy sequence generator.
max_label : int, optional
The maximum label in `labels`. Computed if not given.
Returns
-------
cmap : vispy.color.Colormap
A colormap for use with ``labels``. The labels are remapped so that
the maximum label falls on 1.0, since vispy requires colormaps to map
within [0, 1].
Notes
-----
0 always maps to fully transparent.
"""
unique_labels = np.unique(labels)
if unique_labels[0] != 0:
unique_labels = np.concatenate([[0], unique_labels])
n = len(unique_labels)
max_label = max_label or np.max(unique_labels)
unique_labels_float = unique_labels / max_label
midpoints = np.convolve(unique_labels_float, [0.5, 0.5], mode='valid')
control_points = np.concatenate(([0.], midpoints, [1.]))
# make sure to add an alpha channel to the colors
colors = np.concatenate((_color_random(n, seed=seed),
np.full((n, 1), 0.7)), axis=1)
colors[0, :] = 0 # ensure alpha is 0 for label 0
cmap = vispy.color.Colormap(colors=colors, controls=control_points,
interpolation='zero')
return cmap
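# Hedged usage sketch, added for illustration only -- not part of the original
# module. It builds a colormap for a tiny, made-up label image; `label_image`
# is a hypothetical name and the printed RGBA values depend on the seed.
if __name__ == '__main__':
    label_image = np.array([[0, 1, 1], [2, 2, 5]])
    cmap = label_colormap(label_image)
    # map the labels, rescaled so the maximum label falls on 1.0 as noted above
    print(cmap.map(np.unique(label_image) / label_image.max()))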
|
from socket import socket
path = "/~bsetzer/4720sp19/nanoc/output/index.html"
host = "ksuweb.kennesaw.edu"
PORT = 80
conn = socket()
conn.connect((host,PORT))
try:
first_line = "GET " + path + " HTTP/1.1\r\n"
header1 = "Host: " + host + "\r\n"
black_line = "\r\n"
message = first_line + header1 + blank_line
conn.sendall(message.encode("UTF-8))
conn.shutdown(1)
response_bytes = b""
gbytes = conn.recv(4096)
while len(gbytes) > 0:
response_bytes += gbytes
gbytes = conn.recv(4096)
print(response_bytes.decode("UTF-8"))
finally:
conn.shutdown(0)
conn.close()
|
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from builtins import object
import logging
from time import time
from azure.conf import AZURE_ACCOUNTS, get_default_refresh_url
from desktop.lib.rest import http_client, resource
LOG = logging.getLogger(__name__)
class ActiveDirectory(object):
def __init__(self, url=None, aws_access_key_id=None, aws_secret_access_key=None, version=None):
self._access_key_id = aws_access_key_id
self._secret_access_key = aws_secret_access_key
self._url = url;
self._client = http_client.HttpClient(url, logger=LOG)
self._root = resource.Resource(self._client)
self._token = None
self._version = version
def get_token(self):
if not self._version:
return self._get_token({"resource": "https://management.core.windows.net/"})
else:
return self._get_token({"scope": "https://storage.azure.com/.default"})
def _get_token(self, params=None):
is_token_expired = self._token is None or time() >= self._token["expires_on"]
if is_token_expired:
LOG.debug("Authenticating to Azure Active Directory: %s" % self._url)
data = {
"grant_type" : "client_credentials",
"client_id" : self._access_key_id,
"client_secret" : self._secret_access_key
}
data.update(params)
self._token = self._root.post("/", data=data, log_response=False);
self._token["expires_on"] = int(self._token.get("expires_on", self._token.get("expires_in")))
return self._token["token_type"] + " " + self._token["access_token"]
@classmethod
def from_config(cls, conf='default', version=None):
access_key_id = AZURE_ACCOUNTS['default'].CLIENT_ID.get()
secret_access_key = AZURE_ACCOUNTS['default'].CLIENT_SECRET.get()
if None in (access_key_id, secret_access_key):
raise ValueError('Can\'t create azure client, credential is not configured')
url = get_default_refresh_url(version)
return cls(
url,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key,
version=version
)
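# Hedged usage sketch, added for illustration only -- not part of the original
# module. It assumes AZURE_ACCOUNTS['default'] has CLIENT_ID and CLIENT_SECRET
# configured; `demo_authorization_header` is a hypothetical helper name.
def demo_authorization_header(version=None):
    """Return an Authorization header value (e.g. "Bearer <token>") for Azure calls."""
    client = ActiveDirectory.from_config(version=version)
    return client.get_token()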
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
https://github.com/cgloeckner/pyvtt/
Copyright (c) 2020-2021 Christian Glöckner
License: MIT (see LICENSE for details)
"""
import unittest, tempfile, pathlib
from buildnumber import BuildNumber
class BuildNumberTest(unittest.TestCase):
def setUp(self):
self.file = tempfile.NamedTemporaryFile()
def tearDown(self):
pass
def test_init(self):
v = BuildNumber()
self.assertEqual(0, v.version[0])
self.assertEqual(0, v.version[1])
self.assertEqual(1, v.version[2])
def test_str(self):
v = BuildNumber()
v.version = ['a', 'b', 'c']
s = '{0}'.format(v)
self.assertEqual(s, 'a.b.c')
def test_loadFromFile(self):
# create js version file
with open(self.file.name, 'w') as h:
h.write('const version = "15.624.115";')
v = BuildNumber()
v.loadFromFile(self.file.name)
self.assertEqual(15, v.version[0])
self.assertEqual(624, v.version[1])
self.assertEqual(115, v.version[2])
def test_saveToFile(self):
v = BuildNumber()
v.version = [23, 73, 234]
v.saveToFile(self.file.name)
# load js version file
with open(self.file.name, 'r') as h:
c = h.read()
self.assertEqual(c, 'const version = "23.73.234";')
def test_major(self):
v = BuildNumber()
v.version = [23, 73, 234]
v.major()
self.assertEqual(24, v.version[0])
self.assertEqual( 0, v.version[1])
self.assertEqual( 0, v.version[2])
def test_minor(self):
v = BuildNumber()
v.version = [23, 73, 234]
v.minor()
self.assertEqual(23, v.version[0])
self.assertEqual(74, v.version[1])
self.assertEqual( 0, v.version[2])
def test_fix(self):
v = BuildNumber()
v.version = [23, 73, 234]
v.fix()
self.assertEqual( 23, v.version[0])
self.assertEqual( 73, v.version[1])
self.assertEqual(235, v.version[2])
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from datadog_api_client.v1.model.slo_list_response_metadata_page import SLOListResponseMetadataPage
globals()["SLOListResponseMetadataPage"] = SLOListResponseMetadataPage
class SLOListResponseMetadata(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
"page": (SLOListResponseMetadataPage,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"page": "page", # noqa: E501
}
_composed_schemas = {}
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""SLOListResponseMetadata - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
page (SLOListResponseMetadataPage): [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
|
import logging
import re
from flask import abort, has_request_context, request
from flask_login import current_user
from notifications_python_client import __version__
from notifications_python_client.base import BaseAPIClient
from notifications_python_client.errors import HTTP503Error
logger = logging.getLogger(__name__)
def _attach_current_user(data):
return dict(created_by=current_user.id, **data)
class NotifyAdminAPIClient(BaseAPIClient):
def __init__(self):
super().__init__("a" * 73, "b")
def init_app(self, app):
self.base_url = app.config["API_HOST_NAME"]
self.service_id = app.config["ADMIN_CLIENT_USER_NAME"]
self.api_key = app.config["ADMIN_CLIENT_SECRET"]
self.route_secret = app.config["ROUTE_SECRET_KEY_1"]
def generate_headers(self, api_token):
headers = {
"Content-type": "application/json",
"Authorization": "Bearer {}".format(api_token),
"X-Custom-Forwarder": self.route_secret,
"User-agent": "NOTIFY-API-PYTHON-CLIENT/{}".format(__version__),
}
return self._add_request_id_header(headers)
@staticmethod
def _add_request_id_header(headers):
if not has_request_context():
return headers
headers["X-B3-TraceId"] = request.request_id
headers["X-B3-SpanId"] = request.span_id
return headers
def check_inactive_service(self):
# this file is imported in app/__init__.py before current_service is initialised, so need to import later
# to prevent cyclical imports
from app import current_service
# if the current service is inactive and the user isn't a platform admin, we should block them from making any
# stateful modifications to that service
if current_service and not current_service.active and not current_user.platform_admin:
abort(403)
def log_admin_call(self, url, method):
if hasattr(current_user, "platform_admin") and current_user.platform_admin:
user = current_user.email_address + "|" + current_user.id
logger.warn("Admin API request {} {} {} ".format(method, url, user))
def get(self, url, params=None):
if (
re.search(
r"\/user\/[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}\Z",
url,
)
is None
):
self.log_admin_call(url, "GET")
return super().request("GET", url, params=params)
def post(self, *args, **kwargs):
if "url" in kwargs:
self.log_admin_call(kwargs["url"], "POST")
if len(args) > 0:
self.log_admin_call(args[0], "POST")
self.check_inactive_service()
return super().post(*args, **kwargs)
def put(self, *args, **kwargs):
if "url" in kwargs:
self.log_admin_call(kwargs["url"], "PUT")
if len(args) > 0:
self.log_admin_call(args[0], "PUT")
self.check_inactive_service()
return super().put(*args, **kwargs)
def delete(self, *args, **kwargs):
if "url" in kwargs:
self.log_admin_call(kwargs["url"], "DELETE")
if len(args) > 0:
self.log_admin_call(args[0], "DELETE")
self.check_inactive_service()
return super().delete(*args, **kwargs)
def _perform_request(self, method, url, kwargs):
# Retry requests to the Notify API up to 3 times
# if they fail with a 503 status, thrown when
# the admin can't connect to the API.
for i in [1, 2, 3]:
try:
return super()._perform_request(method, url, kwargs)
except HTTP503Error as e:
logger.warn("Retrying API request after failure {} {}".format(method, url))
if i == 3:
raise e
class InviteTokenError(Exception):
pass
|
# Generated by Django 2.1.5 on 2019-07-14 02:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0006_course_prerequisite'),
]
operations = [
migrations.AlterField(
model_name='course',
name='units',
field=models.CharField(default='0', max_length=30),
),
]
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AzureResourceBase(msrest.serialization.Model):
"""Common properties for all Azure resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureResourceBase, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class DeploymentScript(AzureResourceBase):
"""Deployment script object.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AzureCliScript, AzurePowerShellScript.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
:param identity: Required. Managed identity to be used for this deployment script. Currently,
only user-assigned MSI is supported.
:type identity:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ManagedServiceIdentity
:param location: Required. The location of the ACI and the storage account for the deployment
script.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param kind: Required. Type of the script.Constant filled by server. Possible values include:
"AzurePowerShell", "AzureCLI".
:type kind: str or ~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ScriptType
:ivar system_data: The system metadata related to this resource.
:vartype system_data:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'identity': {'required': True},
'location': {'required': True},
'kind': {'required': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
_subtype_map = {
'kind': {'AzureCLI': 'AzureCliScript', 'AzurePowerShell': 'AzurePowerShellScript'}
}
def __init__(
self,
**kwargs
):
super(DeploymentScript, self).__init__(**kwargs)
self.identity = kwargs['identity']
self.location = kwargs['location']
self.tags = kwargs.get('tags', None)
self.kind = 'DeploymentScript' # type: str
self.system_data = None
class AzureCliScript(DeploymentScript):
"""Object model for the Azure CLI script.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
:param identity: Required. Managed identity to be used for this deployment script. Currently,
only user-assigned MSI is supported.
:type identity:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ManagedServiceIdentity
:param location: Required. The location of the ACI and the storage account for the deployment
script.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param kind: Required. Type of the script.Constant filled by server. Possible values include:
"AzurePowerShell", "AzureCLI".
:type kind: str or ~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ScriptType
:ivar system_data: The system metadata related to this resource.
:vartype system_data:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.SystemData
:param container_settings: Container settings.
:type container_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ContainerConfiguration
:param storage_account_settings: Storage Account settings.
:type storage_account_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.StorageAccountConfiguration
:param cleanup_preference: The clean up preference when the script execution gets in a terminal
state. Default setting is 'Always'. Possible values include: "Always", "OnSuccess",
"OnExpiration". Default value: "Always".
:type cleanup_preference: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.CleanupOptions
:ivar provisioning_state: State of the script execution. This only appears in the response.
Possible values include: "Creating", "ProvisioningResources", "Running", "Succeeded", "Failed",
"Canceled".
:vartype provisioning_state: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ScriptProvisioningState
:ivar status: Contains the results of script execution.
:vartype status: ~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ScriptStatus
:ivar outputs: List of script outputs.
:vartype outputs: dict[str, object]
:param primary_script_uri: Uri for the script. This is the entry point for the external script.
:type primary_script_uri: str
:param supporting_script_uris: Supporting files for the external script.
:type supporting_script_uris: list[str]
:param script_content: Script body.
:type script_content: str
:param arguments: Command line arguments to pass to the script. Arguments are separated by
spaces. ex: -Name blue* -Location 'West US 2'.
:type arguments: str
:param environment_variables: The environment variables to pass over to the script.
:type environment_variables:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.EnvironmentVariable]
:param force_update_tag: Gets or sets how the deployment script should be forced to execute
even if the script resource has not changed. Can be current time stamp or a GUID.
:type force_update_tag: str
:param retention_interval: Required. Interval for which the service retains the script resource
after it reaches a terminal state. Resource will be deleted when this duration expires.
Duration is based on ISO 8601 pattern (for example P7D means one week).
:type retention_interval: ~datetime.timedelta
:param timeout: Maximum allowed script execution time specified in ISO 8601 format. Default
value is P1D.
:type timeout: ~datetime.timedelta
:param az_cli_version: Required. Azure CLI module version to be used.
:type az_cli_version: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'identity': {'required': True},
'location': {'required': True},
'kind': {'required': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'outputs': {'readonly': True},
'script_content': {'max_length': 32000, 'min_length': 0},
'retention_interval': {'required': True},
'az_cli_version': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'container_settings': {'key': 'properties.containerSettings', 'type': 'ContainerConfiguration'},
'storage_account_settings': {'key': 'properties.storageAccountSettings', 'type': 'StorageAccountConfiguration'},
'cleanup_preference': {'key': 'properties.cleanupPreference', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'ScriptStatus'},
'outputs': {'key': 'properties.outputs', 'type': '{object}'},
'primary_script_uri': {'key': 'properties.primaryScriptUri', 'type': 'str'},
'supporting_script_uris': {'key': 'properties.supportingScriptUris', 'type': '[str]'},
'script_content': {'key': 'properties.scriptContent', 'type': 'str'},
'arguments': {'key': 'properties.arguments', 'type': 'str'},
'environment_variables': {'key': 'properties.environmentVariables', 'type': '[EnvironmentVariable]'},
'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
'retention_interval': {'key': 'properties.retentionInterval', 'type': 'duration'},
'timeout': {'key': 'properties.timeout', 'type': 'duration'},
'az_cli_version': {'key': 'properties.azCliVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureCliScript, self).__init__(**kwargs)
self.kind = 'AzureCLI' # type: str
self.container_settings = kwargs.get('container_settings', None)
self.storage_account_settings = kwargs.get('storage_account_settings', None)
self.cleanup_preference = kwargs.get('cleanup_preference', "Always")
self.provisioning_state = None
self.status = None
self.outputs = None
self.primary_script_uri = kwargs.get('primary_script_uri', None)
self.supporting_script_uris = kwargs.get('supporting_script_uris', None)
self.script_content = kwargs.get('script_content', None)
self.arguments = kwargs.get('arguments', None)
self.environment_variables = kwargs.get('environment_variables', None)
self.force_update_tag = kwargs.get('force_update_tag', None)
self.retention_interval = kwargs['retention_interval']
self.timeout = kwargs.get('timeout', "P1D")
self.az_cli_version = kwargs['az_cli_version']
class ScriptConfigurationBase(msrest.serialization.Model):
"""Common configuration settings for both Azure PowerShell and Azure CLI scripts.
All required parameters must be populated in order to send to Azure.
:param primary_script_uri: Uri for the script. This is the entry point for the external script.
:type primary_script_uri: str
:param supporting_script_uris: Supporting files for the external script.
:type supporting_script_uris: list[str]
:param script_content: Script body.
:type script_content: str
:param arguments: Command line arguments to pass to the script. Arguments are separated by
spaces. ex: -Name blue* -Location 'West US 2'.
:type arguments: str
:param environment_variables: The environment variables to pass over to the script.
:type environment_variables:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.EnvironmentVariable]
:param force_update_tag: Gets or sets how the deployment script should be forced to execute
even if the script resource has not changed. Can be current time stamp or a GUID.
:type force_update_tag: str
:param retention_interval: Required. Interval for which the service retains the script resource
after it reaches a terminal state. Resource will be deleted when this duration expires.
Duration is based on ISO 8601 pattern (for example P7D means one week).
:type retention_interval: ~datetime.timedelta
:param timeout: Maximum allowed script execution time specified in ISO 8601 format. Default
value is P1D.
:type timeout: ~datetime.timedelta
"""
_validation = {
'script_content': {'max_length': 32000, 'min_length': 0},
'retention_interval': {'required': True},
}
_attribute_map = {
'primary_script_uri': {'key': 'primaryScriptUri', 'type': 'str'},
'supporting_script_uris': {'key': 'supportingScriptUris', 'type': '[str]'},
'script_content': {'key': 'scriptContent', 'type': 'str'},
'arguments': {'key': 'arguments', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '[EnvironmentVariable]'},
'force_update_tag': {'key': 'forceUpdateTag', 'type': 'str'},
'retention_interval': {'key': 'retentionInterval', 'type': 'duration'},
'timeout': {'key': 'timeout', 'type': 'duration'},
}
def __init__(
self,
**kwargs
):
super(ScriptConfigurationBase, self).__init__(**kwargs)
self.primary_script_uri = kwargs.get('primary_script_uri', None)
self.supporting_script_uris = kwargs.get('supporting_script_uris', None)
self.script_content = kwargs.get('script_content', None)
self.arguments = kwargs.get('arguments', None)
self.environment_variables = kwargs.get('environment_variables', None)
self.force_update_tag = kwargs.get('force_update_tag', None)
self.retention_interval = kwargs['retention_interval']
self.timeout = kwargs.get('timeout', "P1D")
class DeploymentScriptPropertiesBase(msrest.serialization.Model):
"""Common properties for the deployment script.
Variables are only populated by the server, and will be ignored when sending a request.
:param container_settings: Container settings.
:type container_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ContainerConfiguration
:param storage_account_settings: Storage Account settings.
:type storage_account_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.StorageAccountConfiguration
:param cleanup_preference: The clean up preference when the script execution gets in a terminal
state. Default setting is 'Always'. Possible values include: "Always", "OnSuccess",
"OnExpiration". Default value: "Always".
:type cleanup_preference: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.CleanupOptions
:ivar provisioning_state: State of the script execution. This only appears in the response.
Possible values include: "Creating", "ProvisioningResources", "Running", "Succeeded", "Failed",
"Canceled".
:vartype provisioning_state: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ScriptProvisioningState
:ivar status: Contains the results of script execution.
:vartype status: ~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ScriptStatus
:ivar outputs: List of script outputs.
:vartype outputs: dict[str, object]
"""
_validation = {
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'outputs': {'readonly': True},
}
_attribute_map = {
'container_settings': {'key': 'containerSettings', 'type': 'ContainerConfiguration'},
'storage_account_settings': {'key': 'storageAccountSettings', 'type': 'StorageAccountConfiguration'},
'cleanup_preference': {'key': 'cleanupPreference', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'status': {'key': 'status', 'type': 'ScriptStatus'},
'outputs': {'key': 'outputs', 'type': '{object}'},
}
def __init__(
self,
**kwargs
):
super(DeploymentScriptPropertiesBase, self).__init__(**kwargs)
self.container_settings = kwargs.get('container_settings', None)
self.storage_account_settings = kwargs.get('storage_account_settings', None)
self.cleanup_preference = kwargs.get('cleanup_preference', "Always")
self.provisioning_state = None
self.status = None
self.outputs = None
class AzureCliScriptProperties(DeploymentScriptPropertiesBase, ScriptConfigurationBase):
"""Properties of the Azure CLI script object.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param primary_script_uri: Uri for the script. This is the entry point for the external script.
:type primary_script_uri: str
:param supporting_script_uris: Supporting files for the external script.
:type supporting_script_uris: list[str]
:param script_content: Script body.
:type script_content: str
:param arguments: Command line arguments to pass to the script. Arguments are separated by
spaces. ex: -Name blue* -Location 'West US 2'.
:type arguments: str
:param environment_variables: The environment variables to pass over to the script.
:type environment_variables:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.EnvironmentVariable]
:param force_update_tag: Gets or sets how the deployment script should be forced to execute
even if the script resource has not changed. Can be current time stamp or a GUID.
:type force_update_tag: str
:param retention_interval: Required. Interval for which the service retains the script resource
after it reaches a terminal state. Resource will be deleted when this duration expires.
Duration is based on ISO 8601 pattern (for example P7D means one week).
:type retention_interval: ~datetime.timedelta
:param timeout: Maximum allowed script execution time specified in ISO 8601 format. Default
value is P1D.
:type timeout: ~datetime.timedelta
:param container_settings: Container settings.
:type container_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ContainerConfiguration
:param storage_account_settings: Storage Account settings.
:type storage_account_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.StorageAccountConfiguration
:param cleanup_preference: The cleanup preference when the script execution reaches a terminal
state. Default setting is 'Always'. Possible values include: "Always", "OnSuccess",
"OnExpiration". Default value: "Always".
:type cleanup_preference: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.CleanupOptions
:ivar provisioning_state: State of the script execution. This only appears in the response.
Possible values include: "Creating", "ProvisioningResources", "Running", "Succeeded", "Failed",
"Canceled".
:vartype provisioning_state: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ScriptProvisioningState
:ivar status: Contains the results of script execution.
:vartype status: ~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ScriptStatus
:ivar outputs: List of script outputs.
:vartype outputs: dict[str, object]
:param az_cli_version: Required. Azure CLI module version to be used.
:type az_cli_version: str
"""
_validation = {
'script_content': {'max_length': 32000, 'min_length': 0},
'retention_interval': {'required': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'outputs': {'readonly': True},
'az_cli_version': {'required': True},
}
_attribute_map = {
'primary_script_uri': {'key': 'primaryScriptUri', 'type': 'str'},
'supporting_script_uris': {'key': 'supportingScriptUris', 'type': '[str]'},
'script_content': {'key': 'scriptContent', 'type': 'str'},
'arguments': {'key': 'arguments', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '[EnvironmentVariable]'},
'force_update_tag': {'key': 'forceUpdateTag', 'type': 'str'},
'retention_interval': {'key': 'retentionInterval', 'type': 'duration'},
'timeout': {'key': 'timeout', 'type': 'duration'},
'container_settings': {'key': 'containerSettings', 'type': 'ContainerConfiguration'},
'storage_account_settings': {'key': 'storageAccountSettings', 'type': 'StorageAccountConfiguration'},
'cleanup_preference': {'key': 'cleanupPreference', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'status': {'key': 'status', 'type': 'ScriptStatus'},
'outputs': {'key': 'outputs', 'type': '{object}'},
'az_cli_version': {'key': 'azCliVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureCliScriptProperties, self).__init__(**kwargs)
self.primary_script_uri = kwargs.get('primary_script_uri', None)
self.supporting_script_uris = kwargs.get('supporting_script_uris', None)
self.script_content = kwargs.get('script_content', None)
self.arguments = kwargs.get('arguments', None)
self.environment_variables = kwargs.get('environment_variables', None)
self.force_update_tag = kwargs.get('force_update_tag', None)
self.retention_interval = kwargs['retention_interval']
self.timeout = kwargs.get('timeout', "P1D")
self.az_cli_version = kwargs['az_cli_version']
self.container_settings = kwargs.get('container_settings', None)
self.storage_account_settings = kwargs.get('storage_account_settings', None)
self.cleanup_preference = kwargs.get('cleanup_preference', "Always")
self.provisioning_state = None
self.status = None
self.outputs = None
class AzurePowerShellScript(DeploymentScript):
"""Object model for the Azure PowerShell script.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
:param identity: Required. Managed identity to be used for this deployment script. Currently,
only user-assigned MSI is supported.
:type identity:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ManagedServiceIdentity
:param location: Required. The location of the ACI and the storage account for the deployment
script.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param kind: Required. Type of the script. Constant filled by server. Possible values include:
"AzurePowerShell", "AzureCLI".
:type kind: str or ~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ScriptType
:ivar system_data: The system metadata related to this resource.
:vartype system_data:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.SystemData
:param container_settings: Container settings.
:type container_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ContainerConfiguration
:param storage_account_settings: Storage Account settings.
:type storage_account_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.StorageAccountConfiguration
:param cleanup_preference: The cleanup preference when the script execution reaches a terminal
state. Default setting is 'Always'. Possible values include: "Always", "OnSuccess",
"OnExpiration". Default value: "Always".
:type cleanup_preference: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.CleanupOptions
:ivar provisioning_state: State of the script execution. This only appears in the response.
Possible values include: "Creating", "ProvisioningResources", "Running", "Succeeded", "Failed",
"Canceled".
:vartype provisioning_state: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ScriptProvisioningState
:ivar status: Contains the results of script execution.
:vartype status: ~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ScriptStatus
:ivar outputs: List of script outputs.
:vartype outputs: dict[str, object]
:param primary_script_uri: Uri for the script. This is the entry point for the external script.
:type primary_script_uri: str
:param supporting_script_uris: Supporting files for the external script.
:type supporting_script_uris: list[str]
:param script_content: Script body.
:type script_content: str
:param arguments: Command line arguments to pass to the script. Arguments are separated by
spaces. ex: -Name blue* -Location 'West US 2'.
:type arguments: str
:param environment_variables: The environment variables to pass over to the script.
:type environment_variables:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.EnvironmentVariable]
:param force_update_tag: Gets or sets how the deployment script should be forced to execute
even if the script resource has not changed. Can be current time stamp or a GUID.
:type force_update_tag: str
:param retention_interval: Required. Interval for which the service retains the script resource
after it reaches a terminal state. Resource will be deleted when this duration expires.
Duration is based on ISO 8601 pattern (for example P7D means one week).
:type retention_interval: ~datetime.timedelta
:param timeout: Maximum allowed script execution time specified in ISO 8601 format. Default
value is P1D.
:type timeout: ~datetime.timedelta
:param az_power_shell_version: Required. Azure PowerShell module version to be used.
:type az_power_shell_version: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'identity': {'required': True},
'location': {'required': True},
'kind': {'required': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'outputs': {'readonly': True},
'script_content': {'max_length': 32000, 'min_length': 0},
'retention_interval': {'required': True},
'az_power_shell_version': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'container_settings': {'key': 'properties.containerSettings', 'type': 'ContainerConfiguration'},
'storage_account_settings': {'key': 'properties.storageAccountSettings', 'type': 'StorageAccountConfiguration'},
'cleanup_preference': {'key': 'properties.cleanupPreference', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'ScriptStatus'},
'outputs': {'key': 'properties.outputs', 'type': '{object}'},
'primary_script_uri': {'key': 'properties.primaryScriptUri', 'type': 'str'},
'supporting_script_uris': {'key': 'properties.supportingScriptUris', 'type': '[str]'},
'script_content': {'key': 'properties.scriptContent', 'type': 'str'},
'arguments': {'key': 'properties.arguments', 'type': 'str'},
'environment_variables': {'key': 'properties.environmentVariables', 'type': '[EnvironmentVariable]'},
'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
'retention_interval': {'key': 'properties.retentionInterval', 'type': 'duration'},
'timeout': {'key': 'properties.timeout', 'type': 'duration'},
'az_power_shell_version': {'key': 'properties.azPowerShellVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzurePowerShellScript, self).__init__(**kwargs)
self.kind = 'AzurePowerShell' # type: str
self.container_settings = kwargs.get('container_settings', None)
self.storage_account_settings = kwargs.get('storage_account_settings', None)
self.cleanup_preference = kwargs.get('cleanup_preference', "Always")
self.provisioning_state = None
self.status = None
self.outputs = None
self.primary_script_uri = kwargs.get('primary_script_uri', None)
self.supporting_script_uris = kwargs.get('supporting_script_uris', None)
self.script_content = kwargs.get('script_content', None)
self.arguments = kwargs.get('arguments', None)
self.environment_variables = kwargs.get('environment_variables', None)
self.force_update_tag = kwargs.get('force_update_tag', None)
self.retention_interval = kwargs['retention_interval']
self.timeout = kwargs.get('timeout', "P1D")
self.az_power_shell_version = kwargs['az_power_shell_version']
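# A minimal construction sketch for this model (illustration only, not part of the
# generated file; the identity resource id, location, and version strings are
# placeholders, and retention_interval is passed as a datetime.timedelta, which the
# msrest serializer is expected to emit as an ISO 8601 duration such as 'P7D'):
#
#   import datetime
#   script = AzurePowerShellScript(
#       identity=ManagedServiceIdentity(
#           type='UserAssigned',
#           user_assigned_identities={'<user-assigned-msi-resource-id>': UserAssignedIdentity()}),
#       location='westus2',
#       az_power_shell_version='5.0',
#       retention_interval=datetime.timedelta(days=7),
#       script_content="Write-Output 'hello'")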
class AzurePowerShellScriptProperties(DeploymentScriptPropertiesBase, ScriptConfigurationBase):
"""Properties of the Azure PowerShell script object.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param primary_script_uri: Uri for the script. This is the entry point for the external script.
:type primary_script_uri: str
:param supporting_script_uris: Supporting files for the external script.
:type supporting_script_uris: list[str]
:param script_content: Script body.
:type script_content: str
:param arguments: Command line arguments to pass to the script. Arguments are separated by
spaces. ex: -Name blue* -Location 'West US 2'.
:type arguments: str
:param environment_variables: The environment variables to pass over to the script.
:type environment_variables:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.EnvironmentVariable]
:param force_update_tag: Gets or sets how the deployment script should be forced to execute
even if the script resource has not changed. Can be current time stamp or a GUID.
:type force_update_tag: str
:param retention_interval: Required. Interval for which the service retains the script resource
after it reaches a terminal state. Resource will be deleted when this duration expires.
Duration is based on ISO 8601 pattern (for example P7D means one week).
:type retention_interval: ~datetime.timedelta
:param timeout: Maximum allowed script execution time specified in ISO 8601 format. Default
value is P1D.
:type timeout: ~datetime.timedelta
:param container_settings: Container settings.
:type container_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ContainerConfiguration
:param storage_account_settings: Storage Account settings.
:type storage_account_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.StorageAccountConfiguration
:param cleanup_preference: The cleanup preference when the script execution reaches a terminal
state. Default setting is 'Always'. Possible values include: "Always", "OnSuccess",
"OnExpiration". Default value: "Always".
:type cleanup_preference: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.CleanupOptions
:ivar provisioning_state: State of the script execution. This only appears in the response.
Possible values include: "Creating", "ProvisioningResources", "Running", "Succeeded", "Failed",
"Canceled".
:vartype provisioning_state: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ScriptProvisioningState
:ivar status: Contains the results of script execution.
:vartype status: ~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ScriptStatus
:ivar outputs: List of script outputs.
:vartype outputs: dict[str, object]
:param az_power_shell_version: Required. Azure PowerShell module version to be used.
:type az_power_shell_version: str
"""
_validation = {
'script_content': {'max_length': 32000, 'min_length': 0},
'retention_interval': {'required': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'outputs': {'readonly': True},
'az_power_shell_version': {'required': True},
}
_attribute_map = {
'primary_script_uri': {'key': 'primaryScriptUri', 'type': 'str'},
'supporting_script_uris': {'key': 'supportingScriptUris', 'type': '[str]'},
'script_content': {'key': 'scriptContent', 'type': 'str'},
'arguments': {'key': 'arguments', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '[EnvironmentVariable]'},
'force_update_tag': {'key': 'forceUpdateTag', 'type': 'str'},
'retention_interval': {'key': 'retentionInterval', 'type': 'duration'},
'timeout': {'key': 'timeout', 'type': 'duration'},
'container_settings': {'key': 'containerSettings', 'type': 'ContainerConfiguration'},
'storage_account_settings': {'key': 'storageAccountSettings', 'type': 'StorageAccountConfiguration'},
'cleanup_preference': {'key': 'cleanupPreference', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'status': {'key': 'status', 'type': 'ScriptStatus'},
'outputs': {'key': 'outputs', 'type': '{object}'},
'az_power_shell_version': {'key': 'azPowerShellVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzurePowerShellScriptProperties, self).__init__(**kwargs)
self.primary_script_uri = kwargs.get('primary_script_uri', None)
self.supporting_script_uris = kwargs.get('supporting_script_uris', None)
self.script_content = kwargs.get('script_content', None)
self.arguments = kwargs.get('arguments', None)
self.environment_variables = kwargs.get('environment_variables', None)
self.force_update_tag = kwargs.get('force_update_tag', None)
self.retention_interval = kwargs['retention_interval']
self.timeout = kwargs.get('timeout', "P1D")
self.az_power_shell_version = kwargs['az_power_shell_version']
self.container_settings = kwargs.get('container_settings', None)
self.storage_account_settings = kwargs.get('storage_account_settings', None)
self.cleanup_preference = kwargs.get('cleanup_preference', "Always")
self.provisioning_state = None
self.status = None
self.outputs = None
class ContainerConfiguration(msrest.serialization.Model):
"""Settings to customize ACI container instance.
:param container_group_name: Container group name. If not specified, the name is
auto-generated. Leaving out 'containerGroupName' tells the system to generate a unique
name, which might end up flagging an Azure Policy as non-compliant. Use 'containerGroupName'
when you have an Azure Policy that expects a specific naming convention or when you want to
fully control the name. The 'containerGroupName' property must be between 1 and 63 characters
long, must contain only lowercase letters, numbers, and dashes, cannot start or end with a
dash, and cannot contain consecutive dashes. To specify a 'containerGroupName', add the
following object to properties: { "containerSettings": { "containerGroupName": "contoso-container" } }.
If you do not want to specify a 'containerGroupName', do not add the 'containerSettings'
property.
:type container_group_name: str
"""
_validation = {
'container_group_name': {'max_length': 63, 'min_length': 1},
}
_attribute_map = {
'container_group_name': {'key': 'containerGroupName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContainerConfiguration, self).__init__(**kwargs)
self.container_group_name = kwargs.get('container_group_name', None)
class DeploymentScriptListResult(msrest.serialization.Model):
"""List of deployment scripts.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: An array of deployment scripts.
:type value:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.DeploymentScript]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DeploymentScript]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DeploymentScriptListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class DeploymentScriptsError(msrest.serialization.Model):
"""Deployment scripts error response.
:param error: Common error response for all Azure Resource Manager APIs to return error details
for failed operations. (This also follows the OData error response format.)
:type error: ~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ErrorResponse
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorResponse'},
}
def __init__(
self,
**kwargs
):
super(DeploymentScriptsError, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class DeploymentScriptUpdateParameter(AzureResourceBase):
"""Deployment script parameters to be updated.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
:param tags: A set of tags. Resource tags to be updated.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(DeploymentScriptUpdateParameter, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
class EnvironmentVariable(msrest.serialization.Model):
"""The environment variable to pass to the script in the container instance.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the environment variable.
:type name: str
:param value: The value of the environment variable.
:type value: str
:param secure_value: The value of the secure environment variable.
:type secure_value: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
'secure_value': {'key': 'secureValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EnvironmentVariable, self).__init__(**kwargs)
self.name = kwargs['name']
self.value = kwargs.get('value', None)
self.secure_value = kwargs.get('secure_value', None)
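# Hedged illustration of building environment variables for a script properties model
# (names and values below are placeholders; secure values are typically not echoed
# back by the service):
#
#   env = [EnvironmentVariable(name='STORAGE_NAME', value='examplestorage'),
#          EnvironmentVariable(name='STORAGE_KEY', secure_value='<secret>')]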
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: object
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ErrorResponse]
:ivar additional_info: The error additional info.
:vartype additional_info:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorResponse]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ManagedServiceIdentity(msrest.serialization.Model):
"""Managed identity generic object.
:param type: Type of the managed identity. Possible values include: "UserAssigned".
:type type: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ManagedServiceIdentityType
:param tenant_id: ID of the Azure Active Directory.
:type tenant_id: str
:param user_assigned_identities: The list of user-assigned managed identities associated with
the resource. Key is the Azure resource Id of the managed identity.
:type user_assigned_identities: dict[str,
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.UserAssignedIdentity]
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedIdentity}'},
}
def __init__(
self,
**kwargs
):
super(ManagedServiceIdentity, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
self.tenant_id = kwargs.get('tenant_id', None)
self.user_assigned_identities = kwargs.get('user_assigned_identities', None)
class ScriptLog(AzureResourceBase):
"""Script execution log object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
:ivar log: Script execution logs in text format.
:vartype log: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'log': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'log': {'key': 'properties.log', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ScriptLog, self).__init__(**kwargs)
self.log = None
class ScriptLogsList(msrest.serialization.Model):
"""Deployment script execution logs.
:param value: Deployment scripts logs.
:type value: list[~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ScriptLog]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ScriptLog]'},
}
def __init__(
self,
**kwargs
):
super(ScriptLogsList, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class ScriptStatus(msrest.serialization.Model):
"""Generic object modeling results of script execution.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar container_instance_id: ACI resource Id.
:vartype container_instance_id: str
:ivar storage_account_id: Storage account resource Id.
:vartype storage_account_id: str
:ivar start_time: Start time of the script execution.
:vartype start_time: ~datetime.datetime
:ivar end_time: End time of the script execution.
:vartype end_time: ~datetime.datetime
:ivar expiration_time: Time the deployment script resource will expire.
:vartype expiration_time: ~datetime.datetime
:param error: Error that is relayed from the script execution.
:type error: ~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.ErrorResponse
"""
_validation = {
'container_instance_id': {'readonly': True},
'storage_account_id': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'expiration_time': {'readonly': True},
}
_attribute_map = {
'container_instance_id': {'key': 'containerInstanceId', 'type': 'str'},
'storage_account_id': {'key': 'storageAccountId', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'expiration_time': {'key': 'expirationTime', 'type': 'iso-8601'},
'error': {'key': 'error', 'type': 'ErrorResponse'},
}
def __init__(
self,
**kwargs
):
super(ScriptStatus, self).__init__(**kwargs)
self.container_instance_id = None
self.storage_account_id = None
self.start_time = None
self.end_time = None
self.expiration_time = None
self.error = kwargs.get('error', None)
class StorageAccountConfiguration(msrest.serialization.Model):
"""Settings to use an existing storage account. Valid storage account kinds are: Storage, StorageV2 and FileStorage.
:param storage_account_name: The storage account name.
:type storage_account_name: str
:param storage_account_key: The storage account access key.
:type storage_account_key: str
"""
_attribute_map = {
'storage_account_name': {'key': 'storageAccountName', 'type': 'str'},
'storage_account_key': {'key': 'storageAccountKey', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageAccountConfiguration, self).__init__(**kwargs)
self.storage_account_name = kwargs.get('storage_account_name', None)
self.storage_account_key = kwargs.get('storage_account_key', None)
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_01_preview.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = kwargs.get('created_by', None)
self.created_by_type = kwargs.get('created_by_type', None)
self.created_at = kwargs.get('created_at', None)
self.last_modified_by = kwargs.get('last_modified_by', None)
self.last_modified_by_type = kwargs.get('last_modified_by_type', None)
self.last_modified_at = kwargs.get('last_modified_at', None)
class UserAssignedIdentity(msrest.serialization.Model):
"""User-assigned managed identity.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: Azure Active Directory principal ID associated with this identity.
:vartype principal_id: str
:ivar client_id: Client App Id associated with this identity.
:vartype client_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'client_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UserAssignedIdentity, self).__init__(**kwargs)
self.principal_id = None
self.client_id = None
|
"""Invokes the Java semgrex on a document
The server client has a method "semgrex" which sends text to Java
CoreNLP for processing with a semgrex (SEMantic GRaph regEX) query:
https://nlp.stanford.edu/nlp/javadoc/javanlp/edu/stanford/nlp/semgraph/semgrex/SemgrexPattern.html
However, this operates on text using the CoreNLP tools, which means
the dependency graphs may not align with stanza's depparse module, and
this also limits the languages for which it can be used. This module
allows for running semgrex commands on the graphs produced by
depparse.
To use, first process text into a doc using stanza.Pipeline
Next, pass the processed doc and a list of semgrex patterns to
process_doc in this module. It will run the java semgrex module as a
subprocess and return the result in the form of a SemgrexResponse,
whose description is in the proto file included with stanza.
A minimal example is the main method of this module.
Note that launching the subprocess is potentially quite expensive
relative to the search if used many times on small documents. Ideally
larger texts would be processed, and all of the desired semgrex
patterns would be run at once. The worst thing to do would be to call
this multiple times on a large document, one invocation per semgrex
pattern, as that would serialize the document each time. There are of
course multiple ways of making this more efficient, such as including
it as a separate call in the server or keeping the subprocess alive
for multiple queries, but we didn't do any of those. We do, however,
accept pull requests...
"""
import stanza
from stanza.protobuf import SemgrexRequest, SemgrexResponse
from stanza.server.java_protobuf_requests import send_request, add_token, add_word_to_graph, JavaProtobufContext
SEMGREX_JAVA = "edu.stanford.nlp.semgraph.semgrex.ProcessSemgrexRequest"
def send_semgrex_request(request):
return send_request(request, SemgrexResponse, SEMGREX_JAVA)
def build_request(doc, semgrex_patterns):
request = SemgrexRequest()
for semgrex in semgrex_patterns:
request.semgrex.append(semgrex)
for sent_idx, sentence in enumerate(doc.sentences):
query = request.query.add()
word_idx = 0
for token in sentence.tokens:
for word in token.words:
add_token(query.token, word, token)
add_word_to_graph(query.graph, word, sent_idx, word_idx)
word_idx = word_idx + 1
return request
def process_doc(doc, *semgrex_patterns):
"""
Returns the result of processing the given semgrex expression on the stanza doc.
Currently the return is a SemgrexResponse from CoreNLP.proto
"""
request = build_request(doc, semgrex_patterns)
return send_semgrex_request(request)
class Semgrex(JavaProtobufContext):
"""
Semgrex context window
This is a context window which keeps a process open. Should allow
for multiple requests without launching new java processes each time.
"""
def __init__(self, classpath=None):
super(Semgrex, self).__init__(classpath, SemgrexResponse, SEMGREX_JAVA)
def process(self, doc, *semgrex_patterns):
request = build_request(doc, semgrex_patterns)
return self.process_request(request)
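# A minimal reuse sketch, assuming JavaProtobufContext provides the usual context
# manager protocol so the Java subprocess stays alive across calls (the patterns
# below are just example queries):
#
#   with Semgrex() as semgrex:
#       first = semgrex.process(doc, "{}=source >obj=zzz {}=target")
#       second = semgrex.process(doc, "{}=source >nsubj=subj {}=target")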
def main():
nlp = stanza.Pipeline('en',
processors='tokenize,pos,lemma,depparse')
doc = nlp('Uro ruined modern. Fortunately, Wotc banned him.')
#print(doc.sentences[0].dependencies)
print(doc)
print(process_doc(doc, "{}=source >obj=zzz {}=target"))
if __name__ == '__main__':
main()
|
from guardian.shortcuts import get_objects_for_user
from .serializers import MungerSerializer, DataFieldSerializer, PivotFieldSerializer, FieldTypeSerializer
from rest_framework.response import Response
from rest_framework import status, filters, mixins, generics, permissions
class MungerPermissions(permissions.DjangoObjectPermissions):
perms_map = {
'GET': ['%(app_label)s.change_%(model_name)s'],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
'OPTIONS': ['%(app_label)s.change_%(model_name)s'],
'HEAD': ['%(app_label)s.change_%(model_name)s'],
}
class MungerBuilderAPIView(MungerPermissions,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
mixins.ListModelMixin,
generics.GenericAPIView):
def get_queryset(self):
return self.serializer_class.Meta.model.objects.all()
def get(self, request, *args, **kwargs):
if 'pk' in kwargs:
return self.retrieve(request, *args, **kwargs)
else:
return self.list(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
user = request.user
if self.under_limit(user):
return self.create(request, *args, **kwargs)
else:
error_string = 'Cannot Create more {} - Delete some to make space'.format(
self.__class__.__name__
)
return Response(error_string, status=status.HTTP_403_FORBIDDEN)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
def under_limit(self, user):
meta_name = self.serializer_class.Meta.model._meta.model_name
permission_name = 'script_builder.change_{}'.format(meta_name)
current_objects = get_objects_for_user(user, permission_name)
return len(current_objects) < self.USER_OBJECT_LIMIT or user.is_superuser
class Mungers(MungerBuilderAPIView):
USER_OBJECT_LIMIT = 5
serializer_class = MungerSerializer
filter_backends = (filters.DjangoObjectPermissionsFilter,)
class DataFields(MungerBuilderAPIView):
USER_OBJECT_LIMIT = 100
serializer_class = DataFieldSerializer
class PivotFields(MungerBuilderAPIView):
USER_OBJECT_LIMIT = 100
serializer_class = PivotFieldSerializer
class FieldTypes(MungerBuilderAPIView):
USER_OBJECT_LIMIT = 10
serializer_class = FieldTypeSerializer
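# Hedged wiring sketch: these views are plain GenericAPIView subclasses, so a
# hypothetical urls.py could route them with .as_view(); the paths below are
# assumptions for illustration only.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('mungers/', views.Mungers.as_view()),
#       path('mungers/<int:pk>/', views.Mungers.as_view()),
#   ]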
|
import cv2
from pyzbar import pyzbar
def read_barcodes(frame):
barcodes = pyzbar.decode(frame)
for barcode in barcodes:
x, y, w, h = barcode.rect
barcode_text = barcode.data.decode('utf-8')
print(barcode_text)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
return frame
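# The same helper also works on a single image loaded from disk instead of a camera
# frame (the file name below is a placeholder):
#
#   frame = read_barcodes(cv2.imread('barcode.png'))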
def main():
camera = cv2.VideoCapture(0)
ret, frame = camera.read()
while ret:
ret, frame = camera.read()
frame = read_barcodes(frame)
cv2.imshow('Barcode reader', frame)
if cv2.waitKey(1) & 0xFF == 27:
break
camera.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
# This example shows how to automatically move and measure a set of points using a laser tracker.
from robolink import * # API to communicate with RoboDK for simulation and offline/online programming
from robodk import * # Robotics toolbox for industrial robots
# Any interaction with RoboDK must be done through RDK:
RDK = Robolink()
# Enter the points (joint coordinates) to measure manually
JOINTS_LIST = []
JOINTS_LIST.append([60.851545, 60.851555, 60.851598, 60.851598, 60.851555, 60.851545])
JOINTS_LIST.append([65.851545, 65.851555, 65.851598, 65.851598, 65.851555, 65.851545])
JOINTS_LIST.append([55.851545, 55.851555, 55.851598, 55.851598, 55.851555, 55.851545])
# Alternatively: load the list from a CSV file
#JOINTS_LIST = LoadMat("Path-to-file.csv")
# Select a robot (popup is displayed if more than one robot is available)
robot = RDK.ItemUserPick('Select a robot', ITEM_TYPE_ROBOT)
if not robot.Valid():
raise Exception('No robot selected or available')
# Ask whether we want to run on the real robot or just simulate the movements
RUN_ON_ROBOT = mbox("Run on real robot?")
# Important: by default, the run mode is RUNMODE_SIMULATE
# If the program is generated offline manually the runmode will be RUNMODE_MAKE_ROBOTPROG,
# Therefore, we should not run the program on the robot
if RDK.RunMode() != RUNMODE_SIMULATE:
RUN_ON_ROBOT = False
# Connect to the robot if we are moving the real robot:
if RUN_ON_ROBOT:
# Update connection parameters if required:
# robot.setConnectionParams('192.168.2.35',30000,'/', 'anonymous','')
# Connect to the robot using default IP
success = robot.Connect() # Try to connect once
#success = robot.ConnectSafe() # Try to connect multiple times
status, status_msg = robot.ConnectedState()
if status != ROBOTCOM_READY:
# Stop if the connection did not succeed
print(status_msg)
raise Exception("Failed to connect: " + status_msg)
# This will set to run the API programs on the robot and the simulator (online programming)
RDK.setRunMode(RUNMODE_RUN_ROBOT)
# Note: This is set automatically when we Connect() to the robot through the API
#else:
# This will run the API program on the simulator (offline programming)
# RDK.setRunMode(RUNMODE_SIMULATE)
# Note: This is the default setting if we do not execute robot.Connect()
# We should not set the RUNMODE_SIMULATE if we want to be able to generate the robot program offline
# Get the current joint position of the robot
# (updates the position on the robot simulator)
joints_ref = robot.Joints()
# get the current position of the TCP with respect to the reference frame:
# (4x4 matrix representing position and orientation)
#target_ref = robot.Pose()
#pos_ref = target_ref.Pos()
#print(Pose_2_TxyzRxyz(target_ref))
# move the robot to the first point:
#robot.MoveJ(target_ref)
# It is important to provide the reference frame and the tool frames when generating programs offline
# It is important to update the TCP on the robot mostly when using the driver
#robot.setPoseFrame(robot.PoseFrame())
#robot.setPoseTool(robot.PoseTool())
#robot.setZoneData(-1) # Set the rounding parameter (Also known as: CNT, APO/C_DIS, ZoneData, Blending radius, cornering, ...)
robot.setSpeed(100) # Set linear speed in mm/s
# Run the measurements
njoints = len(JOINTS_LIST)
# Store the measurements in an array
DATA = []
for i in range(njoints):
joints = JOINTS_LIST[i]
print("Moving robot to %i = %s" % (i,str(joints)))
robot.MoveJ(joints)
print("Done")
if not RUN_ON_ROBOT:
continue
#measure = mbox("Robot at position %i=%s. Select OK to take measurement or Cancel to continue." % (i, str(joints)))
measure = True
pause(1)
while measure:
print("Measuring target")
xyz = RDK.LaserTracker_Measure()
if xyz is None:
if mbox("Measurment not visible. Try again?"):
continue
else:
measure = False
break
x,y,z = xyz
# Store and display the data as [index, time, x, y, z]
data = [i, toc(), x, y, z]
DATA.append(data)
print('%i, %.3f, %.6f, %.6f, %.6f' % tuple(data))
measure = False
# Optionally, save the data to a file
#DATA_MAT = Mat(DATA).tr()
#DATA_MAT.SaveMat("Path-to-measurements-file.csv")
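# Alternative sketch using the standard library, assuming DATA holds the
# [index, time, x, y, z] rows appended in the loop above (the file name is a placeholder):
#import csv
#with open('measurements.csv', 'w', newline='') as f:
#    csv.writer(f).writerows(DATA)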
print('Done')
|
'''This module implements concrete agent controllers for the rollout worker'''
import copy
import time
from collections import OrderedDict
import math
import numpy as np
import rospy
import logging
import json
from threading import RLock
from gazebo_msgs.msg import ModelState
from std_msgs.msg import Float64, String
from shapely.geometry import Point
import markov.agent_ctrl.constants as const
from markov.agent_ctrl.agent_ctrl_interface import AgentCtrlInterface
from markov.agent_ctrl.utils import (set_reward_and_metrics,
send_action, load_action_space, get_speed_factor,
get_normalized_progress, Logger)
from markov.track_geom.constants import (AgentPos, TrackNearDist, ObstacleDimensions, ParkLocation)
from markov.track_geom.track_data import FiniteDifference, TrackData
from markov.track_geom.utils import euler_to_quaternion, pose_distance, apply_orientation
from markov.metrics.constants import StepMetrics, EpisodeStatus
from markov.cameras.camera_manager import CameraManager
from markov.common import ObserverInterface
from markov.log_handler.deepracer_exceptions import RewardFunctionError, GenericRolloutException
from markov.reset.constants import AgentPhase, AgentCtrlStatus, AgentInfo, RaceCtrlStatus, ZERO_SPEED_AGENT_PHASES
from markov.reset.utils import construct_reset_rules_manager
from markov.utils import get_racecar_idx
from markov.virtual_event.constants import (WebRTCCarControl, CarControlMode,
CarControlStatus, MAX_SPEED, MIN_SPEED,
CarControlTopic, WEBRTC_CAR_CTRL_FORMAT)
from markov.visual_effects.effects.blink_effect import BlinkEffect
from markov.visualizations.reward_distributions import RewardDataPublisher
from markov.constants import DEFAULT_PARK_POSITION
from markov.gazebo_tracker.trackers.get_link_state_tracker import GetLinkStateTracker
from markov.gazebo_tracker.trackers.get_model_state_tracker import GetModelStateTracker
from markov.gazebo_tracker.trackers.set_model_state_tracker import SetModelStateTracker
from markov.gazebo_tracker.abs_tracker import AbstractTracker
from markov.gazebo_tracker.constants import TrackerPriority
from markov.boto.s3.constants import ModelMetadataKeys
from markov.virtual_event.constants import PAUSE_TIME_BEFORE_START
from rl_coach.core_types import RunPhase
LOG = Logger(__name__, logging.INFO).get_logger()
class RolloutCtrl(AgentCtrlInterface, ObserverInterface, AbstractTracker):
'''Concrete class for an agent that drives forward'''
def __init__(self, config_dict, run_phase_sink, metrics):
'''config_dict (dict): containing all the keys in ConfigParams
run_phase_sink (RunPhaseSubject): Sink to receive notification of a change in run phase
metrics (EvalMetrics/TrainingMetrics): Training or evaluation metrics
'''
self._current_sim_time = 0
self._ctrl_status = dict()
self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] = AgentPhase.PREPARE.value
self._pause_duration = 0.0
# virtual event
self._is_virtual_event = config_dict.get(const.ConfigParams.IS_VIRTUAL_EVENT.value, False)
self._speed_mode = CarControlMode.MODEL_SPEED.value
self._speed_value = 0.0
self._race_car_ctrl_status = CarControlStatus.RESUME.value
self._start_sim_time = None
if self._is_virtual_event:
# Subscriber to update car speed if it's virtual event
rospy.Subscriber(WEBRTC_CAR_CTRL_FORMAT.format(CarControlTopic.SPEED_CTRL.value),
String,
self._get_speed_mode_value)
# Subscriber to update car status if it's virtual event
rospy.Subscriber(WEBRTC_CAR_CTRL_FORMAT.format(CarControlTopic.STATUS_CTRL.value),
String,
self._update_car_status)
# thread lock
self._lock = RLock()
# reset rules manager
self._metrics = metrics
self._is_continuous = config_dict[const.ConfigParams.IS_CONTINUOUS.value]
self._reset_rules_manager = construct_reset_rules_manager(config_dict)
self._config_dict = config_dict
self._done_condition = config_dict.get(const.ConfigParams.DONE_CONDITION.value, any)
self._number_of_resets = config_dict[const.ConfigParams.NUMBER_OF_RESETS.value]
self._penalties = {EpisodeStatus.OFF_TRACK.value: config_dict.get(const.ConfigParams.OFF_TRACK_PENALTY.value, 0.0),
EpisodeStatus.CRASHED.value: config_dict.get(const.ConfigParams.COLLISION_PENALTY.value, 0.0),
EpisodeStatus.REVERSED.value: config_dict.get(const.ConfigParams.REVERSE_PENALTY.value,
config_dict.get(const.ConfigParams.OFF_TRACK_PENALTY.value, 0.0)),
EpisodeStatus.IMMOBILIZED.value: config_dict.get(const.ConfigParams.IMMOBILIZED_PENALTY.value, 0.0)}
self._reset_count = 0
self._curr_crashed_object_name = ''
self._simapp_version_ = config_dict[const.ConfigParams.VERSION.value]
# simapp_version speed scale
self._speed_scale_factor_ = get_speed_factor(config_dict[const.ConfigParams.VERSION.value])
# Store the name of the agent used to set agents position on the track
self._agent_name_ = config_dict[const.ConfigParams.AGENT_NAME.value]
# In virtual event, start index is always None. The reason is that if start index is not None, start lane will
# use its index to figure out whether racecar should be placed in inner or outer lane. If start index is None for
# virtual event, start lane will be center line.
# In other cases, we will parse the agent name in the format racecar_x, where x can be cast to an integer
self._agent_idx_ = None if self._is_virtual_event else get_racecar_idx(self._agent_name_)
# Get track data
self._track_data_ = TrackData.get_instance()
# Set start lane
if self._agent_idx_ is not None:
self._start_lane_ = self._track_data_.inner_lane \
if self._agent_idx_ % 2 else self._track_data_.outer_lane
else:
self._start_lane_ = self._track_data_.center_line
# Store the name of the links in the agent, this should be const
self._agent_link_name_list_ = config_dict[const.ConfigParams.LINK_NAME_LIST.value]
# Store the reward function
self._reward_ = config_dict[const.ConfigParams.REWARD.value]
# Create publishers for controlling the car
self._velocity_pub_dict_ = OrderedDict()
self._steering_pub_dict_ = OrderedDict()
for topic in config_dict[const.ConfigParams.VELOCITY_LIST.value]:
self._velocity_pub_dict_[topic] = rospy.Publisher(topic, Float64, queue_size=1)
for topic in config_dict[const.ConfigParams.STEERING_LIST.value]:
self._steering_pub_dict_[topic] = rospy.Publisher(topic, Float64, queue_size=1)
#Create default reward parameters
self._reward_params_ = const.RewardParam.make_default_param()
#Create the default metrics dictionary
self._step_metrics_ = StepMetrics.make_default_metric()
# Dictionary of bools indicating starting position behavior
self._start_pos_behavior_ = \
{'change_start': config_dict[const.ConfigParams.CHANGE_START.value],
'alternate_dir': config_dict[const.ConfigParams.ALT_DIR.value]}
# Dictionary to track the previous way points
self._prev_waypoints_ = {'prev_point' : Point(0, 0), 'prev_point_2' : Point(0, 0)}
# Normalized distance of new start line from the original start line of the track.
start_ndist = 0.0
# Normalized start position offset w.r.t to start_ndist, which is the start line of the track.
start_pos_offset = config_dict.get(const.ConfigParams.START_POSITION.value, 0.0)
self._start_line_ndist_offset = start_pos_offset / self._track_data_.get_track_length()
# Dictionary containing some of the data for the agent
# - During the reset call, every value except start_ndist will get wiped out by self._clear_data
# (reset happens before every episode begins)
# - If self._start_line_ndist_offset is not 0 (usually some negative value),
# then the initial current_progress is supposed to be non-zero (usually some negative value),
# as progress is supposed to be based on start_ndist.
# - This will be correctly calculated by the first call of the utils.compute_current_prog function.
# As prev_progress is initially 0.0 and the physical position is not at start_ndist,
# utils.compute_current_prog will return negative progress if self._start_line_ndist_offset is a negative value
# (meaning behind the start line) and positive progress if self._start_line_ndist_offset is a
# positive value (meaning ahead of the start line).
self._data_dict_ = {'max_progress': 0.0,
'current_progress': 0.0,
'prev_progress': 0.0,
'steps': 0.0,
'start_ndist': start_ndist,
'prev_car_pose': 0.0}
# Load the action space
self._model_metadata_ = config_dict[const.ConfigParams.MODEL_METADATA.value]
self._action_space_ = load_action_space(self._model_metadata_)
#! TODO evaluate if this is the best way to reset the car
# subscriber to time to update camera position
self.camera_manager = CameraManager.get_instance()
# True if the agent is in the training phase
self._is_training_ = False
# Register to the phase sink
run_phase_sink.register(self)
# Make sure velocity and angle are set to 0
send_action(self._velocity_pub_dict_, self._steering_pub_dict_, 0.0, 0.0)
# start_dist should be hypothetical start line (start_ndist) plus
# start position offset (start_line_ndist_offset).
start_pose = self._start_lane_.interpolate_pose(
(self._data_dict_['start_ndist'] + self._start_line_ndist_offset) * self._track_data_.get_track_length(),
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
self._track_data_.initialize_object(self._agent_name_, start_pose, \
ObstacleDimensions.BOT_CAR_DIMENSION)
self.make_link_points = lambda link_state: Point(link_state.pose.position.x,
link_state.pose.position.y)
self.reference_frames = ['' for _ in self._agent_link_name_list_]
# pause pose for car at pause state
self._pause_car_model_pose = self._track_data_.get_object_pose(self._agent_name_)
# prepare pose for car at prepare state
self._prepare_car_model_pose = self._track_data_.get_object_pose(self._agent_name_)
self._park_position = DEFAULT_PARK_POSITION
AbstractTracker.__init__(self, TrackerPriority.HIGH)
def update_tracker(self, delta_time, sim_time):
"""
Callback when sim time is updated
Args:
delta_time (float): time diff from last call
sim_time (Clock): simulation time
"""
if self._pause_duration > 0.0:
self._pause_duration -= delta_time
self._current_sim_time = sim_time.clock.secs + 1.e-9 * sim_time.clock.nsecs
@property
def action_space(self):
return self._action_space_
@property
def model_metadata(self):
return self._model_metadata_
@property
def simapp_version(self):
return self._simapp_version_
def reset_agent(self):
'''reset agent by resetting member variables, reset s3 metrics, and reset agent to
starting position at the beginning of each episode
'''
LOG.info("Reset agent")
self._clear_data()
self._metrics.reset()
send_action(self._velocity_pub_dict_, self._steering_pub_dict_, 0.0, 0.0)
start_model_state = self._get_car_start_model_state()
# set_model_state and get_model_state actually occur asynchronously
# in the tracker with the simulation clock subscription. So, when the agent
# enters the next step function call, either set_model_state
# or get_model_state may not have actually happened yet and the agent position may be outdated.
# To avoid such a case, use blocking to actually update the model position in gazebo
# and GetModelStateTracker to reflect the latest agent position right away at start.
SetModelStateTracker.get_instance().set_model_state(start_model_state, blocking=True)
GetModelStateTracker.get_instance().get_model_state(self._agent_name_, '', blocking=True)
# reset view cameras
self.camera_manager.reset(car_pose=start_model_state.pose,
namespace=self._agent_name_)
self._track_data_.update_object_pose(self._agent_name_, start_model_state.pose)
# update pause car model pose to the new start model state pose
self._prepare_car_model_pose = start_model_state.pose
LOG.info("Reset agent finished")
def _pause_car_model(self, car_model_pose, should_reset_camera=False, blocking=False):
"""Pause agent immediately at the current position for both pause and prepare state
Args:
car_model_pose (Pose): Pose instance
should_reset_camera (bool): True if reset camera. False, otherwise
"""
car_model_state = ModelState()
car_model_state.model_name = self._agent_name_
car_model_state.pose = car_model_pose
car_model_state.twist.linear.x = 0
car_model_state.twist.linear.y = 0
car_model_state.twist.linear.z = 0
car_model_state.twist.angular.x = 0
car_model_state.twist.angular.y = 0
car_model_state.twist.angular.z = 0
SetModelStateTracker.get_instance().set_model_state(car_model_state, blocking)
if blocking:
# Let GetModelStateTracker retrieve the agent's latest model state instantly after the synchronous set,
# so next call of get_model_state without blocking will return the latest model_state.
GetModelStateTracker.get_instance().get_model_state(self._agent_name_, '', blocking=True)
if should_reset_camera:
self.camera_manager.reset(car_pose=car_model_state.pose,
namespace=self._agent_name_)
def _park_car_model(self):
'''Park agent after the racer completes the F1 race
'''
car_model_state = ModelState()
car_model_state.model_name = self._agent_name_
park_location = self._track_data_.park_location
if park_location == ParkLocation.LEFT:
yaw = 3.0 * math.pi / 2.0 if self._track_data_.is_ccw else math.pi / 2.0
elif park_location == ParkLocation.RIGHT:
yaw = math.pi / 2.0 if self._track_data_.is_ccw else 3.0 * math.pi / 2.0
elif park_location == ParkLocation.TOP:
yaw = math.pi if self._track_data_.is_ccw else 0.0
else: # park_location == ParkLocation.BOTTOM:
yaw = 0.0 if self._track_data_.is_ccw else math.pi
orientation = euler_to_quaternion(yaw=yaw)
car_model_state.pose.position.x = self._park_position[0]
car_model_state.pose.position.y = self._park_position[1]
car_model_state.pose.position.z = 0.0
car_model_state.pose.orientation.x = orientation[0]
car_model_state.pose.orientation.y = orientation[1]
car_model_state.pose.orientation.z = orientation[2]
car_model_state.pose.orientation.w = orientation[3]
car_model_state.twist.linear.x = 0
car_model_state.twist.linear.y = 0
car_model_state.twist.linear.z = 0
car_model_state.twist.angular.x = 0
car_model_state.twist.angular.y = 0
car_model_state.twist.angular.z = 0
SetModelStateTracker.get_instance().set_model_state(car_model_state)
self.camera_manager.reset(car_pose=car_model_state.pose,
namespace=self._agent_name_)
def _get_closest_obj(self, start_dist, name_filter=None):
'''get the closest object dist and pose both ahead and behind
Args:
start_dist (float): start distance
name_filter (str): name to filter for the closest object check
Returns:
tuple (float, ModelStates.pose): tuple of closest object distance and closest
object pose
'''
closest_object_dist = None
closest_object_pose = None
closest_obj_gap = const.CLOSEST_OBJ_GAP
for object_name, object_pose in self._track_data_.object_poses.items():
if object_name != self._agent_name_:
if name_filter and name_filter not in object_name:
continue
object_point = Point([object_pose.position.x, object_pose.position.y])
object_dist = self._track_data_.center_line.project(object_point)
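# along-track gap between the object and start_dist, taken modulo the track
# length; only the object with the smallest gap within CLOSEST_OBJ_GAP is kept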
abs_object_gap = abs(object_dist - start_dist) % \
self._track_data_.get_track_length()
if abs_object_gap < closest_obj_gap:
closest_obj_gap = abs_object_gap
closest_object_dist = object_dist
closest_object_pose = object_pose
return closest_object_dist, closest_object_pose
def _get_reset_poses(self, dist):
"""
Return the center, inner, and outer reset poses based on the given dist
Args:
dist(float): interpolated track dist
Returns: tuple of center, inner, and outer reset poses
"""
# It is extremely important to get the interpolated pose of cur_dist
# using the center line first, and then use the center line pose to
# interpolate the inner and outer reset poses.
# If cur_dist is used directly with the inner lane and outer lane pose
# interpolation, then the resulting pose's difference from the actual reset pose (where it should be)
# is too large.
cur_center_pose = self._track_data_.center_line.interpolate_pose(
dist,
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
inner_reset_pose = self._track_data_.inner_lane.interpolate_pose(
self._track_data_.inner_lane.project(Point(cur_center_pose.position.x,
cur_center_pose.position.y)),
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
outer_reset_pose = self._track_data_.outer_lane.interpolate_pose(
self._track_data_.outer_lane.project(Point(cur_center_pose.position.x,
cur_center_pose.position.y)),
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
return cur_center_pose, inner_reset_pose, outer_reset_pose
def _is_obstacle_inner(self, obstacle_pose):
"""Return whether given object is in inner lane.
Args:
obstacle_pose (Pose): Obstacle pose object
Returns:
bool: True for inner. False otherwise
"""
obstacle_point = Point([obstacle_pose.position.x, obstacle_pose.position.y])
obstacle_nearest_pnts_dict = self._track_data_.get_nearest_points(obstacle_point)
obstacle_nearest_dist_dict = self._track_data_.get_nearest_dist(obstacle_nearest_pnts_dict,
obstacle_point)
return obstacle_nearest_dist_dict[TrackNearDist.NEAR_DIST_IN.value] < \
obstacle_nearest_dist_dict[TrackNearDist.NEAR_DIST_OUT.value]
def _get_car_reset_model_state(self, car_pose):
'''Get car reset model state when car goes offtrack or crash into a static obstacle
Args:
car_pose (Pose): current car pose
Returns:
ModelState: reset state
'''
cur_dist = self._data_dict_['current_progress'] * \
self._track_data_.get_track_length() / 100.0
closest_object_dist, closest_obstacle_pose = self._get_closest_obj(cur_dist, const.OBSTACLE_NAME_PREFIX)
if closest_obstacle_pose is not None:
# If a static obstacle is within the circumference of the reset position,
# put the car in the opposite lane and 1m back.
cur_dist = closest_object_dist - const.RESET_BEHIND_DIST
cur_center_pose, inner_reset_pose, outer_reset_pose = self._get_reset_poses(dist=cur_dist)
is_object_inner = self._is_obstacle_inner(obstacle_pose=closest_obstacle_pose)
new_pose = outer_reset_pose if is_object_inner else inner_reset_pose
else:
cur_center_pose, inner_reset_pose, outer_reset_pose = self._get_reset_poses(dist=cur_dist)
# If there is no obstacle interfering with the reset position, then
# put the car back on the lane closest to the off-track position.
inner_distance = pose_distance(inner_reset_pose, car_pose)
outer_distance = pose_distance(outer_reset_pose, car_pose)
new_pose = inner_reset_pose if inner_distance < outer_distance else outer_reset_pose
# check whether the reset pose is valid or not
new_pose = self._check_for_invalid_reset_pose(pose=new_pose, dist=cur_dist)
car_model_state = ModelState()
car_model_state.model_name = self._agent_name_
car_model_state.pose = new_pose
car_model_state.twist.linear.x = 0
car_model_state.twist.linear.y = 0
car_model_state.twist.linear.z = 0
car_model_state.twist.angular.x = 0
car_model_state.twist.angular.y = 0
car_model_state.twist.angular.z = 0
return car_model_state
def _get_car_start_model_state(self):
'''Get car start model state. For training, if the start position has an object,
reset to the opposite lane. We assume that during training, there are no objects
in both lanes at the same progress. For evaluation, always start at progress 0.
Returns:
ModelState: start state
'''
# start_dist should be the hypothetical start line (start_ndist) plus
# the start position offset (start_line_ndist_offset).
start_dist = (self._data_dict_['start_ndist'] + self._start_line_ndist_offset) * self._track_data_.get_track_length()
if self._is_training_:
_, closest_object_pose = self._get_closest_obj(start_dist)
# Compute the start pose based on start distance
start_pose = self._track_data_.center_line.interpolate_pose(
start_dist,
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
# If closest_object_pose is not None, for example a bot car is near the agent's
# start position, the logic below checks whether the inner or outer lane
# is available for placement and then updates start_pose accordingly.
if closest_object_pose is not None:
object_point = Point([closest_object_pose.position.x, closest_object_pose.position.y])
object_nearest_pnts_dict = self._track_data_.get_nearest_points(object_point)
object_nearest_dist_dict = self._track_data_.get_nearest_dist(object_nearest_pnts_dict,
object_point)
object_is_inner = object_nearest_dist_dict[TrackNearDist.NEAR_DIST_IN.value] < \
object_nearest_dist_dict[TrackNearDist.NEAR_DIST_OUT.value]
if object_is_inner:
start_pose = self._track_data_.outer_lane.interpolate_pose(
self._track_data_.outer_lane.project(Point(start_pose.position.x,
start_pose.position.y)),
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
else:
start_pose = self._track_data_.inner_lane.interpolate_pose(
self._track_data_.inner_lane.project(Point(start_pose.position.x,
start_pose.position.y)),
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
else:
start_pose = self._start_lane_.interpolate_pose(
start_dist,
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
# check whether the reset pose is valid or not
start_pose = self._check_for_invalid_reset_pose(pose=start_pose, dist=start_dist)
car_model_state = ModelState()
car_model_state.model_name = self._agent_name_
car_model_state.pose = start_pose
car_model_state.twist.linear.x = 0
car_model_state.twist.linear.y = 0
car_model_state.twist.linear.z = 0
car_model_state.twist.angular.x = 0
car_model_state.twist.angular.y = 0
car_model_state.twist.angular.z = 0
return car_model_state
def _check_for_invalid_reset_pose(self, pose, dist):
# if current reset position/orientation is inf/-inf or nan, reset to the starting position centerline
pose_list = [pose.position.x, pose.position.y, pose.position.z,
pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]
# note: 'nan in list' fails because nan != nan, so check each value explicitly
if any(math.isinf(val) or math.isnan(val) for val in pose_list):
LOG.info("invalid reset pose %s for distance %s", pose_list, dist)
pose, _, _ = self._get_reset_poses(dist=0.0)
# if this is a training job, update start_ndist to 0.0
if self._is_training_:
self._data_dict_['start_ndist'] = 0.0
return pose
def send_action(self, action):
'''Publish action topic to gazebo to render
Args:
action (int or list): model metadata action_space index for discrete action spaces
or [steering, speed] float values for continuous action spaces
Raises:
GenericRolloutException: Agent phase is not defined
'''
if self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] == AgentPhase.RUN.value:
json_action = self._model_metadata_.get_action_dict(action)
steering_angle = float(json_action[ModelMetadataKeys.STEERING_ANGLE.value]) * math.pi / 180.0
action_speed = self._update_speed(action)
send_action(self._velocity_pub_dict_, self._steering_pub_dict_,
steering_angle, action_speed)
elif self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] in ZERO_SPEED_AGENT_PHASES:
send_action(self._velocity_pub_dict_, self._steering_pub_dict_, 0.0, 0.0)
else:
raise GenericRolloutException('Agent phase {} is not defined'.\
format(self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value]))
def _get_agent_pos(self, car_pose, car_link_points, relative_pos):
'''Returns a dictionary with the keys defined in AgentPos which contains
the position of the agent on the track, the location of the desired
links, and the orientation of the agent.
car_pose - Gazebo Pose of the agent
car_link_points (Point[]) - List of car's links' Points.
relative_pos - List containing the x-y relative position of the front of
the agent
'''
try:
# Compute the model's orientation
model_orientation = np.array([car_pose.orientation.x,
car_pose.orientation.y,
car_pose.orientation.z,
car_pose.orientation.w])
# Compute the location of the front of the agent by rotating the relative offset into the world frame
model_location = np.array([car_pose.position.x,
car_pose.position.y,
car_pose.position.z]) + \
apply_orientation(model_orientation, np.array(relative_pos))
model_point = Point(model_location[0], model_location[1])
return {AgentPos.ORIENTATION.value: model_orientation,
AgentPos.POINT.value: model_point,
AgentPos.LINK_POINTS.value: car_link_points}
except Exception as ex:
raise GenericRolloutException("Unable to get position: {}".format(ex))
def update_agent(self, action):
'''Update agent reward and metrics after the action is taken
Args:
action (int): model metadata action_space index
Returns:
dict: dictionary of agent info with agent name as the key and info as the value
Raises:
GenericRolloutException: Cannot find position
'''
# get car state
# if off-track, a blocking call is used to get the immediate car reset pause position
# from the last step's judge_action call.
car_model_state = GetModelStateTracker.get_instance().get_model_state(self._agent_name_, '')
self._track_data_.update_object_pose(self._agent_name_, car_model_state.pose)
link_states = [GetLinkStateTracker.get_instance().get_link_state(link_name, reference_frame).link_state
for link_name, reference_frame in zip(self._agent_link_name_list_, self.reference_frames)]
link_points = [self.make_link_points(link_state) for link_state in link_states]
current_car_pose = car_model_state.pose
try:
# Get the position of the agent
pos_dict = self._get_agent_pos(current_car_pose,
link_points,
const.RELATIVE_POSITION_OF_FRONT_OF_CAR)
model_point = pos_dict[AgentPos.POINT.value]
self._data_dict_['steps'] += 1
except Exception as ex:
raise GenericRolloutException('Cannot find position: {}'.format(ex))
# Set the reward and training metrics
set_reward_and_metrics(self._reward_params_, self._step_metrics_,
self._agent_name_, pos_dict, self._track_data_,
self._data_dict_, action, self._model_metadata_.get_action_dict(action),
current_car_pose)
prev_pnt_dist = min(model_point.distance(self._prev_waypoints_['prev_point']),
model_point.distance(self._prev_waypoints_['prev_point_2']))
self._data_dict_['current_progress'] = self._reward_params_[const.RewardParam.PROG.value[0]]
self._data_dict_['max_progress'] = max(self._data_dict_['max_progress'],
self._data_dict_['current_progress'])
self._prev_waypoints_['prev_point_2'] = self._prev_waypoints_['prev_point']
self._prev_waypoints_['prev_point'] = model_point
self._ctrl_status[AgentCtrlStatus.POS_DICT.value] = pos_dict
self._ctrl_status[AgentCtrlStatus.STEPS.value] = self._data_dict_['steps']
self._ctrl_status[AgentCtrlStatus.CURRENT_PROGRESS.value] = self._data_dict_['current_progress']
self._ctrl_status[AgentCtrlStatus.PREV_PROGRESS.value] = self._data_dict_['prev_progress']
self._ctrl_status[AgentCtrlStatus.PREV_PNT_DIST.value] = prev_pnt_dist
self._ctrl_status[AgentCtrlStatus.START_NDIST.value] = self._data_dict_['start_ndist']
# Sending race control status for virtual event
if self._is_virtual_event:
if self._start_sim_time is None:
self._start_sim_time = self._current_sim_time + PAUSE_TIME_BEFORE_START
self._ctrl_status[RaceCtrlStatus.RACE_START_TIME.value] = self._start_sim_time
self._ctrl_status[RaceCtrlStatus.RACE_CURR_TIME.value] = self._current_sim_time
return {self._agent_name_: self._reset_rules_manager.update(self._ctrl_status)}
def _update_car_status(self, content):
"""Update car status based on the car control ros message.
Args:
content (std_msgs.msg.String): The ros message as a json in String format.
"""
LOG.info("[car control] Recevied status_ctrl data %s.", content.data)
msg_dict = json.loads(content.data)
new_status = msg_dict[WebRTCCarControl.STATUS_MODE.value]
with self._lock:
try:
self._race_car_ctrl_status = CarControlStatus(new_status).value
except ValueError as ex:
# If the car status is unknown, default to RESUME mode,
# which allows the car to run without manual interference.
# We don't expect an unknown control status in a normal situation,
# but since we receive this message directly from the customer's browser,
# there is a possibility that the customer may tamper with the message sent
# to SimApp. In such a case, faulting and restarting SimApp would
# cause a large delay to the Virtual Event. Thus, log the unknown mode
# for debugging purposes and continue the event.
self._race_car_ctrl_status = CarControlStatus.RESUME.value
LOG.error("Unknown car control status received %s", ex)
def _get_speed_mode_value(self, content):
"""Update car speed model and speed value based on the car control ros message.
Args:
content (std_msgs.msg.String): The ros message as a json in String format.
"""
LOG.info("[car control] Recevied speed control data %s.", content.data)
msg_dict = json.loads(content.data)
with self._lock:
self._speed_mode = msg_dict[WebRTCCarControl.SPEED_MODE.value]
self._speed_value = float(msg_dict[WebRTCCarControl.SPEED_VALUE.value])
def _update_speed(self, action, should_clamp_max=False):
"""Update the speed based on the speed mode and the speed value.
Args:
action (int or list): The action to look up from the json action dict.
should_clamp_max (bool, optional): If the speed need clamping.
Defaults to False.
Returns:
action_speed [float]: The next action speed.
"""
with self._lock:
# if the speed mode is not one of the enum values in CarControlMode,
# we take the speed specified by the trained model and send the action update to the car.
json_action = self._model_metadata_.get_action_dict(action)
new_speed = float(json_action[ModelMetadataKeys.SPEED.value])
# check which speed mode we are in.
if self._speed_mode == CarControlMode.ABSOLUTE.value:
new_speed = float(self._speed_value)
elif self._speed_mode == CarControlMode.MULTIPLIER.value:
new_speed *= float(self._speed_value)
elif self._speed_mode == CarControlMode.PERCENT_MAX.value:
new_speed = float(MAX_SPEED * self._speed_value)
elif self._speed_mode == CarControlMode.OFFSET.value:
new_speed += float(self._speed_value)
elif self._speed_mode != CarControlMode.MODEL_SPEED.value:
# If speed_mode is unknown, default to MODEL_SPEED
# mode, which uses the speed from the model directly.
# We don't expect an unknown speed mode in a normal situation,
# but since we receive this message directly from the customer's browser,
# there is a possibility that the customer may tamper with the message sent to
# SimApp. In such a case, faulting and restarting SimApp would
# cause a large delay to the Virtual Event. Thus, log the unknown mode for
# debugging purposes and continue the event by defaulting to MODEL_SPEED.
LOG.error("[car control] Unknown speed mode received %s", self._speed_mode)
# Clamp the minimum speed so that it's greater than zero
if new_speed < MIN_SPEED:
new_speed = MIN_SPEED
# TODO: maybe clamping the value will provide a better customer experience;
# need to see customer feedback through a user study.
# default to false for now.
if should_clamp_max:
# clamp on the new_speed to make sure it's within boundary
if new_speed > MAX_SPEED:
new_speed = MAX_SPEED
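# convert the target linear speed (m/s) to a wheel angular-speed command by
# dividing by the wheel radius, then apply this agent's speed scale factor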
return float(new_speed / const.WHEEL_RADIUS) * self._speed_scale_factor_
def judge_action(self, agents_info_map):
'''Judge the action that the agent just took
Args:
agents_info_map: Dictionary containing all agents' info with agent name as the key
and info as the value
Returns:
tuple (float, bool, dict): tuple of reward, done flag, and step metrics
Raises:
RewardFunctionError: Reward function exception
GenericRolloutException: reward is nan or inf
'''
# check agent status to update reward and done flag
reset_rules_status = self._reset_rules_manager.get_dones()
self._reward_params_[const.RewardParam.CRASHED.value[0]] = \
reset_rules_status[EpisodeStatus.CRASHED.value]
self._reward_params_[const.RewardParam.OFFTRACK.value[0]] = \
reset_rules_status[EpisodeStatus.OFF_TRACK.value]
episode_status, pause, done = self._check_for_episode_termination(reset_rules_status, agents_info_map)
if not pause and not done:
# If the episode termination check returns status as not paused and not done, and
# if reset_rules_status's CRASHED is true, then the crashed object must have smaller normalized progress
# compared to the rollout agent.
# - if reset_rules_status's CRASHED is false, then reward params' CRASHED should already be false.
# In such a case, from the rollout agent's perspective, it should be treated as if there were no crash.
# Therefore, set reward params' CRASHED to false if not paused and not done.
self._reward_params_[const.RewardParam.CRASHED.value[0]] = False
if self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] == AgentPhase.RUN.value:
reward = self._judge_action_at_run_phase(episode_status=episode_status, pause=pause)
# for passing control from the virtual event console
self._check_for_ctrl_status_pause(is_car_in_pause_state=pause)
elif self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] == AgentPhase.PAUSE.value:
reward, episode_status = self._judge_action_at_pause_phase(episode_status=episode_status, done=done)
elif self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] == AgentPhase.MANUAL_PAUSE.value:
# for passing control from the virtual event console
reward, episode_status = self._judge_action_at_manual_pause_phase(done=done)
elif self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] == AgentPhase.PREPARE.value:
reward, episode_status = self._judge_action_at_prepare_phase()
elif self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] == AgentPhase.PARK.value:
self._park_car_model()
episode_status, pause, done = EpisodeStatus.PARK.value, False, True
reward = const.ZERO_REWARD
else:
raise GenericRolloutException('Agent phase {} is not defined'.\
format(self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value]))
# update and upload metrics
self._step_metrics_[StepMetrics.REWARD.value] = reward
self._step_metrics_[StepMetrics.DONE.value] = done
self._step_metrics_[StepMetrics.TIME.value] = self._current_sim_time
self._step_metrics_[StepMetrics.EPISODE_STATUS.value] = episode_status
self._step_metrics_[StepMetrics.PAUSE_DURATION.value] = self._pause_duration
self._data_dict_['prev_progress'] = 0.0 if self._step_metrics_[StepMetrics.PROG.value] == 100 \
else self._step_metrics_[StepMetrics.PROG.value]
if self._data_dict_['current_progress'] == 100:
self._data_dict_['max_progress'] = 0.0
self._data_dict_['current_progress'] = 0.0
self._metrics.upload_step_metrics(self._step_metrics_)
if self._is_continuous and self._reward_params_[const.RewardParam.PROG.value[0]] == 100:
self._metrics.append_episode_metrics()
self._metrics.reset()
self._reset_rules_manager.reset()
if episode_status == EpisodeStatus.TIME_UP.value:
self._metrics.append_episode_metrics(is_complete=False)
self._metrics.update_mp4_video_metrics(self._step_metrics_)
return reward, done, self._step_metrics_
def _check_for_ctrl_status_pause(self, is_car_in_pause_state):
"""Check if we need to change to pause status because a maunal pause ctrl is sent.
Args:
is_car_in_pause_state (bool): Whether or not the car is already in pause state.
"""
if is_car_in_pause_state:
# the off-track or crash pause behavior takes precedence over manual pause
return
# Check for race car control status
with self._lock:
pause = self._race_car_ctrl_status == CarControlStatus.PAUSE.value
if pause:
LOG.info("[car control] Pausing because virtual event status: %s", self._race_car_ctrl_status)
current_car_pose = self._track_data_.get_object_pose(self._agent_name_)
self._pause_car_model_pose = current_car_pose
self._pause_car_model(car_model_pose=self._pause_car_model_pose,
should_reset_camera=False)
self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] = AgentPhase.MANUAL_PAUSE.value
def _judge_action_at_manual_pause_phase(self, done):
"""If the current car control status received from customer has changed,
and it's set to not pause. We allow the car to run.
Args:
done (bool): If the episode is done.
Returns:
reward (float): The reward at manual pause phase.
episode_status (str): The current episode status.
"""
self._pause_car_model(car_model_pose=self._pause_car_model_pose)
with self._lock:
if self._race_car_ctrl_status == CarControlStatus.RESUME.value:
LOG.info("[car control] Unpausing because virtual event status: %s", self._race_car_ctrl_status)
self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] = AgentPhase.RUN.value
reward = const.ZERO_REWARD
if not done:
# When the car is being paused on the track, there are two possible conditions of done:
# 1. done: True, this means that the car that was being reset has "slipped" to more
# than 100% progress; we want to keep the EpisodeStatus that has been determined
# by _check_for_episode_termination.
# 2. done: False, _check_for_episode_termination will set the EpisodeStatus to
# IN_PROGRESS; we want to overwrite it to EpisodeStatus.PAUSE so that the sim_trace
# does not confuse customers.
episode_status = EpisodeStatus.PAUSE.value
return reward, episode_status
def _judge_action_at_run_phase(self, episode_status, pause):
self._pause_duration = 0.0
current_car_pose = self._track_data_.get_object_pose(self._agent_name_)
try:
reward = float(self._reward_(copy.deepcopy(self._reward_params_)))
except Exception as ex:
raise RewardFunctionError('Reward function exception {}'.format(ex))
if math.isnan(reward) or math.isinf(reward):
raise RewardFunctionError('{} returned as reward'.format(reward))
# transition to AgentPhase.PARK.value when the episode is complete and the done condition is all
if episode_status == EpisodeStatus.EPISODE_COMPLETE.value and \
self._done_condition == all:
self._park_position = self._track_data_.pop_park_position()
self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] = AgentPhase.PARK.value
self._park_car_model()
# transition to AgentPhase.PAUSE.value
if pause:
should_reset_camera = False
pause_car_model_pose = current_car_pose
penalty = self._penalties[episode_status]
# add pause time based on different paused status
if episode_status == EpisodeStatus.CRASHED.value:
self._pause_duration += penalty
# add blink effect and remove current agent from collision list
if penalty > 0.0:
effect = BlinkEffect(model_name=self._agent_name_,
min_alpha=const.BLINK_MIN_ALPHA,
interval=const.BLINK_INTERVAL,
duration=penalty)
effect.attach()
# If the agent crashes into a static obstacle, reset first and then pause. This prevents
# the agent and obstacle from wiggling around, because a bit mask is not used between the agent
# and the static obstacle.
if 'obstacle' in self._curr_crashed_object_name:
pause_car_model_pose = self._get_car_reset_model_state(
car_pose=current_car_pose).pose
should_reset_camera = True
elif episode_status in \
[EpisodeStatus.OFF_TRACK.value,
EpisodeStatus.REVERSED.value,
EpisodeStatus.IMMOBILIZED.value]:
self._pause_duration += penalty
# add blink effect and remove current agent from collision list
if penalty > 0.0:
effect = BlinkEffect(model_name=self._agent_name_,
min_alpha=const.BLINK_MIN_ALPHA,
interval=const.BLINK_INTERVAL,
duration=penalty)
effect.attach()
# when the agent is off track, the current car pose might be closer
# to another part of the track. Therefore, instead of using the
# current car pose to calculate the reset position, the previous
# car pose is used.
pause_car_model_pose = self._get_car_reset_model_state(
car_pose=self._data_dict_['prev_car_pose']).pose
should_reset_camera = True
self._pause_car_model_pose = pause_car_model_pose
# pause the car model through a blocking call to make sure the agent pose
# is updated in sync. A non-blocking call can cause problems during off-track resets,
# especially when there is a small gap between two parts of the track.
self._pause_car_model(car_model_pose=self._pause_car_model_pose,
should_reset_camera=should_reset_camera,
blocking=True)
self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] = AgentPhase.PAUSE.value
self._data_dict_['prev_car_pose'] = current_car_pose
return reward
def _judge_action_at_pause_phase(self, episode_status, done):
reward = const.ZERO_REWARD
self._pause_car_model(car_model_pose=self._pause_car_model_pose)
# transition to AgentPhase.RUN.value
if self._pause_duration <= 0.0:
# if reset during pause, do not reset again after the penalty seconds are over
self._reset_rules_manager.reset()
self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] = AgentPhase.RUN.value
if not done:
# When the car is being paused/reset on the track, there are two possible conditions of done:
# 1. done: True, this means that the car that was being reset has "slipped" to more
# than 100% progress; we want to keep the EpisodeStatus that has been determined
# by _check_for_episode_termination.
# 2. done: False, _check_for_episode_termination will set the EpisodeStatus to
# IN_PROGRESS; we want to overwrite it to EpisodeStatus.PAUSE so that the sim_trace
# does not confuse customers.
episode_status = EpisodeStatus.PAUSE.value
return reward, episode_status
def _judge_action_at_prepare_phase(self):
reward = const.ZERO_REWARD
self._pause_car_model(car_model_pose=self._prepare_car_model_pose)
# transition to AgentPhase.RUN.value
if self._pause_duration <= 0.0:
# if reset during pause, do not reset again after the penalty seconds are over
self._reset_rules_manager.reset()
self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] = AgentPhase.RUN.value
episode_status = EpisodeStatus.PREPARE.value
return reward, episode_status
def _check_for_episode_termination(self, reset_rules_status, agents_info_map):
'''Check whether an episode should be terminated
Args:
reset_rules_status: dictionary of reset rules status with key as reset rule names and value as
reset rule bool status
agents_info_map: dictionary of agents info map with key as agent name and value as agent info
Returns:
tuple (string, bool, bool): episode status, pause flag, and done flag
'''
episode_status = EpisodeStatus.get_episode_status(reset_rules_status)
pause = False
done = False
if reset_rules_status.get(EpisodeStatus.TIME_UP.value, False):
LOG.info("Issuing done because time is greater than race duration.")
done = True
# Note: check EPISODE_COMPLETE as the first item because agent might crash
# at the finish line.
elif EpisodeStatus.EPISODE_COMPLETE.value in reset_rules_status and \
reset_rules_status[EpisodeStatus.EPISODE_COMPLETE.value]:
done = True
episode_status = EpisodeStatus.EPISODE_COMPLETE.value
elif EpisodeStatus.CRASHED.value in reset_rules_status and \
reset_rules_status[EpisodeStatus.CRASHED.value]:
# only check for crash when at RUN phase
if self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] == AgentPhase.RUN.value:
# check crash with all other objects besides static obstacle
self._curr_crashed_object_name = agents_info_map[self._agent_name_][AgentInfo.CRASHED_OBJECT_NAME.value]
if 'obstacle' not in self._curr_crashed_object_name:
current_progress = agents_info_map[self._agent_name_][AgentInfo.CURRENT_PROGRESS.value]
crashed_obj_info = agents_info_map[self._curr_crashed_object_name]
crashed_obj_progress = crashed_obj_info[AgentInfo.CURRENT_PROGRESS.value]
crashed_obj_start_ndist = crashed_obj_info[AgentInfo.START_NDIST.value]
crashed_object_progress = get_normalized_progress(crashed_obj_progress,
start_ndist=crashed_obj_start_ndist)
current_progress = get_normalized_progress(current_progress,
start_ndist=self._data_dict_['start_ndist'])
if current_progress < crashed_object_progress:
done, pause = self._check_for_phase_change()
else:
episode_status = EpisodeStatus.IN_PROGRESS.value
else:
done, pause = self._check_for_phase_change()
else:
pause = True
elif any(reset_rules_status.values()):
done, pause = self._check_for_phase_change()
return episode_status, pause, done
def _check_for_phase_change(self):
'''check whether to pause an agent
Returns:
tuple(bool, bool): done flag and pause flag
'''
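# default: end the episode without pausing; if unused resets remain, consume
# one, clear the reset rules, and pause in place instead of terminating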
done, pause = True, False
if self._reset_count < self._number_of_resets:
self._reset_count += 1
self._reset_rules_manager.reset()
done, pause = False, True
return done, pause
def finish_episode(self):
'''finish the episode by appending episode metrics, uploading metrics, and alternating direction
if needed
'''
if not self._is_continuous:
self._metrics.append_episode_metrics()
self._metrics.upload_episode_metrics()
if self._start_pos_behavior_['change_start'] and self._is_training_:
self._data_dict_['start_ndist'] = (self._data_dict_['start_ndist']
+ const.ROUND_ROBIN_ADVANCE_DIST) % 1.0
# For multi-agent case, alternating direction will NOT work!
# Reverse direction will be set multiple times
# However, we are not supporting multi-agent training for now
if self._start_pos_behavior_['alternate_dir'] and self._is_training_:
self._track_data_.reverse_dir = not self._track_data_.reverse_dir
def _clear_data(self):
'''clear data at the beginning of a new episode
'''
self._curr_crashed_object_name = ''
self._reset_count = 0
self._pause_duration = 0.0
self._reset_rules_manager.reset()
self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] = AgentPhase.PREPARE.value
if self._is_virtual_event:
self._pause_duration = PAUSE_TIME_BEFORE_START
for key in self._prev_waypoints_:
self._prev_waypoints_[key] = Point(0, 0)
for key in self._data_dict_:
if key != 'start_ndist':
self._data_dict_[key] = 0.0
def update(self, data):
self._is_training_ = data == RunPhase.TRAIN
|
from xviz.builder.base_builder import XVIZBaseBuilder, CATEGORY
from xviz.v2.core_pb2 import TimeSeriesState
class XVIZTimeSeriesBuilder(XVIZBaseBuilder):
def __init__(self, metadata, logger=None):
super().__init__(CATEGORY.TIME_SERIES, metadata, logger)
# Stores time_series data by timestamp, then id.
# They will then be grouped when constructing the final object.
self._data = {}
self._reset()
def id(self, identifier):
self._validate_prop_set_once('_id')
self._id = identifier
return self
def value(self, value):
self._validate_prop_set_once('_value')
if isinstance(value, list):
self._logger.error("Input `value` must be single value")
self._value = value
return self
def timestamp(self, timestamp):
self._validate_prop_set_once('_timestamp')
if isinstance(timestamp, list):
self._logger.error("Input `value` must be single value")
self._timestamp = timestamp
return self
def get_data(self):
self._flush()
if not self._data:
return None
time_series_data = []
for timestamp, ids in self._data.items():
for id_, fields in ids.items():
for tsdata in fields.values():
entry = TimeSeriesState(
timestamp=timestamp,
streams=tsdata['streams'],
values=tsdata['values'],
object_id=id_
)
time_series_data.append(entry)
return time_series_data
def _add_timestamp_entry(self):
# self._data structure
# timestamp: {
# id: {
# fieldName: {
# streams: []
# values: []
# }
# }
# }
if not self._data_pending():
return
if isinstance(self._value, str):
field_name = "strings"
elif isinstance(self._value, bool):
field_name = "bools"
elif isinstance(self._value, int):
field_name = "int32s"
elif isinstance(self._value, float):
field_name = "doubles"
else:
# skip the entry; otherwise field_name would be unbound below
self._logger.error("The type of input value is not recognized!")
return
ts_entry = self._data.get(self._timestamp)
if ts_entry:
id_entry = ts_entry.get(self._id)
if id_entry:
field_entry = id_entry.get(field_name)
if field_entry:
field_entry['streams'].append(self._stream_id)
field_entry['values'][field_name].append(self._value)
else:
id_entry[field_name] = self._get_field_entry(field_name)
else:
ts_entry[self._id] = self._get_id_entry(field_name)
else:
ts_entry = {self._id: self._get_id_entry(field_name)}
self._data[self._timestamp] = ts_entry
def _get_id_entry(self, field_name):
return {field_name: self._get_field_entry(field_name)}
def _get_field_entry(self, field_name):
return dict(streams=[self._stream_id], values={field_name: [self._value]})
def _data_pending(self):
return self._value is not None or self._timestamp is not None or self._id is not None
def _validate(self):
if self._data_pending():
super()._validate()
self._validate_has_prop("_value")
self._validate_has_prop("_timestamp")
def _flush(self):
self._validate()
self._add_timestamp_entry()
self._reset()
def _reset(self):
self._id = None
self._value = None
self._timestamp = None
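# Illustrative usage sketch (not part of the original module; the stream id is
# normally wired in by the enclosing XVIZ builder and is only a placeholder here):
# builder = XVIZTimeSeriesBuilder(metadata)
# builder._stream_id = '/vehicle/speed'
# builder.id('vehicle-0').value(12.5).timestamp(1001.3)
# states = builder.get_data() # list of TimeSeriesState messages grouped by timestamp and id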
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import image_embedding_pb2 as image__embedding__pb2
class ImageEmbeddingStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Embedding = channel.unary_unary(
'/image_embedding.ImageEmbedding/Embedding',
request_serializer=image__embedding__pb2.EmbeddingRequest.SerializeToString,
response_deserializer=image__embedding__pb2.EmbeddingResponse.FromString,
)
self.Dimension = channel.unary_unary(
'/image_embedding.ImageEmbedding/Dimension',
request_serializer=image__embedding__pb2.Empty.SerializeToString,
response_deserializer=image__embedding__pb2.DimensionResponse.FromString,
)
self.Info = channel.unary_unary(
'/image_embedding.ImageEmbedding/Info',
request_serializer=image__embedding__pb2.Empty.SerializeToString,
response_deserializer=image__embedding__pb2.SimpleReponse.FromString,
)
class ImageEmbeddingServicer(object):
# missing associated documentation comment in .proto file
pass
def Embedding(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Dimension(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Info(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ImageEmbeddingServicer_to_server(servicer, server):
rpc_method_handlers = {
'Embedding': grpc.unary_unary_rpc_method_handler(
servicer.Embedding,
request_deserializer=image__embedding__pb2.EmbeddingRequest.FromString,
response_serializer=image__embedding__pb2.EmbeddingResponse.SerializeToString,
),
'Dimension': grpc.unary_unary_rpc_method_handler(
servicer.Dimension,
request_deserializer=image__embedding__pb2.Empty.FromString,
response_serializer=image__embedding__pb2.DimensionResponse.SerializeToString,
),
'Info': grpc.unary_unary_rpc_method_handler(
servicer.Info,
request_deserializer=image__embedding__pb2.Empty.FromString,
response_serializer=image__embedding__pb2.SimpleReponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'image_embedding.ImageEmbedding', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
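# Illustrative client usage (not part of the generated file; the target address is a placeholder):
# channel = grpc.insecure_channel('localhost:50051')
# stub = ImageEmbeddingStub(channel)
# dim = stub.Dimension(image__embedding__pb2.Empty())
# emb = stub.Embedding(image__embedding__pb2.EmbeddingRequest())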
|
from .datatypes import Image
from .eval_info import EvalInfo
from .renderer import Renderer
from .glsl_renderer import GLSLRenderer
from .registry import RegisterNode, UnregisterNode, NODE_REGISTRY
from .project_file import ProjectFileIO
|
#! /usr/bin/env python3
'''SMTP/ESMTP client class.
This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
Authentication) and RFC 2487 (Secure SMTP over TLS).
Notes:
Please remember, when doing ESMTP, that the names of the SMTP service
extensions are NOT the same thing as the option keywords for the RCPT
and MAIL commands!
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> print(s.help())
This is Sendmail version 8.8.4
Topics:
HELO EHLO MAIL RCPT DATA
RSET NOOP QUIT HELP VRFY
EXPN VERB ETRN DSN
For more info use "HELP <topic>".
To report bugs in the implementation send email to
sendmail-bugs@sendmail.org.
For local information send email to Postmaster at your site.
End of HELP info
>>> s.putcmd("vrfy","someone@here")
>>> s.getreply()
(250, "Somebody OverHere <somebody@here.my.org>")
>>> s.quit()
'''
# Author: The Dragon De Monsyne <dragondm@integral.org>
# ESMTP support, test code and doc fixes added by
# Eric S. Raymond <esr@thyrsus.com>
# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data)
# by Carey Evans <c.evans@clear.net.nz>, for picky mail servers.
# RFC 2554 (authentication) support by Gerhard Haering <gerhard@bigfoot.de>.
#
# This was modified from the Python 1.5 library HTTP lib.
import socket
import io
import re
import email.utils
import email.message
import email.generator
import base64
import hmac
import copy
import datetime
import sys
from email.base64mime import body_encode as encode_base64
__all__ = ["SMTPException", "SMTPNotSupportedError", "SMTPServerDisconnected", "SMTPResponseException",
"SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError",
"SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError",
"quoteaddr", "quotedata", "SMTP"]
SMTP_PORT = 25
SMTP_SSL_PORT = 465
CRLF = "\r\n"
bCRLF = b"\r\n"
_MAXLINE = 8192 # more than 8 times larger than RFC 821, 4.5.3
_MAXCHALLENGE = 5 # Maximum number of AUTH challenges sent
OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
# Exception classes used by this module.
class SMTPException(OSError):
"""Base class for all exceptions raised by this module."""
class SMTPNotSupportedError(SMTPException):
"""The command or option is not supported by the SMTP server.
This exception is raised when an attempt is made to run a command or a
command with an option which is not supported by the server.
"""
class SMTPServerDisconnected(SMTPException):
"""Not connected to any SMTP server.
This exception is raised when the server unexpectedly disconnects,
or when an attempt is made to use the SMTP instance before
connecting it to a server.
"""
class SMTPResponseException(SMTPException):
"""Base class for all exceptions that include an SMTP error code.
These exceptions are generated in some instances when the SMTP
server returns an error code. The error code is stored in the
`smtp_code' attribute of the error, and the `smtp_error' attribute
is set to the error message.
"""
def __init__(self, code, msg):
self.smtp_code = code
self.smtp_error = msg
self.args = (code, msg)
class SMTPSenderRefused(SMTPResponseException):
"""Sender address refused.
In addition to the attributes set on all SMTPResponseException
exceptions, this sets `sender' to the string that the SMTP refused.
"""
def __init__(self, code, msg, sender):
self.smtp_code = code
self.smtp_error = msg
self.sender = sender
self.args = (code, msg, sender)
class SMTPRecipientsRefused(SMTPException):
"""All recipient addresses refused.
The errors for each recipient are accessible through the attribute
'recipients', which is a dictionary of exactly the same sort as
SMTP.sendmail() returns.
"""
def __init__(self, recipients):
self.recipients = recipients
self.args = (recipients,)
class SMTPDataError(SMTPResponseException):
"""The SMTP server didn't accept the data."""
class SMTPConnectError(SMTPResponseException):
"""Error during connection establishment."""
class SMTPHeloError(SMTPResponseException):
"""The server refused our HELO reply."""
class SMTPAuthenticationError(SMTPResponseException):
"""Authentication error.
Most probably the server didn't accept the username/password
combination provided.
"""
def quoteaddr(addrstring):
"""Quote a subset of the email addresses defined by RFC 821.
Should be able to handle anything email.utils.parseaddr can handle.
"""
displayname, addr = email.utils.parseaddr(addrstring)
if (displayname, addr) == ('', ''):
# parseaddr couldn't parse it, use it as is and hope for the best.
if addrstring.strip().startswith('<'):
return addrstring
return "<%s>" % addrstring
return "<%s>" % addr
def _addr_only(addrstring):
displayname, addr = email.utils.parseaddr(addrstring)
if (displayname, addr) == ('', ''):
# parseaddr couldn't parse it, so use it as is.
return addrstring
return addr
# Legacy method kept for backward compatibility.
def quotedata(data):
"""Quote data for email.
Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
Internet CRLF end-of-line.
"""
return re.sub(r'(?m)^\.', '..',
re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
def _quote_periods(bindata):
return re.sub(br'(?m)^\.', b'..', bindata)
def _fix_eols(data):
return re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data)
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
class SMTP:
"""This class manages a connection to an SMTP or ESMTP server.
SMTP Objects:
SMTP objects have the following attributes:
helo_resp
This is the message given by the server in response to the
most recent HELO command.
ehlo_resp
This is the message given by the server in response to the
most recent EHLO command. This is usually multiline.
does_esmtp
This is a True value _after you do an EHLO command_, if the
server supports ESMTP.
esmtp_features
This is a dictionary, which, if the server supports ESMTP,
will _after you do an EHLO command_, contain the names of the
SMTP service extensions this server supports, and their
parameters (if any).
Note, all extension names are mapped to lower case in the
dictionary.
See each method's docstrings for details. In general, there is a
method of the same name to perform each SMTP command. There is also a
method called 'sendmail' that will do an entire mail transaction.
"""
debuglevel = 0
sock = None
file = None
helo_resp = None
ehlo_msg = "ehlo"
ehlo_resp = None
does_esmtp = False
default_port = SMTP_PORT
def __init__(self, host='', port=0, local_hostname=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Initialize a new instance.
If specified, `host' is the name of the remote host to which to
connect. If specified, `port' specifies the port to which to connect.
By default, smtplib.SMTP_PORT is used. If a host is specified the
connect method is called, and if it returns anything other than a
success code an SMTPConnectError is raised. If specified,
`local_hostname` is used as the FQDN of the local host in the HELO/EHLO
command. Otherwise, the local hostname is found using
socket.getfqdn(). The `source_address` parameter takes a 2-tuple (host,
port) for the socket to bind to as its source address before
connecting. If the host is '' and port is 0, the OS default behavior
will be used.
"""
self._host = host
self.timeout = timeout
self.esmtp_features = {}
self.command_encoding = 'ascii'
self.source_address = source_address
self._auth_challenge_count = 0
if host:
(code, msg) = self.connect(host, port)
if code != 220:
self.close()
raise SMTPConnectError(code, msg)
if local_hostname is not None:
self.local_hostname = local_hostname
else:
# RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and
# if that can't be calculated, that we should use a domain literal
# instead (essentially an encoded IP address like [A.B.C.D]).
fqdn = socket.getfqdn()
if '.' in fqdn:
self.local_hostname = fqdn
else:
# We can't find an fqdn hostname, so use a domain literal
addr = '127.0.0.1'
try:
addr = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
pass
self.local_hostname = '[%s]' % addr
def __enter__(self):
return self
def __exit__(self, *args):
try:
code, message = self.docmd("QUIT")
if code != 221:
raise SMTPResponseException(code, message)
except SMTPServerDisconnected:
pass
finally:
self.close()
def set_debuglevel(self, debuglevel):
"""Set the debug output level.
A non-false value results in debug messages for connection and for all
messages sent to and received from the server.
"""
self.debuglevel = debuglevel
def _print_debug(self, *args):
if self.debuglevel > 1:
print(datetime.datetime.now().time(), *args, file=sys.stderr)
else:
print(*args, file=sys.stderr)
def _get_socket(self, host, port, timeout):
# This makes it simpler for SMTP_SSL to use the SMTP connect code
# and just alter the socket connection bit.
if timeout is not None and not timeout:
raise ValueError('Non-blocking socket (timeout=0) is not supported')
if self.debuglevel > 0:
self._print_debug('connect: to', (host, port), self.source_address)
return socket.create_connection((host, port), timeout,
self.source_address)
def connect(self, host='localhost', port=0, source_address=None):
"""Connect to a host on a given port.
If the hostname ends with a colon (`:') followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host is
specified during instantiation.
"""
if source_address:
self.source_address = source_address
if not port and (host.find(':') == host.rfind(':')):
i = host.rfind(':')
if i >= 0:
host, port = host[:i], host[i + 1:]
try:
port = int(port)
except ValueError:
raise OSError("nonnumeric port")
if not port:
port = self.default_port
sys.audit("smtplib.connect", self, host, port)
self.sock = self._get_socket(host, port, self.timeout)
self.file = None
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('connect:', repr(msg))
return (code, msg)
def send(self, s):
"""Send `s' to the server."""
if self.debuglevel > 0:
self._print_debug('send:', repr(s))
if self.sock:
if isinstance(s, str):
# send is used by the 'data' command, where command_encoding
# should not be used, but 'data' needs to convert the string to
# binary itself anyway, so that's not a problem.
s = s.encode(self.command_encoding)
sys.audit("smtplib.send", self, s)
try:
self.sock.sendall(s)
except OSError:
self.close()
raise SMTPServerDisconnected('Server not connected')
else:
raise SMTPServerDisconnected('please run connect() first')
def putcmd(self, cmd, args=""):
"""Send a command to the server."""
if args == "":
str = '%s%s' % (cmd, CRLF)
else:
str = '%s %s%s' % (cmd, args, CRLF)
self.send(str)
def getreply(self):
"""Get a reply from the server.
Returns a tuple consisting of:
- server response code (e.g. '250', or such, if all goes well)
Note: returns -1 if it can't read response code.
- server response string corresponding to response code (multiline
responses are converted to a single, multiline string).
Raises SMTPServerDisconnected if end-of-file is reached.
"""
resp = []
if self.file is None:
self.file = self.sock.makefile('rb')
while 1:
try:
line = self.file.readline(_MAXLINE + 1)
except OSError as e:
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed: "
+ str(e))
if not line:
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed")
if self.debuglevel > 0:
self._print_debug('reply:', repr(line))
if len(line) > _MAXLINE:
self.close()
raise SMTPResponseException(500, "Line too long.")
resp.append(line[4:].strip(b' \t\r\n'))
code = line[:3]
# Check that the error code is syntactically correct.
# Don't attempt to read a continuation line if it is broken.
try:
errcode = int(code)
except ValueError:
errcode = -1
break
# Check if multiline response.
if line[3:4] != b"-":
break
errmsg = b"\n".join(resp)
if self.debuglevel > 0:
self._print_debug('reply: retcode (%s); Msg: %a' % (errcode, errmsg))
return errcode, errmsg
def docmd(self, cmd, args=""):
"""Send a command, and return its response code."""
self.putcmd(cmd, args)
return self.getreply()
# std smtp commands
def helo(self, name=''):
"""SMTP 'helo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.putcmd("helo", name or self.local_hostname)
(code, msg) = self.getreply()
self.helo_resp = msg
return (code, msg)
def ehlo(self, name=''):
""" SMTP 'ehlo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.esmtp_features = {}
self.putcmd(self.ehlo_msg, name or self.local_hostname)
(code, msg) = self.getreply()
# According to RFC1869 some (badly written)
# MTA's will disconnect on an ehlo. Toss an exception if
# that happens -ddm
if code == -1 and len(msg) == 0:
self.close()
raise SMTPServerDisconnected("Server not connected")
self.ehlo_resp = msg
if code != 250:
return (code, msg)
self.does_esmtp = True
#parse the ehlo response -ddm
assert isinstance(self.ehlo_resp, bytes), repr(self.ehlo_resp)
resp = self.ehlo_resp.decode("latin-1").split('\n')
del resp[0]
for each in resp:
# To be able to communicate with as many SMTP servers as possible,
# we have to take the old-style auth advertisement into account,
# because:
# 1) Else our SMTP feature parser gets confused.
# 2) There are some servers that only advertise the auth methods we
# support using the old style.
auth_match = OLDSTYLE_AUTH.match(each)
if auth_match:
# This doesn't remove duplicates, but that's no problem
self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \
+ " " + auth_match.groups(0)[0]
continue
# RFC 1869 requires a space between ehlo keyword and parameters.
# It's actually stricter, in that only spaces are allowed between
# parameters, but we're not going to check for that here. Note
# that the space isn't present if there are no parameters.
m = re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?', each)
if m:
feature = m.group("feature").lower()
params = m.string[m.end("feature"):].strip()
if feature == "auth":
self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \
+ " " + params
else:
self.esmtp_features[feature] = params
return (code, msg)
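# Illustrative note (not from the original source): after a successful ehlo(),
# esmtp_features is built from the multiline reply; e.g. a server advertising
# "SIZE 35882577", "AUTH LOGIN PLAIN" and "STARTTLS" yields
# {'size': '35882577', 'auth': ' LOGIN PLAIN', 'starttls': ''}, so
# has_extn('starttls') returns True.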
def has_extn(self, opt):
"""Does the server support a given SMTP service extension?"""
return opt.lower() in self.esmtp_features
def help(self, args=''):
"""SMTP 'help' command.
Returns help text from server."""
self.putcmd("help", args)
return self.getreply()[1]
def rset(self):
"""SMTP 'rset' command -- resets session."""
self.command_encoding = 'ascii'
return self.docmd("rset")
def _rset(self):
"""Internal 'rset' command which ignores any SMTPServerDisconnected error.
Used internally in the library, since the server disconnected error
should appear to the application when the *next* command is issued, if
we are doing an internal "safety" reset.
"""
try:
self.rset()
except SMTPServerDisconnected:
pass
def noop(self):
"""SMTP 'noop' command -- doesn't do anything :>"""
return self.docmd("noop")
def mail(self, sender, options=()):
"""SMTP 'mail' command -- begins mail xfer session.
This method may raise the following exceptions:
SMTPNotSupportedError The options parameter includes 'SMTPUTF8'
but the SMTPUTF8 extension is not supported by
the server.
"""
optionlist = ''
if options and self.does_esmtp:
if any(x.lower()=='smtputf8' for x in options):
if self.has_extn('smtputf8'):
self.command_encoding = 'utf-8'
else:
raise SMTPNotSupportedError(
'SMTPUTF8 not supported by server')
optionlist = ' ' + ' '.join(options)
self.putcmd("mail", "FROM:%s%s" % (quoteaddr(sender), optionlist))
return self.getreply()
def rcpt(self, recip, options=()):
"""SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd("rcpt", "TO:%s%s" % (quoteaddr(recip), optionlist))
return self.getreply()
def data(self, msg):
"""SMTP 'DATA' command -- sends message data to server.
Automatically quotes lines beginning with a period per rfc821.
Raises SMTPDataError if there is an unexpected reply to the
DATA command; the return value from this method is the final
response code received when all data is sent. If msg
is a string, lone '\\r' and '\\n' characters are converted to
'\\r\\n' characters. If msg is bytes, it is transmitted as is.
"""
self.putcmd("data")
(code, repl) = self.getreply()
if self.debuglevel > 0:
self._print_debug('data:', (code, repl))
if code != 354:
raise SMTPDataError(code, repl)
else:
if isinstance(msg, str):
msg = _fix_eols(msg).encode('ascii')
q = _quote_periods(msg)
if q[-2:] != bCRLF:
q = q + bCRLF
q = q + b"." + bCRLF
self.send(q)
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('data:', (code, msg))
return (code, msg)
def verify(self, address):
"""SMTP 'verify' command -- checks for address validity."""
self.putcmd("vrfy", _addr_only(address))
return self.getreply()
# a.k.a.
vrfy = verify
def expn(self, address):
"""SMTP 'expn' command -- expands a mailing list."""
self.putcmd("expn", _addr_only(address))
return self.getreply()
# some useful methods
def ehlo_or_helo_if_needed(self):
"""Call self.ehlo() and/or self.helo() if needed.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
if self.helo_resp is None and self.ehlo_resp is None:
if not (200 <= self.ehlo()[0] <= 299):
(code, resp) = self.helo()
if not (200 <= code <= 299):
raise SMTPHeloError(code, resp)
def auth(self, mechanism, authobject, *, initial_response_ok=True):
"""Authentication command - requires response processing.
'mechanism' specifies which authentication mechanism is to
be used - the valid values are those listed in the 'auth'
element of 'esmtp_features'.
'authobject' must be a callable object taking a single argument:
data = authobject(challenge)
It will be called to process the server's challenge response; the
challenge argument it is passed will be a bytes. It should return
an ASCII string that will be base64 encoded and sent to the server.
Keyword arguments:
- initial_response_ok: Allow sending the RFC 4954 initial-response
to the AUTH command, if the authentication method supports it.
"""
# RFC 4954 allows auth methods to provide an initial response. Not all
# methods support it. By definition, if they return something other
# than None when challenge is None, then they do. See issue #15014.
mechanism = mechanism.upper()
initial_response = (authobject() if initial_response_ok else None)
if initial_response is not None:
response = encode_base64(initial_response.encode('ascii'), eol='')
(code, resp) = self.docmd("AUTH", mechanism + " " + response)
self._auth_challenge_count = 1
else:
(code, resp) = self.docmd("AUTH", mechanism)
self._auth_challenge_count = 0
# If server responds with a challenge, send the response.
while code == 334:
self._auth_challenge_count += 1
challenge = base64.decodebytes(resp)
response = encode_base64(
authobject(challenge).encode('ascii'), eol='')
(code, resp) = self.docmd(response)
# If server keeps sending challenges, something is wrong.
if self._auth_challenge_count > _MAXCHALLENGE:
raise SMTPException(
"Server AUTH mechanism infinite loop. Last response: "
+ repr((code, resp))
)
if code in (235, 503):
return (code, resp)
raise SMTPAuthenticationError(code, resp)
def auth_cram_md5(self, challenge=None):
""" Authobject to use with CRAM-MD5 authentication. Requires self.user
and self.password to be set."""
# CRAM-MD5 does not support initial-response.
if challenge is None:
return None
return self.user + " " + hmac.HMAC(
self.password.encode('ascii'), challenge, 'md5').hexdigest()
def auth_plain(self, challenge=None):
""" Authobject to use with PLAIN authentication. Requires self.user and
self.password to be set."""
return "\0%s\0%s" % (self.user, self.password)
def auth_login(self, challenge=None):
""" Authobject to use with LOGIN authentication. Requires self.user and
self.password to be set."""
if challenge is None or self._auth_challenge_count < 2:
return self.user
else:
return self.password
def login(self, user, password, *, initial_response_ok=True):
"""Log in on an SMTP server that requires authentication.
The arguments are:
- user: The user name to authenticate with.
- password: The password for the authentication.
Keyword arguments:
- initial_response_ok: Allow sending the RFC 4954 initial-response
to the AUTH command, if the authentication method supports it.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method will return normally if the authentication was successful.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPAuthenticationError The server didn't accept the username/
password combination.
SMTPNotSupportedError The AUTH command is not supported by the
server.
SMTPException No suitable authentication method was
found.
"""
self.ehlo_or_helo_if_needed()
if not self.has_extn("auth"):
raise SMTPNotSupportedError(
"SMTP AUTH extension not supported by server.")
# Authentication methods the server claims to support
advertised_authlist = self.esmtp_features["auth"].split()
# Authentication methods we can handle in our preferred order:
preferred_auths = ['CRAM-MD5', 'PLAIN', 'LOGIN']
# We try the supported authentications in our preferred order, if
# the server supports them.
authlist = [auth for auth in preferred_auths
if auth in advertised_authlist]
if not authlist:
raise SMTPException("No suitable authentication method found.")
# Some servers advertise authentication methods they don't really
# support, so if authentication fails, we continue until we've tried
# all methods.
self.user, self.password = user, password
for authmethod in authlist:
method_name = 'auth_' + authmethod.lower().replace('-', '_')
try:
(code, resp) = self.auth(
authmethod, getattr(self, method_name),
initial_response_ok=initial_response_ok)
# 235 == 'Authentication successful'
# 503 == 'Error: already authenticated'
if code in (235, 503):
return (code, resp)
except SMTPAuthenticationError as e:
last_exception = e
# We could not login successfully. Return result of last attempt.
raise last_exception
def starttls(self, keyfile=None, certfile=None, context=None):
"""Puts the connection to the SMTP server into TLS mode.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked. This,
however, depends on whether the socket module really checks the
certificates.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
self.ehlo_or_helo_if_needed()
if not self.has_extn("starttls"):
raise SMTPNotSupportedError(
"STARTTLS extension not supported by server.")
(resp, reply) = self.docmd("STARTTLS")
if resp == 220:
if not _have_ssl:
raise RuntimeError("No SSL support included in this Python")
if context is not None and keyfile is not None:
raise ValueError("context and keyfile arguments are mutually "
"exclusive")
if context is not None and certfile is not None:
raise ValueError("context and certfile arguments are mutually "
"exclusive")
if keyfile is not None or certfile is not None:
import warnings
warnings.warn("keyfile and certfile are deprecated, use a "
"custom context instead", DeprecationWarning, 2)
if context is None:
context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
self.sock = context.wrap_socket(self.sock,
server_hostname=self._host)
self.file = None
# RFC 3207:
# The client MUST discard any knowledge obtained from
# the server, such as the list of SMTP service extensions,
# which was not obtained from the TLS negotiation itself.
self.helo_resp = None
self.ehlo_resp = None
self.esmtp_features = {}
self.does_esmtp = False
else:
# RFC 3207:
# 501 Syntax error (no parameters allowed)
# 454 TLS not available due to temporary reason
raise SMTPResponseException(resp, reply)
return (resp, reply)
def sendmail(self, from_addr, to_addrs, msg, mail_options=(),
rcpt_options=()):
"""This command performs an entire mail transaction.
The arguments are:
- from_addr : The address sending this mail.
- to_addrs : A list of addresses to send this mail to. A bare
string will be treated as a list with 1 address.
- msg : The message to send.
- mail_options : List of ESMTP options (such as 8bitmime) for the
mail command.
- rcpt_options : List of ESMTP options (such as DSN commands) for
all the rcpt commands.
msg may be a string containing characters in the ASCII range, or a byte
string. A string is encoded to bytes using the ascii codec, and lone
\\r and \\n characters are converted to \\r\\n characters.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first. If the server does ESMTP, message size
and each of the specified options will be passed to it. If EHLO
fails, HELO will be tried and ESMTP options suppressed.
This method will return normally if the mail is accepted for at least
one recipient. It returns a dictionary, with one entry for each
recipient that was refused. Each entry contains a tuple of the SMTP
error code and the accompanying error message sent by the server.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPRecipientsRefused The server rejected ALL recipients
(no mail was sent).
SMTPSenderRefused The server didn't accept the from_addr.
SMTPDataError The server replied with an unexpected
error code (other than a refusal of
a recipient).
SMTPNotSupportedError The mail_options parameter includes 'SMTPUTF8'
but the SMTPUTF8 extension is not supported by
the server.
Note: the connection will be open even after an exception is raised.
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"]
>>> msg = '''\\
... From: Me@my.org
... Subject: testin'...
...
... This is a test '''
>>> s.sendmail("me@my.org",tolist,msg)
{ "three@three.org" : ( 550 ,"User unknown" ) }
>>> s.quit()
In the above example, the message was accepted for delivery to three
of the four addresses, and one was rejected, with the error code
550. If all addresses are accepted, then the method will return an
empty dictionary.
"""
self.ehlo_or_helo_if_needed()
esmtp_opts = []
if isinstance(msg, str):
msg = _fix_eols(msg).encode('ascii')
if self.does_esmtp:
if self.has_extn('size'):
esmtp_opts.append("size=%d" % len(msg))
for option in mail_options:
esmtp_opts.append(option)
(code, resp) = self.mail(from_addr, esmtp_opts)
if code != 250:
if code == 421:
self.close()
else:
self._rset()
raise SMTPSenderRefused(code, resp, from_addr)
senderrs = {}
if isinstance(to_addrs, str):
to_addrs = [to_addrs]
for each in to_addrs:
(code, resp) = self.rcpt(each, rcpt_options)
if (code != 250) and (code != 251):
senderrs[each] = (code, resp)
if code == 421:
self.close()
raise SMTPRecipientsRefused(senderrs)
if len(senderrs) == len(to_addrs):
# the server refused all our recipients
self._rset()
raise SMTPRecipientsRefused(senderrs)
(code, resp) = self.data(msg)
if code != 250:
if code == 421:
self.close()
else:
self._rset()
raise SMTPDataError(code, resp)
#if we got here then somebody got our mail
return senderrs
def send_message(self, msg, from_addr=None, to_addrs=None,
mail_options=(), rcpt_options=()):
"""Converts message to a bytestring and passes it to sendmail.
The arguments are as for sendmail, except that msg is an
email.message.Message object. If from_addr is None or to_addrs is
None, these arguments are taken from the headers of the Message as
described in RFC 2822 (a ValueError is raised if there is more than
one set of 'Resent-' headers). Regardless of the values of from_addr and
to_addr, any Bcc field (or Resent-Bcc field, when the Message is a
resent) of the Message object won't be transmitted. The Message
object is then serialized using email.generator.BytesGenerator and
sendmail is called to transmit the message. If the sender or any of
the recipient addresses contain non-ASCII and the server advertises the
SMTPUTF8 capability, the policy is cloned with utf8 set to True for the
serialization, and SMTPUTF8 and BODY=8BITMIME are asserted on the send.
If the server does not support SMTPUTF8, an SMTPNotSupported error is
raised. Otherwise the generator is called without modifying the
policy.
"""
# 'Resent-Date' is a mandatory field if the Message is resent (RFC 2822
# Section 3.6.6). In such a case, we use the 'Resent-*' fields. However,
# if there is more than one 'Resent-' block there's no way to
# unambiguously determine which one is the most recent in all cases,
# so rather than guess we raise a ValueError in that case.
#
# TODO implement heuristics to guess the correct Resent-* block with an
# option allowing the user to enable the heuristics. (It should be
# possible to guess correctly almost all of the time.)
self.ehlo_or_helo_if_needed()
resent = msg.get_all('Resent-Date')
if resent is None:
header_prefix = ''
elif len(resent) == 1:
header_prefix = 'Resent-'
else:
raise ValueError("message has more than one 'Resent-' header block")
if from_addr is None:
# Prefer the sender field per RFC 2822:3.6.2.
from_addr = (msg[header_prefix + 'Sender']
if (header_prefix + 'Sender') in msg
else msg[header_prefix + 'From'])
from_addr = email.utils.getaddresses([from_addr])[0][1]
if to_addrs is None:
addr_fields = [f for f in (msg[header_prefix + 'To'],
msg[header_prefix + 'Bcc'],
msg[header_prefix + 'Cc'])
if f is not None]
to_addrs = [a[1] for a in email.utils.getaddresses(addr_fields)]
# Make a local copy so we can delete the bcc headers.
msg_copy = copy.copy(msg)
del msg_copy['Bcc']
del msg_copy['Resent-Bcc']
international = False
try:
''.join([from_addr, *to_addrs]).encode('ascii')
except UnicodeEncodeError:
if not self.has_extn('smtputf8'):
raise SMTPNotSupportedError(
"One or more source or delivery addresses require"
" internationalized email support, but the server"
" does not advertise the required SMTPUTF8 capability")
international = True
with io.BytesIO() as bytesmsg:
if international:
g = email.generator.BytesGenerator(
bytesmsg, policy=msg.policy.clone(utf8=True))
mail_options = (*mail_options, 'SMTPUTF8', 'BODY=8BITMIME')
else:
g = email.generator.BytesGenerator(bytesmsg)
g.flatten(msg_copy, linesep='\r\n')
flatmsg = bytesmsg.getvalue()
return self.sendmail(from_addr, to_addrs, flatmsg, mail_options,
rcpt_options)
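    # Illustrative sketch (editorial, not part of the library source): typical
    # send_message() usage; the host name and addresses are assumptions.
    #
    #   >>> from email.message import EmailMessage
    #   >>> m = EmailMessage()
    #   >>> m['From'], m['To'], m['Subject'] = 'me@my.org', 'you@two.org', 'testing'
    #   >>> m.set_content('This is a test')
    #   >>> with SMTP('localhost') as s:
    #   ...     s.send_message(m)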
def close(self):
"""Close the connection to the SMTP server."""
try:
file = self.file
self.file = None
if file:
file.close()
finally:
sock = self.sock
self.sock = None
if sock:
sock.close()
def quit(self):
"""Terminate the SMTP session."""
res = self.docmd("quit")
# A new EHLO is required after reconnecting with connect()
self.ehlo_resp = self.helo_resp = None
self.esmtp_features = {}
self.does_esmtp = False
self.close()
return res
if _have_ssl:
class SMTP_SSL(SMTP):
""" This is a subclass derived from SMTP that connects over an SSL
encrypted socket (to use this class you need a socket module that was
compiled with SSL support). If host is not specified, '' (the local
host) is used. If port is omitted, the standard SMTP-over-SSL port
(465) is used. local_hostname and source_address have the same meaning
as they do in the SMTP class. keyfile and certfile are also optional -
they can contain a PEM formatted private key and certificate chain file
        for the SSL connection. context is also optional and can contain an
        SSLContext; it is an alternative to keyfile and certfile. If it is
        specified, both keyfile and certfile must be None.
"""
default_port = SMTP_SSL_PORT
def __init__(self, host='', port=0, local_hostname=None,
keyfile=None, certfile=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, context=None):
if context is not None and keyfile is not None:
raise ValueError("context and keyfile arguments are mutually "
"exclusive")
if context is not None and certfile is not None:
raise ValueError("context and certfile arguments are mutually "
"exclusive")
if keyfile is not None or certfile is not None:
import warnings
warnings.warn("keyfile and certfile are deprecated, use a "
"custom context instead", DeprecationWarning, 2)
self.keyfile = keyfile
self.certfile = certfile
if context is None:
context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
self.context = context
SMTP.__init__(self, host, port, local_hostname, timeout,
source_address)
def _get_socket(self, host, port, timeout):
if self.debuglevel > 0:
self._print_debug('connect:', (host, port))
new_socket = super()._get_socket(host, port, timeout)
new_socket = self.context.wrap_socket(new_socket,
server_hostname=self._host)
return new_socket
__all__.append("SMTP_SSL")
#
# LMTP extension
#
LMTP_PORT = 2003
class LMTP(SMTP):
"""LMTP - Local Mail Transfer Protocol
The LMTP protocol, which is very similar to ESMTP, is heavily based
on the standard SMTP client. It's common to use Unix sockets for
LMTP, so our connect() method must support that as well as a regular
host:port server. local_hostname and source_address have the same
meaning as they do in the SMTP class. To specify a Unix socket,
you must use an absolute path as the host, starting with a '/'.
Authentication is supported, using the regular SMTP mechanism. When
    using a Unix socket, LMTP servers generally don't support or require any
authentication, but your mileage might vary."""
ehlo_msg = "lhlo"
def __init__(self, host='', port=LMTP_PORT, local_hostname=None,
source_address=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Initialize a new instance."""
super().__init__(host, port, local_hostname=local_hostname,
source_address=source_address, timeout=timeout)
def connect(self, host='localhost', port=0, source_address=None):
"""Connect to the LMTP daemon, on either a Unix or a TCP socket."""
if host[0] != '/':
return super().connect(host, port, source_address=source_address)
if self.timeout is not None and not self.timeout:
raise ValueError('Non-blocking socket (timeout=0) is not supported')
# Handle Unix-domain sockets.
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
self.sock.settimeout(self.timeout)
self.file = None
self.sock.connect(host)
except OSError:
if self.debuglevel > 0:
self._print_debug('connect fail:', host)
if self.sock:
self.sock.close()
self.sock = None
raise
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('connect:', msg)
return (code, msg)
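    # Illustrative sketch (editorial, not part of the library source): connecting
    # to an LMTP daemon over a Unix-domain socket; the socket path is an assumption.
    #
    #   >>> lmtp = LMTP('/var/run/dovecot/lmtp')
    #   >>> lmtp.sendmail('me@my.org', ['you@two.org'], 'Subject: hi\r\n\r\nhello')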
# Test the sendmail method, which tests most of the others.
# Note: This always sends to localhost.
if __name__ == '__main__':
def prompt(prompt):
sys.stdout.write(prompt + ": ")
sys.stdout.flush()
return sys.stdin.readline().strip()
fromaddr = prompt("From")
toaddrs = prompt("To").split(',')
print("Enter message, end with ^D:")
msg = ''
while 1:
line = sys.stdin.readline()
if not line:
break
msg = msg + line
print("Message length is %d" % len(msg))
server = SMTP('localhost')
server.set_debuglevel(1)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
|
import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyperclip
import seaborn as sns
class QPCRAnalysis:
def __init__(self,
data,
genes,
samples,
ntc_cols=True,
columns_per_sample=2):
        '''
        data: either the filename of a TSV file as exported by the LightCycler,
              or a pandas DataFrame
        '''
self.samples = samples
self.genes = genes
if isinstance(data, pd.DataFrame):
df = data
else:
with open(data, 'r') as f:
self.experiment_name = re.match('Experiment: ([^ ]*)',
f.readline()).groups()[0]
df = pd.read_csv(data, sep='\t', skiprows=1)
df.drop('Name', inplace=True, axis=1)
df['ypos'] = df['Pos'].apply(lambda pos: ord(pos[0]) - ord('A'))
df['ypos'] -= df['ypos'].min()
ntc_row = df['ypos'].max()
df['xpos'] = df['Pos'].apply(lambda pos: int(pos[1:]))
# only consider non-NTCs here
df['xpos'] -= df.loc[df['ypos'] < ntc_row, 'xpos'].min()
if ntc_cols:
if ntc_cols is True:
ntc_cols = list(range(0, len(genes) * 2, 2))
self.ntcs = pd.Series(
df.loc[df['xpos'].isin(ntc_cols) & (df['ypos'] == ntc_row),
'Cp'].tolist(),
index=genes)
else:
self.ntcs = None
df = df[(df.ypos < ntc_row)
& (df.xpos < len(samples) * columns_per_sample)]
df['gene'] = self._assign_genes(df, genes, columns_per_sample)
df['sample'] = pd.Categorical(
df.xpos.apply(lambda xpos: samples[xpos // columns_per_sample]),
categories=samples,
ordered=True)
self.df = df[['sample', 'gene', 'Cp', 'xpos', 'ypos']]
def dropna(self):
na_index = self.df.index[self.df.Cp.isna()]
        print(f'Deleting {len(na_index)} NaNs')
self.df.drop(na_index, inplace=True)
def _assign_genes(self, df, genes, columns_per_sample):
def _gene(row):
index = ((row['ypos'] // 3) * columns_per_sample +
(row['xpos'] % columns_per_sample))
try:
return genes[index]
except IndexError:
print(index)
raise
return df.apply(_gene, axis=1)
def outliers(self):
df = self.df.copy()
triplet_deviations = df.groupby(['sample', 'gene'])['Cp'].std()
df['index'] = df.index
outlier_triplets = df.set_index(
['sample',
'gene']).loc[triplet_deviations.index[triplet_deviations > 0.5]]
outlier_ids = []
for name, group in outlier_triplets.groupby(level=['sample', 'gene']):
diffs = (group.Cp - group.Cp.mean())
if len(diffs) == 3:
sorted_diffs = diffs.abs().reset_index(drop=True).sort_values(ascending=False)
if sorted_diffs.iloc[0] > 1.5 * sorted_diffs.iloc[1]:
index = group.iloc[sorted_diffs.index[0]]['index']
outlier_ids.append(int(index))
# print(diffs.iloc[lid], df[df['index'] == index])
# elif len(diffs) == 2: # does this make sense? if we have two, then leave the two!
# # delete the larger one
# index = group.iloc[group.Cp.reset_index(
# drop=True).argmax()]['index']
return outlier_ids
def plot_outliers(self): # TODO
triplet_deviations = self.df.groupby(['sample', 'gene']).std()['Cp']
outlier_triplets = self.df.set_index(['sample', 'gene']).loc[
triplet_deviations.index[triplet_deviations > 0.5]].reset_index()
outlier_triplets['triplet_name'] = outlier_triplets['sample'].str.cat(
outlier_triplets.gene, sep='_')
sns.swarmplot(data=outlier_triplets.reset_index(),
x='triplet_name',
y='Cp')
plt.xticks(rotation=90)
def plot_cps(self, exclude_genes=[]):
plt.subplots(figsize=(10, 7))
# g = sns.barplot(y='Cp', hue='sample', x='gene', data=self.df.sort_values('sample'), dodge=True)
plot_df = self.df.sort_values('sample')
plot_df = plot_df[~plot_df.gene.isin(exclude_genes)]
g = sns.barplot(y='Cp',
hue='sample',
x='gene',
data=plot_df,
dodge=True)
plt.xticks(rotation=30)
plt.title('Raw Cp values')
plt.legend(ncol=3, loc='upper center', bbox_to_anchor=[0.5, -0.15])
def normalized_df(self, normalizers, exclude_genes=[], exclude_samples=[], include_samples=None, include_genes=None, norm_to_one=None):
'''
        :include_samples: takes precedence over exclude_samples. Can be used to determine the plotting order
        :include_genes: takes precedence over exclude_genes. Can be used to determine the plotting order
'''
assert len(normalizers) > 0, 'At least one normalizer is required'
means = self.df.groupby(['sample', 'gene']).mean().unstack()['Cp']
stds = self.df.groupby(['sample', 'gene']).std().unstack()['Cp']
summed_normalizers = None
for normalizer in normalizers:
if summed_normalizers is None:
summed_normalizers = means[normalizer]
else:
summed_normalizers = summed_normalizers + means[normalizer]
summed_normalizers = summed_normalizers / len(normalizers)
delta = means.subtract(
summed_normalizers, axis=0
) # normalization like this results in geometric mean normalization on mRNA expression level
non_normalization_genes = [
gene for gene in self.genes if gene not in normalizers
]
if include_genes:
plot_genes = [
gene for gene in include_genes
if gene in non_normalization_genes
]
else:
plot_genes = [
gene for gene in non_normalization_genes
if gene not in exclude_genes
]
delta_low = delta - stds
delta_high = delta + stds
q = np.power(2, -delta)
q_low = np.power(2, -delta_low)
q_high = np.power(2, -delta_high)
q_std = pd.concat(dict(q=q, q_low=q_low, q_high=q_high),
axis=1).std(axis=1, level=1)
# fig, ax = plt.subplots(figsize=(10, 5))
plot_df = q[plot_genes].stack().reset_index().rename(
columns={0: 'expression'})
plot_df['error'] = q_std[plot_genes].stack().reset_index()[0]
if include_samples is not None:
plot_df = plot_df[plot_df['sample'].isin(include_samples)]
plot_df['sample'] = pd.Categorical(plot_df['sample'],
categories=include_samples,
ordered=True)
plot_df.sort_values('sample', inplace=True)
else:
plot_df = plot_df[~plot_df['sample'].isin(exclude_samples)]
if include_genes:
plot_df['gene'] = pd.Categorical(plot_df['gene'],
categories=plot_genes,
ordered=True)
plot_df.sort_values('gene', inplace=True, kind='mergesort') # mergesort is stable
if norm_to_one:
normed = plot_df.set_index(['sample', 'gene']).groupby('gene').apply(lambda group: pd.DataFrame({
'expression': group['expression'] / group.loc[norm_to_one, 'expression'],
'error': group['error'] / group.loc[norm_to_one, 'expression']}))
return normed.reset_index()
else:
return plot_df
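    # Editorial note (not part of the original analysis): the block above is the
    # usual delta-Ct scheme.  For sample s and gene g,
    #     delta(s, g) = mean Cp(s, g) - mean over the normalizer genes of mean Cp(s, n)
    #     expression(s, g) = 2 ** (-delta(s, g))
    # Subtracting the arithmetic mean of the normalizer Cp values is equivalent to
    # dividing by the geometric mean of their linear-scale expression levels, hence
    # the geometric-mean normalization mentioned in the comment above.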
def plot_normalized(self,
normalizers,
exclude_genes=[],
exclude_samples=[],
include_samples=None,
include_genes=None,
colors=None,
legend=True,
norm_to_one=None,
**kwargs
):
'''
        This has been built using the Ciaudo Lab Excel qPCR analysis sheet as a template
'''
from moritzsphd.plot import grouped_barplot
plot_df = self.normalized_df(normalizers, exclude_genes, exclude_samples, include_samples, include_genes, norm_to_one)
fig = grouped_barplot(data=plot_df,
y='expression',
x='gene',
hue='sample',
yerr='error',
split_yaxis='gene',
colors=colors,
**kwargs
)
#sns.barplot(data=plot_df, y='expression', x='gene', hue='sample')
#plt.errorbar(x=plot_df['gene'], y=plot_df['expression'], fmt='none', yerror=plot_df['error'], ecolor='k', elinewidth=2)
plt.subplots_adjust(top=0.85)
plt.suptitle(
f'gene expression normalized by {"geometric mean of " if len(normalizers) > 1 else ""}{" and ".join(normalizers)}'
)
sns.despine()
if legend:
plt.legend(ncol=3, loc='upper center', bbox_to_anchor=[0.5, -0.1])
return fig
def drop_outliers(self):
self.df.drop(self.outliers(), inplace=True)
def plot_heatmap(self):
fig, ax = plt.subplots(figsize=(15, 7))
plot_df = self.df[['Cp', 'xpos', 'ypos']].pivot(index='ypos',
columns='xpos')
sns.heatmap(plot_df)
def export_excel(self, normalizer_gene, readout_gene):
df = self.df.copy()
normalizer = df.loc[df.gene == normalizer_gene].set_index(
'sample').sort_index().Cp
xx = df.loc[df.gene == readout_gene].set_index('sample').sort_index()
values = []
for i, x in enumerate(xx.iterrows()):
values.append(x[1].Cp)
            if i % 3 == 2:  # after each readout triplet, append the matching normalizer Cp values
for j in range(i - 2, i + 1):
values.append(normalizer.iloc[j])
if len(values) == 0:
raise ValueError(f'gene {readout_gene} does not exist')
pyperclip.copy('\n'.join(
['' if pd.isnull(v) else str(v) for v in values]))
def normalized_prism_df(self, grouping_func, normalizer_genes=None, control_group='WT', repl_normalization=True):
        '''
        Return expression values arranged for pasting into PRISM: one column per
        (group, gene) combination, normalized to the control group.
        '''
df = self.normalized_df(normalizer_genes)
df['grouping_series'] = df.apply(grouping_func, axis=1)
# bring into PRISM form
df = df.groupby(['grouping_series', 'gene'])['expression'].apply(lambda v: pd.Series(list(v))).unstack(-2).unstack(-1)
# normalize to WT
if repl_normalization:
df = df / df.loc[control_group]
else:
wt_mean = df.loc[control_group].unstack(-1).mean(axis=1)
df = df.groupby(axis=1, level=0).apply(lambda v: v/wt_mean[v.name])
return df
def raw_prism_df(self):
df = self.df.copy()
df['repl'] = df['ypos'] % 3
return df.set_index(['sample', 'gene', 'repl'])['Cp'].unstack(-2).unstack(-1)
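# Illustrative usage sketch (editorial, not part of the original module): the file
# name, gene names and sample names below are assumptions.
if __name__ == '__main__':
    qpcr = QPCRAnalysis('example_lightcycler_export.txt',
                        genes=['Gapdh', 'GeneA', 'GeneB'],
                        samples=['WT', 'KO1', 'KO2'])
    qpcr.dropna()
    qpcr.drop_outliers()
    print(qpcr.normalized_df(normalizers=['Gapdh']))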
|
import sys
import csv
import requests
import json
from datetime import datetime, date
from io import StringIO
from sots import app, db
from flask import render_template, request, redirect, url_for, Response
from sqlalchemy import func, desc, or_, distinct, and_
#from sqlalchemy.orm import lazyload
from sots.models import FullTextIndex, FullTextCompositeIndex, BusMaster, Principal, BusFiling, Status, Subtype, Corp, \
DomLmtCmpy, ForLmtCmpy, ForLmtLiabPart, ForLmtPart, BusOther, ForStatTrust, NameChange, FilmIndx, PrincipalName, FilingDetails
from sots.forms import SearchForm, AdvancedSearchForm, FeedbackForm
from sots.config import BaseConfig as ConfigObject
from sots.helpers import corp_type_lookup, origin_lookup, category_lookup
from sots.helpers import check_empty as check_none
def corp_domesticity(bus_id):
r = Corp.query.filter(Corp.id_bus == str(bus_id)).first()
if r.cd_citizen == 'F':
domesticity = 'Foreign'
else:
domesticity = 'Domestic'
place_of_formation = r.cd_pl_of_form
type = corp_type_lookup[r.cd_bus_type]
return {'domesticity': "{} / {}".format(domesticity, place_of_formation),
'type':type,
'category':None}
def dom_domesticity(bus_id):
return {'domesticity': "Domestic / CT",'type': None, 'category':None}
def for_lmt_liab_cmpy_domesticity(bus_id):
r = ForLmtCmpy.query.filter(ForLmtCmpy.id_bus == str(bus_id)).first()
place_of_formation = r.cd_pl_of_form
return {'domesticity': "Foreign / {}".format(place_of_formation),
'type': None,
'category': None }
def for_lmt_liab_part_domesticity(bus_id):
# TODO Address data loading issues with this table. Currently 0 rows
# r = ForLmtLiabPart.query.filter(ForLmtLiabPart.id_bus = str(bus_id)).first()
# place_of_formation = r.cd_pl_of_form
return {'domesticity': "Foreign", 'type': None, 'category': None }
def for_lmt_part_domesticity(bus_id):
# TODO Address data loading issues with this table. Currently 0 rows
# r = ForLmtPart.query.filter(ForLiabPart.id_bus = str(bus_id)).first()
# place_of_formation = r.cd_pl_of_form
return {'domesticity': "Foreign", 'type':None, 'category':None}
def for_stat_trust_domesticity(bus_id):
r = ForStatTrust.query.filter(ForStatTrust.id_bus == str(bus_id)).first()
return {'domesticity': "Foreign / {}".format(r.cd_pl_of_form),
'type':None,
'category':None}
def gen_part_domesticity(bus_id):
return {'domesticity': "", 'type':None, 'category':None}
def other_domesticity(bus_id):
r = BusOther.query.filter(BusOther.id_bus == str(bus_id)).first()
type = corp_type_lookup[r.cd_bus_type]
origin = origin_lookup[r.cd_origin]
try:
category = category_lookup[r.cd_category]
except KeyError:
category = None
return {'domesticity': None, 'type': type, 'category': category}
def domesticity_lookup(bus_id, subtype):
type_table_lookup = {'B': corp_domesticity,
'C': corp_domesticity,
'G': dom_domesticity,
'I': dom_domesticity,
'D': dom_domesticity,
'L': dom_domesticity,
'P': corp_domesticity,
'H': for_lmt_liab_cmpy_domesticity,
'J': for_lmt_liab_part_domesticity,
'F': for_lmt_part_domesticity,
'M': for_stat_trust_domesticity,
'K': gen_part_domesticity,
'O': other_domesticity,
#added by ILYA:
'Q': corp_domesticity,
'R': corp_domesticity,
'S': corp_domesticity,
}
lookup = type_table_lookup[subtype]
return lookup(bus_id)
def redirect_url(default='index'):
return request.args.get('next') or \
request.referrer or \
url_for(default)
def get_latest_report(bus_id):
report_codes = ['CFRN', 'CFRS', 'CRLC', 'CRLCF', 'CRLLP', 'CRLLPF', 'CRLP',
'CRLPF', 'CRS', 'CRN', 'COB', 'CON', 'COS']
latest_report = BusFiling.query.filter(BusFiling.id_bus == bus_id).filter(
BusFiling.cd_trans_type.in_(report_codes)).order_by(BusFiling.id_bus_flng.desc()).first()
#latest_report = BusFiling.query.filter(BusFiling.id_bus == bus_id).order_by(BusFiling.id_bus_flng.desc()).first()
if latest_report:
return latest_report.tx_certif
else:
return 'No Reports Found'
#replace dt_origin with dt_filing
def query(q_object):
results = FullTextIndex.query.filter(FullTextIndex.index_name == q_object['index_field'])
if q_object['index_field'] == 'place_of_business_city':
        city_col = FullTextIndex.city
        results = results.filter(city_col.startswith(q_object['query'].upper()))
if q_object['query'] != '':
tq = func.plainto_tsquery('english', q_object['query'])
results = results.filter(FullTextIndex.document.op('@@')(tq))
if q_object['active'] == 'True':
results = results.filter(FullTextIndex.status == 'Active')
if q_object['start_date'] and q_object['end_date']:
results = results.filter(FullTextIndex.dt_filing >= q_object['start_date']).filter(FullTextIndex.dt_filing <= q_object['end_date'])
if q_object['business_type']:
if q_object['business_type'] == ['All Entities']:
pass
else:
results = results.filter(FullTextIndex.type.in_(q_object['business_type']))
if q_object['sort_order'] == 'desc':
results = results.order_by(desc(q_object['sort_by']))
else:
results = results.order_by(q_object['sort_by'])
return results
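# Editorial sketch (not part of the app): the q_object dict that query() expects;
# the concrete values shown are assumptions.
#
#   q_object = {
#       'query': 'acme llc',                  # full-text search terms
#       'index_field': 'business_name',       # which FullTextIndex partition to search
#       'active': 'True',                     # restrict to active businesses
#       'start_date': date(1990, 1, 1),       # dt_filing lower bound
#       'end_date': date(2019, 1, 1),         # dt_filing upper bound
#       'business_type': ['All Entities'],    # or a list of specific type names
#       'sort_by': 'nm_name',
#       'sort_order': 'asc',
#   }
#   results = query(q_object)                 # a SQLAlchemy query, not yet executed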
@app.route('/search_results', methods=['GET'])
def search_results():
page = int(request.args.get('page'))
q_object = {
'query': request.args.get('query'),
'index_field': request.args.get('index_field'),
'active': request.args.get('active'),
'sort_by': request.args.get('sort_by'),
'sort_order': request.args.get('sort_order')
}
if request.args.get('query_limit') is not None:
q_object['query_limit'] = request.args.get('query_limit')
else:
q_object['query_limit'] = ''
try:
q_object['start_date'] = datetime.strptime(request.args.get('start_date'), '%Y-%m-%d')
q_object['end_date'] = datetime.strptime(request.args.get('end_date'), '%Y-%m-%d')
except TypeError:
sd =[ int(x) for x in ConfigObject.START_DATE.split('-') ]
q_object['start_date'] = date(sd[0], sd[1], sd[2])
ed = [ int(x) for x in ConfigObject.END_DATE.split('-') ]
q_object['end_date'] = date(ed[0], ed[1], ed[2])
#q_object['start_date'] = date(1803, 1, 1)
#q_object['end_date'] = datetime.now()
#q_object['end_date'] = date(2019, 1, 1)
q_object['business_type'] = request.args.getlist('business_type')
if (len(q_object['query']) < 2 or len(q_object['query']) > 255):
q_object['query'] = 'sallys apizza'
q_object['query_limit'] = 10
results = query(q_object)
results = results.paginate(page, ConfigObject.RESULTS_PER_PAGE, False)
form = AdvancedSearchForm(**q_object)
return render_template('results.html', results=results, q_obj=q_object, form=form)
@app.route('/business/<id>', methods=['GET'])
def detail(id):
result = BusMaster.query.filter(BusMaster.id_bus == str(id)).first()
try:
domesticity = domesticity_lookup(result.id_bus, result.cd_subtype)
except AttributeError:
return redirect(url_for('index'))
principals = Principal.query.filter(Principal.id_bus == str(id)).all()
#filings = BusFiling.query.filter(BusFiling.id_bus == str(id)).order_by(BusFiling.dt_filing).all()
filings = FilingDetails.query.filter(FilingDetails.id_bus == str(id)).order_by(FilingDetails.dt_filing).all()
# TEMPORARY SOLUTION TO THE FOLLOWING PROBLEM:
# For some NH business registrations between Oct - Dec 2018, filing data are missing. I realized
    # that although the records are present in the BUS_FILING table, they are missing from the FILMINDX table.
filings_ids = [x.id_bus_flng for x in filings]
filings_2 = BusFiling.query.filter(BusFiling.id_bus == str(id)).order_by(BusFiling.dt_filing).all()
for f in filings_2:
if f.id_bus_flng not in filings_ids:
filings.append(f)
#filings = FilingDetails.query.filter(FilingDetails.id_bus == '1287471').order_by(FilingDetails.dt_filing).all()
# filmindx = {}
# for filing in filings:
# response = FilmIndx.query.filter(FilmIndx.id_bus_flng == filing.id_bus_flng).all()
# if len(response) > 0:
# filmindx[str(filing.id_bus_flng)] = response[0]
# else:
# filmindx[str(filing.id_bus_flng)] = {'volume_type': 'No data', 'volume_number': 'No data', 'start_page': 'No data', 'pages': 'No data'}
name_changes = NameChange.query.filter(NameChange.id_bus == str(id)).order_by(desc(NameChange.dt_changed)).all()
report = get_latest_report(id)
return render_template('results_detail.html',
result=result,
report=report,
principals=principals,
domesticity=domesticity,
filings=filings,
# filmindx=filmindx,
name_changes=name_changes,
results_page=redirect_url())
@app.route('/', methods=['GET', 'POST'])
def index():
form = SearchForm()
if form.validate_on_submit():
return redirect(url_for('search_results',
query=form.query.data,
index_field=form.index_field.data,
sort_by=form.sort_by.data,
sort_order=form.sort_order.data,
page=1))
return render_template('index.html', form=form)
@app.route('/download', methods=['POST'])
def download():
form = AdvancedSearchForm()
form.business_type.default = 'All Entities'
if form.validate_on_submit():
q_object = {
'query': form.query.data,
'query_limit': form.query_limit.data,
'index_field': form.index_field.data,
'active': form.active.data,
'sort_by': form.sort_by.data,
'sort_order': form.sort_order.data
}
try:
q_object['start_date'] = datetime.strftime(form.start_date.data, '%Y-%m-%d')
q_object['end_date'] = datetime.strftime(form.end_date.data, '%Y-%m-%d')
except TypeError:
sd =[ int(x) for x in ConfigObject.START_DATE.split('-') ]
q_object['start_date'] = date(sd[0], sd[1], sd[2])
ed = [ int(x) for x in ConfigObject.END_DATE.split('-') ]
q_object['end_date'] = date(ed[0], ed[1], ed[2])
#q_object['start_date'] = date(year=1900, month=1, day=1)
#q_object['end_date'] = datetime.now()
#q_object['end_date'] = date(year=2019, month=1, day=1)
q_object['business_type'] = form.business_type.data
results = query(q_object)
file = StringIO()
writer = csv.DictWriter(file, fieldnames=['name', 'id', 'principal', 'agent', 'date formed', 'status', 'type', 'street', 'city', 'state', 'zip'])
writer.writeheader()
for biz in results.all():
row = {'name': biz.nm_name, 'id': biz.id_bus, 'principal': biz.principal_name, 'agent': biz.nm_agt, 'date formed': biz.dt_filing2, 'status': biz.status,
'type': biz.type, 'street': biz.street, 'city': biz.city, 'state': biz.state, 'zip': biz.zip}
writer.writerow(row)
file.seek(0)
response = Response(file, content_type='text/csv')
response.headers['Content-Disposition'] = 'attachment; filename=sots_search_results.csv'
return response
@app.route('/advanced_search', methods=['GET', 'POST'])
def advanced():
form = AdvancedSearchForm()
form.business_type.default = 'All Entities'
if form.validate_on_submit():
return redirect(url_for('search_results',
query=form.query.data,
query_limit=form.query_limit.data,
index_field=form.index_field.data,
business_type=form.business_type.data,
start_date=form.start_date.data,
end_date=form.end_date.data,
active=form.active.data if form.active.data else '', # if `False` is passed, the checkbox will still appear "checked"
# in browser because `checked=""` will be assigned to the input tag
sort_by=form.sort_by.data,
sort_order=form.sort_order.data,
page=1))
return render_template('advanced.html', form=form)
@app.route('/technical_details', methods=['GET'])
def technical_details():
return render_template('technical_details.html')
def to_markup(form):
text = "## Feedback: \n{}\n".format(form.goal.data)
text += "## Contact: \n{}\n".format(form.general.data)
text += "## User Agent: \n - {}".format(form.user_agent.data)
return text
def create_github_issue(form):
'''Create an issue on github.com using the given parameters.'''
# Our url to create issues via POST
url = 'https://api.github.com/repos/%s/%s/issues' % (ConfigObject.GITHUB_OWNER, ConfigObject.GITHUB_REPO)
headers = {'Authorization': 'token %s' % ConfigObject.GITHUB_TOKEN}
# Create our issue
issue = {'title': form.goal.data,
'body': to_markup(form),
'labels': ['site feedback']}
# Add the issue to our repository
return requests.post(url, data=json.dumps(issue), headers=headers)
@app.route('/feedback', methods=['GET', 'POST'])
def feedback():
repo = ConfigObject.GITHUB_REPO
owner = ConfigObject.GITHUB_OWNER
agent = request.headers.get('User-Agent')
form = FeedbackForm(user_agent=agent)
if form.validate_on_submit() and (form.general.data != form.goal.data):
r = create_github_issue(form)
if r.status_code == 201:
return render_template('feedback_confirm.html', url=r.json()['html_url'])
else:
return render_template('feedback.html', form=form, repo=repo, owner=owner)
return render_template('feedback.html', form=form, repo=repo, owner=owner)
# Filters for Jinja
app.template_filter('checknone')(check_none)
@app.template_filter('simpledate')
def simple_date(value, format='%b %d, %Y'):
# try:
return value.strftime(format)
issue = {'title': 'test',
'body': 'test issue'}
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.ENG/Sun-ExtA_8/udhr_Latn.ENG_Sun-ExtA_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
from bottle import route, view, template, request, response, redirect
import pymysql.cursors
import pymysql.err
from database import dbapi
@route('/create_profile', method=['GET'])
@view('create_profile')
def view_create_profile():
return {'message':''}
@route('/create_profile', method=['POST'])
@view('create_profile')
def new_profile():
email = request.forms.get('email', '').strip()
first_name = request.forms.get('first_name', '').strip()
last_name = request.forms.get('last_name', '').strip()
password = request.forms.get('password', '').strip()
address = request.forms.get('address', '').strip()
work_phone_cc = request.forms.get('work_phone_cc', '').strip()
work_phone_number = request.forms.get('work_phone_number', '').strip()
home_phone_cc = request.forms.get('home_phone_cc', '').strip()
home_phone_number = request.forms.get('home_phone_number', '').strip()
try:
connection = dbapi.connect() # return db connection
c = connection.cursor()
sql = "INSERT INTO customers(email, first_name, last_name, password, address, work_phone_cc, work_phone_number, home_phone_cc, home_phone_number) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
c.execute(sql, (email, first_name, last_name, password, address, work_phone_cc, work_phone_number, home_phone_cc, home_phone_number))
customer_id = c.lastrowid
connection.commit()
except pymysql.err.IntegrityError:
return template('create_profile.tpl', message="The profile already exists.")
except pymysql.err.Error as e:
return template('error.tpl', message='An error occurred. Error {!r}, errno is {}'.format(e, e.args[0]))
else:
c.close()
response.set_cookie("customer_id", str(customer_id))
redirect('/customer_main_menu')
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.java as java
gapic = gcp.GAPICGenerator()
service = 'redis'
versions = ['v1', 'v1beta1']
config_pattern = '/google/cloud/redis/artman_redis_{version}.yaml'
for version in versions:
java.gapic_library(
service=service,
version=version,
config_pattern=config_pattern,
package_pattern='com.google.cloud.{service}.{version}',
gapic=gapic,
)
java.common_templates(excludes=[
'.kokoro/continuous/integration.cfg',
'.kokoro/nightly/integration.cfg',
'.kokoro/presubmit/integration.cfg'
])
|
# Natural Language Toolkit: SemCor Corpus Reader
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Nathan Schneider <nschneid@cs.cmu.edu>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Corpus reader for the SemCor Corpus.
"""
from __future__ import absolute_import, unicode_literals
__docformat__ = 'epytext en'
from nltk.corpus.reader.api import *
from nltk.corpus.reader.xmldocs import XMLCorpusReader, XMLCorpusView
from nltk.tree import Tree
class SemcorCorpusReader(XMLCorpusReader):
"""
Corpus reader for the SemCor Corpus.
For access to the complete XML data structure, use the ``xml()``
method. For access to simple word lists and tagged word lists, use
``words()``, ``sents()``, ``tagged_words()``, and ``tagged_sents()``.
"""
def __init__(self, root, fileids, wordnet, lazy=True):
XMLCorpusReader.__init__(self, root, fileids)
self._lazy = lazy
self._wordnet = wordnet
def words(self, fileids=None):
"""
:return: the given file(s) as a list of words and punctuation symbols.
:rtype: list(str)
"""
return self._items(fileids, 'word', False, False, False)
def chunks(self, fileids=None):
"""
:return: the given file(s) as a list of chunks,
each of which is a list of words and punctuation symbols
that form a unit.
:rtype: list(list(str))
"""
return self._items(fileids, 'chunk', False, False, False)
def tagged_chunks(self, fileids=None, tag=('pos' or 'sem' or 'both')):
"""
:return: the given file(s) as a list of tagged chunks, represented
in tree form.
:rtype: list(Tree)
:param tag: `'pos'` (part of speech), `'sem'` (semantic), or `'both'`
to indicate the kind of tags to include. Semantic tags consist of
WordNet lemma IDs, plus an `'NE'` node if the chunk is a named entity
without a specific entry in WordNet. (Named entities of type 'other'
have no lemma. Other chunks not in WordNet have no semantic tag.
Punctuation tokens have `None` for their part of speech tag.)
"""
return self._items(fileids, 'chunk', False, tag!='sem', tag!='pos')
def sents(self, fileids=None):
"""
:return: the given file(s) as a list of sentences, each encoded
as a list of word strings.
:rtype: list(list(str))
"""
return self._items(fileids, 'word', True, False, False)
def chunk_sents(self, fileids=None):
"""
:return: the given file(s) as a list of sentences, each encoded
as a list of chunks.
:rtype: list(list(list(str)))
"""
return self._items(fileids, 'chunk', True, False, False)
def tagged_sents(self, fileids=None, tag=('pos' or 'sem' or 'both')):
"""
:return: the given file(s) as a list of sentences. Each sentence
is represented as a list of tagged chunks (in tree form).
:rtype: list(list(Tree))
:param tag: `'pos'` (part of speech), `'sem'` (semantic), or `'both'`
to indicate the kind of tags to include. Semantic tags consist of
WordNet lemma IDs, plus an `'NE'` node if the chunk is a named entity
without a specific entry in WordNet. (Named entities of type 'other'
have no lemma. Other chunks not in WordNet have no semantic tag.
Punctuation tokens have `None` for their part of speech tag.)
"""
return self._items(fileids, 'chunk', True, tag!='sem', tag!='pos')
def _items(self, fileids, unit, bracket_sent, pos_tag, sem_tag):
if unit=='word' and not bracket_sent:
# the result of the SemcorWordView may be a multiword unit, so the
# LazyConcatenation will make sure the sentence is flattened
_ = lambda *args: LazyConcatenation((SemcorWordView if self._lazy else self._words)(*args))
else:
_ = SemcorWordView if self._lazy else self._words
return concat([_(fileid, unit, bracket_sent, pos_tag, sem_tag, self._wordnet)
for fileid in self.abspaths(fileids)])
def _words(self, fileid, unit, bracket_sent, pos_tag, sem_tag):
"""
Helper used to implement the view methods -- returns a list of
tokens, (segmented) words, chunks, or sentences. The tokens
and chunks may optionally be tagged (with POS and sense
information).
:param fileid: The name of the underlying file.
:param unit: One of `'token'`, `'word'`, or `'chunk'`.
:param bracket_sent: If true, include sentence bracketing.
:param pos_tag: Whether to include part-of-speech tags.
:param sem_tag: Whether to include semantic tags, namely WordNet lemma
and OOV named entity status.
"""
assert unit in ('token', 'word', 'chunk')
result = []
xmldoc = ElementTree.parse(fileid).getroot()
for xmlsent in xmldoc.findall('.//s'):
sent = []
for xmlword in _all_xmlwords_in(xmlsent):
itm = SemcorCorpusReader._word(xmlword, unit, pos_tag, sem_tag, self._wordnet)
if unit=='word':
sent.extend(itm)
else:
sent.append(itm)
if bracket_sent:
result.append(SemcorSentence(xmlsent.attrib['snum'], sent))
else:
result.extend(sent)
assert None not in result
return result
@staticmethod
def _word(xmlword, unit, pos_tag, sem_tag, wordnet):
tkn = xmlword.text
if not tkn:
tkn = "" # fixes issue 337?
lemma = xmlword.get('lemma', tkn) # lemma or NE class
lexsn = xmlword.get('lexsn') # lex_sense (locator for the lemma's sense)
if lexsn is not None:
sense_key = lemma + '%' + lexsn
wnpos = ('n','v','a','r','s')[int(lexsn.split(':')[0])-1] # see http://wordnet.princeton.edu/man/senseidx.5WN.html
else:
sense_key = wnpos = None
redef = xmlword.get('rdf', tkn) # redefinition--this indicates the lookup string
# does not exactly match the enclosed string, e.g. due to typographical adjustments
# or discontinuity of a multiword expression. If a redefinition has occurred,
# the "rdf" attribute holds its inflected form and "lemma" holds its lemma.
# For NEs, "rdf", "lemma", and "pn" all hold the same value (the NE class).
sensenum = xmlword.get('wnsn') # WordNet sense number
isOOVEntity = 'pn' in xmlword.keys() # a "personal name" (NE) not in WordNet
pos = xmlword.get('pos') # part of speech for the whole chunk (None for punctuation)
if unit=='token':
if not pos_tag and not sem_tag:
itm = tkn
else:
itm = (tkn,) + ((pos,) if pos_tag else ()) + ((lemma, wnpos, sensenum, isOOVEntity) if sem_tag else ())
return itm
else:
ww = tkn.split('_') # TODO: case where punctuation intervenes in MWE
if unit=='word':
return ww
else:
if sensenum is not None:
try:
sense = wordnet.lemma_from_key(sense_key) # Lemma object
except Exception:
# cannot retrieve the wordnet.Lemma object. possible reasons:
# (a) the wordnet corpus is not downloaded;
                # (b) a nonexistent sense is annotated: e.g., such.s.00 triggers:
# nltk.corpus.reader.wordnet.WordNetError: No synset found for key u'such%5:00:01:specified:00'
# solution: just use the lemma name as a string
try:
sense = '%s.%s.%02d' % (lemma, wnpos, int(sensenum)) # e.g.: reach.v.02
except ValueError:
sense = lemma+'.'+wnpos+'.'+sensenum # e.g. the sense number may be "2;1"
bottom = [Tree(pos, ww)] if pos_tag else ww
if sem_tag and isOOVEntity:
if sensenum is not None:
return Tree(sense, [Tree('NE', bottom)])
else: # 'other' NE
return Tree('NE', bottom)
elif sem_tag and sensenum is not None:
return Tree(sense, bottom)
elif pos_tag:
return bottom[0]
else:
return bottom # chunk as a list
def _all_xmlwords_in(elt, result=None):
if result is None: result = []
for child in elt:
if child.tag in ('wf', 'punc'): result.append(child)
else: _all_xmlwords_in(child, result)
return result
class SemcorSentence(list):
"""
A list of words, augmented by an attribute ``num`` used to record
the sentence identifier (the ``n`` attribute from the XML).
"""
def __init__(self, num, items):
self.num = num
list.__init__(self, items)
class SemcorWordView(XMLCorpusView):
"""
    A stream backed corpus view specialized for use with the SemCor corpus.
"""
def __init__(self, fileid, unit, bracket_sent, pos_tag, sem_tag, wordnet):
"""
:param fileid: The name of the underlying file.
:param unit: One of `'token'`, `'word'`, or `'chunk'`.
:param bracket_sent: If true, include sentence bracketing.
:param pos_tag: Whether to include part-of-speech tags.
:param sem_tag: Whether to include semantic tags, namely WordNet lemma
and OOV named entity status.
"""
if bracket_sent: tagspec = '.*/s'
else: tagspec = '.*/s/(punc|wf)'
self._unit = unit
self._sent = bracket_sent
self._pos_tag = pos_tag
self._sem_tag = sem_tag
self._wordnet = wordnet
XMLCorpusView.__init__(self, fileid, tagspec)
def handle_elt(self, elt, context):
if self._sent: return self.handle_sent(elt)
else: return self.handle_word(elt)
def handle_word(self, elt):
return SemcorCorpusReader._word(elt, self._unit, self._pos_tag, self._sem_tag, self._wordnet)
def handle_sent(self, elt):
sent = []
for child in elt:
if child.tag in ('wf','punc'):
itm = self.handle_word(child)
if self._unit=='word':
sent.extend(itm)
else:
sent.append(itm)
else:
raise ValueError('Unexpected element %s' % child.tag)
return SemcorSentence(elt.attrib['snum'], sent)
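# Illustrative usage sketch (editorial, not part of the reader): assumes the
# 'semcor' and 'wordnet' corpora have been downloaded with nltk.download().
if __name__ == '__main__':
    from nltk.corpus import semcor
    print(semcor.fileids()[:3])
    print(semcor.tagged_sents(tag='both')[0])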
|
class BungieProfile:
"""Represents the data of a users' bungie profile.
:param dict responseData: The raw data given back by the API request.
uniqueName
The unique name of the bungie profile.
membershipID
The ID of the bungie profile.
displayName
The display name of the bungie profile.
xboxName
The username of the xbox account, if one is linked.
psnName
The username of the psn account, if one is linked.
blizzardName
The username of the blizzard account, if one is linked. Does not include discriminator
lastAccess
The date of the last time that the account was accessed.
firstAccess
The date of the first time that the account was accessed.
isDeleted
A boolean representing if the account has been deleted.
showActivity
Unknown
    localeInheritDefault
Unknown
showGroupMessaging
Unknown
locale
The locale of the bungie profile.
statusText
The text representing the status of the account.
statusDate
The date that the status was updated.
profilePicture
The profile picture of the bungie profile.
profilePictureUrl
The URL of the profile picture.
profileTheme
The theme of the bungie profile.
profileThemeID
The ID of the theme of the bungie profile.
successMessageFlags
Unknown
profileTitle
The title of the bungie profile.
profileTitleID
        The ID of the title of the bungie profile.
profileBio
The bio of the bungie profile.
"""
def __init__(self, responseData):
self.uniqueName = responseData.get("uniqueName", None)
self.membershipID = responseData.get("membershipId", None)
self.displayName = responseData.get("displayName", None)
self.xboxName = responseData.get("xboxDisplayName", None)
self.psnName = responseData.get("psnDisplayName", None)
self.blizzardName = responseData.get("blizzardDisplayName", None)
self.lastAccess = responseData.get("lastUpdate", None)
self.firstAccess = responseData.get("firstAccess", None)
self.isDeleted = responseData.get("isDeleted", None)
self.showActivity = responseData.get("showActivity", None)
self.localeInheritDefault = responseData.get("localeInheritDefault", None)
self.showGroupMessaging = responseData.get("showGroupMessaging", None)
self.locale = responseData.get("locale", None)
self.statusText = responseData.get("statusText", None)
self.statusDate = responseData.get("statusDate", None)
self.profilePicture = responseData.get("profilePicture", None)
self.profilePictureUrl = responseData.get("profilePicturePath", None)
self.profileTheme = responseData.get("profileThemeName", None)
self.profileThemeID = responseData.get("profileTheme", None)
self.successMessageFlags = responseData.get("successMessageFlags", None)
self.profileTitleID = responseData.get("userTitle", None)
self.profileTitle = responseData.get("userTitleDisplay", None)
self.profileBio = responseData.get("about", None)
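# Illustrative usage sketch (editorial): a minimal hand-written response dict; the
# field values are assumptions, not real API output.
if __name__ == '__main__':
    sample = {
        "uniqueName": "Guardian#1234",
        "membershipId": "12345678901234567",
        "displayName": "Guardian",
        "psnDisplayName": "guardian_psn",
        "about": "Just a sample profile.",
    }
    profile = BungieProfile(sample)
    print(profile.uniqueName, profile.membershipID, profile.psnName)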
|
import math
cx = 0.5
focal_length = 0.6028125
alpha = 0.0
chi = 0
mx = cx / focal_length
r2 = mx ** 2
mz = (1 - alpha ** 2 * r2) / (alpha * math.sqrt(1 - (2 * alpha - 1) * r2) + 1 - alpha)
beta = (mz * chi + math.sqrt(mz ** 2 + (1 - chi ** 2) * r2)) / (mz ** 2 + r2)
print(2 * (math.pi / 2 - math.atan2(beta * mz - chi, beta * mx)) * 180 / math.pi)
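# Editorial note: with alpha = 0 and chi = 0 the model above reduces to a plain
# pinhole camera, so the printed value equals 2 * atan(cx / focal_length) in
# degrees -- the angle of view subtended by an image point at x = cx.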
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 8 22:09:47 2021
@author: Apple
"""
def start():
import numpy as np
import scipy.io as sio
import sklearn.ensemble
from sklearn import svm
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
import joblib
from sklearn import neighbors
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.tree import DecisionTreeClassifier
import random
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import VotingClassifier
from nonconformist.nc import MarginErrFunc
import warnings
warnings.filterwarnings("ignore", message="Numerical issues were encountered ")
import sys
sys.path.insert(0,'/root/RISE-Version2/')
from Statistical_vector.statistical_vector import train_statistical_vector, test_statistical_vector_param, non_condition_p
min_max_scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(-1,1))
myclassifier = [svm.SVC(probability = True, break_ties=True, decision_function_shape='ovr', random_state=0),
sklearn.ensemble.RandomForestClassifier(n_estimators=100,random_state=0),
DecisionTreeClassifier(random_state=0),neighbors.KNeighborsClassifier(n_neighbors=10),
LogisticRegression(random_state=0),GradientBoostingClassifier(n_estimators=100,random_state=0),
LinearDiscriminantAnalysis(), AdaBoostClassifier(),
GaussianNB(),QuadraticDiscriminantAnalysis()]
times = ['pos3'] ##test set
train_name = ['pos2','pos1','pos4','pos5'] ##train set
filepath = r'/root/RISE-Version2/Jupyter/AllSee_test/S2/data/'
filename = ['_wave_phase_rssi_80D']
class_index = 1
class_num = 6
##load test data
#print('\n---------------test data is ' + times[0] + ' scenario-------------\n')
data = sio.loadmat(filepath + 'rfid_' + times[0] + filename[0] + '.mat')
label = sio.loadmat(filepath + 'label30.mat')
xx2 = data['wave_phase_rssi_80D']
yy2 = label['label30']
yy2 = yy2.flatten()
test_x = xx2
test_y = yy2
##load train data
#print('\n-------training data is ' + str(train_name) + ' scenario----------\n')
xx1 = np.empty(shape=[0, xx2.shape[1]])
yy1 = np.empty(shape=[1, 0],dtype=int)
for ii in train_name:
data = sio.loadmat(filepath + 'rfid_' + ii + filename[0] + '.mat')
label = sio.loadmat(filepath + 'label30.mat')
x1 = data['wave_phase_rssi_80D']
y1 = label['label30']
x1 = min_max_scaler.fit_transform(x1)
xx1 = np.append(xx1, x1, axis=0)
yy1 = np.append(yy1, y1, axis=1)
yy1 = yy1.flatten()
index = [t for t in range(xx1.shape[0])]
random.shuffle(index)
x_train11 = xx1[index]
x_train1 = x_train11
y_train1 = yy1[index]
y_train1 = y_train1 - 1
############################ Without RISE ###############################
print('\n-------- The performance of the underlying model without RISE --------\n')
x_test1 = min_max_scaler.fit_transform(test_x)
y_test1 = test_y
y_test1 = y_test1 - 1
clf_dif = myclassifier[class_index]
clf_dif.fit(x_train1,y_train1)
acc_dif = clf_dif.score(x_test1,y_test1)
print('The accuracy without RISE: ',acc_dif)
y_true_dif, y_pred_dif = y_test1,clf_dif.predict(x_test1)
test_confusion_matrix = confusion_matrix(y_true_dif, y_pred_dif)
print('Confusion matrix without RISE: \n',test_confusion_matrix)
return x_train1, y_train1, x_test1, y_test1, myclassifier, y_true_dif, y_pred_dif, class_num, class_index
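# Illustrative usage sketch (editorial, not part of the original script): the data
# paths inside start() are hard-coded, so this only runs where they exist.
if __name__ == '__main__':
    (x_train, y_train, x_test, y_test,
     classifiers, y_true, y_pred, n_classes, clf_index) = start()
    print('train:', x_train.shape, 'test:', x_test.shape, 'classes:', n_classes)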
|
#!/usr/bin/env python3
import argparse
import os
import os.path
import pickle
from collections import OrderedDict
parser = argparse.ArgumentParser()
parser.add_argument('--lang1', default='cpp', help='language 1')
parser.add_argument('--lang2', default='java', help='language 2')
parser.add_argument('--output', default='maps', help='output')
parser.add_argument('data', nargs='+', help='data sets')
opt = parser.parse_args()
print(opt)
maps = {}
maps1 = {}
maps1_filename = 'maps.%s.pkl' % opt.lang1
if os.path.exists(maps1_filename):
with open(maps1_filename, 'rb') as f:
        maps1 = pickle.load(f)
        maps1 = OrderedDict(sorted(maps1.items(), key=lambda t: t[0]))
maps2 = {}
maps2_filename = 'maps.%s.pkl' % opt.lang2
if os.path.exists(maps2_filename):
with open(maps2_filename, 'rb') as f:
maps2 = pickle.load(f)
mapping = {}
for key, value in maps2.items():
if key in maps1.keys():
mapping[value] = maps1[key]
else:
maps1[key] = len(mapping.keys())
mapping[value] = maps1[key]
# using mapping to align the lang2 with lang1
def aligning(source, target, mapping):
print("%s %s" % (source, target))
with open(source, 'r') as s:
with open(target, 'w') as t:
for line in s.readlines():
numbers = line.rstrip().split()
if len(numbers)>2 and numbers[0]!='?':
line_t = "%s %s %s\n" % (mapping[numbers[0]], numbers[1], mapping[numbers[2]])
else:
line_t = line
t.write(line_t)
s.close()
t.close()
from_folder=opt.data[0]
to_folder=opt.data[1]
for folder in ['train', 'test']:
for f in os.listdir(os.path.join(from_folder, folder)):
if not os.path.exists(os.path.join(to_folder, folder)):
os.makedirs(os.path.join(to_folder, folder))
aligning(os.path.join(from_folder, folder, f), os.path.join(to_folder, folder, f), mapping)
with open("maps.cll.pkl", 'wb') as f:
pickle.dump(mapping, f, 2)
f.close()
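# Editorial note: aligning() assumes each data line holds at least three
# whitespace-separated fields, e.g. "12 0.85 47" (an assumed example), where the
# first and third fields are ids remapped through `mapping` and the second is
# copied unchanged; lines starting with '?' or with fewer than three fields are
# written out verbatim.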
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class UserBestPointsItem(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
UserBestPointsItem - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'granularity_type': 'str',
'points': 'int',
'date_start_workday': 'date',
'date_end_workday': 'date',
'rank': 'int'
}
self.attribute_map = {
'granularity_type': 'granularityType',
'points': 'points',
'date_start_workday': 'dateStartWorkday',
'date_end_workday': 'dateEndWorkday',
'rank': 'rank'
}
self._granularity_type = None
self._points = None
self._date_start_workday = None
self._date_end_workday = None
self._rank = None
@property
def granularity_type(self):
"""
Gets the granularity_type of this UserBestPointsItem.
Best points aggregation interval granularity
:return: The granularity_type of this UserBestPointsItem.
:rtype: str
"""
return self._granularity_type
@granularity_type.setter
def granularity_type(self, granularity_type):
"""
Sets the granularity_type of this UserBestPointsItem.
Best points aggregation interval granularity
:param granularity_type: The granularity_type of this UserBestPointsItem.
:type: str
"""
allowed_values = ["Monthly", "Weekly", "Daily"]
if granularity_type.lower() not in map(str.lower, allowed_values):
# print("Invalid value for granularity_type -> " + granularity_type)
self._granularity_type = "outdated_sdk_version"
else:
self._granularity_type = granularity_type
@property
def points(self):
"""
Gets the points of this UserBestPointsItem.
Gamification points
:return: The points of this UserBestPointsItem.
:rtype: int
"""
return self._points
@points.setter
def points(self, points):
"""
Sets the points of this UserBestPointsItem.
Gamification points
:param points: The points of this UserBestPointsItem.
:type: int
"""
self._points = points
@property
def date_start_workday(self):
"""
Gets the date_start_workday of this UserBestPointsItem.
Start workday of the best points aggregation interval. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd
:return: The date_start_workday of this UserBestPointsItem.
:rtype: date
"""
return self._date_start_workday
@date_start_workday.setter
def date_start_workday(self, date_start_workday):
"""
Sets the date_start_workday of this UserBestPointsItem.
Start workday of the best points aggregation interval. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd
:param date_start_workday: The date_start_workday of this UserBestPointsItem.
:type: date
"""
self._date_start_workday = date_start_workday
@property
def date_end_workday(self):
"""
Gets the date_end_workday of this UserBestPointsItem.
End workday of the best points aggregation interval. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd
:return: The date_end_workday of this UserBestPointsItem.
:rtype: date
"""
return self._date_end_workday
@date_end_workday.setter
def date_end_workday(self, date_end_workday):
"""
Sets the date_end_workday of this UserBestPointsItem.
End workday of the best points aggregation interval. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd
:param date_end_workday: The date_end_workday of this UserBestPointsItem.
:type: date
"""
self._date_end_workday = date_end_workday
@property
def rank(self):
"""
Gets the rank of this UserBestPointsItem.
The rank of this user
:return: The rank of this UserBestPointsItem.
:rtype: int
"""
return self._rank
@rank.setter
def rank(self, rank):
"""
Sets the rank of this UserBestPointsItem.
The rank of this user
:param rank: The rank of this UserBestPointsItem.
:type: int
"""
self._rank = rank
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
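# A minimal usage sketch (illustrative values, not taken from the API):
#   item = UserBestPointsItem()
#   item.granularity_type = "Weekly"   # values outside the allowed list are
#                                      # coerced to "outdated_sdk_version"
#   item.points = 1200
#   item.rank = 3
#   print(item.to_json())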
|
from baselines.common.mpi_running_mean_std import RunningMeanStd
import baselines.common.tf_util as U
import tensorflow as tf
import gym
from baselines.common.distributions import make_pdtype
class MlpPolicy(object):
recurrent = False
def __init__(self, name, *args, **kwargs):
with tf.variable_scope(name):
self._init(*args, **kwargs)
self.scope = tf.get_variable_scope().name
def _init(self, ob_space, ac_space, layers_val, layers_pol, gaussian_fixed_var=True,
dist='gaussian', ):
assert isinstance(ob_space, gym.spaces.Box)
self.dist = dist
self.pdtype = pdtype = make_pdtype(ac_space, dist=dist)
sequence_length = None
ob = U.get_placeholder(name="ob", dtype=tf.float32, shape=[sequence_length] + list(ob_space.shape))
with tf.variable_scope("obfilter"):
self.ob_rms = RunningMeanStd(shape=ob_space.shape)
with tf.variable_scope('vf'):
obz = tf.clip_by_value((ob - self.ob_rms.mean) / self.ob_rms.std, -5.0, 5.0)
last_out = obz
for i, size in enumerate(layers_val):
last_out = tf.nn.relu(tf.layers.dense(last_out, size, name="fc%i" % (i + 1), kernel_initializer=U.normc_initializer(1.0)))
self.vpred = tf.layers.dense(last_out, 1, name='final', kernel_initializer=U.normc_initializer(1.0))[:, 0]
with tf.variable_scope('pol'):
last_out = obz
for i, size in enumerate(layers_pol):
last_out = tf.nn.tanh(tf.layers.dense(last_out, size, name='fc%i' % (i + 1), kernel_initializer=U.normc_initializer(1.0)))
if gaussian_fixed_var and isinstance(ac_space, gym.spaces.Box):
mean = tf.layers.dense(last_out, pdtype.param_shape()[0] // 2, name='final', kernel_initializer=U.normc_initializer(0.01))
logstd = tf.get_variable(name="logstd", shape=[1, pdtype.param_shape()[0] // 2], initializer=tf.zeros_initializer())
pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
else:
pdparam = tf.layers.dense(last_out, pdtype.param_shape()[0], name='final', kernel_initializer=U.normc_initializer(0.01))
self.pd = pdtype.pdfromflat(pdparam)
self.state_in = []
self.state_out = []
stochastic = tf.placeholder(dtype=tf.bool, shape=())
ac = U.switch(stochastic, self.pd.sample(), self.pd.mode())
if dist == 'gaussian':
self._act = U.function([stochastic, ob], [ac, self.vpred, self.pd.std, self.pd.mean, self.pd.logstd])
elif dist == 'beta':
self._act = U.function([stochastic, ob], [ac, self.vpred, self.pd.alpha, self.pd.beta, self.pd.alpha_beta])
def act(self, stochastic, ob):
ac1, vpred1, stat1, stat2, stat3 = self._act(stochastic, ob[None])
return ac1[0], vpred1[0], stat1[0], stat2[0], stat3[0]
def get_variables(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)
def get_trainable_variables(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
def get_initial_state(self):
return []
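# A minimal usage sketch (assumes the OpenAI baselines tf_util helpers and an
# old-style Gym Box environment; the environment name is illustrative):
#   env = gym.make("Pendulum-v0")
#   with U.make_session(num_cpu=1):
#       pi = MlpPolicy("pi", env.observation_space, env.action_space,
#                      layers_val=[64, 64], layers_pol=[64, 64])
#       U.initialize()
#       ac, vpred, s1, s2, s3 = pi.act(stochastic=True, ob=env.reset())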
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('pynifti', parent_package, top_path)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import glob
import os
import re
from manifests.input_manifest import InputManifest
from manifests.manifests import Manifests
class InputManifests(Manifests):
def __init__(self):
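        # manifests_path is assumed to be provided elsewhere (on this class or
        # its Manifests base class); it is not defined in this snippet.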
files = glob.glob(os.path.join(self.manifests_path, "**/opensearch-*.yml"))
# there's an opensearch-1.0.0-maven.yml that we want to skip
files = [f for f in files if re.search(r"[\\/]opensearch-([0-9.]*)\.yml$", f)]
super().__init__(InputManifest, files)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copy files to latest/ in the project's default file storage.
"""
import os
import re
import boto3
import random
import logging
from django.conf import settings
from calaccess_raw.models import RawDataVersion
from django.core.management.base import BaseCommand
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Copy files to latest/ in the project's default file storage.
"""
help = "Copy files to latest/ in the project's default file storage."
def handle(self, *args, **options):
# set up boto session
self.session = boto3.Session(
aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
region_name=settings.AWS_S3_REGION_NAME
)
# and clients
self.s3 = self.session.client('s3')
self.cloudfront = self.session.client('cloudfront')
# Delete existing latest files
latest_key_list = self.s3.list_objects_v2(
Bucket=settings.AWS_STORAGE_BUCKET_NAME,
Prefix='latest/',
)
if latest_key_list['KeyCount'] > 0:
self.delete_keys(latest_key_list)
# get the version of last update that finished
v = RawDataVersion.objects.exclude(
update_finish_datetime=None
).latest('update_finish_datetime')
logger.debug(
'Copying files for {:%m-%d-%Y %H:%M:%S} version to latest/'.format(
v.release_datetime
)
)
# save downloaded zip to the latest dir
if v.download_zip_archive:
# strip the datetime from the zip name
zip_name = self.strip_datetime(
os.path.basename(v.download_zip_archive.name),
)
self.copy(
v.download_zip_archive.name,
self.get_latest_path(zip_name),
)
# save cleaned zip to the latest dir
if v.clean_zip_archive:
# strip the datetime from the zip name
zip_name = self.strip_datetime(
os.path.basename(v.clean_zip_archive.name),
)
self.copy(
v.clean_zip_archive.name,
self.get_latest_path(zip_name),
)
# loop through all of the raw data files
for f in v.files.all():
# save downloaded file to the latest directory
self.copy(
f.download_file_archive.name,
self.get_latest_path(f.download_file_archive.name)
)
if f.clean_file_archive:
# save cleaned file to the latest directory
self.copy(
f.clean_file_archive.name,
self.get_latest_path(f.clean_file_archive.name)
)
if f.error_log_archive:
# save error log file to the latest directory
self.copy(
f.error_log_archive.name,
self.get_latest_path(f.error_log_archive.name)
)
# save processed zip to the latest dir
if v.processed_version:
for zf in v.processed_version.zips.all():
# strip the datetime from the zip name
zip_name = self.strip_datetime(
os.path.basename(zf.zip_archive.name),
)
self.copy(
zf.zip_archive.name,
self.get_latest_path(zip_name),
)
# same for processed files
for f in v.processed_version.files.all():
# save processed file to the latest directory
if f.file_archive:
# save cleaned file to the latest directory
self.copy(
f.file_archive.name,
self.get_latest_path(f.file_archive.name)
)
# Clear the cloudfront cache by sending an invalidation request
if latest_key_list['KeyCount'] > 0:
self.invalidate_keys(latest_key_list)
def strip_datetime(self, filename):
"""
Removes the datetime portion from filename.
"""
return re.sub(
r'_\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}',
'',
filename,
)
def get_latest_path(self, old_path):
"""
Convert the file path to a latest file path
"""
base_name = os.path.basename(old_path)
return os.path.join("latest", base_name)
def copy(self, source, target):
"""
Copies the provided source key to the provided target key
"""
logger.debug('Saving copy of {} to {}'.format(os.path.basename(source), target))
copy_source = {
'Bucket': settings.AWS_STORAGE_BUCKET_NAME,
'Key': source
}
self.s3.copy(
copy_source,
settings.AWS_STORAGE_BUCKET_NAME,
target,
)
def delete_keys(self, key_list):
"""
Delete all the provided s3 keys.
"""
logger.debug('Deleting {} keys currently under latest/'.format(key_list['KeyCount']))
# format
objects = [{'Key': o['Key']} for o in key_list['Contents']]
# delete
self.s3.delete_objects(
Bucket=settings.AWS_STORAGE_BUCKET_NAME,
Delete={
'Objects': objects,
}
)
def invalidate_keys(self, key_list):
"""
        Send CloudFront an invalidation request to clear cached copies of the provided keys.
"""
logger.debug(
"Sending invalidation request for %s keys under latest/" % key_list['KeyCount']
)
items = ["/{}".format(o['Key']) for o in key_list['Contents']]
self.cloudfront.create_invalidation(
DistributionId=settings.CLOUDFRONT_ARCHIVED_DATA_DISTRIBUTION,
InvalidationBatch={
# What to invalidate
'Paths': {
'Quantity': key_list['KeyCount'],
'Items': items
},
# A random name for the request
'CallerReference': str(random.getrandbits(128))
}
)
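# Run via Django's manage.py; the management command name comes from this
# module's filename, which is not shown in this snippet.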
|
"""
eZmax API Definition (Full)
    This API exposes all the functionalities for the eZmax and eZsign applications. # noqa: E501
The version of the OpenAPI document: 1.1.7
Contact: support-api@ezmax.ca
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import eZmaxApi
from eZmaxApi.model.common_response_obj_debug import CommonResponseObjDebug
from eZmaxApi.model.common_response_obj_debug_payload_get_list import CommonResponseObjDebugPayloadGetList
globals()['CommonResponseObjDebug'] = CommonResponseObjDebug
globals()['CommonResponseObjDebugPayloadGetList'] = CommonResponseObjDebugPayloadGetList
from eZmaxApi.model.common_response_get_list import CommonResponseGetList
class TestCommonResponseGetList(unittest.TestCase):
"""CommonResponseGetList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCommonResponseGetList(self):
"""Test CommonResponseGetList"""
# FIXME: construct object with mandatory attributes with example values
# model = CommonResponseGetList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|