text
stringlengths 2
999k
|
|---|
from annotypes import Anno, deserialize_object, Array
from scanpointgenerator.compat import np
from scanpointgenerator.core import Generator, AAlternate
# Annotyped alias: an Array of Generator instances, used to describe the
# list of component generators a ConcatGenerator is built from.
with Anno("The array containing points"):
    AGenerator = Array[Generator]
@Generator.register_subclass(
    "scanpointgenerator:generator/ConcatGenerator:1.0")
class ConcatGenerator(Generator):
    """Concatenate generators so they operate one after another.

    All component generators must share the same axes and units, and must
    not set ``alternate`` individually; alternation is controlled on this
    top-level generator only.
    """

    # Maximum allowed gap between the last bound of one component generator
    # and the first bound of the next when merging bound arrays.
    DIFF_LIMIT = 1e-05

    def __init__(self, generators, alternate=False):
        # type: (AGenerator, AAlternate) -> None
        self.generators = AGenerator([deserialize_object(g, Generator)
                                      for g in generators])
        assert len(self.generators) > 0, "At least one generator needed"
        # Axes and units are taken from the first generator; every other
        # component must agree with them.
        units = self.generators[0].units
        axes = self.generators[0].axes
        # Total number of points is the sum of the component sizes.
        size = sum(generator.size for generator in self.generators)
        for generator in self.generators:
            assert generator.axes == axes, "You cannot Concat generators " \
                                           "on different axes"
            assert generator.units == units, "You cannot Concat " \
                                             "generators with different units"
            assert not generator.alternate, \
                "Alternate should not be set on the component generators of a" \
                "ConcatGenerator. Set it on the top level ConcatGenerator only."
        super(ConcatGenerator, self).__init__(axes=axes,
                                              size=size,
                                              units=units,
                                              alternate=alternate)

    def prepare_arrays(self, index_array):
        """Merge the point (or bound) arrays of the component generators.

        The ConcatGenerator gets its positions from its sub-generators.
        """
        # Fixed: the original pre-filled this dict with the ``np.array``
        # *function object* as a placeholder for every axis. Each axis is
        # assigned a real array on the first loop iteration below, so no
        # placeholder is needed at all.
        merged_arrays = {}
        first = True
        if index_array.size == self.size + 1:
            # getting bounds (one more element than points)
            preparing_bounds = True
        else:
            # getting positions
            preparing_bounds = False
        for generator in self.generators:
            if preparing_bounds:
                # getting bounds
                arr = generator.prepare_arrays(index_array[:generator.size + 1])
            else:
                # getting positions
                arr = generator.prepare_arrays(index_array[:generator.size])
            for axis in self.axes:
                axis_array = arr[axis]
                if first:
                    merged_arrays[axis] = axis_array
                else:
                    # This avoids appending an ndarray to a list
                    cur_array = merged_arrays[axis]
                    if preparing_bounds:
                        # Adjacent generators must meet: the last bound of
                        # the previous generator equals the first bound of
                        # this one (within DIFF_LIMIT); the duplicate bound
                        # is dropped before appending.
                        assert np.abs(cur_array[-1] - axis_array[0]) < self.DIFF_LIMIT, \
                            "Merged generator bounds don't meet" \
                            " for axis %s (%f, %f)" \
                            % (str(axis), cur_array[-1],
                               axis_array[0])
                        cur_array = np.append(cur_array[:-1], axis_array)
                    else:
                        cur_array = np.append(cur_array, axis_array)
                    merged_arrays[axis] = cur_array
            first = False
        return merged_arrays
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from oslo_config import cfg
from oslo_config import types
import gnocchi.archive_policy
import gnocchi.indexer
import gnocchi.storage
import gnocchi.storage.ceph
import gnocchi.storage.file
import gnocchi.storage.swift
def list_opts():
    """Return Gnocchi's configuration options grouped by section.

    Each entry is a ``(section_name, options)`` pair suitable for
    oslo.config option discovery.
    """
    api_opts = (
        cfg.IntOpt('port',
                   default=8041,
                   help='The port for the Gnocchi API server.'),
        cfg.StrOpt('host',
                   default='0.0.0.0',
                   help='The listen IP for the Gnocchi API server.'),
        cfg.BoolOpt('pecan_debug',
                    default=False,
                    help='Toggle Pecan Debug Middleware.'),
        cfg.MultiStrOpt(
            'middlewares',
            default=['keystonemiddleware.auth_token.AuthProtocol'],
            help='Middlewares to use'),
        cfg.Opt('workers', type=types.Integer(min=1),
                help='Number of workers for Gnocchi API server. '
                     'By default the available number of CPU is used.'),
    )
    # Options of every storage driver, chained lazily.
    storage_opts = itertools.chain(gnocchi.storage._carbonara.OPTS,
                                   gnocchi.storage.OPTS,
                                   gnocchi.storage.ceph.OPTS,
                                   gnocchi.storage.file.OPTS,
                                   gnocchi.storage.swift.OPTS)
    statsd_opts = (
        cfg.StrOpt(
            'resource_id',
            help='Resource UUID to use to identify statsd in Gnocchi'),
        cfg.StrOpt(
            'user_id',
            help='User UUID to use to identify statsd in Gnocchi'),
        cfg.StrOpt(
            'project_id',
            help='Project UUID to use to identify statsd in Gnocchi'),
        cfg.StrOpt(
            'archive_policy_name',
            help='Archive policy name to use when creating metrics'),
        cfg.FloatOpt(
            'flush_delay',
            help='Delay between flushes'),
    )
    return [
        ("indexer", gnocchi.indexer.OPTS),
        ("api", api_opts),
        ("storage", storage_opts),
        ("statsd", statsd_opts),
        ("archive_policy", gnocchi.archive_policy.OPTS),
    ]
|
"""Takeoff task."""
import numpy as np
from gym import spaces
from geometry_msgs.msg import Vector3, Point, Quaternion, Pose, Twist, Wrench
from quad_controller_rl.tasks.base_task import BaseTask
class All(BaseTask):
    """Simple task where the goal is to lift off the ground and reach a target height."""
    # NOTE(review): class is named ``All`` but docstring and debug strings
    # say "Takeoff" — confirm the intended task name.

    def __init__(self):
        # State space: <position_x, .._y, .._z, orientation_x, .._y, .._z, .._w>
        cube_size = 300.0  # env is cube_size x cube_size x cube_size
        self.observation_space = spaces.Box(
            np.array([- cube_size / 2, - cube_size / 2, 0.0, -1.0, -1.0, -1.0, -1.0]),
            np.array([ cube_size / 2, cube_size / 2, cube_size, 1.0, 1.0, 1.0, 1.0]))
        #print("Takeoff(): observation_space = {}".format(self.observation_space))  # [debug]
        # Action space: <force_x, .._y, .._z, torque_x, .._y, .._z>
        max_force = 25.0
        max_torque = 25.0
        self.action_space = spaces.Box(
            np.array([-max_force, -max_force, -max_force, -max_torque, -max_torque, -max_torque]),
            np.array([ max_force, max_force, max_force, max_torque, max_torque, max_torque]))
        #print("Takeoff(): action_space = {}".format(self.action_space))  # [debug]
        # Task-specific parameters
        self.max_duration = 5.0  # secs
        self.target_z = 10.0  # target height (z position) to reach for successful takeoff
        self.episodes = 0  # episode counter; never updated within this class

    def reset(self):
        # Nothing to reset; just return initial condition
        return Pose(
            position=Point(0.0, 0.0, np.random.normal(0.5, 0.1)),  # drop off from a slight random height
            # NOTE(review): (0, 0, 0, 0) is not a unit quaternion — identity
            # orientation is normally (0, 0, 0, 1); confirm the simulator
            # accepts the all-zero value.
            orientation=Quaternion(0.0, 0.0, 0.0, 0.0),
        ), Twist(
            linear=Vector3(0.0, 0.0, 0.0),
            angular=Vector3(0.0, 0.0, 0.0)
        )

    def update(self, timestamp, pose, angular_velocity, linear_acceleration):
        """Score the current pose and return the next control command.

        Reward is phased by *timestamp*: [0, 2) takeoff, [2, 4) hover,
        [4, 6) landing, then a terminal catch-all.
        """
        # Prepare state vector (pose only; ignore angular_velocity, linear_acceleration)
        state = np.array([
            pose.position.x, pose.position.y, pose.position.z,
            pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]).reshape(1, -1)
        # Compute reward / penalty and check if this episode is complete
        done = False
        if(timestamp < 2 and pose.position.z < self.target_z):
            # Takeoff phase: distance-to-target penalty, capped at -20.
            reward = -min(abs(self.target_z - pose.position.z), 20.0) # reward = zero for matching target z, -ve as you go farther, upto -20
            # NOTE(review): unreachable — the enclosing condition requires
            # pose.position.z < self.target_z, so this bonus can never fire.
            if pose.position.z >= self.target_z: # agent has crossed the target height
                reward += 10.0 # bonus reward
                # takeoff_done = True
        # Hover reward
        elif(timestamp < 4):
            reward = -min(abs(self.target_z - pose.position.z), 20.0) # reward = zero for matching target z, -ve as you go farther, upto -20
            if abs(pose.position.z-self.target_z) >= 2 : # agent has deviated from the target height
                reward -= 10.0 # penalty for drifting off target
        # Landing reward
        elif(timestamp < 6):
            # Penalise altitude (x3) plus any angular velocity and linear
            # acceleration components.
            reward = (abs(pose.position.z)*3 + abs(angular_velocity.x) + abs(angular_velocity.y) + abs(angular_velocity.z) + abs(linear_acceleration.x) + abs(linear_acceleration.y) + abs(linear_acceleration.z))
            # Normalizing the reward
            reward = -min(reward / 3 , 20.0)
            if pose.position.z <= 0.8: # agent is (almost) back on the ground
                reward += 10.0 # bonus reward
            elif timestamp > self.max_duration:# or pose.position.x > 5 ... — agent has run out of time
                reward -= 10.0 # extra penalty
                done = True
        else:
            # Past all phases: terminate with the usual distance penalty.
            done = True
            reward = -min(abs(self.target_z - pose.position.z), 20.0) # reward = zero for matching target z, -ve as you go farther, upto -20
        # NOTE(review): indentation reconstructed — these final checks are
        # assumed to apply after every phase branch; confirm against the
        # original file.
        if pose.position.z >= self.target_z: # agent has crossed the target height
            reward += 10.0 # bonus reward
            done = True
        elif timestamp > self.max_duration: # agent has run out of time
            reward -= 10.0 # extra penalty
            done = True
        # Take one RL step, passing in current state and reward, and obtain action
        # Note: The reward passed in here is the result of past action(s)
        action = self.agent.step(state, reward, done) # note: action = <force; torque> vector
        # Convert to proper force command (a Wrench object) and return it
        if action is not None:
            action = np.clip(action.flatten(), self.action_space.low, self.action_space.high) # flatten, clamp to action space limits
            return Wrench(
                force=Vector3(action[0], action[1], action[2]),
                torque=Vector3(action[3], action[4], action[5])
                # force=Vector3(0,0, action[2]),
                # torque=Vector3(0,0,0)
            ), done
        else:
            return Wrench(), done
|
from difflib import SequenceMatcher
from .Helpers import get_numbers_from_string, date_time_string_to_seconds
from .YtSettings import YtSettings
# Marker strings used when scoring and cleaning YouTube search results.
TITLE = {
    # Suffixes that mark an official upload in a video title.
    # NOTE(review): these entries are capitalised; callers comparing them
    # against lowercased titles must take care with case.
    'OFFICIALS': ['(Official Video)', '(Official Music Video)'],
    # Substrings stripped from titles when building a filename.
    'DUMP_STAMPS': ['Official Video', 'Official Music Video', 'HQ', 'HD'],
}
CHANNEL = {
    # Marker substring for official artist channels.
    'OFFICIALS': ['Official']
}
# Bracket pairs left empty after stamp removal; deleted from filenames.
EMPTY_BRACKETS = ['()', '( )', '[]', '[ ]', '{}', '{ }']
class YouTube:
    """One YouTube search result, parsed and scored against a Spotify track."""

    def __init__(self, spotify_track, video_data):
        """Parse *video_data* and compute its match score.

        :param spotify_track: dict with the wanted track's metadata
            (full_name, duration, is_official / is_remix /
            is_instrumental / is_live flags).
        :param video_data: raw video renderer dict from a YouTube search.
        """
        self.SPOTIFY_TRACK = spotify_track
        self.data = self.parse_video_data(video_data)

    def parse_video_data(self, video_data):
        """Extract id, channel, title, url, duration, views and a match score."""
        res = dict()
        res['id'] = video_data.get('videoId', None)
        res['channel'] = video_data.get('longBylineText', {}).get('runs', [[{}]])[0].get('text', None)
        res['url_suffix'] = video_data.get('navigationEndpoint', {}).get('commandMetadata', {}).get(
            'webCommandMetadata', {}).get('url', None)
        res['title'] = video_data.get('title', {}).get('runs', [[{}]])[0].get('text', None)
        res['url'] = self.parse_url(
            video_data.get('navigationEndpoint', {}).get('commandMetadata', {}).get(
                'webCommandMetadata', {}).get('url', None))
        res['duration'] = self.parse_duration(video_data.get('lengthText', {}).get('simpleText', 0))
        res['views'] = self.parse_views(video_data.get('viewCountText', {}).get('simpleText', 0))
        res['search_ratio'] = self.count_search_ratio(res)
        return res

    def parse_url(self, url):
        """Prefix the relative video url with the configured YouTube base url."""
        return f'{YtSettings().YT_BASE_URL}{url}'

    def parse_duration(self, duration):
        """Convert a display duration string into seconds."""
        return date_time_string_to_seconds(duration)

    def parse_views(self, views):
        """Extract the numeric view count from its display string."""
        return get_numbers_from_string(views)

    def parse_filename(self, title):
        """Strip stamp markers, leftover brackets and extra spaces from *title*."""
        # remove dump stamps
        for dump_stamp in TITLE['DUMP_STAMPS']:
            title = title.replace(dump_stamp, '')
        # collapse multiple spaces left behind by the removal
        title = ' '.join(title.split())
        # drop bracket pairs that the stamp removal left empty
        for dump_stamp in EMPTY_BRACKETS:
            title = title.replace(dump_stamp, '')
        return title.strip()

    def count_search_ratio(self, res):
        """Combine title, channel and duration ratings into a single score.

        Channel and duration only contribute when the title alone rates at
        least 0.6; otherwise the score is the title rating by itself.
        """
        search_ratio = dict()
        search_ratio['title'] = self.rate_title(res['title'].lower())
        if search_ratio['title'] >= 0.6:
            search_ratio['channel'] = self.rate_channel(res['channel'].lower())
            search_ratio['duration'] = self.rate_duration(res['duration'])
        search_ratio['whole'] = sum(search_ratio.values())
        return search_ratio['whole']

    def rate_title(self, title_lowercase):
        """Rate similarity between the video title and the track's full name.

        Bug fix: the TITLE['OFFICIALS'] markers are capitalised while the
        title has been lowercased here, so the original substring test could
        never match — lowercase the markers before testing.
        """
        rate = round(SequenceMatcher(None, title_lowercase,
                                     self.SPOTIFY_TRACK['full_name'].lower()).ratio(), 2)
        if self.SPOTIFY_TRACK['is_official'] and any(
                official.lower() in title_lowercase for official in TITLE['OFFICIALS']):
            rate += YtSettings().BONUS_RATES['OFFICIAL']
        if self.SPOTIFY_TRACK['is_remix'] and 'remix' in title_lowercase:
            rate += YtSettings().BONUS_RATES['REMIX']
        if self.SPOTIFY_TRACK['is_instrumental'] and 'instrumental' in title_lowercase:
            rate += YtSettings().BONUS_RATES['INSTRUMENTAL']
        if self.SPOTIFY_TRACK['is_live'] and 'live' in title_lowercase:
            rate += YtSettings().BONUS_RATES['LIVE']
        return rate

    def rate_channel(self, channel_lowercase):
        """Rate the channel name, with a bonus for official-looking channels.

        Bug fix: lowercase the 'Official' marker before testing against the
        lowercased channel name (the original comparison never matched).
        NOTE(review): the channel is compared against the track's full name,
        not the artist name — confirm this is intended.
        """
        rate = round(
            SequenceMatcher(None, channel_lowercase,
                            self.SPOTIFY_TRACK['full_name'].lower()).ratio(), 2)
        if any(official.lower() in channel_lowercase for official in CHANNEL['OFFICIALS']):
            rate += YtSettings().BONUS_RATES['CHANNEL']
        return rate

    def rate_duration(self, duration):
        """Return 1.0 for an exact duration match, minus 0.01 per second off."""
        return 1 - (abs(duration - self.SPOTIFY_TRACK['duration']) / 100)

    def to_dict(self):
        """Return the parsed video data dict."""
        return self.data
|
"""
Unit and regression test for the qubekit_gui package.
"""
# Import package, test suite, and other packages as needed
import qubekit_gui
import pytest
import sys
def test_qubekit_gui_imported():
    """Check that the top-level import of qubekit_gui succeeded."""
    loaded_modules = sys.modules
    assert "qubekit_gui" in loaded_modules
|
import os
import torch
from torch.utils.data import DataLoader, Dataset
# Load preprocessed dataset metadata (vocabularies and size limits) once at
# import time; every dataset instance below shares these module-level values.
meta = torch.load("./data/meta_info_v2.pth")
# DICT = meta["dict"] # {'A': 0, 'C': 1, 'G': 2, 'T': 3}
char_dict = meta["dict"]  # {'A': 0, 'C': 1, 'G': 2, 'T': 3}
MAX_LEN = meta["max_len"]  # max length of a strand; typically 120; add a const for insertion length > 120
MAX_T = meta["max_t"]  # max num of noisy strands in a cluster; typically 8
vin = meta["vin"]  # input vocabulary, e.g. {'A': 0, 'C': 1, 'G': 2, 'T': 3}
vout = meta["vout"]  # output vocabulary, e.g. {'A': 0, 'C': 1, 'G': 2, 'T': 3}
class DNA_dataset_v2(Dataset):
    """Dataset of noisy DNA strand clusters and their ground-truth strands.

    Each sample pairs one-hot encodings of a cluster's noisy strands
    (left- and right-aligned) with the id-encoded ground-truth strand.
    """

    def __init__(self, root, split, *args, **kwargs):
        super(DNA_dataset_v2, self).__init__(*args, **kwargs)
        self.data_left = []   # per sample: (MAX_T, MAX_LEN, len(vin)) one-hot, left-aligned
        self.data_right = []  # per sample: (MAX_T, MAX_LEN, len(vin)) one-hot, right-aligned
        self.labels = []      # per sample: 1-D tensor of vout ids for the gt strand
        split_dir = os.path.join(root, split)
        self.load_data(split_dir)

    def load_data(self, split_dir):
        """Parse noisy_strands.txt / clusters.txt under *split_dir*.

        noisy_strands.txt holds, per cluster, a "# <n>" header line followed
        by n noisy strands; clusters.txt holds one ground-truth strand per
        line, in the same cluster order.
        """
        # Fixed: open both files with context managers so the handles are
        # always closed (the original leaked both of them).
        with open(os.path.join(split_dir, "noisy_strands.txt"), "r") as noisy_f:
            noise_lines = noisy_f.readlines()
        idx = 0
        while idx < len(noise_lines):
            line = noise_lines[idx].strip()
            assert line.startswith("#")
            num_strands = int(line.split()[-1])
            idx += 1
            strands = noise_lines[idx: idx + num_strands]
            onehot_left = self.strands_to_onehot(strands, align='left')    # (MAX_T, MAX_LEN, len(vin))
            onehot_right = self.strands_to_onehot(strands, align='right')  # (MAX_T, MAX_LEN, len(vin))
            self.data_left.append(onehot_left)
            self.data_right.append(onehot_right)
            idx += num_strands
        # gt strand
        with open(os.path.join(split_dir, "clusters.txt"), "r") as cluster_f:
            cluster_lines = cluster_f.readlines()
        for line in cluster_lines:
            self.labels.append(self.strand_to_id(line.strip()))

    def strands_to_onehot(self, strands, align='left'):
        """One-hot encode strands into a zero-padded (MAX_T, MAX_LEN, |vin|) tensor.

        align='left' keeps each strand's original orientation;
        align='right' reverses each strand so its tail sits at index 0.
        """
        assert align in ['left', 'right']
        # Zero rows act as padding for missing strands / short strands.
        # NOTE(review): assumes len(strands) <= MAX_T — confirm upstream data.
        paddings = torch.zeros(MAX_T, MAX_LEN, len(vin))
        for i, s in enumerate(strands):
            s = s.strip()[:MAX_LEN]  # clip over-long strands to MAX_LEN
            len_s = len(s)
            if align == 'left':
                ids = torch.tensor(list(map(vin.get, s)))
                paddings[i][torch.arange(len_s), ids] = 1
            else:
                ids = torch.tensor(list(map(vin.get, s[::-1])))
                paddings[i][torch.arange(len_s), ids] = 1
        return paddings

    def strand_to_id(self, s):
        """Encode a ground-truth strand string as a 1-D tensor of vout ids."""
        ids = [vout[c] for c in s]
        return torch.tensor(ids)

    def __getitem__(self, idx):
        # (left-aligned one-hot, right-aligned one-hot, label ids)
        return self.data_left[idx], self.data_right[idx], self.labels[idx]

    def __len__(self):
        return len(self.labels)
def get_DNA_loader(root, device="cpu", batch_size=128):
    """Build shuffled train/val/test DataLoaders over the DNA strand dataset.

    Returns ``(train_dl, val_dl, test_dl, dataset_info)`` where
    dataset_info carries the vocabulary sizes.
    """

    def collate_batch(batch):
        # Unzip the samples, then stack each field along a trailing batch dim.
        lefts, rights, labels = zip(*batch)
        src_left = torch.stack(list(lefts), dim=-1).contiguous().to(device)    # (max_t, max_len, 4, bsz)
        src_right = torch.stack(list(rights), dim=-1).contiguous().to(device)
        tgt = torch.stack(list(labels), dim=-1).contiguous().to(device)        # (max_len, bsz)
        return src_left, src_right, tgt

    loaders = []
    for split in ("train", "val", "test"):
        split_ds = DNA_dataset_v2(root=root, split=split)
        loaders.append(DataLoader(split_ds, batch_size=batch_size,
                                  shuffle=True, collate_fn=collate_batch))
    dataset_info = dict(input_dim=len(vin), output_dim=len(vout))
    return loaders[0], loaders[1], loaders[2], dataset_info
if __name__ == "__main__":
train_dl = get_DNA_loader(root="./data", batch_size=2)[0]
data = next(iter(train_dl))
|
'''
Print every even number in the interval from 1 to 50.
'''
for numero_par in range(2, 51, 2):
    # range starts at the first even number and steps by 2, so no
    # parity test is needed.
    print(numero_par, end=' ')
print('acabou!')
|
"""
Generate a random hash to change password
"""
import hashlib
import random
import secrets
import string
def random_key(size=5):
    """Return a random string of *size* lowercase letters and digits.

    Security fix: this feeds a password-reset hash (see module docstring),
    so it uses ``secrets.choice`` — a CSPRNG — instead of the predictable
    ``random.choice``.
    """
    chars = string.ascii_lowercase + string.digits
    return ''.join(secrets.choice(chars) for _ in range(size))
def generate_hash_key(salt, random_str_size=5):
    """Return a SHA3-224 hex digest of a random string prefixed to *salt*."""
    payload = random_key(random_str_size) + salt
    return hashlib.sha3_224(payload.encode('utf-8')).hexdigest()
|
global_memory_name = "HBM"
def generate_attributes(num_replications, num_global_memory_banks=32):
"""
Generates the kernel attributes for the global memory. They specify in which
global memory the buffer is located. The buffers will be placed using a
round robin scheme using the available global memory banks and the number of
replications that should be generated (e.g. if a global memory contains multiple banks)
@param num_replications Number okernel replications
@param num_global_memory_banks Number of global memory banks that should be used for generation
@return Array of strings that contain the attributes for every kernel
"""
global_memory_names = [ "%s%d" % (global_memory_name, i) for i in range(num_global_memory_banks)]
return [ "__attribute__((buffer_location(\"%s\")))"
% (global_memory_names[i % num_global_memory_banks])
for i in range(num_replications)]
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys

# Make the package importable for autodoc when building from doc/source.
sys.path.insert(0, os.path.abspath('../..'))

# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'ansible-role-iptables'
copyright = u'2013, OpenStack Foundation'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]

# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}
|
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.5 Python SDK
Pure Storage FlashBlade REST 1.5 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.5
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class TestResult(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Swagger-declared type of each model attribute.
    swagger_types = {
        'name': 'str',
        'component_address': 'str',
        'component_name': 'str',
        'description': 'str',
        'destination': 'str',
        'enabled': 'bool',
        'resource': 'str',
        'result_details': 'str',
        'success': 'bool',
        'test_type': 'str'
    }
    # JSON key each attribute serializes to (identical here, kept for
    # uniformity with other generated models).
    attribute_map = {
        'name': 'name',
        'component_address': 'component_address',
        'component_name': 'component_name',
        'description': 'description',
        'destination': 'destination',
        'enabled': 'enabled',
        'resource': 'resource',
        'result_details': 'result_details',
        'success': 'success',
        'test_type': 'test_type'
    }

    def __init__(self, name=None, component_address=None, component_name=None, description=None, destination=None, enabled=None, resource=None, result_details=None, success=None, test_type=None):
        """
        TestResult - a model defined in Swagger
        """
        self._name = None
        self._component_address = None
        self._component_name = None
        self._description = None
        self._destination = None
        self._enabled = None
        self._resource = None
        self._result_details = None
        self._success = None
        self._test_type = None
        # Only assign attributes the caller actually provided, so unset
        # fields stay None and are still emitted by to_dict().
        if name is not None:
            self.name = name
        if component_address is not None:
            self.component_address = component_address
        if component_name is not None:
            self.component_name = component_name
        if description is not None:
            self.description = description
        if destination is not None:
            self.destination = destination
        if enabled is not None:
            self.enabled = enabled
        if resource is not None:
            self.resource = resource
        if result_details is not None:
            self.result_details = result_details
        if success is not None:
            self.success = success
        if test_type is not None:
            self.test_type = test_type

    @property
    def name(self):
        """
        Gets the name of this TestResult.
        name of the object (e.g., a file system or snapshot)

        :return: The name of this TestResult.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this TestResult.
        name of the object (e.g., a file system or snapshot)

        :param name: The name of this TestResult.
        :type: str
        """
        self._name = name

    @property
    def component_address(self):
        """
        Gets the component_address of this TestResult.
        Address of the component running the test

        :return: The component_address of this TestResult.
        :rtype: str
        """
        return self._component_address

    @component_address.setter
    def component_address(self, component_address):
        """
        Sets the component_address of this TestResult.
        Address of the component running the test

        :param component_address: The component_address of this TestResult.
        :type: str
        """
        self._component_address = component_address

    @property
    def component_name(self):
        """
        Gets the component_name of this TestResult.
        Name of the component running the test

        :return: The component_name of this TestResult.
        :rtype: str
        """
        return self._component_name

    @component_name.setter
    def component_name(self, component_name):
        """
        Sets the component_name of this TestResult.
        Name of the component running the test

        :param component_name: The component_name of this TestResult.
        :type: str
        """
        self._component_name = component_name

    @property
    def description(self):
        """
        Gets the description of this TestResult.
        What the test is doing

        :return: The description of this TestResult.
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """
        Sets the description of this TestResult.
        What the test is doing

        :param description: The description of this TestResult.
        :type: str
        """
        self._description = description

    @property
    def destination(self):
        """
        Gets the destination of this TestResult.
        The URI of the target server being tested

        :return: The destination of this TestResult.
        :rtype: str
        """
        return self._destination

    @destination.setter
    def destination(self, destination):
        """
        Sets the destination of this TestResult.
        The URI of the target server being tested

        :param destination: The destination of this TestResult.
        :type: str
        """
        self._destination = destination

    @property
    def enabled(self):
        """
        Gets the enabled of this TestResult.
        Is the service enabled?

        :return: The enabled of this TestResult.
        :rtype: bool
        """
        return self._enabled

    @enabled.setter
    def enabled(self, enabled):
        """
        Sets the enabled of this TestResult.
        Is the service enabled?

        :param enabled: The enabled of this TestResult.
        :type: bool
        """
        self._enabled = enabled

    @property
    def resource(self):
        """
        Gets the resource of this TestResult.
        A reference to the object being tested

        :return: The resource of this TestResult.
        :rtype: str
        """
        return self._resource

    @resource.setter
    def resource(self, resource):
        """
        Sets the resource of this TestResult.
        A reference to the object being tested

        :param resource: The resource of this TestResult.
        :type: str
        """
        self._resource = resource

    @property
    def result_details(self):
        """
        Gets the result_details of this TestResult.
        Reason of test failure, if any

        :return: The result_details of this TestResult.
        :rtype: str
        """
        return self._result_details

    @result_details.setter
    def result_details(self, result_details):
        """
        Sets the result_details of this TestResult.
        Reason of test failure, if any

        :param result_details: The result_details of this TestResult.
        :type: str
        """
        self._result_details = result_details

    @property
    def success(self):
        """
        Gets the success of this TestResult.
        Did the test succeed?

        :return: The success of this TestResult.
        :rtype: bool
        """
        return self._success

    @success.setter
    def success(self, success):
        """
        Sets the success of this TestResult.
        Did the test succeed?

        :param success: The success of this TestResult.
        :type: bool
        """
        self._success = success

    @property
    def test_type(self):
        """
        Gets the test_type of this TestResult.
        The type of the test. Possible values are phonehome, phonehome-ping, remote-assist, directory-service, directory-service-connecting, directory-service-binding, directory-service-group-searching, and directory-service-uri-searching.

        :return: The test_type of this TestResult.
        :rtype: str
        """
        return self._test_type

    @test_type.setter
    def test_type(self, test_type):
        """
        Sets the test_type of this TestResult.
        The type of the test. Possible values are phonehome, phonehome-ping, remote-assist, directory-service, directory-service-connecting, directory-service-binding, directory-service-group-searching, and directory-service-uri-searching.

        :param test_type: The test_type of this TestResult.
        :type: str
        """
        self._test_type = test_type

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively convert nested models, lists and dicts so the result
        # contains only plain Python values.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Attribute-wise comparison via the instance dicts.
        if not isinstance(other, TestResult):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
#!/usr/bin/env python
"""
Start this file as "./simple_deployment run"
Then you can for instance do this:
> cd examples
> say_hello
> --connect
> exit
"""
from deployer.host import LocalHost
from deployer.node import Node
class example_settings(Node):
    """Deployment node demonstrating the deployer API on the local machine."""
    # Run everything on the local machine
    class Hosts:
        # NOTE(review): a *set* containing the LocalHost class — presumably
        # the deployer framework accepts a set of host classes here; confirm
        # against the deployer.host documentation.
        host = { LocalHost }

    # A nested node with some examples.
    class examples(Node):
        def say_hello(self):
            # Run a shell command on every host of this node.
            self.hosts.run('echo hello world')

        def directory_listing_in_superuser_home(self):
            # Run `ls ~` with sudo, listing the superuser's home directory.
            self.hosts.sudo('ls ~')

        def return_hello_world(self):
            # Plain return value — shown by the interactive shell.
            return 'Hello world'

        def raise_exception(self):
            # Demonstrates how the shell surfaces exceptions from actions.
            raise Exception('Custom exception')
if __name__ == '__main__':
    # Start an interactive shell rooted at the example_settings node.
    from deployer.client import start
    start(example_settings)
|
from django.conf.urls import url
from . import views
# Route the app root ('') to the new_plot view.
urlpatterns = [
    url(r'^$', views.new_plot, name='new_plot'),
]
|
import sys, csv, re
import json
# import io
from bson import json_util, ObjectId
from pymongo import MongoClient
if __name__ == "__main__":
# open db connection
dbauth = csv.reader(open('dbauth.txt', 'r')).next()
dbauth[0] = dbauth[0].strip()
dbauth[1] = dbauth[1].strip()
dburl = 'mongodb://' + \
dbauth[0] + ':' + dbauth[1] + \
'@localhost:27017/?authSource=admin'
client = MongoClient(dburl)
db = client.nolitory
titles = list(db.scripts.find({}, {'title':1}))
for i, title in enumerate(titles):
title['_id'] = str(title['_id'])
print str(i + 1) + '. ' + title['title']
# # select a movie title
# scripts = []
# for script in db.scripts.find():
# scripts.append(script)
# for i, script in enumerate(scripts):
# print str(i + 1) + '. ' + script['title']
mIdx = raw_input("Select a Movie (Enter number)")
try:
mIdx = int(mIdx) - 1
if mIdx < 0 or mIdx >= len(titles):
print 'Wrong Selection.'
sys.exit(0)
except ValueError:
# Handle the exception
print 'Please enter an integer!'
sys.exit(0)
simplified = raw_input("Do you need smaller simplified data (yes or no)?")
selected = titles[mIdx]
print 'Parsing ', selected['title']
# save into files
filename = selected['title'].encode('ascii','ignore').lower()
filename = re.sub(r"\s+", "_", filename)
filename = filename.replace(',_the', '') # for usual suspects
if (simplified.lower()=='yes' or simplified.lower()=='y'):
script_metadata = db.script_metadata.find_one({"script_id": ObjectId(selected['_id'])})
# delete unnecessary properties
del script_metadata['script_id']
del script_metadata['_id']
if 'character_metadata' in script_metadata:
del script_metadata['character_metadata']
for scene in script_metadata['scenes']:
del scene['conversations']
del scene['character_metadata']
del scene['heading']
del scene['actions']
# print filename
with open('json/'+filename+'_simple.json', 'w') as outfile:
# data = json.dumps(scenes, indent=True)
data = json_util.dumps({\
'script_info':script_metadata \
}, separators=(',', ': '))
outfile.write(data)
else:
script_metadata = db.script_metadata.find_one({"script_id": ObjectId(selected['_id'])})
tmdb_metadata = db.tmdb_metadata.find_one({"script_id": ObjectId(selected['_id'])})
# delete unnecessary properties
del tmdb_metadata['script_id']
del tmdb_metadata['_id']
del script_metadata['script_id']
del script_metadata['_id']
if 'character_metadata' in script_metadata:
del script_metadata['character_metadata']
# print filename
with open('json/'+filename+'.json', 'w') as outfile:
# data = json.dumps(scenes, indent=True)
data = json_util.dumps({\
'movie_info':tmdb_metadata, \
'script_info':script_metadata \
}, indent = 4)
outfile.write(data)
# scenes = script_metadata['scenes']
# print '# of scenes', len(scenes)
# output = []
# for scene in scenes:
# metadata = scene['scene_metadata']
# print scene['story_order'], ',', len(scene['characters']), ',', scene['narrative_order'], ':', metadata['location'], ',', metadata['time']
# output.push({
# 'story_order':scene['story_order'],
# 'narrative_order':scene['narrative_order'],
# 'location':metadata['location'],
# 'metadata2':metadata['time'],
# 'children':
#
#
# })
|
from dataclasses import dataclass, field
from typing import List
from xsdata.models.datatype import XmlPeriod
__NAMESPACE__ = "NISTSchema-SV-IV-list-gMonth-length-1-NS"
@dataclass
class NistschemaSvIvListGMonthLength1:
    """xsdata model generated from the NISTSchema-SV-IV-list-gMonth-length-1 schema."""

    class Meta:
        name = "NISTSchema-SV-IV-list-gMonth-length-1"
        namespace = "NISTSchema-SV-IV-list-gMonth-length-1-NS"

    # List of gMonth tokens; the schema facet fixes the token count at 5.
    # NOTE(review): the schema name says "length-1" but the facet here is
    # length 5 — generated code; confirm against the source XSD.
    value: List[XmlPeriod] = field(
        default_factory=list,
        metadata={
            "length": 5,
            "tokens": True,
        }
    )
|
"""
User utilities.
"""
from __future__ import absolute_import, print_function
from distutils.util import get_platform
from numpy.distutils import misc_util
from .errors import *
from .common import *
from .parseUtils import joinStrs
from PyDSTool.core.context_managers import RedirectStdout
# !! Replace use of these named imports with np.<X>
from numpy import Inf, NaN, isfinite, less, greater, sometrue, alltrue, \
searchsorted, take, argsort, array, swapaxes, asarray, zeros, transpose, \
float64, int32, argmin, ndarray, concatenate
import numpy as np
from numpy.linalg import norm
from scipy.optimize import minpack, zeros
try:
newton_meth = minpack.newton
except AttributeError:
# newer version of scipy
newton_meth = zeros.newton
import time, sys, os, platform
import copy
import six
# --------------------------------------------------------------------
# EXPORTS
_classes = []
_functions = ['intersect', 'remain', 'union', 'cartesianProduct',
'makeImplicitFunc', 'orderEventData',
'saveObjects', 'loadObjects', 'info', 'compareList',
'findClosestArray', 'findClosestPointIndex', 'find',
'makeMfileFunction', 'make_RHS_wrap', 'make_Jac_wrap',
'progressBar', 'distutil_destination', 'architecture',
'extra_arch_arg', 'arclength']
_mappings = ['_implicitSolveMethods', '_1DimplicitSolveMethods']
__all__ = _classes + _functions + _mappings
## ------------------------------------------------------------------
# File for stdout redirecting
_logfile = os.devnull
## Utility functions
def makeMfileFunction(name, argname, defs):
    """Write a MATLAB M-file <name>.m defining function <name>(<argname>).

    defs is a dictionary of left-hand side -> right-hand side definitions.
    Every entry except the one keyed by `name` is written as an
    intermediate assignment; defs[name] is written last so it provides the
    function's return value.

    Raises KeyError if `name` is not a key of defs.
    """
    # 'with' guarantees the file is closed even if a write raises
    # (the original left the handle open on error).
    with open(name + ".m", 'w') as mfile:
        mfile.write("function %s = %s(%s)\n" % (name, name, argname))
        for k, v in defs.items():
            if k != name:
                mfile.write("%s = %s;\n" % (k, v))
        # now the final definition of tau_recip or inf
        mfile.write("%s = %s;\n" % (name, defs[name]))
        mfile.write("return\n")
def info(x, specName="Contents", offset=1, recurseDepth=1,
         recurseDepthLimit=2, _repeatFirstTime=False):
    """Pretty printer for showing argument lists and dictionary
    specifications.

    Recurses into dict-like values (up to recurseDepthLimit for objects
    exposing __dict__) and wraps long value strings at ~78 columns.
    `offset`, `recurseDepth` and `_repeatFirstTime` are internal recursion
    bookkeeping and not normally supplied by callers.

    NOTE(review): relies on sortedDictKeys and object2str, imported from
    the package's common/parse utilities via star-imports above.
    """
    if recurseDepth == 1:
        if not _repeatFirstTime:
            # first time through
            print("Information for " + specName + "\n")
    else:
        # nested call: print the key as a label on the same line
        print(specName + ":", end=' ')
    if x.__class__ is type:
        # classes themselves carry no printable spec
        return
    if hasattr(x, 'items'):
        # dict-like: print each key, recursing into dict-valued entries
        x_keys = sortedDictKeys(x)
        if len(x_keys) == 0:
            print("< empty >")
        elif recurseDepth != 1:
            # newline after the label printed by the caller
            print("")
        for k in x_keys:
            v = x[k]
            kstr = object2str(k)
            basestr = " "*(offset-1) + kstr
            if hasattr(v, 'items'):
                info(v, basestr, offset+4, recurseDepth+1,
                     recurseDepthLimit)
            else:
                # wrap the comma-separated value string at ~78 chars
                vStrList = object2str(v).split(', ')
                if len(vStrList)==0:
                    vStrList = ['< no information >']
                elif len(vStrList)==1 and vStrList[0] == '':
                    vStrList = ['< empty >']
                outStrList = [basestr+": "]
                for i in range(len(vStrList)):
                    if len(vStrList[i] + outStrList[-1]) < 78:
                        outStrList[-1] += ", "*(i>0) + vStrList[i]
                    else:
                        if i>0:
                            if i != len(vStrList):
                                # add trailing comma to previous line
                                outStrList[-1] += ","
                            # start on new line, aligned past the key
                            outStrList.append(" "*(len(kstr)+3) + vStrList[i])
                        else:
                            # too long for line and string has no commas
                            # could work harder here, but for now, just include
                            # the long line
                            outStrList[-1] += vStrList[i]
                if recurseDepth==1 and len(outStrList)>1:
                    # print an extra space between topmost level entries
                    # provided those entries occupy more than one line.
                    print("\n")
                for s in outStrList:
                    print(s)
    elif hasattr(x, '__dict__') and recurseDepth <= recurseDepthLimit:
        # plain object: show its attribute dict instead
        info(x.__dict__, specName, offset, recurseDepth,
             recurseDepthLimit, True)
    else:
        # scalar / opaque value: just repr it
        xstr = repr(x)
        if xstr == '':
            xstr = '< no information >'
        print(xstr)
# Root-finding method names accepted by makeImplicitFunc; the 1D list
# excludes 'fsolve', the only method supporting multi-dimensional x0.
_implicitSolveMethods = ['newton', 'bisect', 'steffe', 'fsolve']
_1DimplicitSolveMethods = ['newton', 'bisect', 'steffe']
def makeImplicitFunc(f, x0, fprime=None, extrafargs=(), xtolval=1e-8,
                     maxnumiter=100, solmethod='newton', standalone=True):
    """Builds an implicit function representation of an N-dimensional curve
    specified by (N-1) equations. Thus argument f is a function of 1 variable.
    In the case of the 'fsolve' method, f may have dimension up to N-1.

    Available solution methods are: newton, bisect, steffensen, fsolve.
    All methods utilize SciPy's Minpack wrappers to Fortran codes.

    Steffenson uses Aitken's Delta-squared convergence acceleration.
    fsolve uses Minpack's hybrd and hybrj algorithms.

    Standalone option (True by default) returns regular function. If False,
    an additional argument is added, so as to be compatible as a method
    definition.

    NOTE(review): relies on minpack.bisection / minpack.fixed_point being
    available from scipy.optimize's minpack module — verify against the
    installed SciPy version, as these have moved in newer releases.
    """
    # validate x0's type against what each solver expects:
    # bisect needs a 2-element bracket, fsolve accepts scalar or sequence,
    # the rest need a scalar starting point
    if solmethod == 'bisect':
        assert isinstance(x0, _seq_types), \
            "Invalid type '"+str(type(x0))+"' for x0 = "+str(x0)
        assert len(x0) == 2
    elif solmethod == 'fsolve':
        assert isinstance(x0, (_seq_types, _num_types)), \
            "Invalid type '"+str(type(x0))+"' for x0 = "+str(x0)
    else:
        assert isinstance(x0, _num_types), \
            "Invalid type '"+str(type(x0))+"' for x0 = "+str(x0)

    # define the functions that could be used
    # scipy signatures use y instead of t, but this naming is consistent
    # with that in the Generator module
    try:
        if standalone:
            # plain functions of t only
            def newton_fn(t):
                with RedirectStdout(_logfile):
                    res = float(newton_meth(f, x0, args=(t,)+extrafargs, tol=xtolval,
                                            maxiter=maxnumiter, fprime=fprime))
                return res

            def bisect_fn(t):
                with RedirectStdout(_logfile):
                    res = minpack.bisection(f, x0[0], x0[1], args=(t,)+extrafargs,
                                            xtol=xtolval, maxiter=maxnumiter)
                return res

            def steffe_fn(t):
                with RedirectStdout(_logfile):
                    res = minpack.fixed_point(f, x0, args=(t,)+extrafargs,
                                              xtol=xtolval, maxiter=maxnumiter)
                return res

            def fsolve_fn(t):
                with RedirectStdout(_logfile):
                    res = minpack.fsolve(f, x0, args=(t,)+extrafargs,
                                         xtol=xtolval, maxfev=maxnumiter,
                                         fprime=fprime)
                return res
        else:
            # method-compatible variants: extra leading arg receives `self`
            def newton_fn(s, t):
                with RedirectStdout(_logfile):
                    res = float(newton_meth(f, x0, args=(t,)+extrafargs, tol=xtolval,
                                            maxiter=maxnumiter, fprime=fprime))
                return res

            def bisect_fn(s, t):
                with RedirectStdout(_logfile):
                    res = minpack.bisection(f, x0[0], x0[1], args=(t,)+extrafargs,
                                            xtol=xtolval, maxiter=maxnumiter)
                return res

            def steffe_fn(s, t):
                with RedirectStdout(_logfile):
                    res = minpack.fixed_point(f, x0, args=(t,)+extrafargs,
                                              xtol=xtolval, maxiter=maxnumiter)
                return res

            def fsolve_fn(s, t):
                with RedirectStdout(_logfile):
                    res = minpack.fsolve(f, x0, args=(t,)+extrafargs,
                                         xtol=xtolval, maxfev=maxnumiter,
                                         fprime=fprime)
                return res
    except TypeError as e:
        if solmethod == 'bisect':
            infostr = " (did you specify a pair for x0?)"
        else:
            infostr = ""
        raise TypeError("Could not create function" +infostr + ": "+str(e))

    # return the closure matching the requested solver
    if solmethod == 'newton':
        return newton_fn
    elif solmethod == 'bisect':
        if fprime is not None:
            print("Warning: fprime argument unused for bisection method")
        return bisect_fn
    elif solmethod == 'steffe':
        if fprime is not None:
            print("Warning: fprime argument unused for aitken method")
        return steffe_fn
    elif solmethod == 'fsolve':
        return fsolve_fn
    else:
        raise ValueError("Unrecognized type of implicit function solver")
def findClosestPointIndex(pt, target, tol=Inf, in_order=True):
    """
    Find index of the closest N-dimensional Point in the target N by M array
    or Pointset. Uses norm of order given by the Point
    or Pointset, unless they are inconsistent, in which case an exception is
    raised, or unless they are both arrays, in which case 2-norm is assumed.

    With the in_order boolean option (default True), the function will
    attempt to determine the local "direction" of the values and return an
    insertion index that will preserve this ordering. This option is
    incompatible with the tol option (see below).

    If the optional tolerance, tol, is given, then an index is returned only
    if the closest distance is within the tolerance. Otherwise, a ValueError
    is raised. This option is incompatible with the in_order option.
    """
    # norm order comes from pt if it is a Point; plain arrays default to 2
    try:
        normord = pt._normord
    except AttributeError:
        normord = 2
    try:
        if target._normord != normord:
            raise ValueError("Incompatible order of norm defined for inputs")
    except AttributeError:
        # target is a plain array with no declared norm order
        pass
    # distance from pt to every row of target; index of the nearest row
    dists = [norm(pt-x, normord) for x in target]
    index = argmin(dists)
    if in_order:
        # build a small neighborhood around the closest point and test
        # whether inserting pt there keeps every coordinate monotonic
        if index > 0:
            lo_off = 1
            # insertion offset index
            ins_off = 1
            if index < len(target):
                hi_off = 1
            else:
                hi_off = 0
        else:
            lo_off = 0
            hi_off = 2
            # insertion offset index
            ins_off = 0
        pta = array([pt]) # extra [] to get compatible shape for concat
        dim_range = list(range(target.shape[1]))
        # neighborhood
        nhood = target[index-lo_off:index+hi_off]
        if all(ismonotonic(nhood[:,d]) for d in dim_range):
            # try inserting at index, otherwise at index+1
            new_nhood = concatenate((nhood[:ins_off], pta, nhood[ins_off:]))
            if not all(ismonotonic(new_nhood[:,d]) for d in dim_range):
                ins_off += 1
                index += 1
                new_nhood = concatenate((nhood[:ins_off], pta, nhood[ins_off:]))
                if not all(ismonotonic(new_nhood[:,d]) for d in dim_range):
                    raise ValueError("Cannot add point in order, try deactivating the in_order option")
    if in_order:
        # NOTE(review): with in_order=True the tol argument is never
        # consulted, as documented above.
        return index
    else:
        if dists[index] < tol:
            return index
        else:
            raise ValueError("No index found within distance tolerance")
def findClosestArray(input_array, target_array, tol):
    """
    Find the set of elements in (1D) input_array that are closest to
    elements in target_array. Record the indices of the elements in
    target_array that are within tolerance, tol, of their closest
    match. Also record the indices of the elements in target_array
    that are outside tolerance, tol, of their match.

    For example, given an array of observations with irregular
    observation times along with an array of times of interest, this
    routine can be used to find those observations that are closest to
    the times of interest that are within a given time tolerance.

    NOTE: input_array must be sorted! The array, target_array, does not have to be sorted.

    Inputs:
      input_array: a sorted float64 array
      target_array: a float64 array
      tol: a tolerance

    Returns:
      closest_indices: the array of indices of elements in input_array that are closest to elements in target_array

    Author: Gerry Wiener, 2004
    Version 1.0
    """
    # NOT RETURNED IN THIS VERSION:
    #   accept_indices: the indices of elements in target_array that have a match in input_array within tolerance
    #   reject_indices: the indices of elements in target_array that do not have a match in input_array within tolerance
    input_array_len = len(input_array)
    # binary-search insertion points of each target value in input_array
    closest_indices = searchsorted(input_array, target_array) # determine the locations of target_array in input_array
    # acc_rej_indices = [-1] * len(target_array)
    curr_tol = [tol] * len(target_array)
    est_tol = 0.0
    for i in range(len(target_array)):
        best_off = 0          # used to adjust closest_indices[i] for best approximating element in input_array

        if closest_indices[i] >= input_array_len:
            # the value target_array[i] is >= all elements in input_array so check whether it is within tolerance of the last element
            closest_indices[i] = input_array_len - 1
            est_tol = target_array[i] - input_array[closest_indices[i]]
            if est_tol < curr_tol[i]:
                curr_tol[i] = est_tol
                # acc_rej_indices[i] = i
        elif target_array[i] == input_array[closest_indices[i]]:
            # target_array[i] is in input_array
            est_tol = 0.0
            curr_tol[i] = 0.0
            # acc_rej_indices[i] = i
        elif closest_indices[i] == 0:
            # target_array[i] is <= all elements in input_array
            est_tol = input_array[0] - target_array[i]
            if est_tol < curr_tol[i]:
                curr_tol[i] = est_tol
                # acc_rej_indices[i] = i
        else:
            # target_array[i] is between input_array[closest_indices[i]-1] and input_array[closest_indices[i]]
            # and closest_indices[i] must be > 0
            top_tol = input_array[closest_indices[i]] - target_array[i]
            bot_tol = target_array[i] - input_array[closest_indices[i]-1]
            if bot_tol <= top_tol:
                est_tol = bot_tol
                best_off = -1           # this is the only place where best_off != 0
            else:
                est_tol = top_tol

            if est_tol < curr_tol[i]:
                curr_tol[i] = est_tol
                # acc_rej_indices[i] = i

        if est_tol <= tol:
            # shift index to the nearer neighbor when applicable
            closest_indices[i] += best_off

    # accept_indices = compress(greater(acc_rej_indices, -1),
    #                            acc_rej_indices)
    # reject_indices = compress(equal(acc_rej_indices, -1),
    #                            arange(len(acc_rej_indices)))
    return closest_indices #, accept_indices, reject_indices)
def find(x, v, next_largest=1, indices=None):
    """Returns the index into the 1D array x corresponding to the
    element of x that is either equal to v or the nearest to
    v. x is assumed to contain unique elements.

    if v is outside the range of values in x then the index of the
    smallest or largest element of x is returned.

    If next_largest == 1 then the nearest element taken is the next
    largest, otherwise if next_largest == 0 then the next smallest
    is taken.

    The optional argument indices speeds up multiple calls to this
    function if you have pre-calculated indices=argsort(x).
    """
    if indices is None:
        indices = argsort(x)
    xs = take(x, indices, axis=0)  # x rearranged into ascending order
    assert next_largest in [0, 1], "next_largest must be 0 or 1"
    eqmask = (xs == v).tolist()
    try:
        # exact match present
        ix = eqmask.index(1)
    except ValueError:
        # no exact match: locate the boundary between elements below
        # and above v in the sorted copy
        if next_largest:
            mask = (xs < v).tolist()
        else:
            mask = (xs > v).tolist()
        try:
            ix = min([max([0, mask.index(1-next_largest)+next_largest-1]), len(mask)-1])
        except ValueError:
            # v exceeds every element of x, so the nearest element is the
            # largest one. (Bug fix: the original computed
            # ix = 0+next_largest-1, which for next_largest == 1 returned
            # the *smallest* element's index, contradicting the docstring.)
            ix = len(mask) - 1
    return indices[ix]
def orderEventData(edict, evnames=None, nonames=False, bytime=False):
    """Time-order event data dictionary items.

    Returns a time-ordered list of (eventname, time) tuples.

    If 'evnames' is given, output is restricted to only those named events.
    With nonames=True only the sorted event times are returned (no names).
    With bytime=True (and nonames=False) tuples come back in
    (time, eventname) order instead.
    """
    if evnames is None:
        evnames = list(edict.keys())
    else:
        assert remain(evnames, edict.keys()) == [], "Invalid event names passed"
    if nonames:
        # flatten all selected events' times into one sorted list
        times = [t for name, tlist in edict.items()
                 if name in evnames for t in tlist]
        times.sort()
        return times
    # collect as (time, name) so plain sort orders chronologically
    pairs = [(t, name) for name, tlist in edict.items()
             if name in evnames for t in tlist]
    pairs.sort()
    if bytime:
        return pairs
    # swap back so event names come first in each tuple
    return [(name, t) for (t, name) in pairs]
## ------------------------------------------------------------
## Generator wrapping utilities
def make_RHS_wrap(gen, xdict_base, x0_names, use_gen_params=False, overflow_penalty=1e4):
    """Return function wrapping Generator argument gen's RHS function,
    but restricting input and output dimensions to those specified by
    x0_names. All other variable values will be given by those in xdict_base.
    In case of overflow or ValueError during a call to the wrapped function,
    an overflow penalty will be used for the returned values (default 1e4).

    if use_gen_params flag is set (default False)
    then:
      Return function has signature Rhs_wrap(x,t)
      and takes an array or list of x state variable values and scalar t,
      returning an array type of length len(x). The Generator's current param
      values (at call time) will be used.
    else:
      Return function has signature Rhs_wrap(x,t,pdict)
      and takes an array or list of x state variable values, scalar t, and a
      dictionary of parameters for the Generator, returning an array type of
      length len(x).

    NB: xdict_base will be copied as it will be updated in the wrapped
    function."""
    # map variable names to their positions in the Generator's RHS output
    var_ix_map = invertMap(gen.funcspec.vars)
    # NOTE(review): sorts the caller's list in place — confirm callers
    # don't rely on the original ordering of x0_names.
    x0_names.sort()   # ensures sorted
    x0_ixs = [var_ix_map[xname] for xname in x0_names]
    dim = len(x0_names)
    # local copy shared by all calls of the returned closure
    xdict = xdict_base.copy()
    if use_gen_params:
        def Rhs_wrap(x, t):
            # overwrite only the selected variables, keep the rest fixed
            xdict.update(dict(zip(x0_names, x)))
            try:
                return take(gen.Rhs(t, xdict, gen.pars), x0_ixs)
            except (OverflowError, ValueError):
                # penalty vector signals failure to optimizers/root-finders
                return array([overflow_penalty]*dim)
    else:
        def Rhs_wrap(x, t, pdict):
            xdict.update(dict(zip(x0_names, x)))
            try:
                return take(gen.Rhs(t, xdict, pdict), x0_ixs)
            except (OverflowError, ValueError):
                return array([overflow_penalty]*dim)
    return Rhs_wrap
def make_Jac_wrap(gen, xdict_base, x0_names, use_gen_params=False, overflow_penalty=1e4):
    """Return function wrapping Generator argument gen's Jacobian function,
    but restricting input and output dimensions to those specified by
    x0_names. All other variable values will be given by those in xdict_base.
    In case of overflow or ValueError during a call to the wrapped function,
    an overflow penalty will be used for the returned values (default 1e4).

    if use_gen_params flag is set (default False)
    then:
      Return function Jac_wrap(x,t) takes an array or list of x variable
      values and scalar t, returning a 2D array type of size len(x) by len(x).
      The Generator's current param values (at call time) will be used.
    else:
      Return function Jac_wrap(x,t,pdict) takes an array or list of x variable
      values, scalar t, and a dictionary of parameters for the Generator,
      returning a 2D array type of size len(x) by len(x).

    NB: xdict_base will be copied as it will be updated in the wrapped
    function."""
    if not gen.haveJacobian():
        raise ValueError("Jacobian not defined")
    # map variable names to their row/column positions in the Jacobian
    var_ix_map = invertMap(gen.funcspec.vars)
    x0_names.sort()   # ensures sorted (in place — see make_RHS_wrap)
    x0_ixs = [var_ix_map[xname] for xname in x0_names]
    dim = len(x0_names)
    xdict = xdict_base.copy()
    if use_gen_params:
        def Jac_wrap(x, t):
            xdict.update(dict(zip(x0_names, x)))
            try:
                # restrict the full Jacobian to the selected rows and cols
                return take(take(gen.Jacobian(t, xdict, gen.pars), x0_ixs,0), x0_ixs,1)
            except (OverflowError, ValueError):
                # NOTE(review): penalty return is 1-D (length dim) while the
                # success path is dim x dim — confirm callers tolerate this.
                return array([overflow_penalty]*dim)
    else:
        def Jac_wrap(x, t, pdict):
            xdict.update(dict(zip(x0_names, x)))
            try:
                return take(take(gen.Jacobian(t, xdict, pdict), x0_ixs,0), x0_ixs,1)
            except (OverflowError, ValueError):
                return array([overflow_penalty]*dim)
    return Jac_wrap
## ------------------------------------------------------------
# User-interaction utilities
def progressBar(i, total, width=50):
    """Print an increasing number of dashes up to given width, reflecting
    i / total fraction of progress. Prints and refreshes on one line.
    """
    frac = float(i) / total
    n_dashes = int(frac * width)
    # '[' followed by dashes, padded with spaces out to the full width
    bar = '['.ljust(n_dashes + 1, '-').ljust(width, ' ')
    sys.stdout.write('\r' + bar + '] %.2f%%' % (frac * 100.))
    sys.stdout.flush()
## ------------------------------------------------------------
def saveObjects(objlist, filename, force=False):
    """Store PyDSTool objects to file. Argument should be a tuple or list,
    but if a singleton non-sequence object X is given then it will be
    saved as a list [ X ].

    Unless force=True, raises ValueError if the file already exists.

    Some PyDSTool objects will not save using this function, and will complain
    about attributes that do not have definitions in __main__.
    """
    if not force:
        if os.path.isfile(filename):
            raise ValueError("File '" + filename + "' already exists")
    if not isinstance(objlist, list):
        objlist = [objlist]
    # pickle protocol 0 (ASCII), kept for backward compatibility of saved
    # files. (The original comment claimed protocol -1 was used; it wasn't.)
    opt = 0
    # 'with' guarantees the file is closed even if pickling raises
    # (the original leaked the handle on error).
    with open(filename, 'wb') as pklfile:
        for obj in objlist:
            try:
                pickle.dump(obj, pklfile, opt)
            except:
                # report which object failed, then re-raise for the caller
                if hasattr(obj, 'name'):
                    print("Failed to save '%s'" % obj.name)
                else:
                    print("Failed to save object '%s'" % str(obj))
                raise
def loadObjects(filename, namelist=None):
    """Retrieve PyDSTool objects from file. Returns list of objects
    unless namelist option is given as a singleton string name.
    Also, if only one object X was stored, it will be returned as [X],
    and thus you will have to index the returned list with 0 to get X itself.

    Optional namelist argument selects objects to return by name,
    provided that the objects have name fields (otherwise they are ignored).
    If namelist is a single string name then a single object is returned.

    Raises ValueError if the file does not exist or namelist has duplicates;
    TypeError if namelist is neither a list nor a string.
    """
    # Since names are not intended to be unique in PyDSTool, the while
    # loop always goes to the end of the file, and pulls out *all*
    # occurrences of the names.
    if not os.path.isfile(filename):
        raise ValueError("File '" + filename + "' not found")
    if namelist is None:
        namelist = []
    was_singleton_name = isinstance(namelist, six.string_types)
    if not isinstance(namelist, list):
        if was_singleton_name:
            # normalize a single name to a one-element list
            namelist = [copy.copy(namelist)]
        else:
            raise TypeError("namelist must be list of strings or singleton string")
    if not isUniqueSeq(namelist):
        raise ValueError("Names must only appear once in namelist argument")
    pklfile = open(filename, 'rb')
    if namelist == []:
        getall = True
    else:
        getall = False
    objlist = []
    notDone = True
    while notDone:
        try:
            if getall:
                objlist.append(pickle.load(pklfile))
            else:
                # keep only objects whose .name matches the request
                tempobj = pickle.load(pklfile)
                if hasattr(tempobj, 'name'):
                    if tempobj.name in namelist:
                        objlist.append(tempobj)
        except EOFError:
            # normal termination: reached end of pickle stream
            notDone = False
        except:
            print("Error in un-pickling %s:"%filename)
            print("Was the object created with an old version of PyDSTool?")
            pklfile.close()
            raise
    pklfile.close()
    if objlist == []:
        if getall:
            print("No objects found in file")
        else:
            print("No named objects found in file")
    if was_singleton_name:
        # NOTE(review): raises IndexError if the named object was not
        # found (objlist empty) — confirm this is the intended behavior.
        return objlist[0]
    else:
        return objlist
def intersect(a, b):
    """Find intersection of two lists, sequences, etc.

    Returns a list that includes repetitions if they occur in the inputs."""
    common = []
    for item in a:
        if item in b:
            common.append(item)
    return common
def union(a, b):
    """Find union of two lists, sequences, etc.

    Returns a list that includes repetitions if they occur in the input
    lists (i.e. a simple concatenation: all of a, then all of b).
    """
    combined = list(a)
    combined.extend(b)
    return combined
def remain(a, b):
    """Find remainder of two lists, sequences, etc., after intersection.

    Returns a list that includes repetitions if they occur in the inputs."""
    leftover = []
    for item in a:
        if item not in b:
            leftover.append(item)
    return leftover
def compareList(a, b):
    """Compare elements of lists, ignoring order (like sets)."""
    # inline intersection: elements of a that also appear in b
    common = [item for item in a if item in b]
    return len(common) == len(a) == len(b)
def cartesianProduct(a, b):
    """Returns the cartesian product of the sequences, as a list of
    (a_item, b_item) tuples in row-major order."""
    return [(x, y) for x in a for y in b]
def arclength(pts):
    """
    Return array of L2 arclength progress along parameterized pointset
    in all the dimensions of the pointset.

    The result has the same length as pts: entry 0 is 0.0 and entry i is
    the cumulative Euclidean distance between consecutive points up to
    pts[i].
    """
    # (fixed: the original bound pts[0] to an unused local x0)
    lengths = np.zeros(len(pts))
    for i, x in enumerate(pts[1:]):
        # running total: previous arclength plus the latest segment length
        lengths[i+1] = np.linalg.norm(x - pts[i]) + lengths[i]
    return lengths
# ------------------------
def distutil_destination():
    """Internal utility that makes the goofy destination directory string so that PyDSTool
    can find where the distutils fortran/gcc compilers put things.

    If your temp directory turns out to be different to the one created here, contact us
    on sourceforge.net, but in the meantime you can override destdir with whatever directory
    name you find that is being used.
    """
    # NOTE(review): scipy is imported but never referenced below —
    # possibly kept for an import side effect; confirm before removing.
    import scipy
    osname = str.lower(platform.system())
    pyname = platform.python_version_tuple()
    machinename = platform.machine()
    # build the 'src.<platform>-<pyver>' suffix distutils uses per-OS
    if osname == 'linux':
        destdir = 'src.'+osname+'-'+machinename+'-'+pyname[0] + '.' + pyname[1]
    elif osname in ['darwin', 'freebsd']:
        # use the same version string as numpy.distutils.core.setup used by ContClass.CompileAutoLib
        osver = get_platform()
        destdir = 'src.' + osver + '-' +pyname[0] + '.' + pyname[1]
    elif osname == 'windows':
        destdir = 'src.win32-'+pyname[0]+'.'+pyname[1]
    else:
        # unknown platform: caller falls back to an empty suffix
        destdir = ''
    # TEMP for debugging
    #import os
    #os.system('echo %s > temp_dist.txt' % (os.path.abspath('.') + " : " + destdir))
    return destdir
def architecture():
    """
    Platform- and version-independent function to determine 32- or 64-bit
    architecture. Used primarily to determine need for "-m32" option to C
    compilers for external library compilation, e.g. by AUTO, Dopri, Radau.

    Returns integer 32 or 64 (pointer size in bits).
    """
    import struct
    pointer_bytes = struct.calcsize("P")
    return pointer_bytes * 8
def extra_arch_arg(arglist):
    """
    Adds '-m32' flag to existing list of extra compiler/linker flags passed
    as argument, based on whether architecture is detected as 32 bit.
    Otherwise, it performs the identity function.
    """
    if architecture() != 32:
        return arglist
    return arglist + ['-m32']
def get_lib_extension():
    """Return the platform's shared-library file extension via
    numpy.distutils (e.g. '.so' on Linux)."""
    return misc_util.get_shared_lib_extension()
|
import unittest
from pyramid import testing
class ViewTests(unittest.TestCase):
    """Unit tests for theblog's views using Pyramid's testing helpers."""

    def setUp(self):
        # create an isolated Pyramid configuration/registry per test
        self.config = testing.setUp()

    def tearDown(self):
        # tear down the testing registry so state doesn't leak across tests
        testing.tearDown()

    def test_my_view(self):
        # my_view should return the template context with the project name
        from theblog.views import my_view
        request = testing.DummyRequest()
        info = my_view(request)
        self.assertEqual(info['project'], 'Something cool for the web')
class FunctionalTests(unittest.TestCase):
    """End-to-end tests that exercise the WSGI app through webtest."""

    def setUp(self):
        # build the real WSGI application and wrap it in a test client
        from theblog import main
        app = main({})
        from webtest import TestApp
        self.testapp = TestApp(app)

    def test_root(self):
        # the root page should render successfully and mention Pyramid
        res = self.testapp.get('/', status=200)
        self.assertTrue(b'Pyramid' in res.body)
|
from __future__ import absolute_import, division, print_function
import argparse
import csv
import logging
import os
import sys
from collections import defaultdict
import pandas as pd
import numpy as np
import torch
from torch.utils.data import (DataLoader, SequentialSampler,
TensorDataset)
from tqdm import tqdm
from transformers import BertTokenizer, BertForSequenceClassification
from utils import convert_examples_new, convert_dataset_to_features
from dataset import HumorDetectionDataset
from model import HumorDetectionModel
from sklearn.metrics import f1_score, precision_score, recall_score
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
    """Container for a single sequence-classification example."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs a InputExample.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For
                single sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second
                sequence. Only must be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should
                be specified for train and dev examples, but not for test
                examples.
        """
        self.label = label
        self.text_b = text_b
        self.text_a = text_a
        self.guid = guid
class InputFeatures(object):
    """Tensor-ready features for one example: token ids, attention mask,
    segment ids, and the numeric label."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        self.label_id = label_id
        self.segment_ids = segment_ids
        self.input_mask = input_mask
        self.input_ids = input_ids
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a delimited file into a list of row lists.

        NOTE(review): despite the method name, the delimiter used is ','
        (comma), not tab — the data files are comma-separated.
        """
        with open(input_file, "r") as f:
            reader = csv.reader(f, delimiter=",", quotechar=quotechar)
            lines = []
            for line in reader:
                if sys.version_info[0] == 2:
                    # Python 2 compatibility: decode each cell to unicode
                    line = list(unicode(cell, 'utf-8') for cell in line)
                lines.append(line)
            return lines
class DumbProcessorClean(DataProcessor):
    """Processor for the cleaned humor-detection splits
    (train_clean.tsv / dev_clean.tsv / test_clean.tsv, comma-delimited)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train_clean.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev_clean.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        # NOTE: the test split is tagged with set_type "dev" (guids read
        # "dev-<i>"), mirroring the original behavior.
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test_clean.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets.

        Column 3 holds the text, column 1 the label.
        (Fixed: the docstring statement was duplicated in the original.)
        """
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            examples.append(
                InputExample(guid=guid, text_a=line[3], text_b=None,
                             label=line[1]))
        return examples
class DumbProcessor(DataProcessor):
    """Processor for the WordNet-ambiguity humor-detection splits
    (train_wordnet_amb.tsv / dev_wordnet_amb.tsv, comma-delimited).

    (The original docstring said "MRPC data set (GLUE version)", which
    does not match the files actually read.)
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train_wordnet_amb.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev_wordnet_amb.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets.

        Column 3 holds the text, column 1 the label.
        (Fixed: the docstring statement was duplicated in the original.)
        """
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            examples.append(
                InputExample(guid=guid, text_a=line[3], text_b=None,
                             label=line[1]))
        return examples
class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(path), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.tsv")
        return self._create_examples(self._read_tsv(path), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        # column 3 is the text, column 1 the label
        return [
            InputExample(guid="%s-%s" % (set_type, idx), text_a=row[3],
                         text_b=None, label=row[1])
            for idx, row in enumerate(lines)
        ]
def load_and_cache_examples(args, tokenizer, ambiguity_fn, task_name):
    '''
    Loads in a cached file for training and/or builds a cached file for this data.

    Builds the evaluation split ('test') either via the legacy tsv
    processors (args.old_load) or via HumorDetectionDataset with
    ambiguity scores.

    :return: a torch TensorDataset; the new path appends an extra
             ambiguity_scores tensor after the labels tensor.
    '''
    # task_name selects which legacy processor to use in the old path
    processors = {
        "old": ColaProcessor,
        "new_clean": DumbProcessorClean
    }
    # Build the dataset (this function always loads the evaluation split)
    task = 'test'
    logger.info("Creating features from dataset file at %s", args.data_dir)
    if args.old_load:
        logger.info('using old data features')
        processor = processors[task_name]()
        label_list = processor.get_labels()
        # rJokes has a dedicated test file; other sets evaluate on dev
        if args.data_name == 'rJokes':
            examples = processor.get_test_examples(args.data_dir)
        else:
            examples = processor.get_dev_examples(args.data_dir)
        features = convert_examples_new(examples, label_list, args.max_seq_length, tokenizer)
        input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        input_masks = torch.tensor([f.input_mask for f in features], dtype=torch.long)
        token_type_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
        labels = torch.tensor([f.label_id for f in features], dtype=torch.long)
        dataset = TensorDataset(input_ids, input_masks, token_type_ids, labels)
    else:
        logger.info("creating features from new dataset")
        dataset = HumorDetectionDataset(args.data_dir, args.max_seq_length, task, ambiguity_fn,
                                        use_clean_data=("clean" in task_name))
        features = convert_dataset_to_features(dataset, args.max_seq_length, tokenizer)
        # convert features to tensor dataset (note: dataset is rebound here,
        # replacing the HumorDetectionDataset with a TensorDataset)
        input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        input_masks = torch.tensor([f.input_mask for f in features], dtype=torch.long)
        token_type_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
        ambiguity_scores = torch.tensor([f.ambiguity for f in features], dtype=torch.long)
        labels = torch.tensor([f.label_id for f in features], dtype=torch.long)
        dataset = TensorDataset(input_ids, input_masks, token_type_ids, labels, ambiguity_scores)
    logger.info("Features Built.")
    return dataset
def accuracy(out, labels):
    """Count correct predictions: argmax over the logits vs. the labels."""
    preds = np.argmax(out, axis=1)
    return np.sum(preds == labels)
def get_metrics(logits, labels):
    """Return (f1, precision, recall) of argmax predictions vs. labels."""
    preds = np.argmax(logits, axis=1)
    f1 = f1_score(labels, preds)
    prec = precision_score(labels, preds)
    recall = recall_score(labels, preds)
    return f1, prec, recall
def evaluate(args, model, tokenizer, ambiguity_fn, task_name):
    """Evaluate ``model`` on the dataset selected by ``args``/``task_name``.

    Builds the eval dataset, runs the model batch-by-batch with gradients
    disabled, and aggregates metrics over the whole set.

    Returns a dict with keys 'acc', 'precision', 'recall', 'f1', 'loss'.
    """
    eval_data = load_and_cache_examples(args, tokenizer, ambiguity_fn, task_name)
    logger.info("***** Running evaluation *****")
    logger.info("  Num examples = %d", len(eval_data))
    logger.info("  Batch size = %d", args.eval_batch_size)
    # Run prediction for full data, in original order.
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    model.eval()
    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0
    # Accumulate per-batch outputs in lists and concatenate once at the end;
    # the original np.append per batch copied the full array every iteration.
    all_logits = []
    all_labels = []
    printed_first = False
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(args.device) for t in batch)
        inputs = {'input_ids': batch[0],
                  'token_type_ids': batch[2],
                  'attention_mask': batch[1],
                  'labels': batch[3]}
        if not printed_first:
            # One-time debug dump of the first few examples.
            for i in range(3):
                print("Tokens: ", tokenizer.convert_ids_to_tokens(inputs["input_ids"][i]))
                print("Token type ids: ", inputs["token_type_ids"][i])
                print("Attn mask: ", inputs["attention_mask"][i])
                print("Label: ", inputs["labels"][i])
            printed_first = True
        if not args.bert_base:
            # The custom model additionally consumes ambiguity scores
            # (5th tensor of the dataset).
            inputs['ambiguity_scores'] = batch[4]
        with torch.no_grad():
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]
        logits = logits.detach().cpu().numpy()
        label_ids = inputs['labels'].to('cpu').numpy()
        all_labels.append(label_ids)
        all_logits.append(logits)
        eval_loss += tmp_eval_loss.mean().item()
        eval_accuracy += accuracy(logits, label_ids)
        nb_eval_examples += inputs['input_ids'].size(0)
        nb_eval_steps += 1
    # Combine batches for corpus-level F1/precision/recall.
    full_logits = np.concatenate(all_logits, axis=0)
    full_labels = np.concatenate(all_labels, axis=0)
    eval_f1, eval_precision, eval_recall = get_metrics(full_logits, full_labels)
    # (Removed unused `full_accuracy` local; per-batch accuracy already
    # accumulated above.)
    eval_loss = eval_loss / nb_eval_steps
    eval_accuracy = eval_accuracy / nb_eval_examples
    results = {
        'acc': eval_accuracy,
        'precision': eval_precision,
        'recall': eval_recall,
        'f1': eval_f1,
        'loss': eval_loss
    }
    return results
def main():
    """Entry point: parse CLI args, load tokenizer and model, then evaluate
    each saved checkpoint (comma-separated in --model_weights) on the
    configured test set(s), logging metrics averaged over checkpoints."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                             "bert-base-multilingual-cased, bert-base-chinese.")
    # Other parameters
    parser.add_argument("--cache_dir",
                        default="",
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=16,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--old_load', action='store_true')
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument("--overwrite_cache", action='store_true')
    parser.add_argument('--bert_base', action='store_true', default=False,
                        help='loads in bert-base instead of our custom model.')
    parser.add_argument('--model_weights', required=True, help="Path to model weights, if loading a saved model. "
                        "If you wish to evaluate multiple models, separate with commas (no spaces). "
                        "Models must differ ONLY in random seed and/or ambiguity_fn.")
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.n_gpu = torch.cuda.device_count()
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        args.device, args.n_gpu, bool(args.local_rank != -1), args.fp16))
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    # Infer the ambiguity feature type from the weights-file naming
    # convention (e.g. "..._csi_...").
    ambiguity_fn = "none"
    if "_csi_" in args.model_weights:
        ambiguity_fn = "csi"
    elif "_wn_" in args.model_weights:
        ambiguity_fn = "wn"
    elif "_tf-idf_" in args.model_weights:
        ambiguity_fn = "tf-idf"
    if args.bert_base:
        model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=2).to(args.device)
    else:
        use_ambiguity = ambiguity_fn != "none"
        model = HumorDetectionModel(rnn_size=768, use_ambiguity=use_ambiguity).to(args.device)
    # Loop through 3 Test sets
    out_class = None
    task_name = 'new_clean'
    datasets = ['rJokes']#, 'puns']#, 'short_jokes']
    base_dir = args.data_dir
    output = []
    for data_dir in datasets:
        # rJokes lives at the root data dir; other sets in subdirectories.
        if data_dir == 'rJokes':
            args.data_dir = base_dir
            task_name = 'new_clean'
        else:
            args.data_dir = os.path.join(base_dir, data_dir)
            task_name = 'old'
        args.data_name = data_dir
        set_results = defaultdict(float)
        logger.info('****** Evaluating on {}'.format(data_dir))
        seeds = args.model_weights.split(",")
        for weights_path in seeds:
            # Re-use the single model instance; just swap in each checkpoint.
            state_dict = torch.load(weights_path)
            model.load_state_dict(state_dict)
            print(f"Evaluating model: {weights_path}")
            results = evaluate(args, model, tokenizer, ambiguity_fn, task_name)
            out_class = weights_path
            # update rolling
            for metric, vals in results.items():
                set_results[metric] += vals
        # average
        logger.info('***** Averaged Results for {}'.format(data_dir))
        for metric, vals in set_results.items():
            set_results[metric] = vals / len(seeds)
            logger.info('***** {}: {}'.format(metric, set_results[metric]))
        output.append([data_dir, set_results['acc'], set_results['precision'],
                       set_results['recall'], set_results['f1'], set_results['loss']])
    # Write output to file
    save_dir = 'test_results'
    if not os.path.isdir(save_dir):
        os.mkdir(save_dir)
    #table = pd.DataFrame(output, columns=['name', 'acc', 'precision', 'recall', 'f1', 'loss']).set_index('name')
    #out_file = 'test_results_{}'.format(out_class[:-2])
    #table.to_csv(os.path.join(save_dir, out_file))
    return
# Script entry point.
if __name__ == "__main__":
    main()
|
import keras
from keras.models import Sequential
from keras.models import Model
from keras.layers import Input, Dense, Activation, Lambda
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import Concatenate
from config_reader import config_reader
import scipy
import math
import cv2
import matplotlib
import pylab as plt
import numpy as np
import util
import time
from scipy.ndimage.filters import gaussian_filter
import os
import pickle
ROOTDIR = "./Img_minibatch"
def relu(x):
    """Apply a ReLU activation layer to tensor ``x`` and return the result."""
    activated = Activation('relu')(x)
    return activated
def conv(x, nf, ks, name):
    """Add a same-padded ``ks``x``ks`` Conv2D with ``nf`` filters named ``name``."""
    layer = Conv2D(nf, (ks, ks), padding='same', name=name)
    return layer(x)
def pooling(x, ks, st, name):
    """Add a ``ks``x``ks`` max-pooling layer with stride ``st`` named ``name``."""
    pooled = MaxPooling2D((ks, ks), strides=(st, st), name=name)(x)
    return pooled
def vgg_block(x):
    """VGG-style feature extractor (conv1_1 .. conv4_2) followed by the two
    CPM-specific reduction convolutions. Every conv is followed by a ReLU;
    a max-pool is inserted at each "pool" entry of the spec table."""
    spec = [
        # Block 1
        (64, 3, "conv1_1"), (64, 3, "conv1_2"), ("pool", "pool1_1"),
        # Block 2
        (128, 3, "conv2_1"), (128, 3, "conv2_2"), ("pool", "pool2_1"),
        # Block 3
        (256, 3, "conv3_1"), (256, 3, "conv3_2"),
        (256, 3, "conv3_3"), (256, 3, "conv3_4"), ("pool", "pool3_1"),
        # Block 4
        (512, 3, "conv4_1"), (512, 3, "conv4_2"),
        # Additional non-VGG layers
        (256, 3, "conv4_3_CPM"), (128, 3, "conv4_4_CPM"),
    ]
    for entry in spec:
        if entry[0] == "pool":
            x = pooling(x, 2, 2, entry[1])
        else:
            nf, ks, name = entry
            x = relu(conv(x, nf, ks, name))
    return x
def stage1_block(x, num_p, branch):
    """First CPM stage for one branch (branch 1 = PAFs, branch 2 = heatmaps).

    Four conv+ReLU layers followed by a final 1x1 prediction conv with
    ``num_p`` output channels and no activation."""
    layers = [(128, 3), (128, 3), (128, 3), (512, 1)]
    for idx, (nf, ks) in enumerate(layers, start=1):
        x = relu(conv(x, nf, ks, "conv5_%d_CPM_L%d" % (idx, branch)))
    return conv(x, num_p, 1, "conv5_5_CPM_L%d" % branch)
def stageT_block(x, num_p, stage, branch):
    """Refinement CPM stage (stage >= 2) for one branch.

    Five 7x7 conv+ReLU layers, one 1x1 conv+ReLU, then the final 1x1
    prediction conv with ``num_p`` channels and no activation."""
    for idx in range(1, 6):
        x = relu(conv(x, 128, 7, "Mconv%d_stage%d_L%d" % (idx, stage, branch)))
    x = relu(conv(x, 128, 1, "Mconv6_stage%d_L%d" % (stage, branch)))
    return conv(x, num_p, 1, "Mconv7_stage%d_L%d" % (stage, branch))
# this function takes the images already read by cv2.
def get_keypoints(oriImg, model):
    """Detect candidate body-part keypoints in a BGR image.

    Runs the pose model at a single scale, upsamples the predicted heatmaps
    back to the input resolution, then finds 4-neighbour local maxima per
    body part.

    Returns a list of 18 lists (one per part); each entry is a tuple
    (x, y, score, unique_id).
    """
    # Candidate scales relative to the model's 368px input height. Only the
    # first scale (0.5) is used below — multi-scale averaging appears to be
    # disabled; TODO confirm that is intentional.
    multiplier = [x * 368 / oriImg.shape[0] for x in (0.5, 1, 1.5, 2)]
    heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
    scale = multiplier[0]
    imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    # Pad so both dimensions are multiples of the model stride (8).
    imageToTest_padded, pad = util.padRightDownCorner(imageToTest, 8, 128)
    input_img = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 0, 1, 2))  # required shape (1, width, height, channels)
    output_blobs = model.predict(input_img)
    heatmap = np.squeeze(output_blobs[1])  # output 1 is heatmaps
    # Undo the stride-8 downsampling, strip the padding, and resize back to
    # the original image size.
    heatmap = cv2.resize(heatmap, (0, 0), fx=8, fy=8, interpolation=cv2.INTER_CUBIC)
    heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
    heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
    heatmap_avg = heatmap_avg + heatmap  # / len(multiplier)
    # all_peaks --- keypoints
    all_peaks = []
    peak_counter = 0
    for part in range(19 - 1):  # last channel is background
        map_ori = heatmap_avg[:, :, part]
        # Renamed from `map`/`id` to avoid shadowing Python builtins.
        smoothed = gaussian_filter(map_ori, sigma=3)
        # Shifted copies for the 4-neighbour local-maximum test.
        map_left = np.zeros(smoothed.shape)
        map_left[1:, :] = smoothed[:-1, :]
        map_right = np.zeros(smoothed.shape)
        map_right[:-1, :] = smoothed[1:, :]
        map_up = np.zeros(smoothed.shape)
        map_up[:, 1:] = smoothed[:, :-1]
        map_down = np.zeros(smoothed.shape)
        map_down[:, :-1] = smoothed[:, 1:]
        peaks_binary = np.logical_and.reduce(
            (smoothed >= map_left, smoothed >= map_right, smoothed >= map_up,
             smoothed >= map_down, smoothed > 0.1))
        peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))  # note reverse: (x, y)
        # Score each peak with the unsmoothed heatmap value at (row=y, col=x).
        peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
        peak_ids = range(peak_counter, peak_counter + len(peaks))
        peaks_with_score_and_id = [peaks_with_score[i] + (peak_ids[i],) for i in range(len(peak_ids))]
        all_peaks.append(peaks_with_score_and_id)
        peak_counter += len(peaks)
    return all_peaks
if __name__ == "__main__":
    # set up model
    weights_path = "./keras_Realtime_Multi-Person_Pose_Estimation/model/keras/model.h5"
    input_shape = (None, None, 3)
    img_input = Input(shape=input_shape)
    stages = 6
    np_branch1 = 38  # PAF channels
    np_branch2 = 19  # heatmap channels
    img_normalized = Lambda(lambda x: x / 256 - 0.5)(img_input)  # [-0.5, 0.5]
    # VGG
    stage0_out = vgg_block(img_normalized)
    # stage 1
    stage1_branch1_out = stage1_block(stage0_out, np_branch1, 1)
    stage1_branch2_out = stage1_block(stage0_out, np_branch2, 2)
    x = Concatenate()([stage1_branch1_out, stage1_branch2_out, stage0_out])
    # stage t >= 2
    for sn in range(2, stages + 1):
        stageT_branch1_out = stageT_block(x, np_branch1, sn, 1)
        stageT_branch2_out = stageT_block(x, np_branch2, sn, 2)
        if (sn < stages):
            x = Concatenate()([stageT_branch1_out, stageT_branch2_out, stage0_out])
    model = Model(img_input, [stageT_branch1_out, stageT_branch2_out])
    model.load_weights(weights_path)
    # Walk the image tree and pickle keypoints next to each source image.
    # (Removed unused locals `img_path` and `start_time`.)
    for root, dirs, files in os.walk(ROOTDIR, topdown=True):
        for fname in files:
            fulldir = root + '/' + fname
            if not "flat" in fname:
                img = cv2.imread(fulldir)
                if img is not None:
                    keypoints = get_keypoints(img, model)
                    # BUG FIX: the output handle previously shadowed the loop
                    # variable `file`; use distinct names.
                    with open(fulldir + 'keypoints', 'wb') as out_f:
                        pickle.dump(keypoints, out_f)
|
from generallibrary import match, replace, deco_cache
from urllib.parse import quote
class Path_Strings:
    """ String operations for Path.

    Mixin intended to be combined into generalfile's Path class; methods
    rely on `self.Path`, `self.path`, `self._path` and the class'
    `path_delimiter` being provided by the host class. Most accessors are
    memoized with `@deco_cache()`, so they assume the underlying path
    string is immutable. """
    def __getitem__(self, item):
        """ Get character from path string.
            :param generalfile.Path self: """
        return self.Path(self.path.__getitem__(item))

    @deco_cache()
    def to_alternative(self):
        """ Get path using alternative delimiter and alternative root for windows.
            :param generalfile.Path self:
            :rtype: generalfile.Path """
        return self.Path(replace(string=self.path, **self._alternative_chars))

    @deco_cache()
    def from_alternative(self):
        """ Get path from an alternative representation with or without leading lock dir.
            :param generalfile.Path self:
            :rtype: generalfile.Path """
        # Strip the lock dir prefix (if present) before reversing the
        # alternative-character substitution.
        path = str(self.remove_start(self.get_lock_dir()))
        return self.Path(replace(string=path, reverse=True, **self._alternative_chars))

    def absolute(self):
        """ Get new Path as absolute.
            :param generalfile.Path self:
            :rtype: generalfile.Path """
        if self.is_absolute():
            return self
        else:
            return self.get_working_dir() / self

    def relative(self, base=None):
        """ Get new Path as relative, uses working dir if base is None.
            Returns self if not inside base.
            :param generalfile.Path self:
            :param base: Defaults to working dir. """
        if self.is_relative() and (base is None or not self.startswith(base)):
            return self
        else:
            if base is None:
                base = self.get_working_dir()
            try:
                # Equal paths yield the empty Path; ValueError means this
                # path is not inside `base`.
                return self.Path() if self == base else self.Path(self._path.relative_to(base))
            except ValueError:
                return None

    @deco_cache()
    def is_absolute(self):
        """ Get whether this Path is absolute.
            :param generalfile.Path self: """
        return self._path.is_absolute()

    @deco_cache()
    def is_relative(self):
        """ Get whether this Path is relative.
            :param generalfile.Path self: """
        return not self.is_absolute()

    @deco_cache()
    def mirror_path(self):
        """ Return mirror Path which currently points to same destination based on working dir.
            Absolute Path returns relative Path and vice versa.
            :param generalfile.Path self:
            :rtype: generalfile.Path """
        if self.is_absolute():
            return self.relative()
        else:
            return self.absolute()

    @deco_cache()
    def startswith(self, path):
        """ Get whether this Path starts with given string.
            :param generalfile.Path self:
            :param str or Path path:"""
        path = self.Path(path)
        return self.path.startswith(str(path))

    @deco_cache()
    def endswith(self, path):
        """ Get whether this Path ends with given string.
            :param generalfile.Path self:
            :param str or Path path:"""
        path = self.Path(path)
        return self.path.endswith(str(path))

    @deco_cache()
    def remove_start(self, path):
        """ Remove a string from the start of this Path if it exists.
            :param generalfile.Path self:
            :param str or Path path:"""
        path = self.Path(path)
        str_path = str(path)
        if not self.startswith(str_path):
            return self
        else:
            new_path = self.Path(self.path[len(str_path):])
            # Drop a leftover leading delimiter after the prefix removal.
            if str(new_path).startswith(path.path_delimiter):
                return new_path[1:]
            else:
                return new_path

    @deco_cache()
    def remove_end(self, path):
        """ Remove a string from the end of this Path if it exists.
            :param generalfile.Path self:
            :param str or Path path:"""
        path = self.Path(path)
        str_path = str(path)
        if not self.endswith(str_path):
            return self
        else:
            new_path = self.Path(self.path[:-len(str_path)])
            # Drop a leftover trailing delimiter after the suffix removal.
            if str(new_path).endswith(path.path_delimiter):
                return new_path[:-1]
            else:
                return new_path

    def same_destination(self, path):
        """ See if two paths point to the same destination.
            :param generalfile.Path self:
            :param str or Path path:"""
        path = self.Path(path)
        return self.absolute() == path.absolute()

    @deco_cache()
    def parts(self):
        """ Split path using it's delimiter.
            With an absolute path the first index is an empty string on a posix system. <- Not sure about that anymore, might be /
            :param generalfile.Path self: """
        return self.path.split(self.path_delimiter)

    @deco_cache()
    def name(self):
        """ Get string name of Path which is stem + suffix, or entire path if root.
            :param generalfile.Path self: """
        return self.path if self.is_root() else self._path.name

    @deco_cache()
    def with_name(self, name):
        """ Get a new Path with new name which is stem + suffix.
            :param name: Name.
            :param generalfile.Path self:
            :rtype: generalfile.Path """
        return self.Path(self._path.with_name(str(name)))

    @deco_cache()
    def stem(self):
        """ Get stem which is name without last suffix.
            :param generalfile.Path self: """
        return self._path.stem

    @deco_cache()
    def with_stem(self, stem):
        """ Get a new Path with new stem which is name without last suffix.
            :param stem: New stem.
            :param generalfile.Path self:
            :rtype: generalfile.Path """
        return self.Path(self.with_name(f"{stem}{self.suffix()}"))

    @deco_cache()
    def true_stem(self):
        """ Get true stem which is name without any suffixes.
            :param generalfile.Path self: """
        return self._path.stem.split(".")[0]

    @deco_cache()
    def with_true_stem(self, true_stem):
        """ Get a new Path with new stem which is name without any suffixes.
            :param true_stem: New true stem.
            :param generalfile.Path self:
            :rtype: generalfile.Path """
        return self.Path(self.with_name(f"{true_stem}{''.join(self.suffixes())}"))

    @deco_cache()
    def suffix(self):
        """ Get suffix which is name without stem.
            Empty string if missing.
            :param generalfile.Path self: """
        return self._path.suffix

    @deco_cache()
    def with_suffix(self, suffix, index=-1):
        """ Get a new Path with a new suffix at any index.
            Index is automatically clamped if it's outside index range.
            Set suffix to `None` to remove a suffix.
            :param generalfile.Path self:
            :param suffix: New suffix, can be `None`.
            :param index: Suffix index to alter.
            :rtype: generalfile.Path """
        suffixes = self.suffixes().copy()
        try:
            # Probe whether `index` is inside the current suffix list.
            suffixes[index]
        except IndexError:
            # Out-of-range index: clamp to the nearest end. Past the end ->
            # append/remove at the tail; before the start -> at the head.
            if index >= len(suffixes):
                if not suffix:
                    if suffixes:
                        del suffixes[-1]
                else:
                    suffixes.append(suffix)
            else:
                if not suffix:
                    if suffixes:
                        del suffixes[0]
                else:
                    suffixes.insert(0, suffix)
        else:
            # In-range index: replace, or delete when suffix is falsy.
            if not suffix:
                del suffixes[index]
            else:
                suffixes[index] = suffix
        return self.with_name(f"{self.true_stem()}{''.join(suffixes)}")

    @deco_cache()
    def suffixes(self):
        """ Get every suffix as a list.
            :param generalfile.Path self: """
        return self._path.suffixes

    @deco_cache()
    def with_suffixes(self, *suffixes):
        """ Get a new Path with a new list of suffixes.
            :param str suffixes: New suffixes
            :param generalfile.Path self:
            :rtype: generalfile.Path """
        return self.Path(self.with_name(f"{self.true_stem()}{''.join(suffixes)}"))

    @deco_cache()
    def match(self, *patterns):
        """ Get whether this Path matches any given filter line.
            :param generalfile.Path self: """
        return match(self.path, *map(self._replace_delimiters, patterns))

    @deco_cache()
    def encode(self):
        """ Return a URL encoded string from this Path.
            :param generalfile.Path self: """
        # Normalize to forward slashes before percent-encoding.
        url = self.path.replace("\\", "/")
        return quote(url)
|
from google_trans_new import google_translator
from pyrogram import filters
from inspect import getfullargspec
from pyrogram.types import Message
from _pyrogram import app
from config import PREFIX
trl = google_translator()
async def edrep(msg: Message, **kwargs):
    """Edit the message when it is our own, otherwise reply to it.

    Only keyword arguments accepted by the chosen pyrogram method (as seen
    via its ``__wrapped__`` signature) are forwarded.
    """
    if msg.from_user.is_self:
        responder = msg.edit_text
    else:
        responder = msg.reply
    accepted = getfullargspec(responder.__wrapped__).args
    filtered = {key: value for key, value in kwargs.items() if key in accepted}
    await responder(**filtered)
@app.on_message(filters.command("tr", PREFIX) & filters.me)
async def translate(_client, message):
    """Translate text via `tr <lang>` on a reply, or `tr <lang> <text>` inline."""
    reply = message.reply_to_message
    words = message.text.split()
    if reply and (reply.text or reply.caption):
        # Reply mode: language code is the only required argument.
        if len(words) == 1:
            await edrep(message, text="Usage: Reply to a message, then `tr <lang>`")
            return
        target = words[1]
        text = reply.text or reply.caption
    else:
        # Inline mode: need both a language code and the text itself.
        if len(words) <= 2:
            await edrep(message, text="Usage: `tr <lang> <text>`")
            return
        _, target, text = message.text.split(None, 2)
    detectlang = trl.detect(text)
    try:
        tekstr = trl.translate(text, lang_tgt=target)
    except ValueError as err:
        await edrep(message, text=f"Error: `{str(err)}`")
        return
    await edrep(
        message,
        text=f"Translated from `{detectlang[0]}` to `{target}`:\n```{tekstr}```",
    )
|
#!/usr/bin/env python
"""Training on a single process."""
import os
import torch
from onmt.inputters.inputter import build_dataset_iter, \
load_old_vocab, old_style_vocab, build_dataset_iter_multiple
from onmt.model_builder import build_model
from onmt.utils.optimizers import Optimizer
from onmt.utils.misc import set_random_seed
from onmt.trainer import build_trainer
from onmt.models import build_model_saver
from onmt.utils.logging import init_logger, logger
from onmt.utils.parse import ArgumentParser
def _check_save_model_path(opt):
save_model_path = os.path.abspath(opt.save_model)
model_dirname = os.path.dirname(save_model_path)
if not os.path.exists(model_dirname):
os.makedirs(model_dirname)
def _tally_parameters(model):
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
else:
dec += param.nelement()
return enc + dec, enc, dec
def configure_process(opt, device_id):
    """Pin this process to a CUDA device (when device_id >= 0) and seed RNGs."""
    use_gpu = device_id >= 0
    if use_gpu:
        torch.cuda.set_device(device_id)
    set_random_seed(opt.seed, use_gpu)
def main(opt, device_id, batch_queue=None, semaphore=None):
    """Train a model on a single process/device.

    :param opt: validated training options (see NOTE below).
    :param device_id: CUDA device index, or -1 for CPU.
    :param batch_queue: optional queue of pre-built batches from a producer
        process; when given, ``semaphore`` must also be given.
    :param semaphore: released once per consumed batch so the producer can
        enqueue the next one.
    """
    # NOTE: It's important that ``opt`` has been validated and updated
    # at this point.
    configure_process(opt, device_id)
    init_logger(opt.log_file)
    assert len(opt.accum_count) == len(opt.accum_steps), \
        'Number of accum_count values must match number of accum_steps'
    # Load checkpoint if we resume from a previous training.
    if opt.train_from:
        logger.info('Loading checkpoint from %s' % opt.train_from)
        checkpoint = torch.load(opt.train_from,
                                map_location=lambda storage, loc: storage)
        if 'opt' in checkpoint:
            # Prefer the options stored in the checkpoint over the CLI ones.
            model_opt = ArgumentParser.ckpt_model_opts(checkpoint["opt"])
            ArgumentParser.update_model_opts(model_opt)
            ArgumentParser.validate_model_opts(model_opt)
        else:
            model_opt = opt
        if 'vocab' in checkpoint:
            logger.info('Loading vocab from checkpoint at %s.', opt.train_from)
            vocab = checkpoint['vocab']
        else:
            vocab = torch.load(opt.data + '.vocab.pt')
    else:
        checkpoint = None
        model_opt = opt
        vocab = torch.load(opt.data + '.vocab.pt')
    # check for code where vocab is saved instead of fields
    # (in the future this will be done in a smarter way)
    if old_style_vocab(vocab):
        fields = load_old_vocab(
            vocab, opt.model_type, dynamic_dict=opt.copy_attn)
    else:
        fields = vocab
    # Report src and tgt vocab sizes, including for features
    for side in ['src', 'tgt']:
        f = fields[side]
        try:
            f_iter = iter(f)
        except TypeError:
            # Single (non-iterable) field: wrap so the loop below works.
            f_iter = [(side, f)]
        for sn, sf in f_iter:
            if sf.use_vocab:
                logger.info(' * %s vocab size = %d' % (sn, len(sf.vocab)))
    # Build model.
    model = build_model(model_opt, opt, fields, checkpoint)
    n_params, enc, dec = _tally_parameters(model)
    logger.info('encoder: %d' % enc)
    logger.info('decoder: %d' % dec)
    logger.info('* number of parameters: %d' % n_params)
    _check_save_model_path(opt)
    # Build optimizer.
    optim = Optimizer.from_opt(model, opt, checkpoint=checkpoint)
    # Build model saver
    model_saver = build_model_saver(model_opt, opt, model, fields, optim)
    trainer = build_trainer(
        opt, device_id, model, fields, optim, model_saver=model_saver)
    if batch_queue is None:
        # Build the training iterator locally from the data shards.
        if len(opt.data_ids) > 1:
            train_shards = []
            for train_id in opt.data_ids:
                shard_base = "train_" + train_id
                train_shards.append(shard_base)
            train_iter = build_dataset_iter_multiple(train_shards, fields, opt)
        else:
            if opt.data_ids[0] is not None:
                shard_base = "train_" + opt.data_ids[0]
            else:
                shard_base = "train"
            train_iter = build_dataset_iter(shard_base, fields, opt)
    else:
        assert semaphore is not None, \
            "Using batch_queue requires semaphore as well"

        def _train_iter():
            # Consume batches produced by the parent process; release the
            # semaphore after each so the producer can enqueue the next one.
            while True:
                batch = batch_queue.get()
                semaphore.release()
                yield batch

        train_iter = _train_iter()
    valid_iter = build_dataset_iter(
        "valid", fields, opt, is_train=False)
    if len(opt.gpu_ranks):
        logger.info('Starting training on GPU: %s' % opt.gpu_ranks)
    else:
        logger.info('Starting training on CPU, could be very slow')
    train_steps = opt.train_steps
    if opt.single_pass and train_steps > 0:
        # single_pass means "one full epoch over the data", encoded as 0 steps.
        logger.warning("Option single_pass is enabled, ignoring train_steps.")
        train_steps = 0
    trainer.train(
        train_iter,
        train_steps,
        save_checkpoint_steps=opt.save_checkpoint_steps,
        valid_iter=valid_iter,
        valid_steps=opt.valid_steps)
    if trainer.report_manager.tensorboard_writer is not None:
        trainer.report_manager.tensorboard_writer.close()
|
import os
import sys
from datetime import datetime
"""Parser of command args"""
import argparse
parse = argparse.ArgumentParser()
parse.add_argument("--type", type=str,choices=['origin', 'grist',], help="run initial file or grist file")
flags, unparsed = parse.parse_known_args(sys.argv[1:])
case_dict = {
"GH_IPS1":["scripts.study_case.GH_IPS1.ghips1"],
"GH_IPS1_mutant":["scripts.study_case.GH_IPS1_mutant.ghips1_mutant"],
"GH_IPS6":["scripts.study_case.GH_IPS6.command_line.run_logistic_regression"],
"GH_IPS9":["scripts.study_case.GH_IPS9.ghips9"],
"handon_tensorflow":["scripts.study_case.handon_tensorflow.code_10_image"],
"MNIST":["scripts.study_case.MNIST.v3.train"],
"python_tensorflow":["scripts.study_case.python_tensorflow.mnist"],
"SO_IPS1":["scripts.study_case.SO_IPS1.soips1"],
"SO_IPS2":["scripts.study_case.SO_IPS2.soips2"],
"SO_IPS6":["scripts.study_case.SO_IPS6.soips6"],
"SO_IPS7":["scripts.study_case.SO_IPS7.soips7"],
"SO_IPS14":["scripts.study_case.SO_IPS14.soips14"],
"tensorflow_examples":["scripts.study_case.tensorflow_examples.logistic_regression"],
"tensorflow_examples_tutorials_mnist":["scripts.study_case.tensorflow_examples_tutorials_mnist.mnist_softmax"],
"Tensorflow_gesture_Demo":["scripts.study_case.Tensorflow_gesture_Demo.Mnist"],
"tensorflow_in_ml":["scripts.study_case.tensorflow_in_ml.softmax"],
"tensorflow_mnist":["scripts.study_case.tensorflow_mnist.mnist"],
"TensorFuzz":["scripts.study_case.TensorFuzz.nan_model_exp",
"scripts.study_case.TensorFuzz.nan_model_truediv",
"scripts.study_case.TensorFuzz.nan_model_log"],
"tensorflow_value_iteration_networks_v1":["scripts.study_case.tensorflow_value_iteration_networks_v1.train"],
"generative_models_v1":["scripts.study_case.generative_models_v1.GAN.infogan.infogan_tensorflow"],
"generative_models_v2":["scripts.study_case.generative_models_v2.GAN.auxiliary_classifier_gan.ac_gan_tensorflow",
"scripts.study_case.generative_models_v2.GAN.ali_bigan.ali_bigan_tensorflow",
"scripts.study_case.generative_models_v2.GAN.boundary_seeking_gan.bgan_tensorflow",
"scripts.study_case.generative_models_v2.GAN.coupled_gan.cogan_tensorflow",
"scripts.study_case.generative_models_v2.GAN.disco_gan.discogan_tensorflow",
"scripts.study_case.generative_models_v2.GAN.mode_regularized_gan.mode_reg_gan_tensorflow",
"scripts.study_case.generative_models_v2.GAN.vanilla_gan.gan_tensorflow"],
"tf_unet":["scripts.study_case.tf_unet.test_toy"],
"pytorch_playground":["scripts.study_case.pytorch_playground.pytorch_pg"],
"SC_DNN":["scripts.study_case.SC_DNN.sc_train_creg",
"scripts.study_case.SC_DNN.sc_train_creg_div2",
"scripts.study_case.SC_DNN.sc_train_l2reg",
"scripts.study_case.SC_DNN.sc_train_l2reg_div2"],
"skorch":["scripts.study_case.skorch.main"],
"RBM_grist":["scripts.study_case.RBM_grist.rbm"],
"pytorch_geometric_exp":["scripts.study_case.pytorch_geometric_exp.test.utils.test_softmax"],
"pytorch_geometric_fork":["scripts.study_case.pytorch_geometric_fork.test.nn.models.test_autoencoder"],
"Matchzoo":["scripts.study_case.MatchZoo_py.tests.test_losses"],
"MachineLearning":["scripts.study_case.MachineLearning.temp"],
"DeepLearningTest":["scripts.study_case.DeepLearning.deeplearningtest"],
"tensorflow_GAN_MNIST":["scripts.study_case.tensorflow_GAN_MNIST.GAN_MNIST"],
"gan_practice":["scripts.study_case.gan_practice.gan_mnist"],
"CS231":["scripts.study_case.CS231.assign3_acgan",
"scripts.study_case.CS231.assign3_acgan_log1",
"scripts.study_case.CS231.assign3_acgan_log2"],
"deep_learning_Nikolenko_and_Co":["scripts.study_case.deep_learning_Nikolenko_and_Co.ch10_04_01",
"scripts.study_case.deep_learning_Nikolenko_and_Co.ch10_04_03_Pic_10_05",
"scripts.study_case.deep_learning_Nikolenko_and_Co.ch10_04_04_Pic_10_06",
"scripts.study_case.deep_learning_Nikolenko_and_Co.ch10_04_05_Pic_10_07",
"scripts.study_case.deep_learning_Nikolenko_and_Co.ch10_04_06_Pic_10_08"],
"FuzzForTensorflow":["scripts.study_case.nan_model"],
"git1_rbm":["scripts.study_case.My_pytorch1"],
"gongdols":["scripts.study_case.denoising_RBM",
"scripts.study_case.main"],
"MNIST_DCGAN":["scripts.study_case.code",
"scripts.study_case.code_log1",
"scripts.study_case.code_log2"],
"softmax_gan":["scripts.study_case.softmax_gan.softmax_gan_tensorflow"],
}
# Progress counters and a wall-clock timer for the whole batch run.
cur_cnt = 1
run_cnt = 1
s1 = datetime.now()
# Cases that need a non-default conda environment (repo name -> env name);
# everything else runs under "tensorflow181".
special_dict = {"tf_unet":"tensorflow0120",
                "pytorch_playground":"pytorch151",
                "skorch": "pytorch151",
                "RBM_grist": "pytorch151",
                "pytorch_geometric_exp":"pytorch151",
                "pytorch_geometric_fork":"pytorch151",
                "Matchzoo":"pytorch151",}
for case_repo,commands in case_dict.items():
    print(f"INFO: Executing {case_repo}. {cur_cnt} of {len(case_dict)}")
    for cm in commands:
        # The "_grist" module variant is selected via the --type flag.
        cm = cm+"_grist" if flags.type == "grist" else cm
        if case_repo in special_dict.keys():
            env_name = special_dict[case_repo]
        else:
            env_name = "tensorflow181"
        # Run the case as a module inside its conda environment; os.system
        # returns 0 on success.
        python_command = f"/root/anaconda3/envs/{env_name}/bin/python -u -m {cm}"
        status1 = os.system(python_command)
        print(f"INFO: {cm}")
        if status1 == 0:
            print(f"INFO: Execution Run {cm} finished!")
        else:
            print(f"ERROR: Fail to run {cm}")
        print(f"INFO: {case_repo} is Over!")
        run_cnt += 1
    cur_cnt += 1
print(f"Done! Total Time cost: {datetime.now() - s1}")
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.core import MatchConditions
from devtools_testutils import AzureTestCase
from azure.core.exceptions import (
ResourceModifiedError,
ResourceNotFoundError,
ResourceExistsError,
AzureError,
)
from azure.appconfiguration import (
ResourceReadOnlyError,
AzureAppConfigurationClient,
ConfigurationSetting,
SecretReferenceConfigurationSetting,
FeatureFlagConfigurationSetting,
PERCENTAGE,
TARGETING,
TIME_WINDOW,
)
from consts import (
KEY,
LABEL,
TEST_VALUE,
TEST_CONTENT_TYPE,
LABEL_RESERVED_CHARS,
PAGE_SIZE,
KEY_UUID,
)
import pytest
import copy
import datetime
import os
import logging
import re
import functools
from wrapper import app_config_decorator
class AppConfigurationClientTest(AzureTestCase):
    def __init__(self, method_name):
        # Match recorded VCR cassettes on path/method/query only — headers
        # and bodies vary between recordings.
        super(AppConfigurationClientTest, self).__init__(method_name)
        self.vcr.match_on = ["path", "method", "query"]
def _delete_setting(self, client, item):
client.delete_configuration_setting(
key=item.key, label=item.label
)
def create_aad_client(self, base_url):
cred = self.get_credential(AzureAppConfigurationClient)
return AzureAppConfigurationClient(base_url, cred)
# method: add_configuration_setting
@app_config_decorator
def test_add_configuration_setting(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
kv = ConfigurationSetting(
key=KEY + "_ADD",
label=LABEL,
value=TEST_VALUE,
content_type=TEST_CONTENT_TYPE,
tags={"tag1": "tag1", "tag2": "tag2"},
)
created_kv = client.add_configuration_setting(kv)
self._delete_setting(client, created_kv)
assert (
created_kv.label == kv.label
and kv.value == kv.value
and created_kv.content_type == kv.content_type
and created_kv.tags == kv.tags
)
assert (
created_kv.etag is not None
and created_kv.last_modified is not None
and created_kv.read_only is False
)
@app_config_decorator
def test_add_existing_configuration_setting(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
with pytest.raises(ResourceExistsError):
client.add_configuration_setting(
ConfigurationSetting(
key=test_config_setting.key,
lable=test_config_setting.label,
)
)
# method: set_configuration_setting
@app_config_decorator
def test_set_existing_configuration_setting_label_etag(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
to_set_kv = test_config_setting
to_set_kv.value = to_set_kv.value + "a"
to_set_kv.tags = {"a": "b", "c": "d"}
set_kv = client.set_configuration_setting(to_set_kv)
assert (
to_set_kv.key == set_kv.key
and to_set_kv.label == to_set_kv.label
and to_set_kv.value == set_kv.value
and to_set_kv.content_type == set_kv.content_type
and to_set_kv.tags == set_kv.tags
and to_set_kv.etag != set_kv.etag
)
@app_config_decorator
def test_set_existing_configuration_setting_label_wrong_etag(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
to_set_kv = test_config_setting
to_set_kv.value = to_set_kv.value + "a"
to_set_kv.tags = {"a": "b", "c": "d"}
to_set_kv.etag = "wrong etag"
with pytest.raises(ResourceModifiedError):
client.set_configuration_setting(to_set_kv, match_condition=MatchConditions.IfNotModified)
@app_config_decorator
def test_set_configuration_setting_etag(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
kv = ConfigurationSetting(
key=KEY + "_SET",
label=LABEL,
value=TEST_VALUE,
content_type=TEST_CONTENT_TYPE,
tags={"tag1": "tag1", "tag2": "tag2"},
)
kv.etag = "random etag"
with pytest.raises(ResourceModifiedError):
client.set_configuration_setting(kv, match_condition=MatchConditions.IfNotModified)
@app_config_decorator
def test_set_configuration_setting_no_etag(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
to_set_kv = ConfigurationSetting(
key=KEY + "_SET",
label=LABEL,
value=TEST_VALUE,
content_type=TEST_CONTENT_TYPE,
tags={"tag1": "tag1", "tag2": "tag2"},
)
set_kv = client.set_configuration_setting(to_set_kv)
self._delete_setting(client, to_set_kv)
assert (
to_set_kv.key == set_kv.key
and to_set_kv.label == set_kv.label
and to_set_kv.value == set_kv.value
and to_set_kv.content_type == set_kv.content_type
and to_set_kv.tags == set_kv.tags
and to_set_kv.etag != set_kv.etag
)
# method: get_configuration_setting
@app_config_decorator
def test_get_configuration_setting_no_label(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
compare_kv = test_config_setting_no_label
fetched_kv = client.get_configuration_setting(compare_kv.key)
assert (
fetched_kv.key == compare_kv.key
and fetched_kv.value == compare_kv.value
and fetched_kv.content_type == compare_kv.content_type
and fetched_kv.tags == compare_kv.tags
)
assert fetched_kv.label is None
@app_config_decorator
def test_get_configuration_setting_label(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
compare_kv = test_config_setting
fetched_kv = client.get_configuration_setting(
compare_kv.key, compare_kv.label
)
assert (
fetched_kv.key == compare_kv.key
and fetched_kv.value == compare_kv.value
and fetched_kv.content_type == compare_kv.content_type
and fetched_kv.tags == compare_kv.tags
)
assert fetched_kv.label is not None
@app_config_decorator
def test_get_non_existing_configuration_setting(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
compare_kv = test_config_setting
with pytest.raises(ResourceNotFoundError):
client.get_configuration_setting(
compare_kv.key, compare_kv.label + "a"
)
# method: delete_configuration_setting
@app_config_decorator
def test_delete_with_key_no_label(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
to_delete_kv = test_config_setting_no_label
client.delete_configuration_setting(to_delete_kv.key)
self._delete_setting(client, to_delete_kv)
with pytest.raises(ResourceNotFoundError):
client.get_configuration_setting(to_delete_kv.key)
@app_config_decorator
def test_delete_with_key_label(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
to_delete_kv = test_config_setting
client.delete_configuration_setting(
to_delete_kv.key, label=to_delete_kv.label
)
self._delete_setting(client, to_delete_kv)
with pytest.raises(ResourceNotFoundError):
client.get_configuration_setting(
to_delete_kv.key, label=to_delete_kv.label
)
@app_config_decorator
def test_delete_non_existing(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
deleted_kv = client.delete_configuration_setting(
"not_exist_" + KEY
)
assert deleted_kv is None
@app_config_decorator
def test_delete_correct_etag(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
to_delete_kv = test_config_setting_no_label
deleted_kv = client.delete_configuration_setting(
to_delete_kv.key, etag=to_delete_kv.etag
)
self._delete_setting(client, to_delete_kv)
assert deleted_kv is not None
with pytest.raises(ResourceNotFoundError):
client.get_configuration_setting(to_delete_kv.key)
@app_config_decorator
def test_delete_wrong_etag(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
to_delete_kv = test_config_setting_no_label
with pytest.raises(ResourceModifiedError):
client.delete_configuration_setting(
to_delete_kv.key, etag="wrong etag", match_condition=MatchConditions.IfNotModified
)
# method: list_configuration_settings
@app_config_decorator
def test_list_configuration_settings_key_label(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
items = list(client.list_configuration_settings(
label_filter=LABEL, key_filter=KEY
))
assert len(items) == 1
assert all(x.key == KEY and x.label == LABEL for x in items)
@app_config_decorator
def test_list_configuration_settings_only_label(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
items = list(client.list_configuration_settings(label_filter=LABEL))
assert len(items) == 1
assert all(x.label == LABEL for x in items)
@pytest.mark.skip("3 != 2, three items are returned")
@app_config_decorator
def test_list_configuration_settings_only_key(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
items = list(client.list_configuration_settings(key_filter=KEY))
assert len(items) == 2
assert all(x.key == KEY for x in items)
@app_config_decorator
def test_list_configuration_settings_fields(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
items = list(client.list_configuration_settings(
key_filter="*", label_filter=LABEL, fields=["key", "content_type"]
))
assert len(items) == 1
assert all(x.key and not x.label and x.content_type for x in items)
@pytest.mark.skip("ResourceExistsError: Operation returned an invalid status 'Precondition Failed'")
@app_config_decorator
def test_list_configuration_settings_reserved_chars(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
resered_char_kv = ConfigurationSetting(
key=KEY, label=LABEL_RESERVED_CHARS, value=TEST_VALUE
)
resered_char_kv = client.add_configuration_setting(
resered_char_kv
)
self._delete_setting(client, resered_char_kv)
escaped_label = re.sub(r"((?!^)\*(?!$)|\\|,)", r"\\\1", LABEL_RESERVED_CHARS)
items = list(client.list_configuration_settings(
label_filter=escaped_label
))
assert len(items) == 1
assert all(x.label == LABEL_RESERVED_CHARS for x in items)
@pytest.mark.skip("Bad Request")
@app_config_decorator
def test_list_configuration_settings_contains(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
items = list(client.list_configuration_settings(
label_filter="*" + LABEL + "*"
))
assert len(items) == 1
assert all(x.label == LABEL for x in items)
@app_config_decorator
def test_list_configuration_settings_correct_etag(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
to_list_kv = test_config_setting
custom_headers = {"If-Match": to_list_kv.etag}
items = list(client.list_configuration_settings(
key_filter=to_list_kv.key, label_filter=to_list_kv.label, headers=custom_headers
))
assert len(items) == 1
assert all(x.key == to_list_kv.key and x.label == to_list_kv.label for x in items)
@app_config_decorator
def test_list_configuration_settings_multi_pages(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
# create PAGE_SIZE+1 configuration settings to have at least two pages
try:
delete_me = [
client.add_configuration_setting(
ConfigurationSetting(
key="multi_" + str(i) + KEY_UUID,
label="multi_label_" + str(i),
value="multi value",
)
)
for i in range(PAGE_SIZE + 1)
]
except ResourceExistsError:
pass
items = client.list_configuration_settings(key_filter="multi_*")
assert len(list(items)) > PAGE_SIZE
# Remove the configuration settings
try:
[
client.delete_configuration_setting(
key="multi_" + str(i) + KEY_UUID, label="multi_label_" + str(i)
)
for i in range(PAGE_SIZE + 1)
]
except AzureError:
pass
@app_config_decorator
def test_list_configuration_settings_null_label(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
items = client.list_configuration_settings(label_filter="\0")
assert len(list(items)) > 0
@app_config_decorator
def test_list_configuration_settings_only_accepttime(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
exclude_today = client.list_configuration_settings(
accept_datetime=datetime.datetime.today() + datetime.timedelta(days=-1)
)
all_inclusive = client.list_configuration_settings()
assert len(list(all_inclusive)) > len(list(exclude_today))
# method: list_revisions
@app_config_decorator
def test_list_revisions_key_label(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
to_list1 = test_config_setting
items = list(client.list_revisions(
label_filter=to_list1.label, key_filter=to_list1.key
))
assert len(items) >= 2
assert all(x.key == to_list1.key and x.label == to_list1.label for x in items)
@app_config_decorator
def test_list_revisions_only_label(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
items = list(client.list_revisions(label_filter=LABEL))
assert len(items) >= 1
assert all(x.label == LABEL for x in items)
@app_config_decorator
def test_list_revisions_key_no_label(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
items = list(client.list_revisions(key_filter=KEY))
assert len(items) >= 1
assert all(x.key == KEY for x in items)
@pytest.mark.skip("Operation returned an invalid status 'Internal Server Error'")
@app_config_decorator
def test_list_revisions_fields(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
items = list(client.list_revisions(
key_filter="*", label_filter=LABEL, fields=["key", "content_type"]
))
assert all(x.key and not x.label and x.content_type and not x.tags and not x.etag for x in items)
@app_config_decorator
def test_list_revisions_correct_etag(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
to_list_kv = test_config_setting
custom_headers = {"If-Match": to_list_kv.etag}
items = list(client.list_revisions(
key_filter=to_list_kv.key, label_filter=to_list_kv.label, headers=custom_headers
))
assert len(items) >= 1
assert all(x.key == to_list_kv.key and x.label == to_list_kv.label for x in items)
@app_config_decorator
def test_read_only(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
kv = test_config_setting_no_label
read_only_kv = client.set_read_only(kv)
assert read_only_kv.read_only
readable_kv = client.set_read_only(read_only_kv, False)
assert not readable_kv.read_only
@app_config_decorator
def test_delete_read_only(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
to_delete_kv = test_config_setting_no_label
read_only_kv = client.set_read_only(to_delete_kv)
with pytest.raises(ResourceReadOnlyError):
client.delete_configuration_setting(to_delete_kv.key)
client.set_read_only(read_only_kv, False)
client.delete_configuration_setting(to_delete_kv.key)
self._delete_setting(client, to_delete_kv)
with pytest.raises(ResourceNotFoundError):
client.get_configuration_setting(to_delete_kv.key)
@app_config_decorator
def test_set_read_only(self, appconfiguration_endpoint_string, test_config_setting, test_config_setting_no_label):
client = self.create_aad_client(appconfiguration_endpoint_string)
to_set_kv = test_config_setting
to_set_kv.value = to_set_kv.value + "a"
to_set_kv.tags = {"a": "b", "c": "d"}
read_only_kv = client.set_read_only(to_set_kv)
with pytest.raises(ResourceReadOnlyError):
client.set_configuration_setting(read_only_kv)
readable_kv = client.set_read_only(read_only_kv, False)
readable_kv.value = to_set_kv.value
readable_kv.tags = to_set_kv.tags
set_kv = client.set_configuration_setting(readable_kv)
assert (
to_set_kv.key == set_kv.key
and to_set_kv.label == to_set_kv.label
and to_set_kv.value == set_kv.value
and to_set_kv.content_type == set_kv.content_type
and to_set_kv.tags == set_kv.tags
and to_set_kv.etag != set_kv.etag
)
def _order_dict(self, d):
from collections import OrderedDict
new = OrderedDict()
for k, v in d.items():
new[k] = str(v)
return new
def _assert_same_keys(self, key1, key2):
assert type(key1) == type(key2)
assert key1.key == key2.key
assert key1.label == key2.label
assert key1.content_type == key2.content_type
assert key1.tags == key2.tags
assert key1.etag != key2.etag
if isinstance(key1, FeatureFlagConfigurationSetting):
assert key1.enabled == key2.enabled
assert len(key1.filters) == len(key2.filters)
elif isinstance(key1, SecretReferenceConfigurationSetting):
assert key1.secret_uri == key2.secret_uri
else:
assert key1.value == key2.value
@app_config_decorator
def test_sync_tokens(self, client):
sync_tokens = copy.deepcopy(client._sync_token_policy._sync_tokens)
sync_token_header = self._order_dict(sync_tokens)
sync_token_header = ",".join(str(x) for x in sync_token_header.values())
new = ConfigurationSetting(
key="KEY1",
label=None,
value="TEST_VALUE1",
content_type=TEST_CONTENT_TYPE,
tags={"tag1": "tag1", "tag2": "tag2"},
)
sent = client.set_configuration_setting(new)
sync_tokens2 = copy.deepcopy(client._sync_token_policy._sync_tokens)
sync_token_header2 = self._order_dict(sync_tokens2)
sync_token_header2 = ",".join(str(x) for x in sync_token_header2.values())
assert sync_token_header != sync_token_header2
new = ConfigurationSetting(
key="KEY2",
label=None,
value="TEST_VALUE2",
content_type=TEST_CONTENT_TYPE,
tags={"tag1": "tag1", "tag2": "tag2"},
)
sent = client.set_configuration_setting(new)
sync_tokens3 = copy.deepcopy(client._sync_token_policy._sync_tokens)
sync_token_header3 = self._order_dict(sync_tokens3)
sync_token_header3 = ",".join(str(x) for x in sync_token_header3.values())
assert sync_token_header2 != sync_token_header3
client.close()
@app_config_decorator
def test_config_setting_feature_flag(self, client):
feature_flag = FeatureFlagConfigurationSetting("test_feature", True)
set_flag = client.set_configuration_setting(feature_flag)
self._assert_same_keys(feature_flag, set_flag)
set_flag.enabled = not set_flag.enabled
changed_flag = client.set_configuration_setting(set_flag)
self._assert_same_keys(set_flag, changed_flag)
changed_flag.enabled = False
assert changed_flag.value['enabled'] == False
c = copy.deepcopy(changed_flag.value)
c['enabled'] = True
changed_flag.value = c
assert changed_flag.enabled == True
changed_flag.value = {}
assert changed_flag.enabled == None
assert changed_flag.value == {}
with pytest.raises(ValueError):
set_flag.value = "bad_value"
_ = set_flag.enabled
client.delete_configuration_setting(changed_flag.key)
@app_config_decorator
def test_config_setting_secret_reference(self, client):
secret_reference = SecretReferenceConfigurationSetting(
"ConnectionString", "https://test-test.vault.azure.net/secrets/connectionString")
set_flag = client.set_configuration_setting(secret_reference)
self._assert_same_keys(secret_reference, set_flag)
updated_flag = client.set_configuration_setting(set_flag)
self._assert_same_keys(set_flag, updated_flag)
assert isinstance(updated_flag, SecretReferenceConfigurationSetting)
new_uri = "https://aka.ms/azsdk"
new_uri2 = "https://aka.ms/azsdk/python"
updated_flag.secret_uri = new_uri
assert updated_flag.value['secret_uri'] == new_uri
updated_flag.value = {'secret_uri': new_uri2}
assert updated_flag.secret_uri == new_uri2
with pytest.raises(ValueError):
set_flag.value = "bad_value"
_ = set_flag.secret_uri
client.delete_configuration_setting(secret_reference.key)
@app_config_decorator
def test_feature_filter_targeting(self, client):
new = FeatureFlagConfigurationSetting(
"newflag",
True,
filters=[
{
"name": TARGETING,
"parameters": {
u"Audience": {
u"Users": [u"abc", u"def"],
u"Groups": [u"ghi", u"jkl"],
u"DefaultRolloutPercentage": 75
}
}
}
]
)
sent_config = client.set_configuration_setting(new)
self._assert_same_keys(sent_config, new)
assert isinstance(sent_config.filters[0], dict)
assert len(sent_config.filters) == 1
sent_config.filters[0]["parameters"]["Audience"]["DefaultRolloutPercentage"] = 80
updated_sent_config = client.set_configuration_setting(sent_config)
self._assert_same_keys(sent_config, updated_sent_config)
updated_sent_config.filters.append(
{
"name": TARGETING,
"parameters": {
u"Audience": {
u"Users": [u"abcd", u"defg"],
u"Groups": [u"ghij", u"jklm"],
u"DefaultRolloutPercentage": 50
}
}
}
)
updated_sent_config.filters.append(
{
"name": TARGETING,
"parameters": {
u"Audience": {
u"Users": [u"abcde", u"defgh"],
u"Groups": [u"ghijk", u"jklmn"],
u"DefaultRolloutPercentage": 100
}
}
}
)
sent_config = client.set_configuration_setting(updated_sent_config)
self._assert_same_keys(sent_config, updated_sent_config)
assert len(sent_config.filters) == 3
client.delete_configuration_setting(updated_sent_config.key)
@app_config_decorator
def test_feature_filter_time_window(self, client):
new = FeatureFlagConfigurationSetting(
'time_window',
True,
filters=[
{
"name": TIME_WINDOW,
"parameters": {
"Start": "Wed, 10 Mar 2021 05:00:00 GMT",
"End": "Fri, 02 Apr 2021 04:00:00 GMT"
}
}
]
)
sent = client.set_configuration_setting(new)
self._assert_same_keys(sent, new)
sent.filters[0]["parameters"]["Start"] = "Thurs, 11 Mar 2021 05:00:00 GMT"
new_sent = client.set_configuration_setting(sent)
self._assert_same_keys(sent, new_sent)
client.delete_configuration_setting(new_sent.key)
@app_config_decorator
def test_feature_filter_custom(self, client):
new = FeatureFlagConfigurationSetting(
'custom',
True,
filters=[
{
"name": PERCENTAGE,
"parameters": {
"Value": 10,
"User": "user1"
}
}
]
)
sent = client.set_configuration_setting(new)
self._assert_same_keys(sent, new)
sent.filters[0]["parameters"]["Value"] = 100
new_sent = client.set_configuration_setting(sent)
self._assert_same_keys(sent, new_sent)
client.delete_configuration_setting(new_sent.key)
@app_config_decorator
def test_feature_filter_multiple(self, client):
new = FeatureFlagConfigurationSetting(
'custom',
True,
filters=[
{
"name": PERCENTAGE,
"parameters": {
"Value": 10
}
},
{
"name": TIME_WINDOW,
"parameters": {
"Start": "Wed, 10 Mar 2021 05:00:00 GMT",
"End": "Fri, 02 Apr 2021 04:00:00 GMT"
}
},
{
"name": TARGETING,
"parameters": {
u"Audience": {
u"Users": [u"abcde", u"defgh"],
u"Groups": [u"ghijk", u"jklmn"],
u"DefaultRolloutPercentage": 100
}
}
}
]
)
sent = client.set_configuration_setting(new)
self._assert_same_keys(sent, new)
sent.filters[0]["parameters"]["Value"] = 100
sent.filters[1]["parameters"]["Start"] = "Wed, 10 Mar 2021 08:00:00 GMT"
sent.filters[2]["parameters"]["Audience"]["DefaultRolloutPercentage"] = 100
new_sent = client.set_configuration_setting(sent)
self._assert_same_keys(sent, new_sent)
assert new_sent.filters[0]["parameters"]["Value"] == 100
assert new_sent.filters[1]["parameters"]["Start"] == "Wed, 10 Mar 2021 08:00:00 GMT"
assert new_sent.filters[2]["parameters"]["Audience"]["DefaultRolloutPercentage"] == 100
client.delete_configuration_setting(new_sent.key)
|
from pathlib import Path
from datetime import datetime, timedelta
import numpy as np
from astropy import units as u
from astropy import stats
from astropy.time import Time
from astropy.nddata import CCDData
import ccdproc
from keckdrpframework.primitives.base_primitive import BasePrimitive
from .utils import pre_condition, post_condition, find_master
##-----------------------------------------------------------------------------
## Primitive: SubtractBias
##-----------------------------------------------------------------------------
class SubtractBias(BasePrimitive):
    """Subtract the master bias frame from the image being processed.

    The master bias file is located once at construction time via
    `find_master`; `_perform` reads it and subtracts it from
    ``action.args.ccddata``.
    """
    def __init__(self, action, context):
        BasePrimitive.__init__(self, action, context)
        self.log = context.pipeline_logger
        self.cfg = self.context.config.instrument
        masters_dir = self.cfg['Calibrations'].get('DirectoryForMasters', None)
        self.master_bias_file = find_master(masters_dir, 'Bias', action.args.meta)
    def _pre_condition(self):
        """Check for conditions necessary to run this process"""
        checks = []
        checks.append(pre_condition(self, 'Skip image is not set',
                                    not self.action.args.skip))
        checks.append(pre_condition(self, 'master bias is available',
                                    self.master_bias_file is not None))
        checks.append(pre_condition(self, 'Image type is not BIAS',
                                    self.action.args.imtype != 'BIAS'))
        return np.all(checks)
    def _post_condition(self):
        """
        Check for conditions necessary to verify that the process ran
        correctly.  No post-run checks are defined for this primitive.
        """
        return np.all([])
    def _perform(self):
        """
        Returns an Argument() with the parameters that depend on this
        operation.
        """
        self.log.info(f"Running {self.__class__.__name__} action")
        self.log.info(f"  Found master bias file: {self.master_bias_file.name}")
        bias_ccd = CCDData.read(self.master_bias_file, unit="adu")
        self.log.info("  Subtracting bias")
        self.action.args.ccddata = ccdproc.subtract_bias(self.action.args.ccddata,
                                                         bias_ccd)
        return self.action.args
##-----------------------------------------------------------------------------
## Primitive: SubtractDark
##-----------------------------------------------------------------------------
class SubtractDark(BasePrimitive):
    """Subtract a master dark frame from the current image.

    The master dark file is located once at construction time via
    `find_master`; `_perform` reads it and subtracts it from
    ``action.args.ccddata``.
    """
    def __init__(self, action, context):
        BasePrimitive.__init__(self, action, context)
        self.log = context.pipeline_logger
        self.cfg = self.context.config.instrument
        # Masters directory comes from the 'Calibrations' config section.
        master_dir = self.cfg['Calibrations'].get('DirectoryForMasters', None)
        self.master_dark_file = find_master(master_dir, 'Dark', action.args.meta)
    def _pre_condition(self):
        """Check for conditions necessary to run this process"""
        checks = [pre_condition(self, 'Skip image is not set',
                                not self.action.args.skip),
                  pre_condition(self, 'master dark is available',
                                self.master_dark_file is not None),
                  pre_condition(self, 'Image type is not DARK',
                                self.action.args.imtype != 'DARK'),
                 ]
        return np.all(checks)
    def _post_condition(self):
        """
        Check for conditions necessary to verify that the process ran
        correctly.  No checks are currently defined.
        """
        checks = []
        return np.all(checks)
    def _perform(self):
        """
        Returns an Argument() with the parameters that depend on this
        operation.
        """
        self.log.info(f"Running {self.__class__.__name__} action")
        self.log.info(f"  Found master dark file: {self.master_dark_file.name}")
        master_dark_ccddata = CCDData.read(self.master_dark_file, unit="adu")
        self.log.info(f"  Subtracting dark")
        # NOTE(review): ccdproc.subtract_bias is used here, i.e. a plain
        # unscaled subtraction, rather than ccdproc.subtract_dark (which
        # scales by exposure time). Presumably the master dark matches the
        # science exposure time -- confirm before changing.
        self.action.args.ccddata = ccdproc.subtract_bias(self.action.args.ccddata,
                                                         master_dark_ccddata)
        return self.action.args
##-----------------------------------------------------------------------------
## Primitive: GainCorrect
##-----------------------------------------------------------------------------
class GainCorrect(BasePrimitive):
    """Convert pixel values from ADU to electrons using the detector gain.

    The gain is taken from the image metadata key 'GAIN' when present,
    otherwise from the 'Telescope' section of the instrument config.
    """
    def __init__(self, action, context):
        BasePrimitive.__init__(self, action, context)
        self.log = context.pipeline_logger
        self.cfg = self.context.config.instrument
    def _pre_condition(self):
        """Check for conditions necessary to run this process"""
        checks = [pre_condition(self, 'Skip image is not set',
                                not self.action.args.skip),
                  ]
        return np.all(checks)
    def _post_condition(self):
        """
        Check for conditions necessary to verify that the process ran
        correctly.  No checks are currently defined.
        """
        checks = []
        return np.all(checks)
    def _perform(self):
        """
        Returns an Argument() with the parameters that depend on this
        operation.
        """
        self.log.info(f"Running {self.__class__.__name__} action")
        gain = self.action.args.meta.get('GAIN', None)
        if gain is not None: self.log.debug(f'  Using gain = {gain}')
        if gain is None:
            gain = self.cfg['Telescope'].getfloat('gain', None)
            self.log.debug(f'  Got gain from config: {gain}')
        if gain is None:
            # Bug fix: previously a gain missing from both the header and
            # the config fell through into ccdproc.gain_correct(None) and
            # raised.  Skip the correction and leave the data in ADU.
            self.log.warning('  No gain found in header or config; '
                             'skipping gain correction')
            return self.action.args
        self.log.debug('  Gain correcting data')
        self.action.args.ccddata = ccdproc.gain_correct(self.action.args.ccddata,
                                                        gain,
                                                        gain_unit=u.electron/u.adu)
        return self.action.args
##-----------------------------------------------------------------------------
## Primitive: CreateDeviation
##-----------------------------------------------------------------------------
class CreateDeviation(BasePrimitive):
    """Attach an uncertainty (deviation) frame to the image.

    The read noise is taken from the image metadata key 'read_noise' when
    present, otherwise from the 'Telescope' section of the instrument config.
    """
    def __init__(self, action, context):
        BasePrimitive.__init__(self, action, context)
        self.log = context.pipeline_logger
        self.cfg = self.context.config.instrument
    def _pre_condition(self):
        """Check for conditions necessary to run this process"""
        checks = [pre_condition(self, 'Skip image is not set',
                                not self.action.args.skip),
                  ]
        return np.all(checks)
    def _post_condition(self):
        """
        Check for conditions necessary to verify that the process ran
        correctly.  No checks are currently defined.
        """
        checks = []
        return np.all(checks)
    def _perform(self):
        """
        Returns an Argument() with the parameters that depend on this
        operation.
        """
        self.log.info(f"Running {self.__class__.__name__} action")
        read_noise = self.action.args.meta.get('read_noise', None)
        if read_noise is not None: self.log.debug(f'  Using read_noise = {read_noise}')
        if read_noise is None:
            read_noise = self.cfg['Telescope'].getfloat('read_noise', None)
            self.log.debug(f'  Got read_noise from config: {read_noise}')
        if read_noise is None:
            # Bug fix: previously a missing read noise reached
            # `read_noise*u.electron` and raised TypeError.  Skip creating
            # the deviation frame instead (consistent with GainCorrect).
            self.log.warning('  No read_noise found in header or config; '
                             'skipping deviation creation')
            return self.action.args
        self.action.args.ccddata = ccdproc.create_deviation(self.action.args.ccddata,
                                   readnoise=read_noise*u.electron)
        return self.action.args
##-----------------------------------------------------------------------------
## Primitive: MakeMasterCalFrame
##-----------------------------------------------------------------------------
class MakeMasterCalFrame(BasePrimitive):
    """Accumulate calibration frames and build a master BIAS/DARK frame.

    Each incoming cal frame is appended to a per-date, per-imtype list held
    on the shared pipeline context; once at least ``min_<imtype>_frames``
    have been collected they are sigma-clipped, averaged, and written to the
    masters directory.
    """
    def __init__(self, action, context):
        BasePrimitive.__init__(self, action, context)
        self.log = context.pipeline_logger
        self.cfg = self.context.config.instrument
        self.context = context
        date_string = self.action.args.meta['UT date string']
        # NOTE(review): _perform indexes self.context[date_string][imtype],
        # but the initialization of that accumulator is commented out below.
        # Confirm it is created elsewhere, or a KeyError will result.
        # if not hasattr(self.context, 'date_string'):
        #     self.context[date_string] = {}
        # if self.action.args.imtype not in self.context[date_string].keys():
        #     self.context[date_string][self.action.args.imtype] = []
    def _pre_condition(self):
        """Check for conditions necessary to run this process"""
        imtype = self.action.args.imtype
        date_string = self.action.args.meta['UT date string']
        checks = [pre_condition(self, 'Skip image is not set',
                                not self.action.args.skip),
                  pre_condition(self, 'Image type is cal',
                                imtype in ['BIAS', 'DARK']),
                  # pre_condition(self, 'Connected to mongo',
                  #               self.mongo_iqmon is not None),
                  pre_condition(self, f'do_{imtype}_subtraction is True',
                                self.cfg['Calibrations'].getboolean(f'do_{imtype}_subtraction', True) is True),
                  ]
        return np.all(checks)
    def _post_condition(self):
        """
        Check for conditions necessary to verify that the process ran
        correctly.  No checks are currently defined.
        """
        checks = []
        return np.all(checks)
    def _perform(self):
        """
        Returns an Argument() with the parameters that depend on this
        operation.
        """
        self.log.info(f"Running {self.__class__.__name__} action")
        imtype = self.action.args.imtype
        date_string = self.action.args.meta['UT date string']
        self.context[date_string][self.action.args.imtype].append(self.action.args.ccddata)
        n_cals = len(self.context[date_string][imtype])
        self.log.info(f"Found {n_cals} {imtype} files for {date_string}")
        if n_cals >= self.cfg['Calibrations'].getint(f"min_{imtype}_frames"):
            self.log.info(f"Stacking {n_cals} {imtype} files")
            combined = ccdproc.combine(self.context[date_string][imtype],
                                       method='average',
                                       sigma_clip=True, sigma_clip_low_thresh=5, sigma_clip_high_thresh=5,
                                       sigma_clip_func=np.ma.median, sigma_clip_dev_func=stats.mad_std,
                                       )
            self.log.info(f"  Combined.")
            # Bug fix: metadata was previously written to the undefined name
            # `combined_bias`, raising NameError whenever a stack was built.
            combined.meta['combined'] = True
            combined.meta['ncomb'] = n_cals
            combined_filename = f'Master{imtype}_{date_string}.fits'
            combined_filepath = Path(self.cfg['Calibrations'].get('directory_for_masters'))
            combined_file = combined_filepath.joinpath(combined_filename)
            if combined_file.exists():
                self.log.debug(f"  Deleting existing: {combined_file}")
                combined_file.unlink()
            self.log.info(f"  Saving: {combined_file}")
            combined.write(combined_file)
        return self.action.args
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from lxml import etree, html
import misaka
from .compat import unicode
from .defang import defang
from .legacy import login_name
try:
from html.parser import locatestarttagend
except ImportError:
try:
from html.parser import locatestarttagend_tolerant as locatestarttagend
except ImportError:
from HTMLParser import locatestarttagend
def slug_for(title):
    """Build a URL slug: lowercase alphanumeric runs joined by hyphens,
    with "&" spelled out as "and"."""
    expanded = title.replace("&", " and ").lower()
    words = re.findall(r"[a-z0-9]+", expanded)
    return "-".join(words)
AUTOLINK_URL = (
r"(?P<url>(?isu)\b(?:https?://|www\d{,3}\.|[a-z0-9.-]+\.[a-z]{2,4}/)[^\s()"
r"<>\[\]\x02]+(?![^\s`!()\[\]{};:'\".,<>?\x02\xab\xbb\u201c\u201d\u2018"
r"\u2019]))"
)
url_regexp = re.compile(AUTOLINK_URL)
USER_LINK = re.compile(r"""
\\(?P<escaped>[\\<])
| <(?P<type>!~|[!~])(?P<username>[a-z0-9_]+)>
| .
""", re.I | re.X)
NON_USERNAME_CHARACTERS = re.compile("[^a-z0-9]+", re.I)
_EXCERPT_BLOCK_ELEMENTS = frozenset([
"blockquote", "br", "div", "h1", "h2", "h3", "h4", "h5", "h6", "hr", "ol",
"p", "pre", "ul", "li",
])
def _furaffinity(target):
return "".join(i for i in target if i not in "!#_" and not i.isspace()).lower()
def _inkbunny(target):
return target.lower()
def _deviantart(target):
return "".join(i for i in target if i != "." and not i.isspace()).lower()
def _sofurry(target):
return NON_USERNAME_CHARACTERS.sub("-", target).lstrip("-").lower()
# Markdown parser extensions enabled for all Weasyl rendering.
MISAKA_EXT = (
    misaka.EXT_TABLES |
    misaka.EXT_FENCED_CODE |
    misaka.EXT_AUTOLINK |
    misaka.EXT_STRIKETHROUGH |
    misaka.EXT_NO_INTRA_EMPHASIS |
    misaka.EXT_LAX_SPACING |
    misaka.EXT_NO_INDENTED_CODE_BLOCKS)
# HTML renderer flags: hard wrap turns single newlines into line breaks.
MISAKA_FORMAT = (
    misaka.HTML_HARD_WRAP)
def strip_outer_tag(html):
    """Split an HTML string into (start tag, inner content, end tag).

    Assumes *html* is exactly one element. The start tag is located with
    the html.parser ``locatestarttagend`` regex, which matches up to but
    not including the closing ">" (hence the ``+ 1``); the end tag is
    everything from the last "<" onward.

    NOTE: the parameter name shadows the module-level ``lxml.html``
    import; harmless here because that module is not used in this function.
    """
    match = locatestarttagend.match(html)
    start_tag_end = match.end()
    end_tag_start = html.rindex(u'<')
    return html[:start_tag_end + 1], html[start_tag_end + 1:end_tag_start], html[end_tag_start:]
class WeasylRenderer(misaka.HtmlRenderer):
    """Misaka HTML renderer with Weasyl-specific behavior."""

    def block_html(self, raw_html):
        # HTML comments pass through untouched; any other raw HTML block
        # gets its inner text rendered as Markdown again.
        if raw_html.startswith('<!--'):
            return raw_html
        start, inner, end = strip_outer_tag(raw_html)
        rendered = _markdown(inner).rstrip()
        return u''.join([start, rendered, end])

    def list(self, text, ordered, prefix):
        # A truthy prefix means an ordered list with an explicit start number.
        if not prefix:
            return '<ul>{text}</ul>'.format(text=text)
        return '<ol start="{start}">{text}</ol>'.format(start=prefix, text=text)
def _markdown(target):
    """Render *target* through misaka using Weasyl's renderer and extensions."""
    return misaka.Markdown(WeasylRenderer(MISAKA_FORMAT), MISAKA_EXT).render(target)
def create_link(t, username):
    """Build an <a> element linking to a user's page.

    ``t`` selects the flavor: "~" is a plain text link, "!" an avatar
    icon only, and "!~" an avatar icon followed by the username.
    """
    link = etree.Element(u"a")
    link.set(u"href", u"/~" + login_name(username))
    if t == "~":
        link.text = username
        return link
    link.set(u"class", u"user-icon")
    avatar = etree.SubElement(link, u"img")
    avatar.set(u"src", u"/~{username}/avatar".format(username=login_name(username)))
    avatar.set(u"alt", username)
    if t != "!":
        caption = etree.SubElement(link, u"span")
        caption.text = username
        avatar.tail = u" "
    return link
def add_user_links(fragment, parent, can_contain):
    """Replace <~user>-style markers in an element's text with user links.

    Works in place on ``fragment``: its ``.text`` and ``.tail`` are
    tokenized, plain characters accumulate in ``previous_text``, and each
    marker becomes a new <a> element inserted at the correct child index.
    Mutable closure state lives in the ``_nonlocal`` dict (a Python 2
    compatible stand-in for ``nonlocal``): "previous" is the last node
    emitted, "insert_index" the next insertion slot.
    """
    _nonlocal = {}
    def add_matches(text, got_link):
        # Tokenize: escapes and plain characters are buffered; user-link
        # markers are handed to the supplied callback.
        for m in USER_LINK.finditer(text):
            escaped, t, username = m.group("escaped", "type", "username")
            if escaped:
                previous_text.append(escaped)
                continue
            if not t:
                previous_text.append(m.group())
                continue
            got_link(t, username)
    def got_text_link(t, username):
        # Marker found inside fragment.text: flush the buffered text onto
        # the previous node (or fragment.text itself), then insert the link
        # as a child of the fragment.
        previous = _nonlocal["previous"]
        if previous is None:
            fragment.text = "".join(previous_text)
        else:
            previous.tail = "".join(previous_text)
        del previous_text[:]
        link = create_link(t, username)
        fragment.insert(_nonlocal["insert_index"], link)
        _nonlocal["insert_index"] += 1
        _nonlocal["previous"] = link
    def got_tail_link(t, username):
        # Marker found inside fragment.tail: links belong to the parent,
        # inserted just after the fragment itself.
        _nonlocal["previous"].tail = "".join(previous_text)
        del previous_text[:]
        _nonlocal["insert_index"] += 1
        link = create_link(t, username)
        parent.insert(_nonlocal["insert_index"], link)
        _nonlocal["previous"] = link
    if can_contain:
        # Depth-first recursion; user links are never generated inside
        # <a>, <pre> or <code> elements.
        for child in list(fragment):
            child_can_contain = child.tag not in ("a", "pre", "code")
            add_user_links(child, fragment, child_can_contain)
    if fragment.text:
        _nonlocal["previous"] = None
        _nonlocal["insert_index"] = 0
        previous_text = []
        add_matches(fragment.text, got_text_link)
        # Flush whatever plain text remains after the last marker.
        previous = _nonlocal["previous"]
        if previous is None:
            fragment.text = "".join(previous_text)
        else:
            previous.tail = "".join(previous_text)
    if fragment.tail:
        _nonlocal["previous"] = fragment
        _nonlocal["insert_index"] = list(parent).index(fragment)
        previous_text = []
        add_matches(fragment.tail, got_tail_link)
        _nonlocal["previous"].tail = "".join(previous_text)
def _markdown_fragment(target, image):
    """Render markdown to an lxml fragment, rewriting site-specific links.

    ``image`` controls how many non-avatar inline images are allowed:
    False/falsy -> none, an int -> that many, any other truthy value -> 5.
    Images beyond the budget are degraded to plain links. "user:",
    "da:", "ib:", "fa:" and "sf:" link prefixes are expanded to the
    corresponding profile URLs, and user-image shortcuts become avatar
    icons. The fragment is defanged before being returned.
    """
    if not image:
        images_left = 0
    elif type(image) is int:
        images_left = image
    else:
        images_left = 5
    rendered = _markdown(target)
    fragment = html.fragment_fromstring(rendered, create_parent=True)
    # Expand link-prefix shortcuts into full profile URLs.
    for link in fragment.findall(".//a"):
        href = link.attrib.get("href")
        if href:
            t, _, user = href.partition(":")
            if t == "user":
                link.attrib["href"] = u"/~{user}".format(user=login_name(user))
            elif t == "da":
                link.attrib["href"] = u"https://{user}.deviantart.com/".format(user=_deviantart(user))
            elif t == "ib":
                link.attrib["href"] = u"https://inkbunny.net/{user}".format(user=_inkbunny(user))
            elif t == "fa":
                link.attrib["href"] = u"https://www.furaffinity.net/user/{user}".format(user=_furaffinity(user))
            elif t == "sf":
                link.attrib["href"] = u"https://{user}.sofurry.com/".format(user=_sofurry(user))
            else:
                continue
            # If the link had no visible text (or just echoed the raw
            # href), show the username instead.
            if not link.text or link.text == href:
                link.text = user
    # NOTE: the loop variable deliberately reuses the name ``image``; the
    # parameter has already been consumed into ``images_left`` above.
    for parent in fragment.findall(".//*[img]"):
        for image in list(parent):
            if image.tag != "img":
                continue
            src = image.get("src")
            if src:
                t, _, user = src.partition(":")
                if t != "user":
                    # Ordinary image: allowed while the budget lasts,
                    # otherwise replaced in place by a plain <a> link.
                    if images_left:
                        images_left -= 1
                    else:
                        i = list(parent).index(image)
                        link = etree.Element(u"a")
                        link.tail = image.tail
                        src = image.get("src")
                        if src:
                            link.set(u"href", src)
                        link.text = image.attrib.get("alt", src)
                        parent[i] = link
                    continue
                # "user:" image shortcut: wrap the <img> in an avatar link.
                image.set(u"src", u"/~{user}/avatar".format(user=login_name(user)))
                link = etree.Element(u"a")
                link.set(u"href", u"/~{user}".format(user=login_name(user)))
                link.set(u"class", u"user-icon")
                parent.insert(list(parent).index(image), link)
                parent.remove(image)
                link.append(image)
                link.tail = image.tail
                if "alt" in image.attrib and image.attrib["alt"]:
                    # Use the alt text as a visible label next to the icon.
                    image.tail = u" "
                    label = etree.SubElement(link, u"span")
                    label.text = image.attrib["alt"]
                    del image.attrib["alt"]
                else:
                    image.tail = None
                    image.set(u"alt", user)
    add_user_links(fragment, None, True)
    defang(fragment)
    return fragment
def markdown(target, image=False):
    """Render Markdown to an HTML string (without the wrapping <div>)."""
    fragment = _markdown_fragment(target, image)
    serialized = html.tostring(fragment, encoding=unicode)
    # Drop the synthetic "<div>"/"</div>" wrapper added by create_parent.
    return serialized[5:-6]
def _itertext_spaced(element):
    """Yield the text content of *element* recursively, emitting a space
    before and after every block-level child."""
    text = element.text
    if text:
        yield text
    for child in element:
        block = child.tag in _EXCERPT_BLOCK_ELEMENTS
        if block:
            yield " "
        for piece in _itertext_spaced(child):
            yield piece
        tail = child.tail
        if tail:
            yield tail
        if block:
            yield " "
def _normalize_whitespace(text):
return re.sub(r"\s+", " ", text.strip())
def markdown_excerpt(markdown_text, length=300):
    """Render markdown and return a plain-text excerpt of at most
    *length* characters, ellipsized when truncated."""
    fragment = _markdown_fragment(markdown_text, image=False)
    flattened = "".join(_itertext_spaced(fragment))
    text = _normalize_whitespace(flattened)
    if len(text) > length:
        return text[:length - 1].rstrip() + "…"
    return text
def markdown_link(title, url):
    """Build an inline Markdown link, escaping square brackets in *title*."""
    escaped = title.replace('[', '\\[').replace(']', '\\]')
    return '[{0}]({1})'.format(escaped, url)
|
#
# djangocms-page-meta documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 5 23:27:04 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# fmt: off
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
parent = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.insert(0, parent)
sys.path.insert(0, os.path.join(parent, "tests"))
import cms_helper # isort:skip # noqa
import djangocms_page_meta # isort:skip # noqa
# fmt: on
# NOTE(review): cms_helper.setup() appears to bootstrap the test Django
# environment so autodoc can import the app — confirm against cms_helper.
cms_helper.setup()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "djangocms-page-meta"
copyright = "2014, Iacopo Spalletti" # noqa # A001
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Both version and release are read from the package so the docs never drift.
version = djangocms_page_meta.__version__
# The full version, including alpha/beta/rc tags.
release = djangocms_page_meta.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# "default" is the classic pre-Sphinx-1.3 theme (later renamed "classic").
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "djangocms-page-metadoc"
# -- Options for LaTeX output --------------------------------------------------
# All LaTeX settings below are left at their Sphinx defaults.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ("index", "djangocms-page-meta.tex", "djangocms-page-meta Documentation", "Iacopo Spalletti", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Manual section 1 is the section for general user commands.
man_pages = [("index", "djangocms-page-meta", "djangocms-page-meta Documentation", ["Iacopo Spalletti"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (
        "index",
        "djangocms-page-meta",
        "djangocms-page-meta Documentation",
        "Iacopo Spalletti",
        "djangocms-page-meta",
        # NOTE(review): still the sphinx-quickstart placeholder text.
        "One line description of project.",
        "Miscellaneous",
    ),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from test.unit.helper.test_helper import (
appium_command,
android_w3c_driver,
get_httpretty_request_body
)
import httpretty
from appium.webdriver.webdriver import WebDriver
class TestWebDriverKeyboard(object):
    """Unit tests for the keyboard-related Appium WebDriver commands."""

    @httpretty.activate
    def test_hide_keyboard(self):
        # Stub the Appium endpoint so no real server is needed, then
        # verify the call chains (returns the driver itself).
        driver = android_w3c_driver()
        endpoint = appium_command('/session/1234567890/appium/device/hide_keyboard')
        httpretty.register_uri(httpretty.POST, endpoint)
        result = driver.hide_keyboard()
        assert isinstance(result, WebDriver)
|
import matplotlib.pyplot as plt
import batman
import numpy as np
from TelApy.tools.studyMASC_UQ import MascaretStudy
from batman.space import (Space, dists_to_ot)
from batman.uq import UQ
from batman.visualization import Kiviat3D, HdrBoxplot, response_surface, Tree
from batman.surrogate import SurrogateModel
from batman.surrogate import (PC, Kriging)
from sklearn.metrics import (r2_score, mean_squared_error)
import openturns as ot
import logging
logging.basicConfig(level=logging.INFO)
###
# This script performs the PC surrogate convergence study for MascaretAPI on the Garonne 1D case study.
# It quantifies the truncation and sampling errors via an estimation of the water-level metrics LH and the coefficient metrics LC.
# The PC degree varies in [3, 9] and the sampling size varies in [1296, 2401, 4096, 6561, 10000].
###
# Wrapper around the Mascaret 1D hydraulic model, configured for the Garonne reach.
Study = MascaretStudy('config_Garonne.json', iprint = 0, working_directory = 'study_Convergence')
curv_abs = [13150.0, 13250.0, 13350.0, 13450.0, 13550.0, 13650.0, 13750.0, 13850.0, 13950.0, 14025.0, 14128.333333333334, 14231.666666666668, 14335.0, 14448.333333333334, 14561.666666666668, 14675.0, 14780.0, 14885.0, 14990.0, 15095.0, 15200.0, 15312.5, 15425.0, 15537.5, 15650.0, 15762.5, 15875.0, 15981.25, 16087.5, 16193.75, 16300.0, 16406.25, 16512.5, 16618.75, 16725.0, 16830.833333333332, 16936.666666666664, 17042.499999999996, 17148.33333333333, 17254.16666666666, 17360.0, 17500.0, 17640.0, 17750.0, 17860.0, 17970.0, 18080.0, 18190.0, 18300.0, 18403.571428571428, 18507.142857142855, 18610.714285714283, 18714.28571428571, 18817.857142857138, 18921.428571428565, 19025.0, 19131.25, 19237.5, 19343.75, 19450.0, 19556.25, 19662.5, 19768.75, 19875.0, 19979.166666666668, 20083.333333333336, 20187.500000000004, 20291.66666666667, 20395.83333333334, 20500.0, 20603.125, 20706.25, 20809.375, 20912.5, 21015.625, 21118.75, 21221.875, 21325.0, 21425.0, 21525.0, 21625.0, 21725.0, 21825.0, 21925.0, 22032.0, 22139.0, 22246.0, 22353.0, 22460.0, 22576.25, 22692.5, 22808.75, 22925.0, 23031.5, 23138.0, 23244.5, 23351.0, 23457.5, 23564.0, 23670.5, 23777.0, 23883.5, 23990.0, 24110.0, 24230.0, 24350.0, 24455.0, 24560.0, 24665.0, 24770.0, 24875.0, 24975.0, 25075.0, 25175.0, 25275.0, 25375.0, 25475.0, 25575.0, 25675.0, 25775.0, 25875.0, 25975.0, 26075.0, 26175.0, 26275.0, 26383.333333333332, 26491.666666666664, 26599.999999999996, 26708.33333333333, 26816.66666666666, 26924.999999999993, 27033.333333333325, 27141.666666666657, 27250.0, 27359.375, 27468.75, 27578.125, 27687.5, 27796.875, 27906.25, 28015.625, 28125.0, 28240.0, 28355.0, 28470.0, 28585.0, 28700.0, 28810.0, 28920.0, 29030.0, 29140.0, 29250.0, 29360.0, 29463.0, 29566.0, 29669.0, 29772.0, 29875.0, 29978.0, 30081.0, 30184.0, 30287.0, 30390.0, 30491.0, 30592.0, 30693.0, 30794.0, 30895.0, 30996.0, 31097.0, 31198.0, 31299.0, 31400.0, 31505.0, 31610.0, 31715.0, 31820.0, 31830.0, 31990.0, 32000.0, 32075.0, 32177.14285714286, 
32279.285714285717, 32381.428571428576, 32483.571428571435, 32585.714285714294, 32687.857142857152, 32790.0, 32904.166666666664, 33018.33333333333, 33132.49999999999, 33246.66666666666, 33360.83333333332, 33475.0, 33582.142857142855, 33689.28571428571, 33796.428571428565, 33903.57142857142, 34010.714285714275, 34117.85714285713, 34225.0, 34332.142857142855, 34439.28571428571, 34546.428571428565, 34653.57142857142, 34760.714285714275, 34867.85714285713, 34975.0, 35077.5, 35180.0, 35282.5, 35385.0, 35487.5, 35590.0, 35698.333333333336, 35806.66666666667, 35915.00000000001, 36023.33333333334, 36131.66666666668, 36240.0, 36290.0, 36340.0, 36441.666666666664, 36543.33333333333, 36644.99999999999, 36746.66666666666, 36848.33333333332, 36950.0, 37066.666666666664, 37183.33333333333, 37300.0, 37408.333333333336, 37516.66666666667, 37625.0, 37725.0, 37825.0, 37926.36363636364, 38027.72727272728, 38129.09090909092, 38230.45454545456, 38331.8181818182, 38433.18181818184, 38534.54545454548, 38635.90909090912, 38737.27272727276, 38838.6363636364, 38940.0, 39041.666666666664, 39143.33333333333, 39244.99999999999, 39346.66666666666, 39448.33333333332, 39550.0, 39650.0, 39750.0, 39850.0, 39950.0, 40051.666666666664, 40153.33333333333, 40254.99999999999, 40356.66666666666, 40458.33333333332, 40560.0, 40663.0, 40766.0, 40869.0, 40972.0, 41075.0, 41178.0, 41281.0, 41384.0, 41487.0, 41590.0, 41700.0, 41810.0, 41920.0, 42030.0, 42140.0, 42247.0, 42354.0, 42461.0, 42568.0, 42675.0, 42793.75, 42912.5, 43031.25, 43150.0, 43262.5, 43375.0, 43487.5, 43600.0, 43712.5, 43825.0, 43929.166666666664, 44033.33333333333, 44137.49999999999, 44241.66666666666, 44345.83333333332, 44450.0, 44557.5, 44665.0, 44772.5, 44880.0, 44987.5, 45095.0, 45202.5, 45310.0, 45418.333333333336, 45526.66666666667, 45635.00000000001, 45743.33333333334, 45851.66666666668, 45960.0, 46076.0, 46192.0, 46308.0, 46424.0, 46540.0, 46650.625, 46761.25, 46871.875, 46982.5, 47093.125, 47203.75, 47314.375, 47425.0, 47533.125, 
47641.25, 47749.375, 47857.5, 47965.625, 48073.75, 48181.875, 48290.0, 48393.333333333336, 48496.66666666667, 48600.00000000001, 48703.33333333334, 48806.66666666668, 48910.0, 49015.555555555555, 49121.11111111111, 49226.666666666664, 49332.22222222222, 49437.777777777774, 49543.33333333333, 49648.88888888888, 49754.44444444444, 49860.0, 49965.0, 50070.0, 50175.0, 50280.0, 50385.0, 50490.0, 50601.666666666664, 50713.33333333333, 50825.0, 50939.166666666664, 51053.33333333333, 51167.49999999999, 51281.66666666666, 51395.83333333332, 51510.0, 51620.833333333336, 51731.66666666667, 51842.50000000001, 51953.33333333334, 52064.16666666668, 52175.0, 52291.25, 52407.5, 52523.75, 52640.0, 52744.375, 52848.75, 52953.125, 53057.5, 53161.875, 53266.25, 53370.625, 53475.0, 53591.666666666664, 53708.33333333333, 53825.0, 53967.5, 54110.0, 54211.875, 54313.75, 54415.625, 54517.5, 54619.375, 54721.25, 54823.125, 54925.0, 55034.375, 55143.75, 55253.125, 55362.5, 55471.875, 55581.25, 55690.625, 55800.0, 55905.0, 56010.0, 56115.0, 56220.0, 56325.0, 56428.125, 56531.25, 56634.375, 56737.5, 56840.625, 56943.75, 57046.875, 57150.0, 57250.0, 57350.0, 57450.0, 57550.0, 57650.0, 57750.0, 57850.0, 57957.142857142855, 58064.28571428571, 58171.428571428565, 58278.57142857142, 58385.714285714275, 58492.85714285713, 58600.0, 58712.0, 58824.0, 58936.0, 59048.0, 59160.0, 59266.92307692308, 59373.846153846156, 59480.769230769234, 59587.69230769231, 59694.61538461539, 59801.53846153847, 59908.461538461546, 60015.384615384624, 60122.3076923077, 60229.23076923078, 60336.15384615386, 60443.07692307694, 60550.0, 60654.545454545456, 60759.09090909091, 60863.63636363637, 60968.18181818182, 61072.72727272728, 61177.272727272735, 61281.81818181819, 61386.36363636365, 61490.9090909091, 61595.45454545456, 61700.0, 61818.75, 61937.5, 62056.25, 62175.0]
# Prior distributions: uniform friction coefficients (Ks) on three zones
# and a beta-distributed upstream discharge Q.
dists = ['Uniform(15., 60.)','Uniform(15., 60.)', 'Uniform(15., 60.)', 'BetaMuSigma(4031, 400, 1000, 6000).getDistribution()']
dists_ot = dists_to_ot(dists)
# Parameter space
corners = ([15.0, 15.0, 15.0, 1000.0], [60.0, 60.0, 60.0, 6000.0]) # ([min(Ks1..3), min(Q)], [max(Ks1..3), max(Q)])
n_x = len(curv_abs)
# NOTE(review): len(corners) is 2 (the two corner points), not the input
# dimension (4 = len(corners[0])) — verify how `indim` is used downstream.
indim = len(corners)
plabels = ['Ks1','Ks2','Ks3', 'Q']
space = Space(corners)
## PC-LS Strategy ##
# Learning sample size for the truncation error study
N_learning = 10000
# Learning sample sizes for the sampling error study
N_learning5 = 10000
N_learning4 = 6561
N_learning3 = 4096
N_learning2 = 2401
N_learning1 = 1296
# Build the learning samples.
def _lhs_sample(size):
    """Draw a randomized LHS design of ``size`` points from the input
    distributions and return it both as a (size, 4) array and as the
    list of MascaretStudy input dictionaries (3 friction zones + Q)."""
    design = ot.LHSExperiment(ot.ComposedDistribution(dists_ot), size, True, True).generate()
    sample = np.array([list(design[i]) for i in range(size)])
    dicos = []
    for row in sample:
        dicos.append({'friction_coefficients': [{"type": "zone", "index": 0, "value": row[0]},
                                                {"type": "zone", "index": 1, "value": row[1]},
                                                {"type": "zone", "index": 2, "value": row[2]}],
                      "boundary_conditions": [{"type": "discharge", "index": 0, "value": row[3]}]})
    return sample, dicos

# One sample per size; the structure is identical, so build them with the
# helper instead of six hand-written copies of the same loop.
x_learning, x_learning_dico = _lhs_sample(N_learning)
x_learning5, x_learning_dico5 = _lhs_sample(N_learning5)
x_learning4, x_learning_dico4 = _lhs_sample(N_learning4)
x_learning3, x_learning_dico3 = _lhs_sample(N_learning3)
x_learning2, x_learning_dico2 = _lhs_sample(N_learning2)
x_learning1, x_learning_dico1 = _lhs_sample(N_learning1)
# Reference sample for the coefficients metrics LC (large init_size).
x_learningr, x_learning_dicor = _lhs_sample(N_learning)
# Model outputs (water level curves 'z'), one list per learning sample.
y_learning= []
y_learning5= []
y_learning4= []
y_learning3= []
y_learning2= []
y_learning1= []
y_learningr= []
# NOTE(review): y_validation is not filled in this section — presumably
# populated further down the script; verify.
y_validation=[]
# Compute the solution vector with MascaretAPI: y = f(x).
def _run_mascaret(inputs, label):
    """Run one Mascaret simulation per input dictionary and collect the
    water level curves ('z'), printing progress with the given label."""
    results = []
    for idx, x in enumerate(inputs):
        print("Study " + label + "#" + str(idx))
        # Re-initialize the model before every run so simulations are
        # independent (same as the original per-sample loops).
        Study.initialize_model()
        Output = Study(x)
        results.append(Output['z'])
    return results

# Seven identical evaluation loops collapsed into calls to the helper;
# the result names are unchanged for the rest of the script.
y_learning = _run_mascaret(x_learning_dico, "learning")
y_learning5 = _run_mascaret(x_learning_dico5, "learning5")
y_learning4 = _run_mascaret(x_learning_dico4, "learning4")
y_learning3 = _run_mascaret(x_learning_dico3, "learning3")
y_learning2 = _run_mascaret(x_learning_dico2, "learning2")
y_learning1 = _run_mascaret(x_learning_dico1, "learning1")
y_learningr = _run_mascaret(x_learning_dicor, "learningr")
# Build surrogates
# Changing degree for the truncation error study (fixed sample size)
PC_lsp9 = SurrogateModel('pc', corners, plabels, strategy='LS', degree=9, distributions=dists_ot)
PC_lsp8 = SurrogateModel('pc', corners, plabels, strategy='LS', degree=8, distributions=dists_ot)
PC_lsp7 = SurrogateModel('pc', corners, plabels, strategy='LS', degree=7, distributions=dists_ot)
PC_lsp6 = SurrogateModel('pc', corners, plabels, strategy='LS', degree=6, distributions=dists_ot)
PC_lsp5 = SurrogateModel('pc', corners, plabels, strategy='LS', degree=5, distributions=dists_ot)
PC_lsp4 = SurrogateModel('pc', corners, plabels, strategy='LS', degree=4, distributions=dists_ot)
PC_lsp3 = SurrogateModel('pc', corners, plabels, strategy='LS', degree=3, distributions=dists_ot)
# Changing sampling size for the sampling error study (fixed degree 7)
PC_ls5 = SurrogateModel('pc', corners, plabels, strategy='LS', degree=7, distributions=dists_ot)
PC_ls4 = SurrogateModel('pc', corners, plabels, strategy='LS', degree=7, distributions=dists_ot)
PC_ls3 = SurrogateModel('pc', corners, plabels, strategy='LS', degree=7, distributions=dists_ot)
PC_ls2 = SurrogateModel('pc', corners, plabels, strategy='LS', degree=7, distributions=dists_ot)
PC_ls1 = SurrogateModel('pc', corners, plabels, strategy='LS', degree=7, distributions=dists_ot)
# Fit each surrogate on its learning sample
PC_lsp9.fit(x_learning, y_learning)
PC_lsp8.fit(x_learning, y_learning)
PC_lsp7.fit(x_learning, y_learning)
PC_lsp6.fit(x_learning, y_learning)
PC_lsp5.fit(x_learning, y_learning)
PC_lsp4.fit(x_learning, y_learning)
PC_lsp3.fit(x_learning, y_learning)
PC_ls5.fit(x_learning5, y_learning5)
PC_ls4.fit(x_learning4, y_learning4)
PC_ls3.fit(x_learning3, y_learning3)
PC_ls2.fit(x_learning2, y_learning2)
PC_ls1.fit(x_learning1, y_learning1)
### PC-Quad Strategy ##
# Changing degree for the truncation error study: N_quad is fixed with the argument 'N_quad'
PC_qp9 = SurrogateModel('pc', corners, plabels, strategy='Quad', degree=9, distributions=dists_ot, N_quad = 10000)
PC_qp8 = SurrogateModel('pc', corners, plabels, strategy='Quad', degree=8, distributions=dists_ot, N_quad = 10000)
PC_qp7 = SurrogateModel('pc', corners, plabels, strategy='Quad', degree=7, distributions=dists_ot, N_quad = 10000)
PC_qp6 = SurrogateModel('pc', corners, plabels, strategy='Quad', degree=6, distributions=dists_ot, N_quad = 10000)
PC_qp5 = SurrogateModel('pc', corners, plabels, strategy='Quad', degree=5, distributions=dists_ot, N_quad = 10000)
PC_qp4 = SurrogateModel('pc', corners, plabels, strategy='Quad', degree=4, distributions=dists_ot, N_quad = 10000)
PC_qp3 = SurrogateModel('pc', corners, plabels, strategy='Quad', degree=3, distributions=dists_ot, N_quad = 10000)
# Changing sampling size for the sampling error study: the degree P is fixed with the argument 'degree'
PC_q5 = SurrogateModel('pc', corners, plabels, strategy='Quad', degree=7, distributions=dists_ot, N_quad = 10000)
PC_q4 = SurrogateModel('pc', corners, plabels, strategy='Quad', degree=7, distributions=dists_ot, N_quad = 6561)
PC_q3 = SurrogateModel('pc', corners, plabels, strategy='Quad', degree=7, distributions=dists_ot, N_quad = 4096)
PC_q2 = SurrogateModel('pc', corners, plabels, strategy='Quad', degree=7, distributions=dists_ot, N_quad = 2401)
PC_q1 = SurrogateModel('pc', corners, plabels, strategy='Quad', degree=7, distributions=dists_ot, N_quad = 1296)
# Get the quadrature points and build the quadrature samples.
# The twelve copy-pasted extraction stanzas of the original are factored
# into two helpers; every produced name (x_quad*, N_quad*, x_quad*_dico)
# is preserved for the code below.

def _point_to_dico(row):
    """Map one sample row [Ks1, Ks2, Ks3, Q] to the solver input dictionary.

    The first three components are zone friction coefficients, the fourth
    is the upstream discharge boundary condition (same layout as the
    original hand-written literals).
    """
    return {
        'friction_coefficients': [
            {"type": "zone", "index": 0, "value": row[0]},
            {"type": "zone", "index": 1, "value": row[1]},
            {"type": "zone", "index": 2, "value": row[2]},
        ],
        "boundary_conditions": [
            {"type": "discharge", "index": 0, "value": row[3]},
        ],
    }

def _quad_sample(model):
    """Return (points, n_points, dicos) for a Quad-strategy surrogate.

    *points* is the model's quadrature design as a numpy array, *n_points*
    its number of rows and *dicos* the per-point solver input dictionaries.
    """
    points = np.array(model.predictor.sample)
    (n_points, _) = np.shape(points)
    dicos = [_point_to_dico(points[i]) for i in range(n_points)]
    return points, n_points, dicos

x_quadp9, N_quadp9, x_quadp9_dico = _quad_sample(PC_qp9)
x_quadp8, N_quadp8, x_quadp8_dico = _quad_sample(PC_qp8)
x_quadp7, N_quadp7, x_quadp7_dico = _quad_sample(PC_qp7)
x_quadp6, N_quadp6, x_quadp6_dico = _quad_sample(PC_qp6)
x_quadp5, N_quadp5, x_quadp5_dico = _quad_sample(PC_qp5)
x_quadp4, N_quadp4, x_quadp4_dico = _quad_sample(PC_qp4)
x_quadp3, N_quadp3, x_quadp3_dico = _quad_sample(PC_qp3)
x_quad5, N_quad5, x_quad5_dico = _quad_sample(PC_q5)
x_quad4, N_quad4, x_quad4_dico = _quad_sample(PC_q4)
x_quad3, N_quad3, x_quad3_dico = _quad_sample(PC_q3)
x_quad2, N_quad2, x_quad2_dico = _quad_sample(PC_q2)
x_quad1, N_quad1, x_quad1_dico = _quad_sample(PC_q1)
# Compute the solver outputs y = f(x) at the quadrature points.
# The twelve copy-pasted evaluation loops of the original are factored
# into one helper; the printed progress lines are unchanged.

def _run_study(sample, label):
    """Run the Study solver on every row of *sample*; collect the 'z' output.

    Prints "Study <label>#<i>" for each evaluation, as the original loops
    did, and re-initialises the solver before every run.
    """
    outputs = []
    for i in range(len(sample)):
        print("Study " + label + "#" + str(i))
        # Reset solver state before each evaluation.
        # NOTE(review): the original first loop (quad P=9) skipped
        # Study.initialize_model() unlike the other eleven loops; this is
        # assumed to be an omission and is made consistent here — confirm.
        Study.initialize_model()
        Output = Study(sample[i])
        outputs.append(Output['z'])
    return outputs

y_quadp9 = _run_study(x_quadp9, "quad P=9")
# (a stray debug `print(x)` in the original P=8 loop was removed)
y_quadp8 = _run_study(x_quadp8, "quad P=8")
y_quadp7 = _run_study(x_quadp7, "quad P=7")
y_quadp6 = _run_study(x_quadp6, "quad P=6")
y_quadp5 = _run_study(x_quadp5, "quad P=5")
y_quadp4 = _run_study(x_quadp4, "quad P=4")
y_quadp3 = _run_study(x_quadp3, "quad P=3")
y_quad5 = _run_study(x_quad5, "quad5")
y_quad4 = _run_study(x_quad4, "quad4")
y_quad3 = _run_study(x_quad3, "quad3")
y_quad2 = _run_study(x_quad2, "quad2")
y_quad1 = _run_study(x_quad1, "quad1")
# Fit each Quad surrogate on the solver outputs computed at its own
# quadrature points (x_quad*, y_quad* built above).
PC_qp9.fit(x_quadp9, y_quadp9)
PC_qp8.fit(x_quadp8, y_quadp8)
PC_qp7.fit(x_quadp7, y_quadp7)
PC_qp6.fit(x_quadp6, y_quadp6)
PC_qp5.fit(x_quadp5, y_quadp5)
PC_qp4.fit(x_quadp4, y_quadp4)
PC_qp3.fit(x_quadp3, y_quadp3)
PC_q5.fit(x_quad5, y_quad5)
PC_q4.fit(x_quad4, y_quad4)
PC_q3.fit(x_quad3, y_quad3)
PC_q2.fit(x_quad2, y_quad2)
PC_q1.fit(x_quad1, y_quad1)
# Validation
# Build the validation sample: an LHS design over the input distributions.
N_validation = 5#10000
x_validation = ot.LHSExperiment(ot.ComposedDistribution(dists_ot),N_validation, True, True).generate() #training sample for truncation error (1 sample)
x_validation = [list(x_validation[i]) for i in range(N_validation)]
x_validation = np.array(x_validation)
x_validation_dico= []
for i in range(N_validation):
    x_validation_dico.append({'friction_coefficients':[{"type": "zone", "index": 0, "value": x_validation[i,0]},{"type": "zone", "index": 1, "value": x_validation[i,1]},{"type": "zone", "index": 2, "value": x_validation[i,2]}],"boundary_conditions":[{"type": "discharge", "index": 0, "value": x_validation[i,3]}]})
#Build the validation vector y =f(x)
# FIX: y_validation was appended to without ever being initialised in this
# section (nothing visible defines it earlier), which would raise a
# NameError; it is created empty here before the loop.
y_validation = []
idx = 0
for k in range(N_validation):
    x = x_validation_dico[k]
    print("Study Validation#"+str(idx))
    Study.initialize_model()
    Output = Study(x)
    y_validation.append(Output['z'])
    idx+=1
# Predictions of every surrogate on the validation sample.
# LS strategy
y_pred_pc_lsp9, _ = PC_lsp9(x_validation)
y_pred_pc_lsp8, _ = PC_lsp8(x_validation)
y_pred_pc_lsp7, _ = PC_lsp7(x_validation)
y_pred_pc_lsp6, _ = PC_lsp6(x_validation)
y_pred_pc_lsp5, _ = PC_lsp5(x_validation)
y_pred_pc_lsp4, _ = PC_lsp4(x_validation)
y_pred_pc_lsp3, _ = PC_lsp3(x_validation)
y_pred_pc_ls5, _ = PC_ls5(x_validation)
y_pred_pc_ls4, _ = PC_ls4(x_validation)
y_pred_pc_ls3, _ = PC_ls3(x_validation)
y_pred_pc_ls2, _ = PC_ls2(x_validation)
y_pred_pc_ls1, _ = PC_ls1(x_validation)
# Quad strategy
y_pred_pc_qp9, _ = PC_qp9(x_validation)
y_pred_pc_qp8, _ = PC_qp8(x_validation)
y_pred_pc_qp7, _ = PC_qp7(x_validation)
y_pred_pc_qp6, _ = PC_qp6(x_validation)
# FIX: the original evaluated PC_qp5 twice in a row; once is sufficient.
y_pred_pc_qp5, _ = PC_qp5(x_validation)
y_pred_pc_qp4, _ = PC_qp4(x_validation)
y_pred_pc_qp3, _ = PC_qp3(x_validation)
y_pred_pc_q5, _ = PC_q5(x_validation)
y_pred_pc_q4, _ = PC_q4(x_validation)
y_pred_pc_q3, _ = PC_q3(x_validation)
y_pred_pc_q2, _ = PC_q2(x_validation)
y_pred_pc_q1, _ = PC_q1(x_validation)
# Per-abscissa accumulators for the LH metric of each Quad surrogate.
# n_x (defined earlier in the script) is the number of output points along
# the river reach.
Lh_pc_qp9 = np.zeros(n_x)
Lh_pc_qp8 = np.zeros(n_x)
Lh_pc_qp7 = np.zeros(n_x)
Lh_pc_qp6 = np.zeros(n_x)
Lh_pc_qp5 = np.zeros(n_x)
Lh_pc_qp4 = np.zeros(n_x)
Lh_pc_qp3 = np.zeros(n_x)
Lh_pc_q5 = np.zeros(n_x)
Lh_pc_q4 = np.zeros(n_x)
Lh_pc_q3 = np.zeros(n_x)
Lh_pc_q2 = np.zeros(n_x)
Lh_pc_q1 = np.zeros(n_x)
# Reference solver outputs on the validation sample,
# shape (N_validation, n_x).
y_test = np.array(y_validation)
#LC metrics
# Reference case
# NOTE(review): this call passes an extra positional argument (10000)
# between `corners` and `plabels`, unlike every earlier SurrogateModel
# call in this script -- confirm against the SurrogateModel signature.
PC_r = SurrogateModel('pc', corners, 10000, plabels, strategy='LS', degree=7, distributions=dists_ot)
# Surrogate cases
# NOTE(review): same extra positional argument (4096) here, and N_quad = 5
# although the later plot labels this curve "Quad, N=4096" -- verify.
PC_quad = SurrogateModel('pc', corners, 4096, plabels, strategy='Quad', degree=7, distributions=dists_ot, N_quad = 5)
PC_ls1_LC = SurrogateModel('pc', corners, plabels, strategy='LS', degree=7, distributions=dists_ot)
PC_ls2_LC = SurrogateModel('pc', corners, plabels, strategy='LS', degree=7, distributions=dists_ot)
# Quadrature points of the Quad surrogate, as an array and as solver inputs.
x_quadq = PC_quad.predictor.sample
x_quadq = np.array(x_quadq)
(N_quadq,_) = np.shape(x_quadq)
x_quadq_dico = []
for i in range(N_quadq):
    x_quadq_dico.append({'friction_coefficients':[{"type": "zone", "index": 0, "value": x_quadq[i,0]},{"type": "zone", "index": 1, "value": x_quadq[i,1]},{"type": "zone", "index": 2, "value": x_quadq[i,2]}],"boundary_conditions":[{"type": "discharge", "index": 0, "value": x_quadq[i,3]}]})
# Evaluate the solver at each quadrature point.
y_quadq = []
idx = 0
for k in range(N_quadq):
    # NOTE(review): the raw array row is passed to Study here (as in the
    # other quadrature loops above), while the validation loop passes the
    # dico form -- confirm which input Study actually expects.
    x = x_quadq[k]
    print("Study quadq#"+str(idx))
    Study.initialize_model()
    Output = Study(x)
    y_quadq.append(Output['z'])
    idx+=1
# Fit the reference and comparison surrogates on their respective samples.
PC_r.fit(x_learningr,y_learningr)
PC_quad.fit(x_quadq,y_quadq)
PC_ls1_LC.fit(x_learning4,y_learning4)
PC_ls2_LC.fit(x_learning3,y_learning3)
# Polynomial-chaos results of each fitted surrogate.
surror = PC_r.predictor.pc_result
surroq = PC_quad.predictor.pc_result
surrols1 = PC_ls1_LC.predictor.pc_result
surrols2 = PC_ls2_LC.predictor.pc_result
# Extract the PC coefficients through OpenTURNS FunctionalChaosResult.
Surror = ot.FunctionalChaosResult(surror)
Coeffsr = Surror.getCoefficients()
Surroq = ot.FunctionalChaosResult(surroq)
Coeffsq = Surroq.getCoefficients()
Surrols1 = ot.FunctionalChaosResult(surrols1)
Coeffls1 = Surrols1.getCoefficients()
Surrols2 = ot.FunctionalChaosResult(surrols2)
Coeffls2 = Surrols2.getCoefficients()
# g_ref has one row per PC mode (n_pc) and one column per output (k).
g_ref = np.array(Coeffsr)
(n_pc,k) = np.shape(g_ref)
gq = np.array(Coeffsq)
gls1 = np.array(Coeffls1)
gls2 = np.array(Coeffls2)
LC_pc_q = np.zeros(n_pc)
LC_pc_ls1 = np.zeros(n_pc)
LC_pc_ls2 = np.zeros(n_pc)
g = np.zeros(n_pc)
# Per-mode sum of the reference coefficients.
# NOTE(review): `g` is not used anywhere later in this part of the script;
# possibly leftover from an earlier normalisation -- confirm.
for j in range(n_pc):
    for i in range(k):
        g[j]+=g_ref[j,i]
# Compute LC metrics: LC(j) is the squared coefficient error of mode j,
# summed over the outputs.
for j in range(n_pc):
    for i in range(k):
        LC_pc_q[j]+= (abs(g_ref[j,i]-gq[j,i])**2)
        LC_pc_ls1[j]+= (abs(g_ref[j,i]-gls1[j,i])**2)
        LC_pc_ls2[j]+= (abs(g_ref[j,i]-gls2[j,i])**2)
# Compute the LH metric for the Quad surrogates: squared validation error
# accumulated over the validation sample at each curvilinear abscissa,
# then averaged below.
for j in range(n_x):
    for i in range(N_validation):
        Lh_pc_qp9[j]+= (y_test[i,j]-y_pred_pc_qp9[i,j])**2
        Lh_pc_qp8[j]+= (y_test[i,j]-y_pred_pc_qp8[i,j])**2
        Lh_pc_qp7[j]+= (y_test[i,j]-y_pred_pc_qp7[i,j])**2
        Lh_pc_qp6[j]+= (y_test[i,j]-y_pred_pc_qp6[i,j])**2
        Lh_pc_qp5[j]+= (y_test[i,j]-y_pred_pc_qp5[i,j])**2
        Lh_pc_qp4[j]+= (y_test[i,j]-y_pred_pc_qp4[i,j])**2
        Lh_pc_qp3[j]+= (y_test[i,j]-y_pred_pc_qp3[i,j])**2
        Lh_pc_q5[j]+= (y_test[i,j]-y_pred_pc_q5[i,j])**2
        Lh_pc_q4[j]+= (y_test[i,j]-y_pred_pc_q4[i,j])**2
        # FIX: the original accumulated the q3 term twice per iteration,
        # doubling its LH metric relative to all the other curves.
        Lh_pc_q3[j]+= (y_test[i,j]-y_pred_pc_q3[i,j])**2
        Lh_pc_q2[j]+= (y_test[i,j]-y_pred_pc_q2[i,j])**2
        Lh_pc_q1[j]+= (y_test[i,j]-y_pred_pc_q1[i,j])**2
# Normalise each accumulator into a mean over the validation sample.
Lh_pc_qp9 = Lh_pc_qp9/N_validation
Lh_pc_qp8 = Lh_pc_qp8/N_validation
Lh_pc_qp7 = Lh_pc_qp7/N_validation
# FIX: the original assigned this quotient to a misspelled name
# (Lh_pc_q6), leaving Lh_pc_qp6 un-normalised in the later plots.
Lh_pc_qp6 = Lh_pc_qp6/N_validation
Lh_pc_qp5 = Lh_pc_qp5/N_validation
Lh_pc_qp4 = Lh_pc_qp4/N_validation
Lh_pc_qp3 = Lh_pc_qp3/N_validation
Lh_pc_q5 = Lh_pc_q5/N_validation
Lh_pc_q4 = Lh_pc_q4/N_validation
Lh_pc_q3 = Lh_pc_q3/N_validation
Lh_pc_q2 = Lh_pc_q2/N_validation
Lh_pc_q1 = Lh_pc_q1/N_validation
# Per-abscissa accumulators for the LH metric of each LS surrogate.
Lh_pc_lsp9 = np.zeros(n_x)
Lh_pc_lsp8 = np.zeros(n_x)
Lh_pc_lsp7 = np.zeros(n_x)
Lh_pc_lsp6 = np.zeros(n_x)
Lh_pc_lsp5 = np.zeros(n_x)
Lh_pc_lsp4 = np.zeros(n_x)
Lh_pc_lsp3 = np.zeros(n_x)
Lh_pc_ls5 = np.zeros(n_x)
Lh_pc_ls4 = np.zeros(n_x)
Lh_pc_ls3 = np.zeros(n_x)
Lh_pc_ls2 = np.zeros(n_x)
Lh_pc_ls1 = np.zeros(n_x)
# LH metric for the LS surrogates: squared validation error accumulated
# over the validation sample at each curvilinear abscissa ...
for j in range(n_x):
    for i in range(N_validation):
        Lh_pc_lsp9[j]+= (y_test[i,j]-y_pred_pc_lsp9[i,j])**2
        Lh_pc_lsp8[j]+= (y_test[i,j]-y_pred_pc_lsp8[i,j])**2
        Lh_pc_lsp7[j]+= (y_test[i,j]-y_pred_pc_lsp7[i,j])**2
        Lh_pc_lsp6[j]+= (y_test[i,j]-y_pred_pc_lsp6[i,j])**2
        Lh_pc_lsp5[j]+= (y_test[i,j]-y_pred_pc_lsp5[i,j])**2
        Lh_pc_lsp4[j]+= (y_test[i,j]-y_pred_pc_lsp4[i,j])**2
        Lh_pc_lsp3[j]+= (y_test[i,j]-y_pred_pc_lsp3[i,j])**2
        Lh_pc_ls5[j]+= (y_test[i,j]-y_pred_pc_ls5[i,j])**2
        Lh_pc_ls4[j]+= (y_test[i,j]-y_pred_pc_ls4[i,j])**2
        Lh_pc_ls3[j]+= (y_test[i,j]-y_pred_pc_ls3[i,j])**2
        Lh_pc_ls2[j]+= (y_test[i,j]-y_pred_pc_ls2[i,j])**2
        Lh_pc_ls1[j]+= (y_test[i,j]-y_pred_pc_ls1[i,j])**2
# ... then averaged over the N_validation runs.
Lh_pc_lsp9 = Lh_pc_lsp9/N_validation
Lh_pc_lsp8 = Lh_pc_lsp8/N_validation
Lh_pc_lsp7 = Lh_pc_lsp7/N_validation
Lh_pc_lsp6 = Lh_pc_lsp6/N_validation
Lh_pc_lsp5 = Lh_pc_lsp5/N_validation
Lh_pc_lsp4 = Lh_pc_lsp4/N_validation
Lh_pc_lsp3 = Lh_pc_lsp3/N_validation
Lh_pc_ls5 = Lh_pc_ls5/N_validation
Lh_pc_ls4 = Lh_pc_ls4/N_validation
Lh_pc_ls3 = Lh_pc_ls3/N_validation
Lh_pc_ls2 = Lh_pc_ls2/N_validation
Lh_pc_ls1 = Lh_pc_ls1/N_validation
#Plot the LH and LC metrics
# Figure 1: sampling error of the Quad strategy (linear scale), one curve
# per quadrature size.
plt.figure(1)
plt.plot(curv_abs,Lh_pc_q5, '--',label='Nquad=10000')
plt.plot(curv_abs,Lh_pc_q4, '--',label='Nquad=6561')
plt.plot(curv_abs,Lh_pc_q3, '--',label='Nquad=4096')
plt.plot(curv_abs,Lh_pc_q2, '--',label='Nquad=2401')
plt.plot(curv_abs,Lh_pc_q1, '--',label='Nquad=1296')
plt.xlabel('curvilinear abscissa (m)')
plt.ylabel('Water metrics LH (m2)')
plt.title('Water metrics along the Garonne river')
plt.legend()
plt.savefig('LH_quad_Sampling.pdf')
# Figure 2: truncation error of the Quad strategy (linear scale), one
# curve per polynomial degree P.
plt.figure(2)
plt.plot(curv_abs,Lh_pc_qp9, '--',label='P=9')
plt.plot(curv_abs,Lh_pc_qp8, '--',label='P=8')
plt.plot(curv_abs,Lh_pc_qp7, '--',label='P=7')
plt.plot(curv_abs,Lh_pc_qp6, '--',label='P=6')
# FIX: the 'P=5' curve plotted Lh_pc_qp4 in the original (the log-scale
# twin of this figure uses Lh_pc_qp5, confirming the intent).
plt.plot(curv_abs,Lh_pc_qp5, '--',label='P=5')
plt.plot(curv_abs,Lh_pc_qp4, '--',label='P=4')
plt.plot(curv_abs,Lh_pc_qp3, '--',label='P=3')
plt.xlabel('curvilinear abscissa (m)')
plt.ylabel('Water metrics LH (m2)')
plt.title('Water metrics along the Garonne river')
plt.legend()
plt.savefig('LH_quad_Trunc.pdf')
# Figure 3: sampling error of the LS strategy (linear scale), one curve
# per learning-sample size.
plt.figure(3)
plt.plot(curv_abs,Lh_pc_ls5,'--',label='N=10000')
plt.plot(curv_abs,Lh_pc_ls4,'--',label='N=6561')
plt.plot(curv_abs,Lh_pc_ls3,'--',label='N=4096')
plt.plot(curv_abs,Lh_pc_ls2,'--',label='N=2401')
plt.plot(curv_abs,Lh_pc_ls1,'--',label='N=1296')
plt.xlabel('curvilinear abscissa (m)')
plt.ylabel('Water metrics (m2)')
plt.title('Water metrics along the Garonne river')
plt.legend()
plt.savefig('LH_LS_Sampling.pdf')
# Figure 4: truncation error of the LS strategy (linear scale), one curve
# per polynomial degree P.
plt.figure(4)
plt.plot(curv_abs,Lh_pc_lsp9,'--',label='P=9')
plt.plot(curv_abs,Lh_pc_lsp8,'--',label='P=8')
plt.plot(curv_abs,Lh_pc_lsp7,'--',label='P=7')
plt.plot(curv_abs,Lh_pc_lsp6,'--',label='P=6')
plt.plot(curv_abs,Lh_pc_lsp5,'--',label='P=5')
plt.plot(curv_abs,Lh_pc_lsp4,'--',label='P=4')
plt.plot(curv_abs,Lh_pc_lsp3,'--',label='P=3')
plt.xlabel('curvilinear abscissa (m)')
plt.ylabel('Water metrics (m2)')
plt.title('Water metrics along the Garonne river')
plt.legend()
plt.savefig('LH_LS_Trunc.pdf')
# Figure 5: coefficient (LC) metric per PC mode, linear scale.
# FIX: the original called plt.figure(5) and then plt.figure(figsize=...),
# which opened a second, anonymous figure and left figure 5 empty; a
# single call passing both the number and the size does what was intended.
plt.figure(5, figsize=(30,5))
plt.plot(LC_pc_q, '--',label='Quad, N=4096')
plt.plot(LC_pc_ls1, '--',label='LS, N=6561')
plt.plot(LC_pc_ls2, '--',label='LS, N=4096')
plt.xlabel('spectrum')
plt.ylabel('Water metrics LC')
plt.title('Coefficients metrics along the Garonne river')
plt.legend()
plt.savefig('LC_Sampling.pdf')
# Log scales
# Figure 6: sampling error of the Quad strategy (log y-scale).
plt.figure(6)
plt.semilogy()
plt.plot(curv_abs,Lh_pc_q5, '--',label='Nquad=10000')
plt.plot(curv_abs,Lh_pc_q4, '--',label='Nquad=6561')
plt.plot(curv_abs,Lh_pc_q3, '--',label='Nquad=4096')
plt.plot(curv_abs,Lh_pc_q2, '--',label='Nquad=2401')
plt.plot(curv_abs,Lh_pc_q1, '--',label='Nquad=1296')
plt.xlabel('curvilinear abscissa (m)')
plt.ylabel('Water metrics LH (m2)')
plt.title('Water metrics along the Garonne river')
plt.legend()
plt.savefig('LH_quad_Sampling_log.pdf')
# Figure 7: truncation error of the Quad strategy (log y-scale).
plt.figure(7)
plt.semilogy()
plt.plot(curv_abs,Lh_pc_qp9, '--',label='P=9')
plt.plot(curv_abs,Lh_pc_qp8, '--',label='P=8')
plt.plot(curv_abs,Lh_pc_qp7, '--',label='P=7')
plt.plot(curv_abs,Lh_pc_qp6, '--',label='P=6')
plt.plot(curv_abs,Lh_pc_qp5, '--',label='P=5')
plt.plot(curv_abs,Lh_pc_qp4, '--',label='P=4')
plt.plot(curv_abs,Lh_pc_qp3, '--',label='P=3')
plt.xlabel('curvilinear abscissa (m)')
plt.ylabel('Water metrics LH (m2)')
plt.title('Water metrics along the Garonne river')
plt.legend()
plt.savefig('LH_quad_Trunc_log.pdf')
# Figure 8: sampling error of the LS strategy (log y-scale).
plt.figure(8)
plt.semilogy()
plt.plot(curv_abs,Lh_pc_ls5,'--',label='N=10000')
plt.plot(curv_abs,Lh_pc_ls4,'--',label='N=6561')
plt.plot(curv_abs,Lh_pc_ls3,'--',label='N=4096')
plt.plot(curv_abs,Lh_pc_ls2,'--',label='N=2401')
plt.plot(curv_abs,Lh_pc_ls1,'--',label='N=1296')
plt.xlabel('curvilinear abscissa (m)')
plt.ylabel('Water metrics (m2)')
plt.title('Water metrics along the Garonne river')
plt.legend()
plt.savefig('LH_LS_Sampling_log.pdf')
# Figure 9: truncation error of the LS strategy (log y-scale).
plt.figure(9)
plt.semilogy()
plt.plot(curv_abs,Lh_pc_lsp9,'--',label='P=9')
plt.plot(curv_abs,Lh_pc_lsp8,'--',label='P=8')
plt.plot(curv_abs,Lh_pc_lsp7,'--',label='P=7')
plt.plot(curv_abs,Lh_pc_lsp6,'--',label='P=6')
plt.plot(curv_abs,Lh_pc_lsp5,'--',label='P=5')
plt.plot(curv_abs,Lh_pc_lsp4,'--',label='P=4')
plt.plot(curv_abs,Lh_pc_lsp3,'--',label='P=3')
plt.xlabel('curvilinear abscissa (m)')
plt.ylabel('Water metrics (m2)')
plt.title('Water metrics along the Garonne river')
plt.legend()
plt.savefig('LH_LS_Trunc_log.pdf')
# Figure 10: coefficient (LC) metric per PC mode, log y-scale.
# FIX: as with figure 5, the original's back-to-back plt.figure(10) and
# plt.figure(figsize=...) opened an anonymous second figure; a single call
# with both the number and the size does what was intended.
plt.figure(10, figsize=(30,5))
plt.semilogy()
plt.plot(LC_pc_q, '--',label='Quad, N=4096')
plt.plot(LC_pc_ls1, '--',label='LS, N=6561')
plt.plot(LC_pc_ls2, '--',label='LS, N=4096')
plt.xlabel('spectrum')
plt.ylabel('Water metrics LC(i)')
plt.title('Coefficients metrics along the Garonne river')
plt.legend()
plt.savefig('LC_Sampling_log.pdf')
|
# coding: utf-8
"""
Container Security APIs
All features of the Container Security are available through REST APIs.<br/>Access support information at www.qualys.com/support/<br/><br/><b>Permissions:</b><br/>User must have the Container module enabled<br/>User must have API ACCESS permission # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from qualys_cs_api.api_client import ApiClient
from qualys_cs_api.exceptions import (
ApiTypeError,
ApiValueError
)
class ContainerApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_containers_using_delete(self, container_delete_request, **kwargs): # noqa: E501
"""Delete containers in your account # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_containers_using_delete(container_delete_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param ContainerDeleteRequest container_delete_request: Provide one or more container Ids or filters in the format shown under Example Value. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_containers_using_delete_with_http_info(container_delete_request, **kwargs) # noqa: E501
    def delete_containers_using_delete_with_http_info(self, container_delete_request, **kwargs):  # noqa: E501
        """Delete containers in your account  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_containers_using_delete_with_http_info(container_delete_request, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param ContainerDeleteRequest container_delete_request: Provide one or more container Ids or filters in the format shown under Example Value. (required)
        :param _return_http_data_only: response data without head status code
        and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response
        data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
        number provided, it will be total request
        timeout. It can also be a pair (tuple) of
        (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
        """
        # Snapshot the arguments now; `locals()` must be called before any
        # other local variable is created, so the generated validation code
        # below can treat the snapshot as "the declared parameters + kwargs".
        local_var_params = locals()
        all_params = ['container_delete_request'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject any keyword argument the generated API does not recognise,
        # merging the accepted ones into the snapshot.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_containers_using_delete" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'container_delete_request' is set
        if self.api_client.client_side_validation and ('container_delete_request' not in local_var_params or # noqa: E501
                local_var_params['container_delete_request'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `container_delete_request` when calling `delete_containers_using_delete`") # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The delete request document is sent as the JSON request body.
        body_params = None
        if 'container_delete_request' in local_var_params:
            body_params = local_var_params['container_delete_request']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = [] # noqa: E501
        # Delegate the actual HTTP DELETE to the shared ApiClient.
        return self.api_client.call_api(
            '/v1.1/containers', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_container_details_using_get(self, container_id, **kwargs): # noqa: E501
"""Show details of a container # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_container_details_using_get(container_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str container_id: Specify the ID or SHA value of a specific container in the user’s scope. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ContainerDetails
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_container_details_using_get_with_http_info(container_id, **kwargs) # noqa: E501
    def get_container_details_using_get_with_http_info(self, container_id, **kwargs):  # noqa: E501
        """Show details of a container  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_container_details_using_get_with_http_info(container_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str container_id: Specify the ID or SHA value of a specific container in the user’s scope. (required)
        :param _return_http_data_only: response data without head status code
        and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response
        data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
        number provided, it will be total request
        timeout. It can also be a pair (tuple) of
        (connection, read) timeouts.
        :return: tuple(ContainerDetails, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
        """
        # Snapshot the arguments now; `locals()` must be called before any
        # other local variable is created, so the generated validation code
        # below can treat the snapshot as "the declared parameters + kwargs".
        local_var_params = locals()
        all_params = ['container_id'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject any keyword argument the generated API does not recognise,
        # merging the accepted ones into the snapshot.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_container_details_using_get" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'container_id' is set
        if self.api_client.client_side_validation and ('container_id' not in local_var_params or # noqa: E501
                local_var_params['container_id'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `container_id` when calling `get_container_details_using_get`") # noqa: E501
        collection_formats = {}
        # The container id is interpolated into the URL path template below.
        path_params = {}
        if 'container_id' in local_var_params:
            path_params['containerId'] = local_var_params['container_id'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*']) # noqa: E501
        # Authentication setting
        auth_settings = [] # noqa: E501
        # Delegate the actual HTTP GET to the shared ApiClient.
        return self.api_client.call_api(
            '/v1.1/containers/{containerId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ContainerDetails', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_container_installed_software_details_using_get(self, container_id, **kwargs): # noqa: E501
"""Show software installed on a container # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_container_installed_software_details_using_get(container_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str container_id: Specify the ID or SHA value of a specific container in the user’s scope. (required)
:param str filter: Filter the container vulnerability details by providing a query using Qualys syntax. <a href='/cs/help/search/language.htm' target='_blank'>Click here</a> for help with creating your query.
:param str sort: Sort the results using a Qualys token. For example created:desc. <a href='/cs/help/search_tips/sortable_tokens.htm'>Click here</a> for a listing of tokens.
:param bool is_drift: isDrift
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: SoftwarePivotListFacade
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_container_installed_software_details_using_get_with_http_info(container_id, **kwargs) # noqa: E501
    def get_container_installed_software_details_using_get_with_http_info(self, container_id, **kwargs):  # noqa: E501
        """Show software installed on a container  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_container_installed_software_details_using_get_with_http_info(container_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str container_id: Specify the ID or SHA value of a specific container in the user’s scope. (required)
        :param str filter: Filter the container vulnerability details by providing a query using Qualys syntax. <a href='/cs/help/search/language.htm' target='_blank'>Click here</a> for help with creating your query.
        :param str sort: Sort the results using a Qualys token. For example created:desc. <a href='/cs/help/search_tips/sortable_tokens.htm'>Click here</a> for a listing of tokens.
        :param bool is_drift: isDrift
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(SoftwarePivotListFacade, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Capture locals() first, while the only locals are `self`,
        # `container_id` and `kwargs`; the generated parameter handling
        # below relies on exactly that snapshot.
        local_var_params = locals()

        all_params = ['container_id', 'filter', 'sort', 'is_drift']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject unknown keyword arguments, then flatten kwargs into
        # local_var_params so every parameter is looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_container_installed_software_details_using_get" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'container_id' is set
        if self.api_client.client_side_validation and ('container_id' not in local_var_params or  # noqa: E501
                                                       local_var_params['container_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `container_id` when calling `get_container_installed_software_details_using_get`")  # noqa: E501

        collection_formats = {}

        # Path parameter uses the camelCase name expected by the endpoint.
        path_params = {}
        if 'container_id' in local_var_params:
            path_params['containerId'] = local_var_params['container_id']  # noqa: E501

        # Optional query parameters are only sent when explicitly provided.
        query_params = []
        if 'filter' in local_var_params and local_var_params['filter'] is not None:  # noqa: E501
            query_params.append(('filter', local_var_params['filter']))  # noqa: E501
        if 'sort' in local_var_params and local_var_params['sort'] is not None:  # noqa: E501
            query_params.append(('sort', local_var_params['sort']))  # noqa: E501
        if 'is_drift' in local_var_params and local_var_params['is_drift'] is not None:  # noqa: E501
            query_params.append(('isDrift', local_var_params['is_drift']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/v1.1/containers/{containerId}/software', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SoftwarePivotListFacade',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_container_vuln_count_using_get(self, container_id, **kwargs): # noqa: E501
"""Show vulnerability count for a container # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_container_vuln_count_using_get(container_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str container_id: Specify the ID or SHA value of a specific container in the user’s scope. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: dict(str, int)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_container_vuln_count_using_get_with_http_info(container_id, **kwargs) # noqa: E501
    def get_container_vuln_count_using_get_with_http_info(self, container_id, **kwargs):  # noqa: E501
        """Show vulnerability count for a container  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_container_vuln_count_using_get_with_http_info(container_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str container_id: Specify the ID or SHA value of a specific container in the user’s scope. (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(dict(str, int), status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Capture locals() first, while the only locals are `self`,
        # `container_id` and `kwargs`; the parameter handling below relies
        # on exactly that snapshot.
        local_var_params = locals()

        all_params = ['container_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject unknown keyword arguments, then flatten kwargs into
        # local_var_params so every parameter is looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_container_vuln_count_using_get" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'container_id' is set
        if self.api_client.client_side_validation and ('container_id' not in local_var_params or  # noqa: E501
                                                       local_var_params['container_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `container_id` when calling `get_container_vuln_count_using_get`")  # noqa: E501

        collection_formats = {}

        # Path parameter uses the camelCase name expected by the endpoint.
        path_params = {}
        if 'container_id' in local_var_params:
            path_params['containerId'] = local_var_params['container_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/v1.1/containers/{containerId}/vuln/count', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='dict(str, int)',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_container_vuln_details_using_get(self, container_id, **kwargs): # noqa: E501
"""Show vulnerability details for a container # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_container_vuln_details_using_get(container_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str container_id: Specify the ID or SHA value of a specific container in the user’s scope. (required)
:param str filter: Filter the container vulnerability details by providing a query using Qualys syntax. <a href='/cs/help/search/language.htm' target='_blank'>Click here</a> for help with creating your query.
:param str type: Specify the type of information to be fetched: Summary, Details, All.
:param bool is_drift: isDrift
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ContainerVulnResponseFacade
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_container_vuln_details_using_get_with_http_info(container_id, **kwargs) # noqa: E501
    def get_container_vuln_details_using_get_with_http_info(self, container_id, **kwargs):  # noqa: E501
        """Show vulnerability details for a container  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_container_vuln_details_using_get_with_http_info(container_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str container_id: Specify the ID or SHA value of a specific container in the user’s scope. (required)
        :param str filter: Filter the container vulnerability details by providing a query using Qualys syntax. <a href='/cs/help/search/language.htm' target='_blank'>Click here</a> for help with creating your query.
        :param str type: Specify the type of information to be fetched: Summary, Details, All.
        :param bool is_drift: isDrift
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(ContainerVulnResponseFacade, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Capture locals() first, while the only locals are `self`,
        # `container_id` and `kwargs`; the parameter handling below relies
        # on exactly that snapshot.
        local_var_params = locals()

        all_params = ['container_id', 'filter', 'type', 'is_drift']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject unknown keyword arguments, then flatten kwargs into
        # local_var_params so every parameter is looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_container_vuln_details_using_get" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'container_id' is set
        if self.api_client.client_side_validation and ('container_id' not in local_var_params or  # noqa: E501
                                                       local_var_params['container_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `container_id` when calling `get_container_vuln_details_using_get`")  # noqa: E501

        collection_formats = {}

        # Path parameter uses the camelCase name expected by the endpoint.
        path_params = {}
        if 'container_id' in local_var_params:
            path_params['containerId'] = local_var_params['container_id']  # noqa: E501

        # Optional query parameters are only sent when explicitly provided.
        query_params = []
        if 'filter' in local_var_params and local_var_params['filter'] is not None:  # noqa: E501
            query_params.append(('filter', local_var_params['filter']))  # noqa: E501
        if 'type' in local_var_params and local_var_params['type'] is not None:  # noqa: E501
            query_params.append(('type', local_var_params['type']))  # noqa: E501
        if 'is_drift' in local_var_params and local_var_params['is_drift'] is not None:  # noqa: E501
            query_params.append(('isDrift', local_var_params['is_drift']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/v1.1/containers/{containerId}/vuln', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ContainerVulnResponseFacade',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_containers_pivot_data_with_list_using_get(self, page_no, page_size, **kwargs): # noqa: E501
"""Show a list of containers in your account # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_containers_pivot_data_with_list_using_get(page_no, page_size, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int page_no: The page to be returned. (required)
:param int page_size: The number of records per page to be included in the response. (required)
:param str filter: Filter the containers list by providing a query using Qualys syntax. <a href='/cs/help/search/language.htm' target='_blank'>Click here</a> for help with creating your query.
:param str sort: Sort the results using a Qualys token. For example created:desc. <a href='/cs/help/search_tips/sortable_tokens.htm'>Click here</a> for a listing of tokens.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: PivotListResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_containers_pivot_data_with_list_using_get_with_http_info(page_no, page_size, **kwargs) # noqa: E501
    def get_containers_pivot_data_with_list_using_get_with_http_info(self, page_no, page_size, **kwargs):  # noqa: E501
        """Show a list of containers in your account  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_containers_pivot_data_with_list_using_get_with_http_info(page_no, page_size, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param int page_no: The page to be returned. (required)
        :param int page_size: The number of records per page to be included in the response. (required)
        :param str filter: Filter the containers list by providing a query using Qualys syntax. <a href='/cs/help/search/language.htm' target='_blank'>Click here</a> for help with creating your query.
        :param str sort: Sort the results using a Qualys token. For example created:desc. <a href='/cs/help/search_tips/sortable_tokens.htm'>Click here</a> for a listing of tokens.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(PivotListResponseContainer, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Capture locals() first, while the only locals are `self`,
        # `page_no`, `page_size` and `kwargs`; the parameter handling below
        # relies on exactly that snapshot.
        local_var_params = locals()

        all_params = ['page_no', 'page_size', 'filter', 'sort']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject unknown keyword arguments, then flatten kwargs into
        # local_var_params so every parameter is looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_containers_pivot_data_with_list_using_get" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'page_no' is set
        if self.api_client.client_side_validation and ('page_no' not in local_var_params or  # noqa: E501
                                                       local_var_params['page_no'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `page_no` when calling `get_containers_pivot_data_with_list_using_get`")  # noqa: E501
        # verify the required parameter 'page_size' is set
        if self.api_client.client_side_validation and ('page_size' not in local_var_params or  # noqa: E501
                                                       local_var_params['page_size'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `page_size` when calling `get_containers_pivot_data_with_list_using_get`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        # Paging is mandatory (validated above); filter/sort are optional and
        # only sent when explicitly provided. Query names are camelCase.
        query_params = []
        if 'filter' in local_var_params and local_var_params['filter'] is not None:  # noqa: E501
            query_params.append(('filter', local_var_params['filter']))  # noqa: E501
        if 'page_no' in local_var_params and local_var_params['page_no'] is not None:  # noqa: E501
            query_params.append(('pageNo', local_var_params['page_no']))  # noqa: E501
        if 'page_size' in local_var_params and local_var_params['page_size'] is not None:  # noqa: E501
            query_params.append(('pageSize', local_var_params['page_size']))  # noqa: E501
        if 'sort' in local_var_params and local_var_params['sort'] is not None:  # noqa: E501
            query_params.append(('sort', local_var_params['sort']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/v1.1/containers', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PivotListResponseContainer',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
|
# Sphinx configuration for the Sphinx Book Theme documentation.

# -- Project information -----------------------------------------------------
import os

project = "Sphinx Book Theme"
copyright = "2020"
author = "the Executable Book Project"
master_doc = "index"

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "myst_nb",
    "sphinx_copybutton",
    "sphinx_togglebutton",
    "sphinxcontrib.bibtex",
    "sphinx_thebe",
    "sphinx.ext.autodoc",
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
    "ablog",
    "sphinxext.opengraph",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# Cross-project references resolved at build time.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3.8", None),
    "sphinx": ("https://www.sphinx-doc.org/en/master", None),
}

# References that nitpicky mode should not warn about.
nitpick_ignore = [
    ("py:class", "docutils.nodes.document"),
    ("py:class", "docutils.parsers.rst.directives.body.Sidebar"),
]

suppress_warnings = ["myst.domains", "ref.ref"]

numfig = True

# MyST markdown syntax extensions; commented entries are kept as a menu of
# optional features that are currently disabled.
myst_enable_extensions = [
    "dollarmath",
    # "amsmath",
    "deflist",
    # "html_admonition",
    # "html_image",
    "colon_fence",
    # "smartquotes",
    # "replacements",
    # "linkify",
    # "substitution",
]

myst_url_schemes = ("http", "https", "mailto")

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_book_theme"
html_logo = "_static/logo-wide.png"
html_title = "Sphinx Book Theme"
html_copy_source = True
html_sourcelink_suffix = ""
html_favicon = "_static/logo-square.png"
html_last_updated_fmt = ""

# Custom sidebar layout for blog pages (ABlog widgets plus theme navigation).
html_sidebars = {
    "reference/blog/*": [
        "sidebar-logo.html",
        "search-field.html",
        "postcard.html",
        "recentposts.html",
        "tagcloud.html",
        "categories.html",
        "archives.html",
        "sbt-sidebar-nav.html",
        "sbt-sidebar-footer.html",
    ]
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# myst-nb: execute notebooks and cache results between builds.
jupyter_execute_notebooks = "cache"

# sphinx-thebe: repository used to back live code cells.
thebe_config = {
    "repository_url": "https://github.com/binder-examples/jupyter-stacks-datascience",
    "repository_branch": "master",
}

html_theme_options = {
    "theme_dev_mode": True,
    "path_to_docs": "docs",
    "repository_url": "https://github.com/executablebooks/sphinx-book-theme",
    # "repository_branch": "gh-pages",  # For testing
    "launch_buttons": {
        "binderhub_url": "https://mybinder.org",
        # "jupyterhub_url": "https://datahub.berkeley.edu",  # For testing
        "colab_url": "https://colab.research.google.com/",
        "notebook_interface": "jupyterlab",
        "thebe": True,
    },
    "use_edit_page_button": True,
    "use_issues_button": True,
    "use_repository_button": True,
    "use_download_button": True,
    "logo_only": True,
    # For testing
    # "use_fullscreen_button": False,
    # "home_page_in_toc": True,
    # "single_page": True,
    # "extra_footer": "<a href='https://google.com'>Test</a>",  # DEPRECATED KEY
    # "extra_navbar": "<a href='https://google.com'>Test</a>",
    # "show_navbar_depth": 2,
}

# -- ABlog config -------------------------------------------------
blog_path = "reference/blog"
blog_post_pattern = "reference/blog/*.md"
blog_baseurl = "https://sphinx-book-theme.readthedocs.io"
fontawesome_included = True
post_auto_image = 1
post_auto_excerpt = 2

# Show full tracebacks for notebook execution failures on Read the Docs only.
execution_show_tb = "READTHEDOCS" in os.environ

# sphinxcontrib-bibtex configuration.
bibtex_bibfiles = ["references.bib"]
bibtex_reference_style = "author_year"
|
"""Test Main methods."""
from unittest import TestCase
from unittest.mock import MagicMock, PropertyMock, create_autospec, patch
from pyof.foundation.network_types import Ethernet
from pyof.v0x01.controller2switch.common import StatsType
from pyof.v0x04.controller2switch.common import MultipartType
from kytos.core.connection import ConnectionState
from kytos.lib.helpers import (get_connection_mock, get_kytos_event_mock,
get_switch_mock)
from napps.kytos.of_core.utils import NegotiationException
from tests.helpers import get_controller_mock
# pylint: disable=protected-access, too-many-public-methods
class TestMain(TestCase):
"""Test the Main class."""
    def setUp(self):
        """Execute steps before each tests.

        Set the server_name_url from kytos/of_core
        """
        # One switch per supported OpenFlow version (0x01 and 0x04), each
        # wired to a mocked connection of the matching protocol version.
        self.switch_v0x01 = get_switch_mock("00:00:00:00:00:00:00:01", 0x01)
        self.switch_v0x04 = get_switch_mock("00:00:00:00:00:00:00:02", 0x04)
        self.switch_v0x01.connection = get_connection_mock(
            0x01, get_switch_mock("00:00:00:00:00:00:00:03"))
        self.switch_v0x04.connection = get_connection_mock(
            0x04, get_switch_mock("00:00:00:00:00:00:00:04"))

        # Neutralize the run_on_thread decorator *before* importing Main so
        # decorated handlers run synchronously; patch.stopall (registered as
        # cleanup) undoes the patch after each test.
        patch('kytos.core.helpers.run_on_thread', lambda x: x).start()
        # pylint: disable=import-outside-toplevel
        from napps.kytos.of_core.main import Main
        self.addCleanup(patch.stopall)
        self.napp = Main(get_controller_mock())
@patch('napps.kytos.of_core.v0x01.utils.send_echo')
@patch('napps.kytos.of_core.v0x04.utils.send_echo')
def test_execute(self, *args):
"""Test execute."""
(mock_of_core_v0x04_utils, mock_of_core_v0x01_utils) = args
self.switch_v0x01.is_connected.return_value = True
self.switch_v0x04.is_connected.return_value = True
self.napp.controller.switches = {"00:00:00:00:00:00:00:01":
self.switch_v0x01}
self.napp.execute()
mock_of_core_v0x01_utils.assert_called()
self.napp.controller.switches = {"00:00:00:00:00:00:00:01":
self.switch_v0x04}
self.napp.execute()
mock_of_core_v0x04_utils.assert_called()
@patch('napps.kytos.of_core.v0x04.utils.update_flow_list')
@patch('napps.kytos.of_core.v0x01.utils.update_flow_list')
def test_request_flow_list(self, *args):
"""Test request flow list."""
(mock_update_flow_list_v0x01, mock_update_flow_list_v0x04) = args
mock_update_flow_list_v0x04.return_value = "ABC"
self.napp._request_flow_list(self.switch_v0x01)
mock_update_flow_list_v0x01.assert_called_with(self.napp.controller,
self.switch_v0x01)
self.napp._request_flow_list(self.switch_v0x04)
mock_update_flow_list_v0x04.assert_called_with(self.napp.controller,
self.switch_v0x04)
    @patch('napps.kytos.of_core.v0x04.utils.update_flow_list')
    @patch('napps.kytos.of_core.v0x01.utils.update_flow_list')
    def test_on_handshake_completed_request_flow_list(self, *args):
        """Test request flow list."""
        # patch decorators inject mocks bottom-up: v0x01 first.
        (mock_update_flow_list_v0x01, mock_update_flow_list_v0x04) = args
        mock_update_flow_list_v0x04.return_value = "ABC"

        # The handler must pick the utils module matching the OF version of
        # the switch carried in the event content.
        name = 'kytos/of_core.handshake.completed'
        content = {"switch": self.switch_v0x01}
        event = get_kytos_event_mock(name=name, content=content)
        self.napp.on_handshake_completed_request_flow_list(event)
        mock_update_flow_list_v0x01.assert_called_with(self.napp.controller,
                                                       self.switch_v0x01)
        content = {"switch": self.switch_v0x04}
        event = get_kytos_event_mock(name=name, content=content)
        self.napp.on_handshake_completed_request_flow_list(event)
        mock_update_flow_list_v0x04.assert_called_with(self.napp.controller,
                                                       self.switch_v0x04)
    @patch('napps.kytos.of_core.v0x01.flow.Flow.from_of_flow_stats')
    def test_handle_stats_reply(self, mock_from_of_flow_stats_v0x01):
        """Test handle stats reply."""
        mock_from_of_flow_stats_v0x01.return_value = "ABC"

        # OFPST_FLOW replies must be parsed via Flow.from_of_flow_stats.
        flow_msg = MagicMock()
        flow_msg.body = "A"
        flow_msg.body_type = StatsType.OFPST_FLOW

        name = 'kytos/of_core.v0x01.messages.in.ofpt_stats_reply'
        content = {"source": self.switch_v0x01.connection,
                   "message": flow_msg}
        event = get_kytos_event_mock(name=name, content=content)
        self.napp.handle_stats_reply(event)
        mock_from_of_flow_stats_v0x01.assert_called_with(
            flow_msg.body, self.switch_v0x01.connection.switch)

        # OFPST_DESC replies must update the switch description instead.
        desc_msg = MagicMock()
        desc_msg.body = "A"
        desc_msg.body_type = StatsType.OFPST_DESC
        content = {"source": self.switch_v0x01.connection,
                   "message": desc_msg}
        event = get_kytos_event_mock(name=name, content=content)
        switch_update = self.switch_v0x01.connection.switch.update_description
        self.napp.handle_stats_reply(event)
        self.assertEqual(switch_update.call_count, 1)
    @patch('napps.kytos.of_core.main.Main._handle_multipart_flow_stats')
    @patch('napps.kytos.of_core.v0x04.utils.handle_port_desc')
    def test_handle_multipart_reply(self, *args):
        """Test handle multipart reply."""
        # patch decorators inject mocks bottom-up: handle_port_desc first,
        # then _handle_multipart_flow_stats.
        (mock_of_core_v0x04_utils, mock_from_of_flow_stats_v0x04) = args

        # OFPMP_FLOW replies are delegated to _handle_multipart_flow_stats.
        flow_msg = MagicMock()
        flow_msg.multipart_type = MultipartType.OFPMP_FLOW
        name = 'kytos/of_core.v0x04.messages.in.ofpt_multipart_reply'
        content = {"source": self.switch_v0x04.connection,
                   "message": flow_msg}
        event = get_kytos_event_mock(name=name, content=content)

        self.napp.handle_multipart_reply(event)
        mock_from_of_flow_stats_v0x04.assert_called_with(
            flow_msg, self.switch_v0x04.connection.switch)

        # OFPMP_PORT_DESC replies are delegated to v0x04 handle_port_desc.
        ofpmp_port_desc = MagicMock()
        ofpmp_port_desc.body = "A"
        ofpmp_port_desc.multipart_type = MultipartType.OFPMP_PORT_DESC
        content = {"source": self.switch_v0x04.connection,
                   "message": ofpmp_port_desc}
        event = get_kytos_event_mock(name=name, content=content)
        self.napp.handle_multipart_reply(event)
        mock_of_core_v0x04_utils.assert_called_with(
            self.napp.controller, self.switch_v0x04.connection.switch,
            ofpmp_port_desc.body)

        # OFPMP_DESC replies update the switch description.
        ofpmp_desc = MagicMock()
        ofpmp_desc.body = "A"
        ofpmp_desc.multipart_type = MultipartType.OFPMP_DESC
        content = {"source": self.switch_v0x04.connection,
                   "message": ofpmp_desc}
        event = get_kytos_event_mock(name=name, content=content)
        switch_update = self.switch_v0x04.connection.switch.update_description
        self.napp.handle_multipart_reply(event)
        self.assertEqual(switch_update.call_count, 1)
    @patch('kytos.core.buffers.KytosEventBuffer.put')
    @patch('napps.kytos.of_core.v0x04.utils.send_set_config')
    @patch('napps.kytos.of_core.v0x01.utils.send_set_config')
    @patch('napps.kytos.of_core.v0x04.utils.send_desc_request')
    @patch('napps.kytos.of_core.v0x01.utils.send_desc_request')
    @patch('napps.kytos.of_core.v0x04.utils.handle_features_reply')
    @patch('napps.kytos.of_core.v0x01.utils.handle_features_reply')
    def test_handle_features_reply(self, *args):
        """Test handle features reply."""
        # patch decorators inject mocks bottom-up, so the unpack order below
        # mirrors the decorator stack read from bottom to top.
        (mock_freply_v0x01, mock_freply_v0x04, mock_send_desc_request_v0x01,
         mock_send_desc_request_v0x04, mock_send_set_config_v0x01,
         mock_send_set_config_v0x04, mock_buffers_put) = args
        mock_freply_v0x01.return_value = self.switch_v0x01.connection.switch
        mock_freply_v0x04.return_value = self.switch_v0x04.connection.switch

        # v0x01 path: connection must be in SETUP / waiting_features_reply
        # for the handler to complete the handshake.
        self.switch_v0x01.connection.state = ConnectionState.SETUP
        self.switch_v0x01.connection.protocol.state = 'waiting_features_reply'
        name = 'kytos/of_core.v0x0[14].messages.in.ofpt_features_reply'
        content = {"source": self.switch_v0x01.connection}
        event = get_kytos_event_mock(name=name, content=content)
        self.napp.handle_features_reply(event)
        mock_freply_v0x01.assert_called_with(self.napp.controller, event)
        mock_send_desc_request_v0x01.assert_called_with(
            self.napp.controller, self.switch_v0x01.connection.switch)
        mock_send_set_config_v0x01.assert_called_with(
            self.napp.controller, self.switch_v0x01.connection.switch)

        # v0x04 path: same flow via the v0x04 utils module.
        self.switch_v0x04.connection.state = ConnectionState.SETUP
        self.switch_v0x04.connection.protocol.state = 'waiting_features_reply'
        content = {"source": self.switch_v0x04.connection}
        event = get_kytos_event_mock(name=name, content=content)
        self.napp.handle_features_reply(event)
        mock_freply_v0x04.assert_called_with(self.napp.controller, event)
        mock_send_desc_request_v0x04.assert_called_with(
            self.napp.controller, self.switch_v0x04.connection.switch)
        mock_send_set_config_v0x04.assert_called_with(
            self.napp.controller, self.switch_v0x04.connection.switch)
        mock_buffers_put.assert_called()
@patch('napps.kytos.of_core.main.Main._update_switch_flows')
@patch('napps.kytos.of_core.v0x04.flow.Flow.from_of_flow_stats')
@patch('napps.kytos.of_core.main.Main._is_multipart_reply_ours')
def test_handle_multipart_flow_stats(self, *args):
"""Test handle multipart flow stats."""
(mock_is_multipart_reply_ours, mock_from_of_flow_stats_v0x01,
mock_update_switch_flows) = args
mock_is_multipart_reply_ours.return_value = True
mock_from_of_flow_stats_v0x01.return_value = "ABC"
flow_msg = MagicMock()
flow_msg.body = "A"
flow_msg.flags.value = 2
flow_msg.body_type = StatsType.OFPST_FLOW
self.napp._handle_multipart_flow_stats(flow_msg, self.switch_v0x04)
mock_is_multipart_reply_ours.assert_called_with(flow_msg,
self.switch_v0x04,
'flows')
mock_from_of_flow_stats_v0x01.assert_called_with(flow_msg.body,
self.switch_v0x04)
mock_update_switch_flows.assert_called_with(self.switch_v0x04)
def test_update_switch_flows(self):
"""Test update_switch_flows."""
dpid = '00:00:00:00:00:00:00:01'
mock_switch = get_switch_mock(dpid)
mock_switch.id = dpid
self.napp._multipart_replies_flows = {dpid: mock_switch}
self.napp._multipart_replies_xids = {dpid: {'flows': mock_switch}}
self.napp._update_switch_flows(mock_switch)
self.assertEqual(self.napp._multipart_replies_xids, {dpid: {}})
self.assertEqual(self.napp._multipart_replies_flows, {})
    def test_is_multipart_reply_ours(self):
        """Test _is_multipart_reply_ours."""
        dpid_a = '00:00:00:00:00:00:00:01'
        dpid_b = '00:00:00:00:00:00:00:02'
        mock_switch = get_switch_mock(dpid_a)
        mock_reply = MagicMock()
        mock_reply.header.xid = mock_switch
        # switch.id is read up to three times across the two calls below:
        # matching ids first (reply is ours), then dpid_b (reply is not).
        type(mock_switch).id = PropertyMock(side_effect=[dpid_a,
                                                         dpid_a, dpid_b])
        self.napp._multipart_replies_xids = {dpid_a: {'flows': mock_switch}}
        response = self.napp._is_multipart_reply_ours(
            mock_reply, mock_switch, 'flows')
        self.assertEqual(response, True)
        response = self.napp._is_multipart_reply_ours(
            mock_reply, mock_switch, 'flows')
        self.assertEqual(response, False)
    @patch('napps.kytos.of_core.main.of_slicer')
    @patch('napps.kytos.of_core.main.Main._negotiate')
    @patch('napps.kytos.of_core.main.Main.emit_message_in')
    def test_handle_raw_in(self, *args):
        """Test handle_raw_in."""
        (mock_emit_message_in, mock_negotiate, mock_of_slicer) = args
        mock_packets = MagicMock()
        mock_data = MagicMock()
        mock_connection = MagicMock()
        # is_new() answers are consumed across the repeated
        # handle_raw_in calls below.
        mock_connection.is_new.side_effect = [True, False, True, False]
        mock_connection.is_during_setup.return_value = False
        # The slicer yields two packets and no leftover bytes.
        mock_of_slicer.return_value = [[mock_packets, mock_packets], b'']
        name = 'kytos/core.openflow.raw.in'
        content = {'source': mock_connection, 'new_data': mock_data}
        mock_event = get_kytos_event_mock(name=name, content=content)
        self.napp.handle_raw_in(mock_event)
        mock_negotiate.assert_called()
        mock_emit_message_in.assert_called()
        # Test Fail: a negotiation error must close the connection.
        mock_negotiate.side_effect = NegotiationException('Foo')
        self.napp.handle_raw_in(mock_event)
        self.assertEqual(mock_connection.close.call_count, 1)
        mock_connection.close.call_count = 0
        # An unpack failure must also close the connection.
        mock_connection.protocol.unpack.side_effect = AttributeError()
        self.napp.handle_raw_in(mock_event)
        self.assertEqual(mock_connection.close.call_count, 1)
    @patch('napps.kytos.of_core.main.Main._new_port_stats')
    @patch('napps.kytos.of_core.main.Main._is_multipart_reply_ours')
    def test_handle_multipart_port_stats(self, *args):
        """Test handle multipart port stats."""
        (mock_is_multipart_reply_ours,
         mock_new_port_stats) = args
        mock_is_multipart_reply_ours.return_value = True
        port_stats_msg = MagicMock()
        port_stats_msg.body = "A"
        port_stats_msg.flags.value = 2
        port_stats_msg.multipart_type = MultipartType.OFPMP_PORT_STATS
        self.napp._handle_multipart_port_stats(port_stats_msg,
                                               self.switch_v0x04)
        # Ownership is checked against the 'ports' xid category.
        mock_is_multipart_reply_ours.assert_called_with(port_stats_msg,
                                                        self.switch_v0x04,
                                                        'ports')
        mock_new_port_stats.assert_called_with(self.switch_v0x04)
    @patch('napps.kytos.of_core.main.Main.update_port_status')
    @patch('napps.kytos.of_core.main.Main.update_links')
    def test_emit_message_in(self, *args):
        """Test emit_message_in."""
        (mock_update_links, mock_update_port_status) = args
        # A port-status message must be routed to update_port_status.
        mock_port_connection = MagicMock()
        msg_port_mock = MagicMock()
        msg_port_mock.header.message_type.name = 'ofpt_port_status'
        mock_port_connection.side_effect = True
        self.napp.emit_message_in(mock_port_connection,
                                  msg_port_mock)
        mock_update_port_status.assert_called_with(msg_port_mock,
                                                   mock_port_connection)
        # A packet-in message must be routed to update_links.
        mock_packet_in_connection = MagicMock()
        msg_packet_in_mock = MagicMock()
        mock_packet_in_connection.side_effect = True
        msg_packet_in_mock.header.message_type.name = 'ofpt_packet_in'
        self.napp.emit_message_in(mock_packet_in_connection,
                                  msg_packet_in_mock)
        mock_update_links.assert_called_with(msg_packet_in_mock,
                                            mock_packet_in_connection)
    @patch('napps.kytos.of_core.main.emit_message_out')
    def test_emit_message_out(self, mock_emit_message_out):
        """Test emit_message_out."""
        mock_connection = MagicMock()
        mock_message = MagicMock()
        # Messages are only forwarded on live connections.
        mock_connection.is_alive.return_value = True
        self.napp.emit_message_out(mock_connection, mock_message)
        mock_emit_message_out.assert_called()
    @patch('pyof.utils.v0x04.symmetric.echo_reply.EchoReply')
    @patch('napps.kytos.of_core.main.Main.emit_message_out')
    def test_handle_echo_request(self, *args):
        """Test handle echo request messages."""
        (mock_emit_message_out, mock_echo_reply) = args
        mock_event = MagicMock()
        mock_echo_request = MagicMock()
        mock_echo_reply.return_value = "A"
        mock_echo_request.header.xid = "A"
        mock_echo_request.data = "A"
        mock_event.source.protocol.version = 4
        mock_event.message = mock_echo_request
        self.napp.handle_echo_request(mock_event)
        # The reply must echo back the request's xid and payload.
        mock_echo_reply.assert_called_with(xid=mock_echo_request.header.xid,
                                           data=mock_echo_request.data)
        mock_emit_message_out.assert_called_with(mock_event.source, "A")
    @patch('napps.kytos.of_core.main.Main.send_features_request')
    @patch('napps.kytos.of_core.v0x04.utils.say_hello')
    @patch('napps.kytos.of_core.main._get_version_from_bitmask')
    @patch('napps.kytos.of_core.main._get_version_from_header')
    def test_negotiate(self, *args):
        """Test negotiate."""
        (mock_version_header, mock_version_bitmask, mock_say_hello,
         mock_features_request) = args
        mock_version_header.return_value = 4
        # First call resolves the version via the bitmask; on the second
        # the bitmask yields None (presumably falling back to the header
        # version -- confirm against _negotiate).
        mock_version_bitmask.side_effect = [4, None]
        mock_connection = MagicMock()
        mock_message = MagicMock()
        type(mock_message).versions = PropertyMock(side_effect=[4, 4, 4,
                                                                False])
        self.napp._negotiate(mock_connection, mock_message)
        mock_version_bitmask.assert_called_with(mock_message.versions)
        mock_say_hello.assert_called_with(self.napp.controller,
                                          mock_connection)
        mock_features_request.assert_called_with(mock_connection)
        self.napp._negotiate(mock_connection, mock_message)
        mock_say_hello.assert_called_with(self.napp.controller,
                                          mock_connection)
        mock_features_request.assert_called_with(mock_connection)
        # Test Fail
        with self.assertRaises(NegotiationException):
            type(mock_message).versions = PropertyMock(return_value=[4])
            self.napp._negotiate(mock_connection, mock_message)
    @patch('pyof.utils.v0x04.asynchronous.error_msg.ErrorMsg')
    @patch('napps.kytos.of_core.main.Main.emit_message_out')
    @patch('kytos.core.buffers.KytosEventBuffer.put')
    def tests_fail_negotiation(self, *args):
        """Test fail_negotiation."""
        # NOTE(review): method name starts with 'tests_' instead of the
        # conventional 'test_'; unittest still collects it ('test' prefix),
        # but consider renaming for consistency.
        (mock_event_buffer, mock_emit_message_out,
         mock_error_msg) = args
        mock_connection = MagicMock()
        mock_message = MagicMock()
        mock_connection.id = "A"
        mock_message.side_effect = 4
        self.napp.fail_negotiation(mock_connection, mock_message)
        mock_event_buffer.assert_called()
        # The generated ErrorMsg must be sent back over the connection.
        mock_emit_message_out.assert_called_with(mock_connection,
                                                 mock_error_msg.return_value)
    @patch('napps.kytos.of_core.settings.SEND_FEATURES_REQUEST_ON_ECHO')
    @patch('napps.kytos.of_core.main.Main.send_features_request')
    def test_handle_queued_openflow_echo_reply(self, *args):
        """Test handle queued OpenFlow echo reply messages."""
        (mock_send_features_request, mock_settings) = args
        # With the setting enabled, an echo reply triggers a features
        # request to the event's destination.
        mock_settings.return_value = True
        mock_event = MagicMock()
        self.napp.handle_queued_openflow_echo_reply(mock_event)
        mock_send_features_request.assert_called_with(mock_event.destination)
    @patch('pyof.utils.v0x04.controller2switch.'
           'features_request.FeaturesRequest')
    @patch('napps.kytos.of_core.main.Main.emit_message_out')
    def test_send_features_request(self, *args):
        """Test send_features_request."""
        (mock_emit_message_out, mock_features_request) = args
        mock_destination = MagicMock()
        mock_destination.protocol.version = 4
        mock_features_request.return_value = "A"
        self.napp.send_features_request(mock_destination)
        # The built request must be emitted to the destination.
        mock_features_request.assert_called()
        mock_emit_message_out.assert_called_with(mock_destination, "A")
    def test_handle_features_request_sent(self):
        """Test handle_features_request_sent."""
        mock_protocol = MagicMock()
        mock_protocol.protocol.state = 'sending_features'
        # Sending the features request must advance the protocol state.
        expected = 'waiting_features_reply'
        name = 'kytos/of_core.v0x0[14].messages.out.ofpt_features_request'
        content = {'destination': mock_protocol}
        mock_event = get_kytos_event_mock(name=name, content=content)
        self.napp.handle_features_request_sent(mock_event)
        self.assertEqual(mock_event.destination.protocol.state, expected)
    def test_handle_openflow_in_hello_failed(self):
        """Test handle_openflow_in_hello_failed."""
        mock_destination = MagicMock()
        content = {'destination': mock_destination}
        mock_event = get_kytos_event_mock(name='kytos/of_core',
                                          content=content)
        self.napp.handle_openflow_in_hello_failed(mock_event)
        # A failed hello must close the destination connection.
        self.assertEqual(mock_event.destination.close.call_count, 1)
    @patch('napps.kytos.of_core.main.log')
    def test_shutdown(self, mock_log):
        """Test shutdown."""
        self.napp.shutdown()
        # Shutdown is expected to only emit a single debug log entry.
        self.assertEqual(mock_log.debug.call_count, 1)
    @patch('kytos.core.buffers.KytosEventBuffer.put')
    @patch('napps.kytos.of_core.main.Ethernet')
    def test_update_links(self, *args):
        """Test update_links."""
        (mock_ethernet, mock_buffer_put) = args
        ethernet = create_autospec(Ethernet)
        ethernet.ether_type = "A"
        mock_ethernet.side_effect = ethernet
        mock_message = MagicMock()
        mock_s = MagicMock()
        # First interface lookup raises (interface missing), second
        # succeeds; update_links must tolerate both.
        mock_s.switch.get_interface_by_port_no.side_effect = [AttributeError(),
                                                              True]
        self.napp.update_links(mock_message, mock_s)
        mock_ethernet.assert_called()
        mock_buffer_put.assert_called()
    @patch('kytos.core.buffers.KytosEventBuffer.put')
    def test_send_specific_port_mod(self, mock_buffer_put):
        """Test send specific port."""
        mock_port = MagicMock()
        mock_interface = MagicMock()
        # One port-state value is consumed per call below (0, 1, 2).
        type(mock_port.state).value = PropertyMock(side_effect=[0, 1, 2])
        current_state = 0
        self.napp._send_specific_port_mod(mock_port,
                                          mock_interface, current_state)
        mock_buffer_put.assert_called()
        current_state = 1
        self.napp._send_specific_port_mod(mock_port,
                                          mock_interface, current_state)
        mock_buffer_put.assert_called()
        current_state = 2
        self.napp._send_specific_port_mod(mock_port,
                                          mock_interface, current_state)
        mock_buffer_put.assert_called()
    @patch('kytos.core.buffers.KytosEventBuffer.put')
    @patch('napps.kytos.of_core.main.Interface')
    @patch('napps.kytos.of_core.main.Main._send_specific_port_mod')
    def test_update_port_status(self, *args):
        """Test update_port_status."""
        (mock_port_mod, mock_interface, mock_buffer_put) = args
        mock_port_status = MagicMock()
        mock_source = MagicMock()
        mock_port_status.reason.value.side_effect = [0, 1, 2]
        # check OFPPR_ADD: a new Interface should be created.
        mock_port_status.reason.enum_ref(0).name = 'OFPPR_ADD'
        self.napp.update_port_status(mock_port_status, mock_source)
        mock_interface.assert_called()
        # check OFPPR_MODIFY, both with a missing and an existing interface.
        mock_port_status.reason.enum_ref(1).name = 'OFPPR_MODIFY'
        mock_source.switch.get_interface_by_port_no.return_value = False
        self.napp.update_port_status(mock_port_status, mock_source)
        mock_port_mod.assert_called()
        mock_buffer_put.assert_called()
        mock_source.switch.get_interface_by_port_no.return_value = MagicMock()
        self.napp.update_port_status(mock_port_status, mock_source)
        mock_port_mod.assert_called()
        mock_buffer_put.assert_called()
        # check OFPPR_DELETE
        mock_port_status.reason.enum_ref(2).name = 'OFPPR_DELETE'
        self.napp.update_port_status(mock_port_status, mock_source)
        mock_port_mod.assert_called()
        mock_buffer_put.assert_called()
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..preprocess import ECM
def test_ECM_inputs():
    # Auto-generated by tools/checkspecs.py: every trait-metadata value
    # declared on the ECM input spec must match this expected map.
    input_map = dict(
        args=dict(
            argstr="%s",
        ),
        autoclip=dict(
            argstr="-autoclip",
        ),
        automask=dict(
            argstr="-automask",
        ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        eps=dict(
            argstr="-eps %f",
        ),
        fecm=dict(
            argstr="-fecm",
        ),
        full=dict(
            argstr="-full",
        ),
        in_file=dict(
            argstr="%s",
            copyfile=False,
            extensions=None,
            mandatory=True,
            position=-1,
        ),
        mask=dict(
            argstr="-mask %s",
            extensions=None,
        ),
        max_iter=dict(
            argstr="-max_iter %d",
        ),
        memory=dict(
            argstr="-memory %f",
        ),
        num_threads=dict(
            nohash=True,
            usedefault=True,
        ),
        out_file=dict(
            argstr="-prefix %s",
            extensions=None,
            name_source=["in_file"],
            name_template="%s_afni",
        ),
        outputtype=dict(),
        polort=dict(
            argstr="-polort %d",
        ),
        scale=dict(
            argstr="-scale %f",
        ),
        shift=dict(
            argstr="-shift %f",
        ),
        sparsity=dict(
            argstr="-sparsity %f",
        ),
        thresh=dict(
            argstr="-thresh %f",
        ),
    )
    inputs = ECM.input_spec()
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_ECM_outputs():
    # Auto-generated by tools/checkspecs.py: the output spec must expose
    # exactly this metadata for its traits.
    output_map = dict(
        out_file=dict(
            extensions=None,
        ),
    )
    outputs = ECM.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
|
#!/usr/bin/python
from math import pi, cos, sin, sqrt, acos
from sys import exit
def tensprod(T1, T2):
    """Multiply two matrices given as three row lists: T1 x T2.

    T1 is a 3x3 tensor as [[row_x], [row_y], [row_z]]; T2 is a 3xN
    matrix in the same layout (N >= 1 columns).

    Returns the 3xN product as [Tx, Ty, Tz].  If the shapes are
    incompatible (len(T2) != len(T1[0])) a warning is printed and
    [[], [], []] is returned, matching the original behaviour.
    """
    v = len(T1[0])
    # '!=' replaces the Python-2-only '<>' operator (works on 2 and 3).
    if len(T2) != v:
        print('Tensors length')
        return [[], [], []]
    Tx = []
    Ty = []
    Tz = []
    for j in range(len(T2[0])):
        sx = 0
        sy = 0
        sz = 0
        for i in (0, 1, 2):
            sx += T1[0][i] * T2[i][j]
            sy += T1[1][i] * T2[i][j]
            sz += T1[2][i] * T2[i][j]
        Tx.append(sx)
        Ty.append(sy)
        Tz.append(sz)
    return [Tx, Ty, Tz]
def raiz(value):
    """Return the square root of *value* ('raiz' = root)."""
    result = sqrt(value)
    return result
def arcos(value):
    """Return the arc cosine of *value*, in radians."""
    result = acos(value)
    return result
def rotate(t2r, angle, axis='x'):
    """Rotate tensor *t2r* by *angle* (radians) about a Cartesian axis.

    axis is 'x', 'y' or 'z'; any other value prints an error and the
    identity rotation is applied (t2r is returned unchanged, matching
    the original behaviour).  Returns tensprod(R, t2r).
    """
    # Default to the identity so an invalid axis leaves t2r unchanged.
    R = [[1, 0, 0],
         [0, 1, 0],
         [0, 0, 1]]
    if axis == 'x':
        R = [[1, 0, 0],
             [0, cos(angle), -sin(angle)],
             [0, sin(angle), cos(angle)]]
    elif axis == 'y':
        R = [[cos(angle), 0, sin(angle)],
             [0, 1, 0],
             [-sin(angle), 0, cos(angle)]]
    elif axis == 'z':
        R = [[cos(angle), -sin(angle), 0],
             [sin(angle), cos(angle), 0],
             [0, 0, 1]]
    else:
        # print() call replaces the Python-2-only print statement.
        print('Error! invalid axis : {}'.format(axis))
    return tensprod(R, t2r)
if __name__ == '__main__':
    # Symmetric-tensor components: 3 diagonal values first, then the
    # off-diagonal ones in the order the fill loop below consumes them.
    tensorvec = [4.04659, 2.63083, 68.36656, 0.00000, 0.00000, -0.73454,
                 0.00000, -1.45599, -0.54283]
    tensorvec = [x_y_z * 10 for x_y_z in tensorvec]
    Ar = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    k = 0
    for i in range(3):
        Ar[i][i] = tensorvec[i]
        # print() calls replace the Python-2-only print statements.
        print(Ar)
        for j in range(3):
            # '!=' replaces the Python-2-only '<>' operator.
            if i != j:
                Ar[i][j] = tensorvec[k + 3]
                k += 1
    print(Ar)
    # NOTE(review): the three angles look like Givens-rotation angles
    # chosen to zero Ar[2][0], Ar[1][0] and Ar[2][1] in turn -- confirm.
    a_tor_y = -acos(Ar[0][0] / sqrt(Ar[0][0] * Ar[0][0] + Ar[2][0] * Ar[2][0]))
    Ar = rotate(Ar, a_tor_y, 'y')
    a_tor_z = acos(Ar[0][0] / sqrt(Ar[0][0] * Ar[0][0] + Ar[1][0] * Ar[1][0]))
    Ar = rotate(Ar, a_tor_z, 'z')
    a_tor_x = acos(Ar[1][1] / sqrt(Ar[1][1] * Ar[1][1] + Ar[2][1] * Ar[2][1]))
    Ar = rotate(Ar, a_tor_x)
    for row in Ar:
        print(row)
# vim:tw=80
|
# qubit number=4
# total number=13
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Build the auto-generated 4-qubit program (13 numbered operations).

    The '# number=N' tags index each gate in the generator's sequence.
    NOTE(review): the back-to-back SWAP and CNOT pairs cancel to the
    identity; this looks intentional for a generated benchmark circuit.
    """
    prog = Program() # circuit begin
    prog += H(1) # number=2
    prog += H(2) # number=3
    prog += H(3) # number=4
    prog += Y(3) # number=5
    prog += SWAP(1,0) # number=6
    prog += SWAP(1,0) # number=7
    prog += SWAP(2,0) # number=8
    prog += SWAP(2,0) # number=9
    prog += Y(2) # number=10
    prog += CNOT(3,0) # number=11
    prog += CNOT(3,0) # number=12
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Count occurrences of each measured bitstring.

    :param bitstrings: iterable of hashable measurement bitstrings.
    :return: dict mapping each bitstring to its number of occurrences.
    """
    d = {}
    for l in bitstrings:
        # dict.get with a default replaces the manual None check.
        d[l] = d.get(l, 0) + 1
    return d
if __name__ == '__main__':
    prog = make_circuit()
    qvm = get_qc('4q-qvm')
    results = qvm.run_and_measure(prog, 1024)
    # Stack per-qubit samples into one row of bits per shot.
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    # 'with' guarantees the file is closed even if writing fails
    # (the original relied on an explicit close()).
    with open("../data/startPyquil611.csv", "w") as writefile:
        print(summrise_results(bitstrings), file=writefile)
|
from rest_framework import serializers
from .models import Client, CreditCard, Employee, Account, Insurance, PreapprovalRequest, Promotion, Report
class ClientSerializer(serializers.ModelSerializer):
    """Serialize all fields of the Client model."""

    class Meta:
        model = Client
        fields = "__all__"
class AccountSerializer(serializers.ModelSerializer):
    """Serialize all fields of the Account model."""

    class Meta:
        model = Account
        fields = "__all__"
class PromotionSerializer(serializers.ModelSerializer):
    """Serialize all fields of the Promotion model."""

    class Meta:
        model = Promotion
        fields = "__all__"
class InsuranceSerializer(serializers.ModelSerializer):
    """Serialize all fields of the Insurance model."""

    class Meta:
        model = Insurance
        fields = "__all__"
class CreditCardSerializerWithDepth(serializers.ModelSerializer):
    """
    This has depth enabled so it is easy to fetch credit cards
    with all the information related to the promotions or
    insurances associated to each card.
    """

    class Meta:
        model = CreditCard
        fields = "__all__"
        # depth=1 expands direct relations (promotions/insurances) inline.
        depth = 1
class CreditCardSerializer(serializers.ModelSerializer):
    """
    This serializer has depth disabled so it is easy to write
    new credit cards associated to promotions or insurances
    """

    class Meta:
        model = CreditCard
        fields = "__all__"
class PreapprovalRequestSerializerWithDepth(serializers.ModelSerializer):
    """
    This has depth enabled so it is easy to fetch preapproval
    request with all the information related to the client,
    credit card and employee associated to each card.
    """

    class Meta:
        model = PreapprovalRequest
        fields = "__all__"
        # depth=1 expands direct relations (client/card/employee) inline.
        depth = 1
class PreapprovalRequestSerializer(serializers.ModelSerializer):
    """Flat (depth-disabled) serializer for writing PreapprovalRequests."""

    class Meta:
        model = PreapprovalRequest
        fields = "__all__"
class ReportSerializer(serializers.ModelSerializer):
    """Flat (depth-disabled) serializer for the Report model."""

    class Meta:
        model = Report
        fields = "__all__"
class ReportSerializerWithDepth(serializers.ModelSerializer):
    """Read-oriented Report serializer expanding relations two levels deep."""

    class Meta:
        model = Report
        fields = "__all__"
        depth = 2
|
# Read nine integers from stdin; print the largest value and its
# 1-based input position (ties go to the later position, as before).
entries = [(int(input()), position) for position in range(1, 10)]
best_value, best_position = max(entries)
print(best_value)
print(best_position)
|
import RPi.GPIO as GPIO
import time
from config import THRESHOLD
# BCM pin numbers for the ultrasonic sensor: trigger output, echo input.
# NOTE(review): sensor model assumed from the trigger/echo timing
# protocol used in getDistance() -- confirm the wiring.
GPIO_TRIGGER = 14
GPIO_ECHO = 15
def getDistance():
    """Measure distance with the ultrasonic sensor and return it in cm.

    Fires a 10 us pulse on the trigger pin, busy-waits on the echo pin
    edges and converts the round-trip time into a one-way distance
    (speed of sound 34300 cm/s), minus the configured THRESHOLD offset.
    """
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
    GPIO.setup(GPIO_ECHO, GPIO.IN)
    # 10 microsecond trigger pulse starts a measurement.
    GPIO.output(GPIO_TRIGGER, True)
    time.sleep(0.00001)
    GPIO.output(GPIO_TRIGGER, False)
    StartTime = time.time()
    StopTime = time.time()
    # Busy-wait for the echo edges: StartTime ends up at the rising
    # edge, StopTime at the falling edge.
    while GPIO.input(GPIO_ECHO) == 0:
        StartTime = time.time()
    while GPIO.input(GPIO_ECHO) == 1:
        StopTime = time.time()
    TimeElapsed = StopTime - StartTime
    # Round trip -> one way (/2), 34300 cm/s speed of sound.
    distance = ((TimeElapsed * 34300) / 2) - THRESHOLD
    GPIO.cleanup()
    print ("Measured Distance = %.1f cm" % distance)
    return distance
# Run a single measurement when executed as a script.
if __name__ == '__main__':
    getDistance()
|
from banklite.dtypes import (
BaseAccount,
ReserveAccount,
CheckingAccount,
SavingsAccount,
)
def test_base_account():
    """BaseAccount stores the two positional ids as attributes."""
    acct = BaseAccount("aaaa", "bbbb")
    assert acct.account_id == "aaaa"
    assert acct.customer_id == "bbbb"
def test_reserve_account():
    """ReserveAccount carries fixed defaults: id, no customer, type 0."""
    acct = ReserveAccount()
    assert acct.account_id == "0000000001"
    assert acct.customer_id is None
    assert acct.rate == 0.0
    assert acct.type == 0
def test_checking_account():
    """CheckingAccount keeps the given ids, zero rate and type 1."""
    acct = CheckingAccount(account_id="aaaa", customer_id="bbbb")
    assert acct.account_id == "aaaa"
    assert acct.customer_id == "bbbb"
    assert acct.rate == 0.0
    assert acct.type == 1
def test_savings_account():
    """SavingsAccount keeps the given ids, interest rate and type 2."""
    acct = SavingsAccount(account_id="aaaa", customer_id="bbbb", rate=0.03)
    assert acct.account_id == "aaaa"
    assert acct.customer_id == "bbbb"
    assert acct.rate == 0.03
    assert acct.type == 2
|
from typing import Callable, Dict, List, Tuple, Union
import mlrun
from mlrun.artifacts import Artifact
from mlrun.frameworks._common.loggers import MLRunLogger, TrackableType
from mlrun.frameworks.pytorch.callbacks.logging_callback import LoggingCallback
from mlrun.frameworks.pytorch.model_handler import PyTorchModelHandler
class MLRunLoggingCallback(LoggingCallback):
    """
    Callback for logging data during training / validation via mlrun's context. Each tracked hyperparameter and metrics
    results will be logged per epoch and at the end of the run the model will be saved and logged as well. Some plots
    will be available as well. To summarize, the available data in mlrun will be:
    * For each epoch:
    * Tracking table: epoch, static hyperparameters, dynamic hyperparameters, training metrics, validation metrics.
    * Per iteration (batch) chart artifacts for the training and validation metrics.
    * At the end of the run:
    * Per epoch chart artifacts for the validation summaries and dynamic hyperparameters.
    * Model is logged with all of the files and artifacts.
    All the collected data will be available in this callback post the training / validation process and can be accessed
    via the 'training_results', 'validation_results', 'static_hyperparameters', 'dynamic_hyperparameters' and
    'summaries' properties.
    """

    def __init__(
        self,
        context: mlrun.MLClientCtx,
        custom_objects_map: Union[Dict[str, Union[str, List[str]]], str],
        custom_objects_directory: str,
        log_model_labels: Dict[str, TrackableType] = None,
        log_model_parameters: Dict[str, TrackableType] = None,
        log_model_extra_data: Dict[str, Union[TrackableType, Artifact]] = None,
        dynamic_hyperparameters: Dict[
            str, Tuple[str, Union[List[Union[str, int]], Callable[[], TrackableType]]]
        ] = None,
        static_hyperparameters: Dict[
            str, Union[TrackableType, Tuple[str, List[Union[str, int]]]]
        ] = None,
        auto_log: bool = False,
    ):
        """
        Initialize an mlrun logging callback with the given hyperparameters and logging configurations.
        :param context:                  MLRun context to log to. Its parameters will be logged automatically if
                                         'auto_log' is True.
        :param custom_objects_map:       A dictionary of all the custom objects required for loading the model. Each key
                                         is a path to a python file and its value is the custom object name to import
                                         from it. If multiple objects needed to be imported from the same py file a list
                                         can be given. The map can be passed as a path to a json file as well. For
                                         example:
                                         {
                                             "/.../custom_optimizer.py": "optimizer",
                                             "/.../custom_layers.py": ["layer1", "layer2"]
                                         }
                                         All the paths will be accessed from the given 'custom_objects_directory',
                                         meaning each py file will be read from 'custom_objects_directory/<MAP VALUE>'.
                                         If the model path given is of a store object, the custom objects map will be
                                         read from the logged custom object map artifact of the model.
                                         Notice: The custom objects will be imported in the order they came in this
                                         dictionary (or json). If a custom object is depended on another, make sure to
                                         put it below the one it relies on.
        :param custom_objects_directory: Path to the directory with all the python files required for the custom
                                         objects. Can be passed as a zip file as well (will be extracted during the run
                                         before loading the model). If the model path given is of a store object, the
                                         custom objects files will be read from the logged custom object artifact of the
                                         model.
        :param log_model_labels:         Labels to log with the model.
        :param log_model_parameters:     Parameters to log with the model.
        :param log_model_extra_data:     Extra data to log with the model.
        :param dynamic_hyperparameters:  If needed to track a hyperparameter dynamically (sample it each epoch) it
                                         should be passed here. The parameter expects a dictionary where the keys are
                                         the hyperparameter chosen names and the values are tuples of object key and a
                                         list with the key chain. A key chain is a list of keys and indices to know how
                                         to access the needed hyperparameter. If the hyperparameter is not of accessible
                                         from any of the HyperparametersKeys, a custom callable method can be passed in
                                         the tuple instead of the key chain when providing the word
                                         HyperparametersKeys.CUSTOM. For example, to track the 'lr' attribute of
                                         an optimizer and a custom parameter, one should pass:
                                         {
                                             "lr": (HyperparametersKeys.OPTIMIZER, ["param_groups", 0, "lr"]),
                                             "custom parameter": (HyperparametersKeys.CUSTOM, get_custom_parameter)
                                         }
        :param static_hyperparameters:   If needed to track a hyperparameter one time per run it should be passed here.
                                         The parameter expects a dictionary where the keys are the
                                         hyperparameter chosen names and the values are the hyperparameter static value
                                         or a tuple of object key and a list with the key chain just like the dynamic
                                         hyperparameter. For example, to track the 'epochs' of an experiment run, one
                                         should pass:
                                         {
                                             "epochs": 7
                                         }
        :param auto_log:                 Whether or not to enable auto logging for logging the context parameters and
                                         trying to track common static and dynamic hyperparameters.
        """
        super(MLRunLoggingCallback, self).__init__(
            dynamic_hyperparameters=dynamic_hyperparameters,
            static_hyperparameters=static_hyperparameters,
            auto_log=auto_log,
        )

        # Replace the logger with an MLRunLogger:
        del self._logger
        self._logger = MLRunLogger(
            context=context,
            log_model_labels=log_model_labels,
            log_model_parameters=log_model_parameters,
            log_model_extra_data=log_model_extra_data,
        )

        # Store the additional PyTorchModelHandler parameters for logging the model later:
        self._custom_objects_map = custom_objects_map
        self._custom_objects_directory = custom_objects_directory

    def on_run_end(self):
        """
        Before the run ends, this method will be called to log the model and the run summaries charts.
        """
        # The trained model is fetched from the tracked objects and wrapped in
        # a handler so the logger can persist it with its custom objects.
        model = self._objects[self._ObjectKeys.MODEL]
        self._logger.log_run(
            model_handler=PyTorchModelHandler(
                model_name=type(model).__name__,
                custom_objects_map=self._custom_objects_map,
                custom_objects_directory=self._custom_objects_directory,
                model=model,
            )
        )

    def on_epoch_end(self, epoch: int):
        """
        Before the given epoch ends, this method will be called to log the dynamic hyperparameters and results of this
        epoch via the stored context.
        :param epoch: The epoch that has just ended.
        """
        super(MLRunLoggingCallback, self).on_epoch_end(epoch=epoch)

        # Create child context to hold the current epoch's results:
        self._logger.log_epoch_to_context(epoch=epoch)
|
# M2Crypto is not supported on python3
from jumpscale import j
JSBASE = j.application.jsbase_get_class()
class Empty(JSBASE):
    """Placeholder keeping this module importable: the M2Crypto-based SSL
    helpers it replaces are disabled (M2Crypto is not supported on python3).
    """

    def __init__(self):
        JSBASE.__init__(self)
# from jumpscale import j
#
# # from OpenSSL import crypto
# import os
# import M2Crypto as m2c
#
# # PASSWD="apasswd_now2easy"
#
#
# def empty_callback():
# return None
#
# # howto used from http://e1ven.com/2011/04/06/how-to-use-m2crypto-tutorial/
#
#
# class SSL:
#
# def __init__(self):
# self.__imports__ = "M2Crypto"
#
# def getSSLHandler(self, keyvaluestor=None):
# """
# default keyvaluestor=j.data.kvs.getFSStore("sslkeys", serializers=[]) #make sure to use no serializers
# pass another keyvaluestor if required (first do 'import JumpscaleLib.JumpscaleLib.data.key_value_store')
# """
# if keyvaluestor is None:
# keyvaluestor = j.data.kvs.getFSStore("sslkeys", serializers=[])
# return KeyStor(keyvaluestor)
#
#
# class KeyStor:
#
# def __init__(self, keyvaluestor=None):
# self.keys = {}
# self.db = keyvaluestor
#
# def createKeyPair(self, organization="", user="", path=""):
# """
# creates keypairs & stores in localdb
# @return (priv,pub) keys
# """
# m2c.Rand.rand_seed(os.urandom(1024))
# # print "Generating a 1024 bit private/public key pair ..."
# # If you don't like the default M2Crypto ASCII "progress" bar it makes when generating keys, you can use:
# # You can change the key size, though key lengths < 1024 are considered insecure
# # The larger the key size the longer it will take to generate the key and the larger the signature will be when signing
# # You should probably leave the public exponent at 65537 (http://en.wikipedia.org/wiki/Rsa#Key_generation_2)
# keys = m2c.RSA.gen_key(1024, 65537, empty_callback)
#
# # Save Alice's private key
# # The 'None' tells it to save the private key in an unencrypted format
# # For best security practices, you'd use:
# # That would cause the private key to be saved in an encrypted format
# # Python would ask you to enter a password to use to encrypt the key file
# # For a demo script though it's easier/quicker to just use 'None' :)
# path = j.tools.path.get(path)
# if path:
# path.makedirs_p()
# p1 = path.joinpath("priv.pem")
# p2 = path.joinpath("pub.pem")
# else:
# p1 = '/tmp/_key_%s' % j.data.idgenerator.generateGUID()
# p2 = '/tmp/_key_%s' % j.data.idgenerator.generateGUID()
#
# keys.save_key(p1, None)
# keys.save_pub_key(p2)
#
# priv = p1.text()
# pub = p2.text()
#
# if path:
# p1.remove_p()
# p2.remove_p()
#
# self.db.set(organization, "private_%s" % user, priv)
# self.db.set(organization, "public_%s" % user, pub)
#
# return (priv, pub)
#
# def _getKey(self, organization, user, cat, returnAsString=False, keyoverrule=""):
# cachekey = "%s_%s_%s" % (organization, user, cat)
# if cachekey in self.keys:
# if returnAsString:
# return self.keys[cachekey].as_pem()
# else:
# return self.keys[cachekey]
# p1 = j.tools.path.get('/tmp/_key_%s' % j.data.idgenerator.generateGUID())
# if keyoverrule:
# key = keyoverrule
# else:
# key = self.db.get(organization, "%s_%s" % (cat, user))
# if returnAsString:
# return key
# p1.write_text(key)
# try:
# if cat == "public":
# key = m2c.RSA.load_pub_key(p1)
# else:
# key = m2c.RSA.load_key(p1, empty_callback)
# except BaseException:
# raise j.exceptions.RuntimeError("Cannot load key:%s" % cachekey)
# p1.remove_p()
# self.keys[cachekey] = key
# return key
#
# def getPrivKey(self, organization, user):
# key = self._getKey(organization, user, "private")
# return key
#
# def getPubKey(self, organization, user, returnAsString=False, pubkeyReader=""):
# key = self._getKey(organization, user, "public", returnAsString, keyoverrule=pubkeyReader)
# return key
#
# def setPubKey(self, organization, user, pemstr):
# key = self.db.set(organization, "%s_%s" % ("public", user), pemstr)
#
# def test(self):
# """
# """
# org = "myorg.com"
# self.createKeyPair(org, "alice")
# self.createKeyPair(org, "bob")
# msg, signature = self.encrypt(org, "alice", "bob", "this is a test message.")
# print("msg")
# print(msg)
# print("signature")
# print(signature)
# print("decrypt")
# print((self.decrypt(org, "alice", "bob", msg, signature)))
#
# def perftest(self, nrrounds=1000, sign=True):
# start = time.time()
# org = "myorg.com"
# print(("\n\nstart perftest for encryption, nrrounds:%s" % nrrounds))
# for i in range(nrrounds):
# msg, signature = self.encrypt(org, "alice", "bob", "this is a test message.", sign=sign)
# self.decrypt(org, "alice", "bob", msg, signature)
# stop = time.time()
# nritems = nrrounds / (stop - start)
# #print(("nrrounds items per sec: %s" % nritems))
#
# def encrypt(self, orgsender, sender, orgreader, reader, message, sign=True, base64=True, pubkeyReader=""):
# """
# @param sender, name of person sending
# @param name of person reading
# @return encryptedtext,signature
# """
# # print "encrypt org:%s for:%s from:%s
# # message:%s"%(organization,reader,sender,message)
#
# # Alice wants to send a message to reader, which only reader will be able to decrypt
# # Step 1, load reader's public key
# WriteRSA = self.getPubKey(orgreader, reader, pubkeyReader=pubkeyReader)
#
# # Step 2, encrypt the message using that public key
# # Only reader's private key can decrypt a message encrypted using reader's
# # public key
# CipherText = WriteRSA.public_encrypt(message, m2c.RSA.pkcs1_oaep_padding)
# if base64:
# CipherText2 = CipherText.encode('base64')
# else:
# CipherText2 = CipherText
#
# if sign:
# # Generate a signature
# MsgDigest = m2c.EVP.MessageDigest('sha1')
# MsgDigest.update(CipherText)
#
# RSAsender = self.getPrivKey(orgsender, sender)
#
# signature = RSAsender.sign_rsassa_pss(MsgDigest.digest())
# if base64:
# signature = signature.encode('base64')
# else:
# signature = signature
# else:
# signature = None
#
# return CipherText2, signature
#
# def decrypt(self, orgsender, sender, orgreader, reader, message, signature=None, base64=True):
# # print "decrypt org:%s for:%s from:%s\nmessage:%s"%(organization,reader,sender,message)
# ReadRSA = self.getPrivKey(orgreader, reader)
# if base64:
# message2 = message.decode("base64")
# else:
# message2 = message
# plainText = ReadRSA.private_decrypt(message2, m2c.RSA.pkcs1_oaep_padding)
#
# if signature is not None:
# if base64:
# signature2 = signature.decode("base64")
# else:
# signature2 = signature
#
# PubKey = self.getPubKey(orgsender, sender)
#
# MsgDigest = m2c.EVP.MessageDigest('sha1')
# MsgDigest.update(message2)
#
# if not PubKey.verify_rsassa_pss(MsgDigest.digest(), signature2) == 1:
# raise j.exceptions.RuntimeError("Could not verify the message")
#
# return plainText
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .ohem_hinge_loss import OHEMHingeLoss
@LOSSES.register_module()
class SSNLoss(nn.Module):
    """Loss module for the Structured Segment Network (SSN): combines an
    activity cross-entropy term, a class-wise completeness term and a
    class-wise location regression term over sampled proposals."""
    @staticmethod
    def activity_loss(activity_score, labels, activity_indexer):
        """Activity Loss.
        It will calculate activity loss given activity_score and label.
        Args:
            activity_score (torch.Tensor): Predicted activity score.
            labels (torch.Tensor): Groundtruth class label.
            activity_indexer (torch.Tensor): Index slices of proposals.
        Returns:
            torch.Tensor: Returned cross entropy loss.
        """
        # Restrict scores/labels to the proposals selected for this head.
        pred = activity_score[activity_indexer, :]
        gt = labels[activity_indexer]
        return F.cross_entropy(pred, gt)
    @staticmethod
    def completeness_loss(completeness_score,
                          labels,
                          completeness_indexer,
                          positive_per_video,
                          incomplete_per_video,
                          ohem_ratio=0.17):
        """Completeness Loss.
        It will calculate completeness loss given completeness_score and label.
        Args:
            completeness_score (torch.Tensor): Predicted completeness score.
            labels (torch.Tensor): Groundtruth class label.
            completeness_indexer (torch.Tensor): Index slices of positive and
                incomplete proposals.
            positive_per_video (int): Number of positive proposals sampled
                per video.
            incomplete_per_video (int): Number of incomplete proposals sampled
                per video.
            ohem_ratio (float): Ratio of online hard example mining.
                Default: 0.17.
        Returns:
            torch.Tensor: Returned class-wise completeness loss.
        """
        pred = completeness_score[completeness_indexer, :]
        gt = labels[completeness_indexer]
        pred_dim = pred.size(1)
        # Regroup the flat proposal rows into per-video groups of
        # (positive_per_video + incomplete_per_video) proposals each.
        pred = pred.view(-1, positive_per_video + incomplete_per_video,
                         pred_dim)
        gt = gt.view(-1, positive_per_video + incomplete_per_video)
        # yapf:disable
        positive_pred = pred[:, :positive_per_video, :].contiguous().view(-1, pred_dim)  # noqa:E501
        incomplete_pred = pred[:, positive_per_video:, :].contiguous().view(-1, pred_dim)  # noqa:E501
        # yapf:enable
        # NOTE(review): the trailing OHEMHingeLoss.apply arguments presumably
        # encode the hinge direction (+1 positives / -1 incompletes), the
        # mining ratio and the per-video group size -- confirm against
        # OHEMHingeLoss's signature.
        positive_loss = OHEMHingeLoss.apply(
            positive_pred, gt[:, :positive_per_video].contiguous().view(-1), 1,
            1.0, positive_per_video)
        incomplete_loss = OHEMHingeLoss.apply(
            incomplete_pred, gt[:, positive_per_video:].contiguous().view(-1),
            -1, ohem_ratio, incomplete_per_video)
        num_positives = positive_pred.size(0)
        # Only the hardest `ohem_ratio` fraction of incomplete proposals
        # enters the normalizer, mirroring the mining ratio above.
        num_incompletes = int(incomplete_pred.size(0) * ohem_ratio)
        return ((positive_loss + incomplete_loss) /
                float(num_positives + num_incompletes))
    @staticmethod
    def classwise_regression_loss(bbox_pred, labels, bbox_targets,
                                  regression_indexer):
        """Classwise Regression Loss.
        It will calculate classwise_regression loss given
        class_reg_pred and targets.
        Args:
            bbox_pred (torch.Tensor): Predicted interval center and span
                of positive proposals.
            labels (torch.Tensor): Groundtruth class label.
            bbox_targets (torch.Tensor): Groundtruth center and span
                of positive proposals.
            regression_indexer (torch.Tensor): Index slices of
                positive proposals.
        Returns:
            torch.Tensor: Returned class-wise regression loss.
        """
        pred = bbox_pred[regression_indexer, :, :]
        gt = labels[regression_indexer]
        reg_target = bbox_targets[regression_indexer, :]
        # Shift to 0-based class indices (labels are presumably 1-based for
        # foreground classes -- confirm against the dataset convention).
        class_idx = gt.data - 1
        # Indexing with the full class_idx vector yields an (N, N, 2) tensor;
        # the diagonal of each (N, N) slice then picks, for sample i, the
        # prediction belonging to its own class class_idx[i].
        classwise_pred = pred[:, class_idx, :]
        classwise_reg_pred = torch.cat(
            (torch.diag(classwise_pred[:, :, 0]).view(
                -1, 1), torch.diag(classwise_pred[:, :, 1]).view(-1, 1)),
            dim=1)
        loss = F.smooth_l1_loss(
            classwise_reg_pred.view(-1), reg_target.view(-1)) * 2
        return loss
    def forward(self, activity_score, completeness_score, bbox_pred,
                proposal_type, labels, bbox_targets, train_cfg):
        """Calculate SSN loss.
        Args:
            activity_score (torch.Tensor): Predicted activity score.
            completeness_score (torch.Tensor): Predicted completeness score.
            bbox_pred (torch.Tensor): Predicted interval center and span
                of positive proposals.
            proposal_type (torch.Tensor): Type index slices of proposals.
            labels (torch.Tensor): Groundtruth class label.
            bbox_targets (torch.Tensor): Groundtruth center and span
                of positive proposals.
            train_cfg (dict): Config for training.
        Returns:
            dict([torch.Tensor, torch.Tensor, torch.Tensor]):
                (loss_activity, loss_completeness, loss_reg).
                Loss_activity is the activity loss, loss_completeness is
                the class-wise completeness loss,
                loss_reg is the class-wise regression loss.
        """
        self.sampler = train_cfg.ssn.sampler
        self.loss_weight = train_cfg.ssn.loss_weight
        losses = dict()
        proposal_type = proposal_type.view(-1)
        labels = labels.view(-1)
        # Per the method docstrings: types 0/1 feed the completeness head
        # (positive/incomplete), type 0 alone is regressed (positive), and
        # the activity head additionally sees type 2.
        # NOTE(review): "+" on the boolean masks acts as a logical OR here;
        # "|" is the clearer modern idiom.
        activity_indexer = ((proposal_type == 0) +
                            (proposal_type == 2)).nonzero().squeeze(1)
        completeness_indexer = ((proposal_type == 0) +
                                (proposal_type == 1)).nonzero().squeeze(1)
        total_ratio = (
            self.sampler.positive_ratio + self.sampler.background_ratio +
            self.sampler.incomplete_ratio)
        positive_per_video = int(self.sampler.num_per_video *
                                 (self.sampler.positive_ratio / total_ratio))
        background_per_video = int(
            self.sampler.num_per_video *
            (self.sampler.background_ratio / total_ratio))
        # Incomplete proposals take whatever remains of the per-video budget.
        incomplete_per_video = (
            self.sampler.num_per_video - positive_per_video -
            background_per_video)
        losses['loss_activity'] = self.activity_loss(activity_score, labels,
                                                     activity_indexer)
        # The sampled positive/incomplete balance overrides the 0.17 default
        # OHEM ratio.
        losses['loss_completeness'] = self.completeness_loss(
            completeness_score,
            labels,
            completeness_indexer,
            positive_per_video,
            incomplete_per_video,
            ohem_ratio=positive_per_video / incomplete_per_video)
        losses['loss_completeness'] *= self.loss_weight.comp_loss_weight
        if bbox_pred is not None:
            regression_indexer = (proposal_type == 0).nonzero().squeeze(1)
            bbox_targets = bbox_targets.view(-1, 2)
            losses['loss_reg'] = self.classwise_regression_loss(
                bbox_pred, labels, bbox_targets, regression_indexer)
            losses['loss_reg'] *= self.loss_weight.reg_loss_weight
        return losses
|
"""
Sage Intacct SDK Exceptions
"""
class SageIntacctSDKError(Exception):
    """The base exception class for SageIntacctSDK.

    Parameters:
        msg (str): Short description of the error.
        response: Error response from the API call.
    """

    def __init__(self, msg, response=None):
        super(SageIntacctSDKError, self).__init__(msg)
        self.message = msg  # human-readable description of the failure
        self.response = response  # raw API error payload, if any

    def __str__(self):
        # Bug fix: this was misspelled `_str__` (single leading underscore),
        # so Python never called it and str() fell back to Exception.__str__.
        return repr(self.message)
class SageIntacctSDKWarning(Warning):
    """The base Warning class for SageIntacctSDK.

    Parameters:
        msg (str): Short description of the alert.
        response: Error response from the API call.
    """

    def __init__(self, msg, response=None):
        super(SageIntacctSDKWarning, self).__init__(msg)
        self.message = msg  # human-readable description of the alert
        self.response = response  # raw API response payload, if any

    def __str__(self):
        # Bug fix: this was misspelled `_str__` (single leading underscore),
        # so Python never called it and str() fell back to Warning.__str__.
        return repr(self.message)
# Concrete error types, one per HTTP status reported by the API.
class ExpiredTokenError(SageIntacctSDKError):
    """Expired (old) access token, 498 error."""
class InvalidTokenError(SageIntacctSDKError):
    """Wrong/non-existing access token, 401 error."""
class NoPrivilegeError(SageIntacctSDKError):
    """The user has insufficient privilege, 403 error."""
class WrongParamsError(SageIntacctSDKError):
    """Some of the parameters (HTTP params or request body) are wrong, 400 error."""
class NotFoundItemError(SageIntacctSDKError):
    """Not found the item from URL, 404 error."""
class InternalServerError(SageIntacctSDKError):
    """The rest SageIntacctSDK errors, 500 error."""
# WARNING SECTION
class DataIntegrityWarning(SageIntacctSDKWarning):
    """Warns the user that a query did not return all records meeting specified criteria"""
|
"""
Selection Sort
Approach: Loop
Complexity: O(n^2)
"""
def selection_sort(input_arr):
    """Sort ``input_arr`` in place using selection sort.

    Each pass selects the minimum of the unsorted suffix and swaps it into
    position.  O(n^2) comparisons, O(1) extra space.  Progress is printed
    after every pass, as in the original trace output.

    Args:
        input_arr (list): Mutable sequence of mutually comparable items.
    """
    # The original printed a run of concatenated empty triple-quoted strings
    # as a "separator"; use an explicit, readable divider instead.
    separator = "-" * 25
    print(separator)
    print("input " + str(input_arr))
    print(separator)
    ln = len(input_arr)
    for i in range(ln):  # n passes
        # Index of the smallest remaining element in input_arr[i:].
        m = min(range(i, ln), key=input_arr.__getitem__)
        # Idiomatic tuple swap instead of a temp variable.
        input_arr[i], input_arr[m] = input_arr[m], input_arr[i]
        print("pass " + str(i + 1) + str(input_arr))
    print(separator)
    print("result " + str(input_arr))
    print(separator)
if __name__ == '__main__':
    # Demo run on a small unsorted sample (with duplicates).
    sample = [21, 4, 1, 3, 9, 20, 25, 6, 21, 14]
    selection_sort(sample)
|
# -*- test-case-name: twisted.trial.test.test_runner -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A miscellany of code used to run Trial tests.
Maintainer: Jonathan Lange
"""
# Names re-exported as the public API of this module.
__all__ = [
    "TestSuite",
    "DestructiveTestSuite",
    "ErrorHolder",
    "LoggedSuite",
    "TestHolder",
    "TestLoader",
    "TrialRunner",
    "TrialSuite",
    "filenameToModule",
    "isPackage",
    "isPackageDirectory",
    "isTestCase",
    "name",
    "samefile",
    "NOT_IN_TEST",
]
import doctest
import importlib
import inspect
import os
import sys
import time
import types
import warnings
from importlib.machinery import SourceFileLoader
from twisted.python import reflect, log, failure, modules, filepath
from twisted.internet import defer
from twisted.trial import util, unittest
from twisted.trial.itrial import ITestCase
from twisted.trial.reporter import _ExitWrapper, UncleanWarningsReporterWrapper
from twisted.trial._asyncrunner import _ForceGarbageCollectionDecorator, _iterateTests
from twisted.trial._synctest import _logObserver
# These are imported so that they remain in the public API for t.trial.runner
from twisted.trial.unittest import TestSuite
from zope.interface import implementer
pyunit = __import__("unittest")
def isPackage(module):
    """
    Return C{True} if C{module} looks like a package, C{False} otherwise.

    A package is a module object whose source file is an C{__init__} file.
    """
    if not isinstance(module, types.ModuleType):
        return False
    fileName = os.path.basename(module.__file__)
    stem, _ = os.path.splitext(fileName)
    return stem == "__init__"
def isPackageDirectory(dirname):
    """
    Is the directory at path 'dirname' a Python package directory?
    Returns the name of the __init__ file (it may have a weird extension)
    if dirname is a package directory. Otherwise, returns False
    """
    # Check every suffix the import machinery recognises (.py, .pyc,
    # extension-module suffixes, ...).
    for suffix in importlib.machinery.all_suffixes():
        candidate = "__init__" + suffix
        if os.path.exists(os.path.join(dirname, candidate)):
            return candidate
    return False
def samefile(filename1, filename2):
    """
    A hacky implementation of C{os.path.samefile}. Used by L{filenameToModule}
    when the platform doesn't provide C{os.path.samefile}. Do not use this.
    """
    # Compare normalized absolute paths only; unlike the real samefile this
    # does not resolve hard links or symlinks.
    pathA = os.path.abspath(filename1)
    pathB = os.path.abspath(filename2)
    return pathA == pathB
def filenameToModule(fn):
    """
    Given a filename, do whatever possible to return a module object matching
    that file.
    If the file in question is a module in Python path, properly import and
    return that module. Otherwise, load the source manually.
    @param fn: A filename.
    @return: A module object.
    @raise ValueError: If C{fn} does not exist.
    """
    oldFn = fn
    if (3, 8) <= sys.version_info < (3, 10) and not os.path.isabs(fn):
        # module.__spec__.__file__ is supposed to be absolute in py3.8+
        # importlib.util.spec_from_file_location does this automatically from
        # 3.10+
        # This was backported to 3.8 and 3.9, but then reverted in 3.8.11 and
        # 3.9.6
        # See https://twistedmatrix.com/trac/ticket/10230
        # and https://bugs.python.org/issue44070
        fn = os.path.join(os.getcwd(), fn)
    if not os.path.exists(fn):
        raise ValueError(f"{oldFn!r} doesn't exist")
    moduleName = reflect.filenameToModuleName(fn)
    try:
        # First try a normal import via the dotted name derived from the path.
        ret = reflect.namedAny(moduleName)
    except (ValueError, AttributeError):
        # Couldn't find module. The file 'fn' is not in PYTHONPATH
        return _importFromFile(fn, moduleName=moduleName)
    # >=3.7 has __file__ attribute as None, previously __file__ was not present
    if getattr(ret, "__file__", None) is None:
        # This isn't a Python module in a package, so import it from a file
        return _importFromFile(fn, moduleName=moduleName)
    # ensure that the loaded module matches the file
    retFile = os.path.splitext(ret.__file__)[0] + ".py"
    # not all platforms (e.g. win32) have os.path.samefile
    same = getattr(os.path, "samefile", samefile)
    if os.path.isfile(fn) and not same(fn, retFile):
        # The dotted name resolved to a different file than requested: evict
        # the cached module and re-import from the requested path instead.
        del sys.modules[ret.__name__]
        ret = _importFromFile(fn, moduleName=moduleName)
    return ret
def _importFromFile(fn, *, moduleName):
    """
    Import the Python source file C{fn}, registering it as C{moduleName}.
    @param fn: Path to a source file or package directory.
    @param moduleName: Name to register in C{sys.modules}; when falsy, the
        file's stem is used instead.
    @return: The freshly imported (or already-cached) module object.
    @raise SyntaxError: If no import spec could be built for C{fn}.
    """
    fn = _resolveDirectory(fn)
    if not moduleName:
        # Fall back to the file's stem as the module name.
        moduleName = os.path.splitext(os.path.split(fn)[-1])[0]
    if moduleName in sys.modules:
        # Already imported once under this name; reuse rather than
        # executing the file a second time.
        return sys.modules[moduleName]
    spec = importlib.util.spec_from_file_location(moduleName, fn)
    if not spec:
        raise SyntaxError(fn)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    sys.modules[moduleName] = module
    return module
def _resolveDirectory(fn):
    """
    Map a package directory to its __init__ file; return plain file paths
    unchanged.  Raises ValueError for a directory that is not a package.
    """
    if not os.path.isdir(fn):
        # Not a directory at all; nothing to resolve.
        return fn
    initFile = isPackageDirectory(fn)
    if not initFile:
        raise ValueError(f"{fn!r} is not a package directory")
    return os.path.join(fn, initFile)
def _getMethodNameInClass(method):
    """
    Find the attribute name on the method's class which refers to the method.
    For some methods, notably decorators which have not had __name__ set correctly:
    getattr(method.im_class, method.__name__) != method
    """
    # NOTE(review): `im_class` is a Python 2 bound-method attribute; plain
    # Python 3 methods do not provide it, so this helper only works for
    # objects that still expose `im_class` -- confirm the intended callers.
    if getattr(method.im_class, method.__name__, object()) != method:
        # __name__ does not resolve back to the method: scan the class
        # namespace for any attribute that is bound to it.
        for alias in dir(method.im_class):
            if getattr(method.im_class, alias, object()) == method:
                return alias
    return method.__name__
class DestructiveTestSuite(TestSuite):
    """
    A test suite which pops each test off before running it, so the suite
    keeps no reference to tests that have already run; this minimizes
    memory usage over a long run.
    """

    def run(self, result):
        """
        Equivalent to L{TestSuite.run}, except that C{self._tests} is left
        empty by the time the run finishes (or stops early).
        """
        # Pop-and-run until the suite is drained or the result asks to stop.
        while self._tests and not result.shouldStop:
            nextTest = self._tests.pop(0)
            nextTest(result)
        return result
# When an error occurs outside of any test, the user will see this string
# in place of a test's name.
# (Used below by LoggedSuite.run as the TestHolder description.)
NOT_IN_TEST = "<not in test>"
class LoggedSuite(TestSuite):
    """
    Any errors logged in this suite will be reported to the L{TestResult}
    object.
    """
    def run(self, result):
        """
        Run the suite, storing all errors in C{result}. If an error is logged
        while no tests are running, then it will be added as an error to
        C{result}.
        @param result: A L{TestResult} object.
        """
        # Install the shared log observer for the duration of the run.
        observer = _logObserver
        observer._add()
        super().run(result)
        observer._remove()
        # Report any errors logged outside of a running test against the
        # NOT_IN_TEST placeholder, then clear them from the observer.
        for error in observer.getErrors():
            result.addError(TestHolder(NOT_IN_TEST), error)
        observer.flushErrors()
class TrialSuite(TestSuite):
    """
    Suite to wrap around every single test in a C{trial} run. Used internally
    by Trial to set up things necessary for Trial tests to work, regardless of
    what context they are run in.
    """
    def __init__(self, tests=(), forceGarbageCollection=False):
        if forceGarbageCollection:
            # Decorate every test so a garbage collection pass is forced
            # around it.
            newTests = []
            for test in tests:
                test = unittest.decorate(test, _ForceGarbageCollectionDecorator)
                newTests.append(test)
            tests = newTests
        # Wrap everything in a LoggedSuite so logged errors are reported.
        suite = LoggedSuite(tests)
        super().__init__([suite])
    def _bail(self):
        # Fire the reactor's shutdown event and wait for it to complete.
        from twisted.internet import reactor
        d = defer.Deferred()
        reactor.addSystemEventTrigger("after", "shutdown", lambda: d.callback(None))
        reactor.fireSystemEvent("shutdown")  # radix's suggestion
        # As long as TestCase does crap stuff with the reactor we need to
        # manually shutdown the reactor here, and that requires util.wait
        # :(
        # so that the shutdown event completes
        unittest.TestCase("mktemp")._wait(d)
    def run(self, result):
        try:
            TestSuite.run(self, result)
        finally:
            # Always shut the reactor down, even if the run raised.
            self._bail()
def name(thing):
    """
    Return a human-meaningful name for C{thing}.
    @param thing: an object from modules (instance of PythonModule,
        PythonAttribute), a TestCase subclass, or an instance of a TestCase.
    """
    if isTestCase(thing):
        # A TestCase subclass: use its fully-qualified class name.
        return reflect.qual(thing)
    # Anything else: prefer an id() method, falling back to a `name`
    # attribute so that objects from twisted.python.modules need not
    # implement id().
    try:
        return thing.id()
    except AttributeError:
        return thing.name
def isTestCase(obj):
    """
    @return: C{True} if C{obj} is a class that contains test cases, C{False}
        otherwise. Used to find all the tests in a module.
    """
    if not isinstance(obj, type):
        # Not a class at all, so it cannot be a TestCase subclass.
        return False
    try:
        return issubclass(obj, pyunit.TestCase)
    except TypeError:
        # Exotic metaclasses can still make issubclass() blow up.
        return False
@implementer(ITestCase)
class TestHolder:
    """
    Placeholder for a L{TestCase} inside a reporter. As far as a L{TestResult}
    is concerned, this looks exactly like a unit test.
    """
    # No assertions ever run here, so no failure exception type is needed.
    failureException = None
    def __init__(self, description):
        """
        @param description: A string to be displayed by the L{TestResult}.
        """
        self.description = description
    def __call__(self, result):
        # Allow the holder to be invoked like a plain test callable.
        return self.run(result)
    def id(self):
        return self.description
    def countTestCases(self):
        # Holders never contribute real test cases to the count.
        return 0
    def run(self, result):
        """
        This test is just a placeholder. Run the test successfully.
        @param result: The C{TestResult} to store the results in.
        @type result: L{twisted.trial.itrial.IReporter}.
        """
        result.startTest(self)
        result.addSuccess(self)
        result.stopTest(self)
    def shortDescription(self):
        return self.description
class ErrorHolder(TestHolder):
    """
    Used to insert arbitrary errors into a test suite run. Provides enough
    methods to look like a C{TestCase}, however, when it is run, it simply adds
    an error to the C{TestResult}. The most common use-case is for when a
    module fails to import.
    """
    def __init__(self, description, error):
        """
        @param description: A string used by C{TestResult}s to identify this
        error. Generally, this is the name of a module that failed to import.
        @param error: The error to be added to the result. Can be an `exc_info`
        tuple or a L{twisted.python.failure.Failure}.
        """
        super().__init__(description)
        # Normalize either input form to a plain exc_info tuple.
        self.error = util.excInfoOrFailureToExcInfo(error)
    def __repr__(self) -> str:
        return "<ErrorHolder description={!r} error={!r}>".format(
            self.description,
            self.error[1],
        )
    def run(self, result):
        """
        Run the test, reporting the error.
        @param result: The C{TestResult} to store the results in.
        @type result: L{twisted.trial.itrial.IReporter}.
        """
        result.startTest(self)
        result.addError(self, self.error)
        result.stopTest(self)
class TestLoader:
    """
    I find tests inside function, modules, files -- whatever -- then return
    them wrapped inside a Test (either a L{TestSuite} or a L{TestCase}).
    @ivar methodPrefix: A string prefix. C{TestLoader} will assume that all the
        methods in a class that begin with C{methodPrefix} are test cases.
    @ivar modulePrefix: A string prefix. Every module in a package that begins
        with C{modulePrefix} is considered a module full of tests.
    @ivar forceGarbageCollection: A flag applied to each C{TestCase} loaded.
        See L{unittest.TestCase} for more information.
    @ivar sorter: A key function used to sort C{TestCase}s, test classes,
        modules and packages.
    @ivar suiteFactory: A callable which is passed a list of tests (which
        themselves may be suites of tests). Must return a test suite.
    """
    methodPrefix = "test"
    modulePrefix = "test_"
    def __init__(self):
        self.suiteFactory = TestSuite
        self.sorter = name
        self._importErrors = []
    def sort(self, xs):
        """
        Sort the given things using L{sorter}.
        @param xs: A list of test cases, class or modules.
        """
        return sorted(xs, key=self.sorter)
    def findTestClasses(self, module):
        """Given a module, return all Trial test classes"""
        classes = []
        for name, val in inspect.getmembers(module):
            if isTestCase(val):
                classes.append(val)
        return self.sort(classes)
    def findByName(self, _name, recurse=False):
        """
        Find and load tests, given C{name}.
        @param _name: The qualified name of the thing to load.
        @param recurse: A boolean. If True, inspect modules within packages
            within the given package (and so on), otherwise, only inspect
            modules in the package itself.
        @return: If C{name} is a filename, return the module. If C{name} is a
            fully-qualified Python name, return the object it refers to.
        """
        if os.sep in _name:
            # It's a file, try and get the module name for this file.
            name = reflect.filenameToModuleName(_name)
            try:
                # Try and import it, if it's on the path.
                # CAVEAT: If you have two twisteds, and you try and import the
                # one NOT on your path, it'll load the one on your path. But
                # that's silly, nobody should do that, and existing Trial does
                # that anyway.
                __import__(name)
            except ImportError:
                # If we can't import it, look for one NOT on the path.
                return self.loadFile(_name, recurse=recurse)
        else:
            name = _name
        obj = parent = remaining = None
        for searchName, remainingName in _qualNameWalker(name):
            # Walk down the qualified name, trying to import a module. For
            # example, `twisted.test.test_paths.FilePathTests` would try
            # the full qualified name, then just up to test_paths, and then
            # just up to test, and so forth.
            # This gets us the highest level thing which is a module.
            try:
                obj = reflect.namedModule(searchName)
                # If we reach here, we have successfully found a module.
                # obj will be the module, and remaining will be the remaining
                # part of the qualified name.
                remaining = remainingName
                break
            except ImportError:
                # Check to see where the ImportError happened. If it happened
                # in this file, ignore it.
                tb = sys.exc_info()[2]
                # Walk down to the deepest frame, where it actually happened.
                while tb.tb_next is not None:
                    tb = tb.tb_next
                # Get the filename that the ImportError originated in.
                filenameWhereHappened = tb.tb_frame.f_code.co_filename
                # If it originated in the reflect file, then it's because it
                # doesn't exist. If it originates elsewhere, it's because an
                # ImportError happened in a module that does exist.
                if filenameWhereHappened != reflect.__file__:
                    raise
        if remaining == "":
            raise reflect.ModuleNotFound(f"The module {name} does not exist.")
        if obj is None:
            # If it's none here, we didn't get to import anything.
            # Try something drastic.
            obj = reflect.namedAny(name)
            # NOTE(review): `".".split(obj.__name__)` splits the literal
            # string "." and is almost always `["."]` (length 1);
            # `obj.__name__.split(".")` looks like what was intended --
            # confirm before changing, since callers may depend on the
            # current slicing.
            remaining = name.split(".")[len(".".split(obj.__name__)) + 1 :]
        try:
            for part in remaining:
                # Walk down the remaining modules. Hold on to the parent for
                # methods, as on Python 3, you can no longer get the parent
                # class from just holding onto the method.
                parent, obj = obj, getattr(obj, part)
        except AttributeError:
            raise AttributeError(f"{name} does not exist.")
        return self.loadAnything(
            obj, parent=parent, qualName=remaining, recurse=recurse
        )
    def loadModule(self, module):
        """
        Return a test suite with all the tests from a module.
        Included are TestCase subclasses and doctests listed in the module's
        __doctests__ module. If that's not good for you, put a function named
        either C{testSuite} or C{test_suite} in your module that returns a
        TestSuite, and I'll use the results of that instead.
        If C{testSuite} and C{test_suite} are both present, then I'll use
        C{testSuite}.
        """
        ## XXX - should I add an optional parameter to disable the check for
        ## a custom suite.
        ## OR, should I add another method
        if not isinstance(module, types.ModuleType):
            raise TypeError(f"{module!r} is not a module")
        if hasattr(module, "testSuite"):
            return module.testSuite()
        elif hasattr(module, "test_suite"):
            return module.test_suite()
        suite = self.suiteFactory()
        for testClass in self.findTestClasses(module):
            suite.addTest(self.loadClass(testClass))
        if not hasattr(module, "__doctests__"):
            return suite
        docSuite = self.suiteFactory()
        for docTest in module.__doctests__:
            docSuite.addTest(self.loadDoctests(docTest))
        return self.suiteFactory([suite, docSuite])
    # Alias matching the stdlib unittest.TestLoader API.
    loadTestsFromModule = loadModule
    def loadClass(self, klass):
        """
        Given a class which contains test cases, return a list of L{TestCase}s.
        @param klass: The class to load tests from.
        """
        if not isinstance(klass, type):
            raise TypeError(f"{klass!r} is not a class")
        if not isTestCase(klass):
            raise ValueError(f"{klass!r} is not a test case")
        names = self.getTestCaseNames(klass)
        tests = self.sort(
            [self._makeCase(klass, self.methodPrefix + name) for name in names]
        )
        return self.suiteFactory(tests)
    # Alias matching the stdlib unittest.TestLoader API.
    loadTestsFromTestCase = loadClass
    def getTestCaseNames(self, klass):
        """
        Given a class that contains C{TestCase}s, return a list of names of
        methods that probably contain tests.
        """
        return reflect.prefixedMethodNames(klass, self.methodPrefix)
    def loadMethod(self, method):
        # Unbound-method loading was a Python 2 feature.
        raise NotImplementedError("Can't happen on Py3")
    def _makeCase(self, klass, methodName):
        # Instantiate the test case for a single named test method.
        return klass(methodName)
    def loadPackage(self, package, recurse=False):
        """
        Load tests from a module object representing a package, and return a
        TestSuite containing those tests.
        Tests are only loaded from modules whose name begins with 'test_'
        (or whatever C{modulePrefix} is set to).
        @param package: a types.ModuleType object (or reasonable facsimile
        obtained by importing) which may contain tests.
        @param recurse: A boolean.  If True, inspect modules within packages
        within the given package (and so on), otherwise, only inspect modules
        in the package itself.
        @raise TypeError: If C{package} is not a package.
        @return: a TestSuite created with my suiteFactory, containing all the
        tests.
        """
        if not isPackage(package):
            raise TypeError(f"{package!r} is not a package")
        pkgobj = modules.getModule(package.__name__)
        if recurse:
            discovery = pkgobj.walkModules()
        else:
            discovery = pkgobj.iterModules()
        discovered = []
        for disco in discovery:
            # Only modules whose (unqualified) name carries the test prefix.
            if disco.name.split(".")[-1].startswith(self.modulePrefix):
                discovered.append(disco)
        suite = self.suiteFactory()
        for modinfo in self.sort(discovered):
            try:
                module = modinfo.load()
            except BaseException:
                # Surface the import failure as an error in the run rather
                # than aborting discovery.
                thingToAdd = ErrorHolder(modinfo.name, failure.Failure())
            else:
                thingToAdd = self.loadModule(module)
            suite.addTest(thingToAdd)
        return suite
    def loadDoctests(self, module):
        """
        Return a suite of tests for all the doctests defined in C{module}.
        @param module: A module object or a module name.
        """
        if isinstance(module, str):
            try:
                module = reflect.namedAny(module)
            except BaseException:
                return ErrorHolder(module, failure.Failure())
        if not inspect.ismodule(module):
            warnings.warn("trial only supports doctesting modules")
            return
        extraArgs = {}
        # Work around Python issue2604: DocTestCase.tearDown clobbers globs
        def saveGlobals(test):
            """
            Save C{test.globs} and replace it with a copy so that if
            necessary, the original will be available for the next test
            run.
            """
            test._savedGlobals = getattr(test, "_savedGlobals", test.globs)
            test.globs = test._savedGlobals.copy()
        extraArgs["setUp"] = saveGlobals
        return doctest.DocTestSuite(module, **extraArgs)
    def loadAnything(self, obj, recurse=False, parent=None, qualName=None):
        """
        Load absolutely anything (as long as that anything is a module,
        package, class, or method (with associated parent class and qualname).
        @param obj: The object to load.
        @param recurse: A boolean. If True, inspect modules within packages
            within the given package (and so on), otherwise, only inspect
            modules in the package itself.
        @param parent: If C{obj} is a method, this is the parent class of the
            method. C{qualName} is also required.
        @param qualName: If C{obj} is a method, this a list containing is the
            qualified name of the method. C{parent} is also required.
        @return: A C{TestCase} or C{TestSuite}.
        """
        if isinstance(obj, types.ModuleType):
            # It looks like a module
            if isPackage(obj):
                # It's a package, so recurse down it.
                return self.loadPackage(obj, recurse=recurse)
            # Otherwise get all the tests in the module.
            return self.loadTestsFromModule(obj)
        elif isinstance(obj, type) and issubclass(obj, pyunit.TestCase):
            # We've found a raw test case, get the tests from it.
            return self.loadTestsFromTestCase(obj)
        elif (
            isinstance(obj, types.FunctionType)
            and isinstance(parent, type)
            and issubclass(parent, pyunit.TestCase)
        ):
            # We've found a method, and its parent is a TestCase. Instantiate
            # it with the name of the method we want.
            name = qualName[-1]
            inst = parent(name)
            # Sanity check to make sure that the method we have got from the
            # test case is the same one as was passed in. This doesn't actually
            # use the function we passed in, because reasons.
            assert getattr(inst, inst._testMethodName).__func__ == obj
            return inst
        elif isinstance(obj, TestSuite):
            # We've found a test suite.
            return obj
        else:
            raise TypeError(f"don't know how to make test from: {obj}")
    def loadByName(self, name, recurse=False):
        """
        Load some tests by name.
        @param name: The qualified name for the test to load.
        @param recurse: A boolean. If True, inspect modules within packages
            within the given package (and so on), otherwise, only inspect
            modules in the package itself.
        """
        try:
            return self.suiteFactory([self.findByName(name, recurse=recurse)])
        except BaseException:
            # Wrap load-time failures so the run reports them as errors.
            return self.suiteFactory([ErrorHolder(name, failure.Failure())])
    # Alias matching the stdlib unittest.TestLoader API.
    loadTestsFromName = loadByName
    def loadByNames(self, names, recurse=False):
        """
        Load some tests by a list of names.
        @param names: A L{list} of qualified names.
        @param recurse: A boolean. If True, inspect modules within packages
            within the given package (and so on), otherwise, only inspect
            modules in the package itself.
        """
        things = []
        errors = []
        for name in names:
            try:
                things.append(self.loadByName(name, recurse=recurse))
            except BaseException:
                errors.append(ErrorHolder(name, failure.Failure()))
        # Defer error holders to the end so successfully loaded names run
        # first.
        things.extend(errors)
        return self.suiteFactory(self._uniqueTests(things))
    def _uniqueTests(self, things):
        """
        Gather unique suite objects from loaded things. This will guarantee
        uniqueness of inherited methods on TestCases which would otherwise hash
        to same value and collapse to one test unexpectedly if using simpler
        means: e.g. set().
        """
        seen = set()
        for testthing in things:
            testthings = testthing._tests
            for thing in testthings:
                # This is horrible.
                if str(thing) not in seen:
                    yield thing
                    seen.add(str(thing))
    def loadFile(self, fileName, recurse=False):
        """
        Load a file, and then the tests in that file.
        @param fileName: The file name to load.
        @param recurse: A boolean. If True, inspect modules within packages
            within the given package (and so on), otherwise, only inspect
            modules in the package itself.
        """
        name = reflect.filenameToModuleName(fileName)
        try:
            # NOTE(review): Loader.load_module() is deprecated in favour of
            # exec_module(); behaviour is kept as-is here.
            module = SourceFileLoader(name, fileName).load_module()
            return self.loadAnything(module, recurse=recurse)
        except OSError:
            raise ValueError(f"{fileName} is not a Python file.")
def _qualNameWalker(qualName):
"""
Given a Python qualified name, this function yields a 2-tuple of the most
specific qualified name first, followed by the next-most-specific qualified
name, and so on, paired with the remainder of the qualified name.
@param qualName: A Python qualified name.
@type qualName: L{str}
"""
# Yield what we were just given
yield (qualName, [])
# If they want more, split the qualified name up
qualParts = qualName.split(".")
for index in range(1, len(qualParts)):
# This code here will produce, from the example walker.texas.ranger:
# (walker.texas, ["ranger"])
# (walker, ["texas", "ranger"])
yield (".".join(qualParts[:-index]), qualParts[-index:])
class TrialRunner:
"""
A specialised runner that the trial front end uses.
"""
DEBUG = "debug"
DRY_RUN = "dry-run"
def _setUpTestdir(self):
self._tearDownLogFile()
currentDir = os.getcwd()
base = filepath.FilePath(self.workingDirectory)
testdir, self._testDirLock = util._unusedTestDirectory(base)
os.chdir(testdir.path)
return currentDir
def _tearDownTestdir(self, oldDir):
os.chdir(oldDir)
self._testDirLock.unlock()
_log = log
def _makeResult(self):
reporter = self.reporterFactory(
self.stream, self.tbformat, self.rterrors, self._log
)
if self._exitFirst:
reporter = _ExitWrapper(reporter)
if self.uncleanWarnings:
reporter = UncleanWarningsReporterWrapper(reporter)
return reporter
def __init__(
self,
reporterFactory,
mode=None,
logfile="test.log",
stream=sys.stdout,
profile=False,
tracebackFormat="default",
realTimeErrors=False,
uncleanWarnings=False,
workingDirectory=None,
forceGarbageCollection=False,
debugger=None,
exitFirst=False,
):
self.reporterFactory = reporterFactory
self.logfile = logfile
self.mode = mode
self.stream = stream
self.tbformat = tracebackFormat
self.rterrors = realTimeErrors
self.uncleanWarnings = uncleanWarnings
self._result = None
self.workingDirectory = workingDirectory or "_trial_temp"
self._logFileObserver = None
self._logFileObject = None
self._forceGarbageCollection = forceGarbageCollection
self.debugger = debugger
self._exitFirst = exitFirst
if profile:
self.run = util.profiled(self.run, "profile.data")
def _tearDownLogFile(self):
if self._logFileObserver is not None:
log.removeObserver(self._logFileObserver.emit)
self._logFileObserver = None
if self._logFileObject is not None:
self._logFileObject.close()
self._logFileObject = None
def _setUpLogFile(self):
self._tearDownLogFile()
if self.logfile == "-":
logFile = sys.stdout
else:
logFile = open(self.logfile, "a")
self._logFileObject = logFile
self._logFileObserver = log.FileLogObserver(logFile)
log.startLoggingWithObserver(self._logFileObserver.emit, 0)
def run(self, test):
"""
Run the test or suite and return a result object.
"""
test = unittest.decorate(test, ITestCase)
return self._runWithoutDecoration(test, self._forceGarbageCollection)
def _runWithoutDecoration(self, test, forceGarbageCollection=False):
    """
    Private helper that runs the given test but doesn't decorate it.

    Handles the three run modes (dry-run, debug, normal), sets up and
    tears down the working directory and log file around the run, and
    finishes the reporter via C{done()} (falling back to the legacy
    printErrors/printSummary protocol with a deprecation warning).
    """
    result = self._makeResult()
    # decorate the suite with reactor cleanup and log starting
    # This should move out of the runner and be presumed to be
    # present
    suite = TrialSuite([test], forceGarbageCollection)
    startTime = time.time()
    if self.mode == self.DRY_RUN:
        # Dry run: report every test as an immediate success without
        # actually executing it.
        for single in _iterateTests(suite):
            result.startTest(single)
            result.addSuccess(single)
            result.stopTest(single)
    else:
        if self.mode == self.DEBUG:
            # Run the whole suite under the configured debugger.
            run = lambda: self.debugger.runcall(suite.run, result)
        else:
            run = lambda: suite.run(result)

        oldDir = self._setUpTestdir()
        try:
            self._setUpLogFile()
            run()
        finally:
            # Always restore logging and the working directory, even on error.
            self._tearDownLogFile()
            self._tearDownTestdir(oldDir)

    endTime = time.time()
    done = getattr(result, "done", None)
    if done is None:
        # Legacy reporter without done(): emit the summary manually.
        warnings.warn(
            "%s should implement done() but doesn't. Falling back to "
            "printErrors() and friends." % reflect.qual(result.__class__),
            category=DeprecationWarning,
            stacklevel=3,
        )
        result.printErrors()
        result.writeln(result.separator)
        result.writeln(
            "Ran %d tests in %.3fs", result.testsRun, endTime - startTime
        )
        result.write("\n")
        result.printSummary()
    else:
        result.done()
    return result
def runUntilFailure(self, test):
    """
    Repeatedly run C{test} until it fails.

    The first pass goes through C{run} (full decoration); subsequent
    passes use C{_runWithoutDecoration}.  Returns the result of the last
    pass, which is either empty or unsuccessful.
    """
    passNumber = 0
    while True:
        passNumber += 1
        self.stream.write("Test Pass %d\n" % (passNumber,))
        runner = self.run if passNumber == 1 else self._runWithoutDecoration
        result = runner(test)
        # Stop on an empty run or on the first failing pass.
        if result.testsRun == 0 or not result.wasSuccessful():
            return result
|
import json
import re
from django.conf import settings
from share.util.graph import MutableGraph
from share.util.names import get_related_agent_name
from share.util import IDObfuscator
from .base import MetadataFormatter
def format_type(type_name):
    """Convert a PascalCase type name to lower case with spaces between words."""
    # \B only matches inside a word, so the leading capital gains no space.
    spaced = re.sub(r'\B([A-Z])', r' \1', type_name)
    return spaced.lower()
def format_node_type(node):
    """Human-readable, lower-case name of the node's schema type."""
    return format_type(node.schema_type.name)
def format_node_type_lineage(node):
    """Formatted type names for the node's full type lineage."""
    return list(map(format_type, node.schema_type.type_lineage))
# values that, for the purpose of indexing in elasticsearch, are equivalent to absence
EMPTY_VALUES = (None, '')


def strip_empty_values(thing):
    """Recursively drop values equivalent to absence from nested containers.

    Dicts, lists and tuples are rebuilt without EMPTY_VALUES members
    (children are stripped recursively); any other value passes through
    unchanged.
    """
    if isinstance(thing, dict):
        return {
            key: strip_empty_values(value)
            for key, value in thing.items()
            if value not in EMPTY_VALUES
        }
    if isinstance(thing, (list, tuple)):
        kept = [strip_empty_values(value) for value in thing
                if value not in EMPTY_VALUES]
        return kept if isinstance(thing, list) else tuple(kept)
    return thing
class ShareV2ElasticFormatter(MetadataFormatter):
    """Format normalized SHARE data as JSON documents for the sharev2 elastic index."""

    def format_as_deleted(self, suid):
        """Serialize a tombstone document for *suid*."""
        # a document with is_deleted:True will be deleted from the elastic index
        # TODO handle deletion better -- maybe put a `deleted` field on suids and actually delete the FormattedMetadataRecord
        return json.dumps({
            'id': IDObfuscator.encode(suid),
            'is_deleted': True,
        })

    def format(self, normalized_datum):
        """Serialize *normalized_datum* as an elastic document.

        Returns None when the datum's central node is not a creative work,
        a tombstone document when the work is marked deleted, and otherwise
        a JSON string with empty values stripped.
        """
        mgraph = MutableGraph.from_jsonld(normalized_datum.data)
        central_work = mgraph.get_central_node(guess=True)
        # Only creative works are indexed.
        if not central_work or central_work.concrete_type != 'abstractcreativework':
            return None
        suid = normalized_datum.raw.suid
        if central_work['is_deleted']:
            return self.format_as_deleted(suid)
        source_name = suid.source_config.source.long_title
        return json.dumps(strip_empty_values({
            'id': IDObfuscator.encode(suid),
            'sources': [source_name],
            'type': format_node_type(central_work),
            'types': format_node_type_lineage(central_work),
            # attributes:
            'date_created': suid.get_date_first_seen().isoformat(),
            'date_modified': normalized_datum.created_at.isoformat(),
            'date_published': central_work['date_published'],
            'date_updated': central_work['date_updated'],
            'description': central_work['description'] or '',
            'justification': central_work['justification'],
            'language': central_work['language'],
            'registration_type': central_work['registration_type'],
            'retracted': bool(central_work['withdrawn']),
            'title': central_work['title'],
            'withdrawn': central_work['withdrawn'],
            # Best available date, preferring publication.
            'date': (
                central_work['date_published']
                or central_work['date_updated']
                or normalized_datum.created_at.isoformat()
            ),
            # agent relations:
            'affiliations': self._get_related_agent_names(central_work, ['agentworkrelation']),
            'contributors': self._get_related_agent_names(central_work, [
                'contributor',
                'creator',
                'principalinvestigator',
                'principalinvestigatorcontact',
            ]),
            'funders': self._get_related_agent_names(central_work, ['funder']),
            'publishers': self._get_related_agent_names(central_work, ['publisher']),
            'hosts': self._get_related_agent_names(central_work, ['host']),
            # other relations:
            'identifiers': [
                identifier_node['uri']
                for identifier_node in central_work['identifiers']
            ],
            'tags': [
                tag_node['name']
                for tag_node in central_work['tags']
            ],
            'subjects': self._get_subjects(central_work, source_name),
            'subject_synonyms': self._get_subject_synonyms(central_work),
            # a bunch of nested data because reasons -- used mostly for rendering search results
            'lists': {
                'affiliations': self._build_related_agent_list(central_work, ['agentworkrelation']),
                'contributors': self._build_related_agent_list(central_work, [
                    'contributor',
                    'creator',
                    'principalinvestigator',
                    'principalinvestigatorcontact',
                ]),
                'funders': self._build_related_agent_list(central_work, ['funder']),
                'publishers': self._build_related_agent_list(central_work, ['publisher']),
                'hosts': self._build_related_agent_list(central_work, ['host']),
                'lineage': self._build_work_lineage(central_work),
            },
        }))

    def _get_related_agent_names(self, work_node, relation_types):
        """Display names of agents related to the work by the given relation types."""
        return [
            get_related_agent_name(relation_node)
            for relation_node in work_node['agent_relations']
            if relation_node.type in relation_types
        ]

    def _get_subjects(self, work_node, source_name):
        """Serialized, non-deleted subjects attached to the work."""
        return [
            self._serialize_subject(through_subject['subject'], source_name)
            for through_subject in work_node['subject_relations']
            if (
                not through_subject['is_deleted']
                and not through_subject['subject']['is_deleted']
            )
        ]

    def _get_subject_synonyms(self, work_node):
        """Serialized central-taxonomy synonyms of the work's subjects."""
        return [
            self._serialize_subject(through_subject['subject']['central_synonym'])
            for through_subject in work_node['subject_relations']
            if (
                not through_subject['is_deleted']
                and not through_subject['subject']['is_deleted']
                and through_subject['subject']['central_synonym']
            )
        ]

    def _serialize_subject(self, subject_node, source_name=None):
        """Render a subject as 'taxonomy|ancestor|...|subject'."""
        subject_lineage = [subject_node['name']]
        # Walk up the parent chain, prepending each ancestor's name.
        next_subject = subject_node['parent']
        while next_subject:
            subject_lineage.insert(0, next_subject['name'])
            next_subject = next_subject['parent']
        # A subject with a central synonym belongs to the source's taxonomy.
        if source_name and subject_node['central_synonym']:
            taxonomy_name = source_name
        else:
            taxonomy_name = settings.SUBJECTS_CENTRAL_TAXONOMY
        subject_lineage.insert(0, taxonomy_name)
        return '|'.join(subject_lineage)

    def _build_list_agent(self, relation_node):
        """Nested dict describing one agent relation, for the 'lists' section."""
        agent_node = relation_node['agent']
        return {
            'type': format_node_type(agent_node),
            'types': format_node_type_lineage(agent_node),
            'name': agent_node['name'] or get_related_agent_name(relation_node),
            'given_name': agent_node['given_name'],
            'family_name': agent_node['family_name'],
            'additional_name': agent_node['additional_name'],
            'suffix': agent_node['suffix'],
            'identifiers': [
                identifier_node['uri']
                for identifier_node in agent_node['identifiers']
            ],
            'relation': format_node_type(relation_node),
            'order_cited': relation_node['order_cited'],
            'cited_as': relation_node['cited_as'],
        }

    def _build_related_agent_list(self, work_node, relation_types):
        """Nested agent dicts for the given relation types."""
        return [
            self._build_list_agent(relation_node)
            for relation_node in work_node['agent_relations']
            if relation_node.type in relation_types
        ]

    def _build_work_lineage(self, work_node):
        """Tuple of ancestor-work summaries, outermost ancestor first."""
        try:
            parent_work = next(
                relation_node['related']
                for relation_node in work_node['outgoing_creative_work_relations']
                if relation_node.type == 'ispartof'
            )
        except StopIteration:
            # No 'ispartof' relation: this work is the root of its lineage.
            return ()

        parent_lineage = self._build_work_lineage(parent_work)
        parent_data = {
            'type': format_node_type(parent_work),
            'types': format_node_type_lineage(parent_work),
            'title': parent_work['title'],
            'identifiers': [
                identifier_node['uri']
                for identifier_node in parent_work['identifiers']
            ],
        }
        return (
            *parent_lineage,
            parent_data,
        )
|
#!/usr/bin/python
import sys
import pprint
import re
import traceback
import copy
from collections import OrderedDict
import numpy as np
import collections
def process(input):
    """Parse a table cell: try int, then float, else return it unchanged.

    The bare ``except:`` clauses are narrowed to the exceptions int()/float()
    can actually raise, so signals like KeyboardInterrupt propagate.
    """
    try:
        return int(input)
    except (TypeError, ValueError):
        try:
            return float(input)
        except (TypeError, ValueError):
            return input
def loadResultAsDict(path):
    """Parse a Spark SQL result dump into an OrderedDict.

    Each query maps to ``{'title': [column names], 'data': [columns]}``
    where ``data`` is column-major. Fixes over the previous version:
    raw-string regex ``q\\d+[ab]?`` (the old ``[a,b]`` also matched a
    comma), no junk entry under the empty-string key, the final query's
    table is flushed at end of file, and only ``Exception`` is caught.
    """
    result = OrderedDict()
    query = ""
    table_status = 0
    table_title = []
    table = []
    try:
        with open(path, 'r') as f:
            for line in f.readlines():
                if line[0:13] == "[SPARK_QUERY]":
                    # Flush the previous query's table before starting anew.
                    if query:
                        result[query] = {'title': copy.deepcopy(table_title),
                                         'data': copy.deepcopy(table)}
                    query = re.findall(r'q\d+[ab]?', line)[0]
                    table_status = -1
                    table_title = []
                    table = []
                if "--q" in line:
                    if query:
                        result[query] = {'title': copy.deepcopy(table_title),
                                         'data': copy.deepcopy(table)}
                    query = re.findall(r'q\d+[ab]?', line)[0]
                    table_status = 0
                    table_title = []
                    table = []
                if query == "":
                    continue
                if line[0] == '+':
                    # '+---+' separators toggle between title and data rows.
                    table_status += 1
                    if table_status == 2:
                        table_status = 0
                if line[0] == '|' and table_status == 0:
                    if len(table_title) != 0:
                        continue
                    table_title = [i.strip() for i in line.split('|')[1:-1]]
                    for i in range(len(table_title)):
                        table.append([])
                if line[0] == '|' and table_status == 1:
                    idx = 0
                    tmp = [process(i.strip()) for i in line.split('|')[1:-1]]
                    if tmp[0] == 'Result':
                        continue
                    for data in tmp:
                        if idx < len(table):
                            table[idx].append(data)
                        idx += 1
            # Flush the last query's table (previously lost at EOF).
            if query:
                result[query] = {'title': table_title, 'data': table}
    except Exception:
        track = traceback.format_exc()
        print(track)
    return result
def isEqual(x, y):
    """Loosely compare two cell values.

    Equal values match; strings must match exactly; numbers match within an
    absolute tolerance of 0.1; two NaNs (or NaN vs anything numeric) match.
    Non-numeric, non-equal values return False instead of crashing -- the old
    version called np.isnan() on arbitrary objects inside a bare except and
    raised TypeError for inputs like (None, 1).
    """
    if x == y:
        return True
    if isinstance(x, str) or isinstance(y, str):
        return False
    try:
        if abs(x - y) < 0.1:
            return True
    except TypeError:
        # Not subtractable (e.g. None): fall through to the NaN check.
        pass
    try:
        if np.isnan(x) or np.isnan(y):
            return True
    except TypeError:
        return False
    return False
def isRangeEqual(x, y):
    """True when the two columns hold the same values, ignoring order."""
    return sortWithTypeCheck(x) == sortWithTypeCheck(y)
def sortAsString(data):
    """Sort *data*, requiring every element to be a string.

    Raises TypeError on a non-string element. The old version passed the
    format arguments as extra TypeError args (the message was never
    interpolated) and used %d for a non-numeric value.
    """
    for value in data:
        if not isinstance(value, str):
            raise TypeError(
                "sortAsString while value is %r, first row is %s" % (value, data[0]))
    return sorted(data)
def sortAsNumber(data):
    """Sort values numerically; the string 'NULL' is treated as NaN."""
    normalized = [
        float('nan') if isinstance(value, str) and value == 'NULL' else value
        for value in data
    ]
    return sorted(normalized, key=float)
def sortWithTypeCheck(data):
    """Sort a column as strings or numbers, judged by its first element."""
    sorter = sortAsString if isinstance(data[0], str) else sortAsNumber
    return sorter(data)
def compare(t1, t2):
    """Compare two parsed result dicts; print differences and return equality.

    Titles are compared first; data columns are then compared row by row.
    For a fixed list of queries whose row order is unstable, columns are
    re-compared after sorting (order-insensitive).
    """
    isSame = True
    for query_name in t1.keys():
        # Queries missing from t2 are silently skipped.
        if not query_name in t2:
            continue
        # NOTE(review): this loop re-runs the same whole-title comparison once
        # per column; also, when the title list is empty, `skip` is never
        # assigned and the data loop below would raise NameError -- confirm
        # inputs always have a non-empty title.
        for col_id in range(len(t1[query_name]['title'])):
            if not t1[query_name]['title'] == t2[query_name]['title']:
                isSame = False
                skip = True
                print("%s title are different" % query_name)
                continue
            skip = False
        for col_id in range(len(t1[query_name]['data'])):
            if skip:
                break
            checkWithNoSeq = False
            for row_id in range(len(t1[query_name]['data'][col_id])):
                if not isEqual(t1[query_name]['data'][col_id][row_id], t2[query_name]['data'][col_id][row_id]):
                    # start/end bracket a +/-10-row window around the mismatch
                    # (used only by the commented-out debug prints below).
                    if (row_id - 10) > 0:
                        start = row_id - 10
                    else:
                        start = 0
                    if (row_id + 10) < len(t1[query_name]['data'][col_id]):
                        end = row_id + 10
                    else:
                        end = len(t1[query_name]['data'][col_id])
                    # These queries have no stable row order; fall back to an
                    # order-insensitive comparison instead of failing.
                    if query_name in ['q18', 'q24a', 'q24b', 'q39a', 'q49', 'q73', 'q77']:
                        checkWithNoSeq = True
                    else:
                        isSame = False
                        skip = True
                        print("%s data are different in [ColId is %d, ColName: %s, rowId is %d, data is [%s], [%s]]" % (query_name, col_id, t1[query_name]['title'][col_id], row_id, t1[query_name]['data'][col_id][row_id], t2[query_name]['data'][col_id][row_id]))
                        #print(t1[query_name]['data'][col_id][start:end])
                        #print(t2[query_name]['data'][col_id][start:end])
                    break
            if checkWithNoSeq:
                # Order-insensitive pass: compare the sorted columns.
                left = sortWithTypeCheck(t1[query_name]['data'][col_id])
                right = sortWithTypeCheck(t2[query_name]['data'][col_id])
                for row_id in range(len(left)):
                    if not isEqual(left[row_id], right[row_id]):
                        isSame = False
                        skip = True
                        print("%s data are different in [ColId is %d, ColName: %s, RowData: [%s] vs [%s]]" % (query_name, col_id, t1[query_name]['title'][col_id], left[row_id], right[row_id]))
                        break
    return isSame
def main():
    """Compare the two result-dump files named on the command line."""
    if len(sys.argv) < 3:
        print("expected input should as below:\npython3 process.py ${file_1} ${file_2}")
        sys.exit()
    result_1 = loadResultAsDict(sys.argv[1])
    result_2 = loadResultAsDict(sys.argv[2])
    res = compare(result_1, result_2)
    if res == True:
        print("Two Tables are same")
    else:
        print("Two Tables are different")
    # for query_name, data in result.items():
    #     print("query_name is %s" % (query_name))
    #     print("table is %s" % (','.join(data['title'])))
    #     first_row = [i[0] for i in data['data']]
    #     print first_row

if __name__ == "__main__":
    main()
|
from gym.spaces import Box
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.fcnet import FullyConnectedNetwork
from ray.rllib.models.torch.misc import SlimFC
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFC
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf, try_import_torch
tf = try_import_tf()
torch, nn = try_import_torch()
class CentralizedCriticModel(TFModelV2):
    """Multi-agent model that implements a centralized value function."""

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        super(CentralizedCriticModel, self).__init__(
            obs_space, action_space, num_outputs, model_config, name)
        # Base of the model
        self.model = FullyConnectedNetwork(obs_space, action_space,
                                           num_outputs, model_config, name)
        self.register_variables(self.model.variables())

        # Central VF maps (obs, opp_obs, opp_act) -> vf_pred
        # NOTE(review): input sizes are hard-coded (6-dim obs for both agents,
        # 2 opponent actions) -- confirm against the environment's spaces.
        obs = tf.keras.layers.Input(shape=(6, ), name="obs")
        opp_obs = tf.keras.layers.Input(shape=(6, ), name="opp_obs")
        opp_act = tf.keras.layers.Input(shape=(2, ), name="opp_act")
        concat_obs = tf.keras.layers.Concatenate(axis=1)(
            [obs, opp_obs, opp_act])
        central_vf_dense = tf.keras.layers.Dense(
            16, activation=tf.nn.tanh, name="c_vf_dense")(concat_obs)
        central_vf_out = tf.keras.layers.Dense(
            1, activation=None, name="c_vf_out")(central_vf_dense)
        self.central_vf = tf.keras.Model(
            inputs=[obs, opp_obs, opp_act], outputs=central_vf_out)
        self.register_variables(self.central_vf.variables)

    @override(ModelV2)
    def forward(self, input_dict, state, seq_lens):
        # Actions come from the decentralized base model only.
        return self.model.forward(input_dict, state, seq_lens)

    def central_value_function(self, obs, opponent_obs, opponent_actions):
        """Value prediction conditioned on both agents' obs and the opponent's action."""
        return tf.reshape(
            self.central_vf(
                [obs, opponent_obs,
                 tf.one_hot(opponent_actions, 2)]), [-1])

    @override(ModelV2)
    def value_function(self):
        return self.model.value_function()  # not used
class YetAnotherCentralizedCriticModel(TFModelV2):
    """Multi-agent model that implements a centralized value function.

    It assumes the observation is a dict with 'own_obs' and 'opponent_obs', the
    former of which can be used for computing actions (i.e., decentralized
    execution), and the latter for optimization (i.e., centralized learning).

    This model has two parts:
    - An action model that looks at just 'own_obs' to compute actions
    - A value model that also looks at the 'opponent_obs' / 'opponent_action'
      to compute the value (it does this by using the 'obs_flat' tensor).
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        super(YetAnotherCentralizedCriticModel, self).__init__(
            obs_space, action_space, num_outputs, model_config, name)

        self.action_model = FullyConnectedNetwork(
            Box(low=0, high=1, shape=(6, )),  # one-hot encoded Discrete(6)
            action_space,
            num_outputs,
            model_config,
            name + "_action")
        self.register_variables(self.action_model.variables())

        # Value model consumes the flattened full observation (own + opponent).
        self.value_model = FullyConnectedNetwork(obs_space, action_space, 1,
                                                 model_config, name + "_vf")
        self.register_variables(self.value_model.variables())

    def forward(self, input_dict, state, seq_lens):
        # Compute the value on the full flat obs, cache it for value_function().
        self._value_out, _ = self.value_model({
            "obs": input_dict["obs_flat"]
        }, state, seq_lens)
        # Actions are computed from the agent's own observation only.
        return self.action_model({
            "obs": input_dict["obs"]["own_obs"]
        }, state, seq_lens)

    def value_function(self):
        return tf.reshape(self._value_out, [-1])
class TorchCentralizedCriticModel(TorchModelV2, nn.Module):
    """Multi-agent model that implements a centralized VF."""

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)

        # Base policy network (decentralized execution).
        self.model = TorchFC(obs_space, action_space, num_outputs,
                             model_config, name)

        # Central VF maps (obs, opp_obs, opp_act) -> vf_pred
        obs_dim, opp_obs_dim, opp_act_dim = 6, 6, 2
        self.central_vf = nn.Sequential(
            SlimFC(obs_dim + opp_obs_dim + opp_act_dim, 16,
                   activation_fn=nn.Tanh),
            SlimFC(16, 1),
        )

    @override(ModelV2)
    def forward(self, input_dict, state, seq_lens):
        logits, _ = self.model(input_dict, state, seq_lens)
        return logits, []

    def central_value_function(self, obs, opponent_obs, opponent_actions):
        """Value prediction conditioned on both agents' obs and opponent action."""
        one_hot_acts = torch.nn.functional.one_hot(opponent_actions, 2).float()
        joint_input = torch.cat([obs, opponent_obs, one_hot_acts], 1)
        return torch.reshape(self.central_vf(joint_input), [-1])

    @override(ModelV2)
    def value_function(self):
        return self.model.value_function()  # not used
class YetAnotherTorchCentralizedCriticModel(TorchModelV2, nn.Module):
    """Multi-agent model that implements a centralized value function.

    It assumes the observation is a dict with 'own_obs' and 'opponent_obs', the
    former of which can be used for computing actions (i.e., decentralized
    execution), and the latter for optimization (i.e., centralized learning).

    This model has two parts:
    - An action model that looks at just 'own_obs' to compute actions
    - A value model that also looks at the 'opponent_obs' / 'opponent_action'
      to compute the value (it does this by using the 'obs_flat' tensor).
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)

        self.action_model = TorchFC(
            Box(low=0, high=1, shape=(6, )),  # one-hot encoded Discrete(6)
            action_space,
            num_outputs,
            model_config,
            name + "_action")

        # Value model consumes the flattened full observation (own + opponent).
        self.value_model = TorchFC(obs_space, action_space, 1, model_config,
                                   name + "_vf")
        # Cached inputs from the last forward(); consumed by value_function().
        self._model_in = None

    def forward(self, input_dict, state, seq_lens):
        # Store model-input for possible `value_function()` call.
        self._model_in = [input_dict["obs_flat"], state, seq_lens]
        return self.action_model({
            "obs": input_dict["obs"]["own_obs"]
        }, state, seq_lens)

    def value_function(self):
        # Re-run the value model on the inputs cached by forward().
        value_out, _ = self.value_model({
            "obs": self._model_in[0]
        }, self._model_in[1], self._model_in[2])
        return torch.reshape(value_out, [-1])
|
#!/usr/bin/env python3.9
# Build script: freeze main.py into a standalone "Escape Room" executable.
import cx_Freeze

executables = [cx_Freeze.Executable("main.py")]

cx_Freeze.setup(
    name="Escape Room",
    # Bundle pygame and the local `games` package; ship the assets directory
    # next to the executable; optimize=2 strips docstrings from the bytecode.
    options={"build_exe": {"packages": ["pygame", "games"],
                           "include_files": ["./assets"],
                           "optimize": 2}
             },
    executables=executables
)
|
import numpy as np
from flask import Flask
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import io
import base64
from flask import render_template
import sys
from flask import make_response
import math
app = Flask(__name__)
plt.switch_backend('agg') # solve main loop
class Sort():
    """Simple sorting algorithms."""

    def insertion_sort(self, list_):
        """Sort *list_* in place with insertion sort and return it.

        Removed the dead ``j = 1`` initializer and the trailing ``j += 1``
        that pointlessly mutated the for-loop variable.
        """
        for j in range(1, len(list_)):
            key = list_[j]
            i = j - 1
            # Shift elements greater than key one slot to the right.
            while i >= 0 and key < list_[i]:
                list_[i + 1] = list_[i]
                i -= 1
            list_[i + 1] = key
        return list_
def simple_smo(dataset, labels, C, max_iter):
    '''Simplified SMO algorithm; alpha pairs are chosen without heuristics.

    :param dataset: all feature vectors
    :param labels: all data labels
    :param C: soft-margin constant, 0 <= alpha_i <= C
    :param max_iter: maximum number of consecutive no-change outer iterations
    :return: (alphas, b) -- the dual coefficients and intercept

    NOTE(review): relies on `select_j` and `clip` being defined elsewhere in
    this module -- confirm they are in scope.
    '''
    dataset = np.array(dataset)
    m, n = dataset.shape
    labels = np.array(labels)
    # Initialize parameters.
    alphas = np.zeros(m)
    b = 0
    it = 0

    def f(x):
        "SVM classifier function y = w^Tx + b (linear kernel)"
        # Kernel function vector.
        x = np.matrix(x).T
        data = np.matrix(dataset)
        ks = data * x
        # Predictive value.
        wx = np.matrix(alphas * labels) * ks
        fx = wx + b
        return fx[0, 0]

    while it < max_iter:
        pair_changed = 0
        for i in range(m):
            a_i, x_i, y_i = alphas[i], dataset[i], labels[i]
            fx_i = f(x_i)
            E_i = fx_i - y_i
            # Pick the second alpha of the pair (non-heuristic).
            j = select_j(i, m)
            a_j, x_j, y_j = alphas[j], dataset[j], labels[j]
            fx_j = f(x_j)
            E_j = fx_j - y_j
            K_ii, K_jj, K_ij = np.dot(x_i, x_i), np.dot(x_j, x_j), np.dot(x_i, x_j)
            eta = K_ii + K_jj - 2 * K_ij
            if eta <= 0:
                print('WARNING  eta <= 0')
                continue
            # Compute the updated alpha pair.
            a_i_old, a_j_old = a_i, a_j
            a_j_new = a_j_old + y_j * (E_i - E_j) / eta
            # Clip alpha_j into its feasible box [L, H].
            if y_i != y_j:
                L = max(0, a_j_old - a_i_old)
                H = min(C, C + a_j_old - a_i_old)
            else:
                L = max(0, a_i_old + a_j_old - C)
                H = min(C, a_j_old + a_i_old)
            a_j_new = clip(a_j_new, L, H)
            a_i_new = a_i_old + y_i * y_j * (a_j_old - a_j_new)
            if abs(a_j_new - a_j_old) < 0.00001:
                # print('WARNING   alpha_j not moving enough')
                continue
            alphas[i], alphas[j] = a_i_new, a_j_new
            # Update threshold b.
            b_i = -E_i - y_i * K_ii * (a_i_new - a_i_old) - y_j * K_ij * (a_j_new - a_j_old) + b
            b_j = -E_j - y_i * K_ij * (a_i_new - a_i_old) - y_j * K_jj * (a_j_new - a_j_old) + b
            if 0 < a_i_new < C:
                b = b_i
            elif 0 < a_j_new < C:
                b = b_j
            else:
                b = (b_i + b_j) / 2
            pair_changed += 1
            print('INFO   iteration:{}  i:{}  pair_changed:{}'.format(it, i, pair_changed))
        # Only count iterations in which nothing changed.
        if pair_changed == 0:
            it += 1
        else:
            it = 0
        print('iteration number: {}'.format(it))
    return alphas, b
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
def qd_function(start, stop, step):
    """Evaluate f(x, y) = -(x^2) - 3*y^2 + 2*x*y + 6 on a square meshgrid.

    Returns the meshgrid coordinate arrays X, Y and the surface Z.
    """
    axis = np.arange(start=start, stop=stop, step=step)
    X, Y = np.meshgrid(axis, axis)
    Z1 = -(X ** 2)
    Z2 = -(Y ** 2)
    Z = 1.0 * (Z1 + 3 * Z2 + 2 * X * Y) + 6.0
    return X, Y, Z
def coordinate_ascent(iters, init_coor):
    """Run coordinate ascent on f(x1, x2), alternating the two coordinates.

    Each iteration first sets x1 = x2 (holding x2 fixed), then x2 = x1 / 3
    (holding x1 fixed).  Returns the two coordinate trajectories as lists.
    """
    xs = [init_coor]
    ys = [init_coor]
    for _ in range(iters):
        # Fix x2, update x1 = x2.
        xs.append(ys[-1])
        ys.append(ys[-1])
        # Fix x1, update x2 = x1 / 3.
        ys.append(xs[-1] / 3)
        xs.append(xs[-1])
    return xs, ys
# Pre-allocate 100 slots before indexed assignment.  The previous
# `a = [] * 100` is still an empty list ([] repeated is empty), so
# `a[12] = 1` raised IndexError.
a = [0] * 100
a[12] = 1
print(len(a))

plt.figure()
X, Y, Z = qd_function(-3, 3, 0.025)
CS = plt.contour(X, Y, Z)
a, b = coordinate_ascent(10000, 1.5)
# The trajectory converges toward (0, 0); evaluate the objective there.
# NOTE(review): Z1/Z2 enter with a positive sign here while qd_function
# negates the squares -- confirm the intended objective.
Z1 = a.pop() ** 2
print(Z1)
Z2 = b.pop() ** 2
print(Z2)
Z = 1.0 * (Z1 + 3 * Z2 + 2 * 0 * 0) + 6.0
print('max=', Z)
plt.plot(a, b)
plt.title('Coordinate Ascent')
plt.xlabel('x')
plt.ylabel('y')
# NOTE(review): with the 'agg' backend show() is a no-op, so savefig still
# captures the plot; with an interactive backend show() may clear it first.
plt.show()
plt.savefig('./static/n.png')
#gradL = [sp.diff(L,c) for c in [x,y]] +[g]
#print(gradL)
# 解的情况分两种
# 1 内部解,interior solution ,u=0,g(x)<0,约束是inactivate的
# 2 边界解,boundary solution ,u>=0,约束是activate,
# 先求出b和u的关系,再根据b的情况,分别解出关于w1,w2关于b的解,
'''
如果是内部解的话,u1,u2,u3都等于0,g(x)都是小于0的,约束是inactive的,
这时候不等式约束无效,
solution: w1=0, w2=0
边界解,u=0
'''
'''
使用拉格朗日对偶求解,
对原问题的拉格朗日函数求解,如果不违反任何约束,对拉格朗日函数求极大就会回到原问题f,如果违反约束则拉格朗日函数的值
会变成无穷大。所以对原问题最小化就是对拉格朗日函数先求极大,然后再求极小,具体参考cs229 svm章节,
先对拉格朗日函数a,b求极大,然后对w求极小(MIN MAX),对偶问题就是先对 w求极小,再对a,b求极大(MAX MIN)
'''
'''
对拉格朗日函数关于 w 求偏导,w是矢量,每个w对应一个等式w=sum(ayx)
'''
|
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Group(models.Model):
    """A community/topic that posts can be assigned to."""
    title = models.CharField(max_length=200)
    # Unique URL slug identifying the group.
    slug = models.SlugField(unique=True)
    description = models.TextField()

    def __str__(self):
        return self.title
class Post(models.Model):
    """A user-authored post, optionally in a group and with an image."""
    text = models.TextField()
    pub_date = models.DateTimeField("date published", auto_now_add=True)
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name="posts")
    # SET_NULL keeps the post when its group is deleted.
    group = models.ForeignKey(Group, on_delete=models.SET_NULL, blank=True, null=True)
    # Optional image attached to the post.
    image = models.ImageField(upload_to='posts/', blank=True, null=True)

    class Meta:
        # Newest posts first.
        ordering = ['-pub_date']

    def __str__(self):
        # Represent the post by its text.
        return self.text
class Comment(models.Model):
    """A comment left by a user on a post."""
    post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name="comments")
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name="author_comments")
    text = models.TextField()
    created = models.DateTimeField(auto_now_add=True)
class Follow(models.Model):
    """A subscription: `user` follows `author`."""
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="follower")
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name="following")

    class Meta:
        # A user may follow a given author at most once.
        unique_together = (
            'user',
            'author',
        )
|
#!flask/bin/python
from flask import Blueprint, request, abort, redirect
import patreon
from my_site import config
from my_site.app.views.html_renderer import render_page
from my_site.app.views.LogIn.log_in import LogIn
from my_site.app.views.LandingPage.landing_page import LandingPage
from my_site.models.managers import patreon_user_mgr
from my_site.models.tables.user import User
auth_blueprint = Blueprint('auth_blueprint', __name__)
@auth_blueprint.route('/', methods=['GET'])
def landing_page():
    """Render the login page with the Patreon OAuth authorize link."""
    # https://www.patreon.com/platform/documentation/oauth -- Step 1
    # The landing page links to patreon.com/oauth2/authorize so the user can authorize this app to access their Patreon data.
    return render_page(
        inner=LogIn
    )
@auth_blueprint.route('/oauth/redirect', methods=['GET'])
def oauth_redirect():
    """Handle the OAuth callback: exchange the code for tokens and sign the user in."""
    # https://www.patreon.com/platform/documentation/oauth -- Step 2
    # After authorizing this app to access their Patreon data, the user is redirected back here.

    # https://www.patreon.com/platform/documentation/oauth -- Step 3
    # Use the code provided as a query parameter to get the user's access token and refresh token
    # NOTE(review): the redirect URI is hard-coded to localhost:5000 -- confirm
    # it matches the registered production redirect URI.
    oauth_client = patreon.OAuth(config.patreon_client_id, config.patreon_client_secret)
    tokens = oauth_client.get_tokens(request.args.get('code'), 'http://localhost:5000/oauth/redirect')

    # https://www.patreon.com/platform/documentation/oauth -- Step 4
    # Save off the user's tokens and fetch their Patreon data.
    user = patreon_user_mgr.update_user_for_tokens(
        patreon_refresh_token=tokens['refresh_token'],
        patreon_access_token=tokens['access_token']
    )

    # https://www.patreon.com/platform/documentation/oauth -- Step 5
    # If the user signed in successfully, take them to their profile page.
    if user:
        return redirect('/users/{user_id}'.format(user_id=user.user_id))
    else:
        abort(403)
@auth_blueprint.route('/users/<int:user_id>', methods=['GET'])
def show_user(user_id):
    """Render the profile page for *user_id*; 404 when the user is unknown."""
    user = User.get(user_id)
    if not user:
        abort(404)
    return render_page(
        inner=LandingPage,
        user=user
    )
|
from dataclasses import dataclass, field


@dataclass
class Schedule:
    """A scheduled job identified by user, repository and webhook URL."""
    user: str
    repository: str
    url: str
    schedule_id: str = field(default_factory=str)

    def __post_init__(self):
        # Reject empty identifying fields right after construction.
        for attr in ('user', 'repository', 'url'):
            if len(getattr(self, attr)) < 1:
                raise ValueError(f'Invalid {attr}')
|
import pathutils
import cmdlineutils
import termutils
import graphutils
|
import re
from pyrogram import Client, filters
from pyrogram.types import Message, InlineKeyboardMarkup
from config import prefix
from localization import use_chat_lang
from utils import require_admin, split_quotes, button_parser
from dbh import dbc, db
def add_filter(chat_id, trigger, raw_data, file_id, filter_type):
    """Insert a new filter row for *chat_id* keyed by *trigger*."""
    dbc.execute(
        "INSERT INTO filters(chat_id, filter_name, raw_data, file_id, filter_type) VALUES(?, ?, ?, ?, ?)",
        (chat_id, trigger, raw_data, file_id, filter_type),
    )
    db.commit()
def update_filter(chat_id, trigger, raw_data, file_id, filter_type):
    """Overwrite the content of an existing filter for *chat_id*/*trigger*."""
    dbc.execute(
        "UPDATE filters SET raw_data = ?, file_id = ?, filter_type = ? WHERE chat_id = ? AND filter_name = ?",
        (raw_data, file_id, filter_type, chat_id, trigger),
    )
    db.commit()
def rm_filter(chat_id, trigger):
    """Delete the filter named *trigger* from *chat_id*."""
    dbc.execute(
        "DELETE from filters WHERE chat_id = ? AND filter_name = ?",
        (chat_id, trigger)
    )
    db.commit()
def get_all_filters(chat_id):
    """Return all filter rows for *chat_id* as tuples
    (chat_id, filter_name, raw_data, file_id, filter_type)."""
    dbc.execute(
        "SELECT * FROM filters WHERE chat_id = ?",
        (chat_id,)
    )
    # NOTE(review): commit after a SELECT appears unnecessary -- confirm.
    db.commit()
    return dbc.fetchall()
def check_for_filters(chat_id, trigger):
    """Return True when *trigger* is already saved as a filter for *chat_id*."""
    return any(row[1] == trigger for row in get_all_filters(chat_id))
@Client.on_message(filters.command(["filter", "savefilter"], prefix))
@require_admin(allow_in_private=True)
async def save_filter(c: Client, m: Message):
    """Save (or overwrite) a chat filter from the command text or replied-to media."""
    args = m.text.markdown.split(maxsplit=1)
    # NOTE(review): args[1] raises IndexError when the command has no
    # arguments at all -- confirm upstream guarantees at least one.
    split_text = split_quotes(args[1])
    trigger = split_text[0].lower()

    # Without a replied-to message there must be inline content after the trigger.
    if m.reply_to_message is None and len(split_text) < 2:
        await m.reply_text(
            "There is no content in the filter",
            quote=True
        )
        return

    # Determine the filter content: replied-to media wins, else plain text.
    if m.reply_to_message and m.reply_to_message.photo:
        file_id = m.reply_to_message.photo.file_id
        raw_data = m.reply_to_message.caption.markdown if m.reply_to_message.caption is not None else None
        filter_type = "photo"
    elif m.reply_to_message and m.reply_to_message.document:
        file_id = m.reply_to_message.document.file_id
        raw_data = m.reply_to_message.caption.markdown if m.reply_to_message.caption is not None else None
        filter_type = "document"
    elif m.reply_to_message and m.reply_to_message.video:
        file_id = m.reply_to_message.video.file_id
        raw_data = m.reply_to_message.caption.markdown if m.reply_to_message.caption is not None else None
        filter_type = "video"
    elif m.reply_to_message and m.reply_to_message.audio:
        file_id = m.reply_to_message.audio.file_id
        raw_data = m.reply_to_message.caption.markdown if m.reply_to_message.caption is not None else None
        filter_type = "audio"
    elif m.reply_to_message and m.reply_to_message.animation:
        file_id = m.reply_to_message.animation.file_id
        raw_data = m.reply_to_message.caption.markdown if m.reply_to_message.caption is not None else None
        filter_type = "animation"
    elif m.reply_to_message and m.reply_to_message.sticker:
        file_id = m.reply_to_message.sticker.file_id
        # NOTE(review): stickers have no caption, so the command text is used;
        # split_text[1] raises IndexError when the command has no extra text.
        raw_data = split_text[1]
        filter_type = "sticker"
    else:
        file_id = None
        raw_data = split_text[1]
        filter_type = "text"

    chat_id = m.chat.id
    # Update in place if the trigger already exists, otherwise insert.
    check_filter = check_for_filters(chat_id, trigger)
    if check_filter:
        update_filter(chat_id, trigger, raw_data, file_id, filter_type)
    else:
        add_filter(chat_id, trigger, raw_data, file_id, filter_type)

    await m.reply_text(
        f"Added filter **{trigger}**",
        quote=True,
        parse_mode="md"
    )
@Client.on_message(filters.command(["delfilter", "rmfilter", "stop"], prefix))
@require_admin(allow_in_private=True)
async def delete_filter(c: Client, m: Message):
    """Remove the filter named in the command arguments from this chat."""
    args = m.text.markdown.split(maxsplit=1)
    # NOTE(review): args[1] raises IndexError when no trigger is given --
    # confirm upstream guarantees an argument.
    trigger = args[1].lower()
    chat_id = m.chat.id
    check_filter = check_for_filters(chat_id, trigger)
    if check_filter:
        rm_filter(chat_id, trigger)
        await m.reply_text(
            f"Removed **{trigger}** from filters",
            quote=True,
            parse_mode="md"
        )
    else:
        await m.reply_text(
            f"There is no filter with name **{trigger}**",
            quote=True,
            parse_mode="md"
        )
@Client.on_message(filters.command("filters", prefix))
async def get_all_filter(c: Client, m: Message):
    """List every filter trigger saved for the current chat."""
    chat_id = m.chat.id
    all_filters = get_all_filters(chat_id)

    # Check emptiness directly instead of comparing the built reply against a
    # sentinel header string, as the previous version did.
    if not all_filters:
        await m.reply_text(
            "Currently no filters in the chat",
            quote=True
        )
        return

    reply_text = "Filters in this chat\n\n"
    for filter_s in all_filters:
        keyword = filter_s[1]
        reply_text += f" - {keyword} \n"
    await m.reply_text(
        reply_text,
        quote=True
    )
@Client.on_message((filters.group | filters.private) & filters.text & filters.incoming, group=1)
async def serve_filter(c: Client, m: Message):
    """Reply with saved filter content whenever a message matches a trigger.

    Every saved trigger is matched as a whole word (case-insensitive); each
    match produces one reply, exactly as before. The repeated per-type elif
    branches are collapsed into a dispatch table, and the previous
    ``caption=data if not None else None`` (a tautology -- ``not None`` is
    always True) is simplified to ``caption=data``.
    """
    chat_id = m.chat.id
    text = m.text

    # Media types whose replies take a file_id plus caption/markup kwargs.
    media_reply_methods = {
        "photo": m.reply_photo,
        "document": m.reply_document,
        "video": m.reply_video,
        "audio": m.reply_audio,
        "animation": m.reply_animation,
    }

    all_filters = get_all_filters(chat_id)
    for filter_s in all_filters:
        keyword = filter_s[1]
        # Whole-word match: the trigger must be delimited by non-word chars.
        pattern = r"( |^|[^\w])" + re.escape(keyword) + r"( |$|[^\w])"
        if not re.search(pattern, text, flags=re.IGNORECASE):
            continue

        data, button = button_parser(filter_s[2])
        markup = InlineKeyboardMarkup(button) if len(button) != 0 else None
        file_id = filter_s[3]
        filter_type = filter_s[4]

        if filter_type == "text":
            await m.reply_text(
                data,
                quote=True,
                parse_mode="md",
                reply_markup=markup
            )
        elif filter_type == "sticker":
            # Stickers carry no caption.
            await m.reply_sticker(
                file_id,
                quote=True,
                reply_markup=markup
            )
        else:
            reply_method = media_reply_methods.get(filter_type)
            if reply_method is not None:
                await reply_method(
                    file_id,
                    quote=True,
                    caption=data,
                    parse_mode="md",
                    reply_markup=markup
                )
|
# -*- coding: utf-8 -*-
"""
We return the values based in the base currency.
For example, for 1 USD the return is a number like 0.000634 for Gold (XAU).
To get the gold rate in USD: 1/0.000634= 1577.28 USD
"""
import os
import math
import boto3
import redis
import requests
import pandas as pd
from datetime import date, datetime, timedelta
from helpers import get_today_date, get_days_ago_date, days_diff
from pandas_helpers import redis_to_dataframe
# Chunk size (in days) for paging timeseries requests -- presumably the
# metals-api per-request day limit; TODO confirm against the API docs.
MAX_DAYS = 5
# Base URL shared by all metals-api endpoints used below.
API_URL = 'https://www.metals-api.com/api'
def get_access_key():
    """Fetch the metals-api access key from the AWS SSM Parameter Store."""
    ssm_client = boto3.client('ssm')
    parameter = ssm_client.get_parameter(Name='/goldfinger/api/key',
                                         WithDecryption=True)
    return parameter['Parameter']['Value']
def get_latest(currency, *symbols):
    """
    "latest" endpoint - request the most recent exchange rate data.
    https://www.metals-api.com/api/latest
        ? access_key = YOUR_ACCESS_KEY
        & base = USD
        & symbols = XAU,XAG

    Relies on the module-level ``access_key`` set in ``__main__``.
    """
    symbol_list = ','.join(symbols)
    url = (f'{API_URL}/latest?access_key={access_key}'
           f'&base={currency}&symbols={symbol_list}')
    response = requests.get(url)
    return response.json()
def get_timeseries(currency, start_date, end_date, symbol):
    """
    "timeseries" endpoint - request exchange rates for a period of time.
    https://www.metals-api.com/api/timeseries
        ? access_key = YOUR_ACCESS_KEY
        & start_date = YYYY-MM-DD
        & end_date = YYYY-MM-DD
        & base = USD
        & symbols = XAU,XAG  <-- can actually only be one symbol

    Relies on the module-level ``access_key`` set in ``__main__``.
    """
    url = (f'{API_URL}/timeseries?access_key={access_key}'
           f'&start_date={start_date}&end_date={end_date}'
           f'&base={currency}&symbols={symbol}')
    response = requests.get(url)
    return response.json()
def get_historical():
    """
    "historical" endpoint - request historical rates for a specific day.
    https://www.metals-api.com/api/YYYY-MM-DD
        ? access_key = YOUR_ACCESS_KEY
        & base = USD
        & symbols = XAU,XAG
    """
    # TODO: not yet implemented.
    return None
def get_convert():
    """
    "convert" endpoint - convert any amount from one currency to another
    using real-time exchange rates.
    https://www.metals-api.com/api/convert
        ? access_key = YOUR_ACCESS_KEY
        & from = USD
        & to = EUR
        & amount = 25
    Append an additional "date" parameter to use historical rates for the
    conversion:
        & date = YYYY-MM-DD
    """
    # TODO: not yet implemented.
    return None
def get_fluctuation():
    """
    "fluctuation" endpoint - request a currency's change parameters (margin
    and percentage), optionally between two specified dates.
    https://www.metals-api.com/api/fluctuation
        ? access_key = YOUR_ACCESS_KEY
        & base = USD
        & symbols = XAU,XAG
        & type = weekly
    Append date parameters to use historical rates:
        & start_date = YYYY-MM-DD
        & end_date = YYYY-MM-DD
    """
    # TODO: not yet implemented.
    return None
def timeseries_to_redis(currency, start_date_str, end_date_str, symbol):
    """Fetch daily rates for `symbol` in `currency` over the date range,
    paging in MAX_DAYS chunks and skipping chunks already cached in redis.

    Returns ``{symbol: {date_str: rate}}``. Relies on module globals
    ``r`` (redis client) and ``access_key`` (via get_timeseries).
    """
    today = datetime.today()
    # NOTE(review): end_date is parsed but never used -- the loop is bounded
    # by `loops` (computed from days_diff) and capped at `today` instead.
    end_date = datetime.strptime(end_date_str, '%Y-%m-%d')
    start_date = datetime.strptime(start_date_str, '%Y-%m-%d')
    _days_diff = days_diff(start_date_str, end_date_str)
    # Number of MAX_DAYS-sized pages needed to cover the range.
    loops = math.ceil(_days_diff / MAX_DAYS)
    rates = {}
    for loop in range(loops):
        # start_date is advanced at the bottom of each iteration, so each
        # page covers [start, start + MAX_DAYS], clamped to today.
        start = start_date
        end = start_date + timedelta(MAX_DAYS)
        if end > today:
            end = today
        end_str = end.strftime('%Y-%m-%d')
        start_str = start.strftime('%Y-%m-%d')
        start_date = end
        print(f'{start_str} to {end_str}', end='')
        # this does a hget each iteration, but I guess that's what a cache is for
        if not date_range_in_redis(start, currency, symbol):
            # redis does not have the keys in range
            ret = get_timeseries(currency, start_str, end_str, symbol)
        else:
            print('...already in redis')
            continue
        # `ret` is only reached on the fetch branch (the cached branch
        # continues), so it is always bound here.
        if not ret['success']:
            print(f'Bad response {ret}')
            break
        rates.update(ret['rates'])
    print(rates)
    # flatten dictionary: {date: {symbol: rate}} -> {date: rate}
    rates_to_date = {
        k:v[symbol] for (k,v) in rates.items()
    }
    return {symbol: rates_to_date}
#end_date = get_today_date()
#start_date = get_days_ago_date(MAX_DAYS)
def date_range_in_redis(start_date, currency, symbol):
    """Return True when redis already holds MAX_DAYS consecutive daily rates
    for `symbol`/`currency` starting at `start_date`.

    Relies on the module-level redis client ``r``.
    """
    hash_key = f'{symbol}-{currency}'
    cached = {
        field.decode('utf-8'): float(raw)
        for (field, raw) in r.hgetall(hash_key).items()
    }
    wanted = {
        (start_date + timedelta(days=offset)).strftime('%Y-%m-%d')
        for offset in range(MAX_DAYS)
    }
    return wanted.issubset(set(cached.keys()))
if __name__ == "__main__":
    # FIX: the original declared `global r` / `global access_key` here, but
    # `global` is a no-op at module scope -- plain assignment already creates
    # the module-level names read by the helper functions.
    access_key = get_access_key()
    running_in_docker = os.environ.get('RUNNING_IN_DOCKER', False)
    # Inside docker the redis host is reached via the LAN address of the
    # host machine; otherwise use the local instance.
    if running_in_docker:
        r = redis.Redis(host='192.168.1.21')
    else:
        r = redis.Redis(host='127.0.0.1')
    yesterday = (datetime.now() - timedelta(1)).strftime('%Y-%m-%d')
    for symbol in ['XAU', 'XAG']:
        for currency in ['ZAR', 'USD']:
            key = f'{symbol}-{currency}'
            series = timeseries_to_redis(currency, '2020-01-01', yesterday, symbol)
            print(series)
            if series:
                try:
                    # hmset raises DataError on an empty mapping (i.e. when
                    # everything was already cached and nothing was fetched).
                    r.hmset(key, series[symbol])
                    print(redis_to_dataframe(symbol))
                except redis.exceptions.DataError:
                    print('empty dictionary')
            else:
                print('Something went wrong')
    # Persist the redis dataset to disk.
    r.save()
|
# -*- coding: utf-8 -*-
"""
pygments.formatters.img
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for Pixmap output.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
get_choice_opt, xrange
import subprocess
# Import this carefully
try:
from PIL import Image, ImageDraw, ImageFont
pil_available = True
except ImportError:
pil_available = False
try:
import _winreg
except ImportError:
try:
import winreg as _winreg
except ImportError:
_winreg = None
# Public API of this module.
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
           'BmpImageFormatter']
# For some unknown reason every font calls it something different
# (candidate style-name spellings tried in order when resolving a variant).
STYLES = {
    'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
    'ITALIC': ['Oblique', 'Italic'],
    'BOLD': ['Bold'],
    'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}
# A sane default for modern systems, per platform.
DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
DEFAULT_FONT_NAME_MAC = 'Menlo'
class PilNotAvailable(ImportError):
    """Raised when the Python Imaging Library cannot be imported."""
class FontNotFound(Exception):
    """Raised when no usable font matching the request can be located."""
class FontManager(object):
    """
    Manages a set of fonts: normal, italic, bold, etc...

    Resolution is platform specific: fontconfig (``fc-list``) on *nix, the
    font registry on Windows, and well-known font directories on macOS.
    Resolved fonts are kept in ``self.fonts`` keyed by 'NORMAL', 'ITALIC',
    'BOLD' and 'BOLDITALIC'; missing variants fall back to BOLD or NORMAL.
    """
    def __init__(self, font_name, font_size=14):
        self.font_name = font_name
        self.font_size = font_size
        self.fonts = {}
        self.encoding = None
        if sys.platform.startswith('win'):
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_WIN
            self._create_win()
        elif sys.platform.startswith('darwin'):
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_MAC
            self._create_mac()
        else:
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_NIX
            self._create_nix()
    def _get_nix_font_path(self, name, style):
        # Ask fontconfig for the file implementing the given face/style;
        # returns None when fc-list fails or yields no usable path.
        proc = subprocess.Popen(['fc-list', "%s:style=%s" % (name, style), 'file'],
                                stdout=subprocess.PIPE, stderr=None)
        stdout, _ = proc.communicate()
        if proc.returncode == 0:
            lines = stdout.splitlines()
            for line in lines:
                if line.startswith(b'Fontconfig warning:'):
                    continue
                path = line.decode().strip().strip(':')
                if path:
                    return path
            return None
    def _create_nix(self):
        # Resolve the NORMAL face first (required), trying each spelling.
        for name in STYLES['NORMAL']:
            path = self._get_nix_font_path(self.font_name, name)
            if path is not None:
                self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
                break
        else:
            raise FontNotFound('No usable fonts named: "%s"' %
                               self.font_name)
        # Other variants are optional and fall back to BOLD/NORMAL.
        for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
            for stylename in STYLES[style]:
                path = self._get_nix_font_path(self.font_name, stylename)
                if path is not None:
                    self.fonts[style] = ImageFont.truetype(path, self.font_size)
                    break
            else:
                if style == 'BOLDITALIC':
                    self.fonts[style] = self.fonts['BOLD']
                else:
                    self.fonts[style] = self.fonts['NORMAL']
    def _get_mac_font_path(self, font_map, name, style):
        # font_map keys are lowercased file stems, e.g. "menlo bold".
        return font_map.get((name + ' ' + style).strip().lower())
    def _create_mac(self):
        # Build a stem -> path map from the standard macOS font directories.
        font_map = {}
        for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'),
                         '/Library/Fonts/', '/System/Library/Fonts/'):
            font_map.update(
                ((os.path.splitext(f)[0].lower(), os.path.join(font_dir, f))
                 for f in os.listdir(font_dir) if f.lower().endswith('ttf')))
        for name in STYLES['NORMAL']:
            path = self._get_mac_font_path(font_map, self.font_name, name)
            if path is not None:
                self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
                break
        else:
            raise FontNotFound('No usable fonts named: "%s"' %
                               self.font_name)
        for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
            for stylename in STYLES[style]:
                path = self._get_mac_font_path(font_map, self.font_name, stylename)
                if path is not None:
                    self.fonts[style] = ImageFont.truetype(path, self.font_size)
                    break
            else:
                if style == 'BOLDITALIC':
                    self.fonts[style] = self.fonts['BOLD']
                else:
                    self.fonts[style] = self.fonts['NORMAL']
    def _lookup_win(self, key, basename, styles, fail=False):
        # Try every (style, suffix) registry value name for this font family.
        for suffix in ('', ' (TrueType)'):
            for style in styles:
                try:
                    valname = '%s%s%s' % (basename, style and ' '+style, suffix)
                    val, _ = _winreg.QueryValueEx(key, valname)
                    return val
                except EnvironmentError:
                    continue
        else:
            if fail:
                raise FontNotFound('Font %s (%s) not found in registry' %
                                   (basename, styles[0]))
            return None
    def _create_win(self):
        # Prefer the NT registry key; fall back to the 9x-era key.
        try:
            key = _winreg.OpenKey(
                _winreg.HKEY_LOCAL_MACHINE,
                r'Software\Microsoft\Windows NT\CurrentVersion\Fonts')
        except EnvironmentError:
            try:
                key = _winreg.OpenKey(
                    _winreg.HKEY_LOCAL_MACHINE,
                    r'Software\Microsoft\Windows\CurrentVersion\Fonts')
            except EnvironmentError:
                raise FontNotFound('Can\'t open Windows font registry key')
        try:
            path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
            self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
            for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
                path = self._lookup_win(key, self.font_name, STYLES[style])
                if path:
                    self.fonts[style] = ImageFont.truetype(path, self.font_size)
                else:
                    if style == 'BOLDITALIC':
                        self.fonts[style] = self.fonts['BOLD']
                    else:
                        self.fonts[style] = self.fonts['NORMAL']
        finally:
            _winreg.CloseKey(key)
    def get_char_size(self):
        """
        Get the character size.
        """
        # NOTE(review): ImageFont.getsize was removed in Pillow 10 --
        # confirm the Pillow version this code is pinned to.
        return self.fonts['NORMAL'].getsize('M')
    def get_font(self, bold, oblique):
        """
        Get the font based on bold and italic flags.
        """
        if bold and oblique:
            return self.fonts['BOLDITALIC']
        elif bold:
            return self.fonts['BOLD']
        elif oblique:
            return self.fonts['ITALIC']
        else:
            return self.fonts['NORMAL']
class ImageFormatter(Formatter):
    """
    Create a PNG image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.
    .. versionadded:: 0.10
    Additional options accepted:
    `image_format`
        An image format to output to that is recognised by PIL, these include:
        * "PNG" (default)
        * "JPEG"
        * "BMP"
        * "GIF"
    `line_pad`
        The extra spacing (in pixels) between each line of text.
        Default: 2
    `font_name`
        The font name to be used as the base font from which others, such as
        bold and italic fonts will be generated. This really should be a
        monospace font to look sane.
        Default: "Courier New" on Windows, "Menlo" on Mac OS, and
        "DejaVu Sans Mono" on \\*nix
    `font_size`
        The font size in points to be used.
        Default: 14
    `image_pad`
        The padding, in pixels to be used at each edge of the resulting image.
        Default: 10
    `line_numbers`
        Whether line numbers should be shown: True/False
        Default: True
    `line_number_start`
        The line number of the first line.
        Default: 1
    `line_number_step`
        The step used when printing line numbers.
        Default: 1
    `line_number_bg`
        The background colour (in "#123456" format) of the line number bar, or
        None to use the style background color.
        Default: "#eed"
    `line_number_fg`
        The text color of the line numbers (in "#123456"-like format).
        Default: "#886"
    `line_number_chars`
        The number of columns of line numbers allowable in the line number
        margin.
        Default: 2
    `line_number_bold`
        Whether line numbers will be bold: True/False
        Default: False
    `line_number_italic`
        Whether line numbers will be italicized: True/False
        Default: False
    `line_number_separator`
        Whether a line will be drawn between the line number area and the
        source code area: True/False
        Default: True
    `line_number_pad`
        The horizontal padding (in pixels) between the line number margin, and
        the source code area.
        Default: 6
    `hl_lines`
        Specify a list of lines to be highlighted.
        .. versionadded:: 1.2
        Default: empty list
    `hl_color`
        Specify the color for highlighting lines.
        .. versionadded:: 1.2
        Default: highlight color of the selected style
    """
    # Required by the pygments mapper
    name = 'img'
    aliases = ['img', 'IMG', 'png']
    filenames = ['*.png']
    unicodeoutput = False
    default_image_format = 'png'
    def __init__(self, **options):
        """
        See the class docstring for explanation of options.
        """
        if not pil_available:
            raise PilNotAvailable(
                'Python Imaging Library is required for this formatter')
        Formatter.__init__(self, **options)
        self.encoding = 'latin1' # let pygments.format() do the right thing
        # Read the style
        self.styles = dict(self.style)
        if self.style.background_color is None:
            self.background_color = '#fff'
        else:
            self.background_color = self.style.background_color
        # Image options
        self.image_format = get_choice_opt(
            options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
            self.default_image_format, normcase=True)
        self.image_pad = get_int_opt(options, 'image_pad', 10)
        self.line_pad = get_int_opt(options, 'line_pad', 2)
        # The fonts: a monospace cell of (fontw, fonth) pixels drives all
        # subsequent x/y coordinate arithmetic.
        fontsize = get_int_opt(options, 'font_size', 14)
        self.fonts = FontManager(options.get('font_name', ''), fontsize)
        self.fontw, self.fonth = self.fonts.get_char_size()
        # Line number options
        self.line_number_fg = options.get('line_number_fg', '#886')
        self.line_number_bg = options.get('line_number_bg', '#eed')
        self.line_number_chars = get_int_opt(options,
                                             'line_number_chars', 2)
        self.line_number_bold = get_bool_opt(options,
                                             'line_number_bold', False)
        self.line_number_italic = get_bool_opt(options,
                                               'line_number_italic', False)
        self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
        self.line_numbers = get_bool_opt(options, 'line_numbers', True)
        self.line_number_separator = get_bool_opt(options,
                                                  'line_number_separator', True)
        self.line_number_step = get_int_opt(options, 'line_number_step', 1)
        self.line_number_start = get_int_opt(options, 'line_number_start', 1)
        if self.line_numbers:
            self.line_number_width = (self.fontw * self.line_number_chars +
                                      self.line_number_pad * 2)
        else:
            self.line_number_width = 0
        self.hl_lines = []
        hl_lines_str = get_list_opt(options, 'hl_lines', [])
        for line in hl_lines_str:
            try:
                self.hl_lines.append(int(line))
            except ValueError:
                # silently ignore non-integer line specifications
                pass
        self.hl_color = options.get('hl_color',
                                    self.style.highlight_color) or '#f90'
        self.drawables = []
    def get_style_defs(self, arg=''):
        raise NotImplementedError('The -S option is meaningless for the image '
                                  'formatter. Use -O style=<stylename> instead.')
    def _get_line_height(self):
        """
        Get the height of a line.
        """
        return self.fonth + self.line_pad
    def _get_line_y(self, lineno):
        """
        Get the Y coordinate of a line number.
        """
        return lineno * self._get_line_height() + self.image_pad
    def _get_char_width(self):
        """
        Get the width of a character.
        """
        return self.fontw
    def _get_char_x(self, charno):
        """
        Get the X coordinate of a character position.
        """
        return charno * self.fontw + self.image_pad + self.line_number_width
    def _get_text_pos(self, charno, lineno):
        """
        Get the actual position for a character and line position.
        """
        return self._get_char_x(charno), self._get_line_y(lineno)
    def _get_linenumber_pos(self, lineno):
        """
        Get the actual position for the start of a line number.
        """
        return (self.image_pad, self._get_line_y(lineno))
    def _get_text_color(self, style):
        """
        Get the correct color for the token from the style.
        """
        if style['color'] is not None:
            fill = '#' + style['color']
        else:
            fill = '#000'
        return fill
    def _get_style_font(self, style):
        """
        Get the correct font for the style.
        """
        return self.fonts.get_font(style['bold'], style['italic'])
    def _get_image_size(self, maxcharno, maxlineno):
        """
        Get the required image size.
        """
        return (self._get_char_x(maxcharno) + self.image_pad,
                self._get_line_y(maxlineno + 0) + self.image_pad)
    def _draw_linenumber(self, posno, lineno):
        """
        Remember a line number drawable to paint later.
        """
        self._draw_text(
            self._get_linenumber_pos(posno),
            str(lineno).rjust(self.line_number_chars),
            font=self.fonts.get_font(self.line_number_bold,
                                     self.line_number_italic),
            fill=self.line_number_fg,
        )
    def _draw_text(self, pos, text, font, **kw):
        """
        Remember a single drawable tuple to paint later.
        """
        self.drawables.append((pos, text, font, kw))
    def _create_drawables(self, tokensource):
        """
        Create drawables for the token content.

        Walks the token stream tracking the current (charno, lineno) cell
        position and records one drawable per non-empty line fragment;
        also records self.maxcharno/self.maxlineno for image sizing.
        """
        lineno = charno = maxcharno = 0
        for ttype, value in tokensource:
            # climb the token hierarchy until a styled ancestor is found
            while ttype not in self.styles:
                ttype = ttype.parent
            style = self.styles[ttype]
            # TODO: make sure tab expansion happens earlier in the chain.  It
            # really ought to be done on the input, as to do it right here is
            # quite complex.
            value = value.expandtabs(4)
            lines = value.splitlines(True)
            # print lines
            for i, line in enumerate(lines):
                temp = line.rstrip('\n')
                if temp:
                    self._draw_text(
                        self._get_text_pos(charno, lineno),
                        temp,
                        font = self._get_style_font(style),
                        fill = self._get_text_color(style)
                    )
                    charno += len(temp)
                    maxcharno = max(maxcharno, charno)
                if line.endswith('\n'):
                    # add a line for each extra line in the value
                    charno = 0
                    lineno += 1
        self.maxcharno = maxcharno
        self.maxlineno = lineno
    def _draw_line_numbers(self):
        """
        Create drawables for the line numbers.
        """
        if not self.line_numbers:
            return
        for p in xrange(self.maxlineno):
            n = p + self.line_number_start
            if (n % self.line_number_step) == 0:
                self._draw_linenumber(p, n)
    def _paint_line_number_bg(self, im):
        """
        Paint the line number background on the image.
        """
        if not self.line_numbers:
            return
        if self.line_number_fg is None:
            return
        draw = ImageDraw.Draw(im)
        recth = im.size[-1]  # full image height
        rectw = self.image_pad + self.line_number_width - self.line_number_pad
        draw.rectangle([(0, 0), (rectw, recth)],
                       fill=self.line_number_bg)
        if self.line_number_separator:
            draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
        del draw
    def format(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.
        This implementation calculates where it should draw each token on the
        pixmap, then calculates the required pixmap size and draws the items.
        """
        self._create_drawables(tokensource)
        self._draw_line_numbers()
        im = Image.new(
            'RGB',
            self._get_image_size(self.maxcharno, self.maxlineno),
            self.background_color
        )
        self._paint_line_number_bg(im)
        draw = ImageDraw.Draw(im)
        # Highlight: paint the requested rows before drawing text over them.
        if self.hl_lines:
            x = self.image_pad + self.line_number_width - self.line_number_pad + 1
            recth = self._get_line_height()
            rectw = im.size[0] - x
            for linenumber in self.hl_lines:
                y = self._get_line_y(linenumber - 1)
                draw.rectangle([(x, y), (x + rectw, y + recth)],
                               fill=self.hl_color)
        for pos, value, font, kw in self.drawables:
            draw.text(pos, value, font=font, **kw)
        im.save(outfile, self.image_format.upper())
# Add one formatter per format, so that the "-f gif" option gives the correct result
# when used in pygmentize.
class GifImageFormatter(ImageFormatter):
    """
    Create a GIF image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.
    .. versionadded:: 1.0
    """
    # Declarative overrides only; all behavior lives in ImageFormatter.
    name = 'img_gif'
    aliases = ['gif']
    filenames = ['*.gif']
    default_image_format = 'gif'
class JpgImageFormatter(ImageFormatter):
    """
    Create a JPEG image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.
    .. versionadded:: 1.0
    """
    # Declarative overrides only; all behavior lives in ImageFormatter.
    name = 'img_jpg'
    aliases = ['jpg', 'jpeg']
    filenames = ['*.jpg']
    default_image_format = 'jpeg'
class BmpImageFormatter(ImageFormatter):
    """
    Create a bitmap image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.
    .. versionadded:: 1.0
    """
    # Declarative overrides only; all behavior lives in ImageFormatter.
    name = 'img_bmp'
    aliases = ['bmp', 'bitmap']
    filenames = ['*.bmp']
    default_image_format = 'bmp'
|
from __future__ import division, print_function
import abc
import os
import unittest
import nose.tools as ntools
from smqtk.utils.plugin import Pluggable, get_plugins, OS_ENV_PATH_SEP
__author__ = 'paul.tunison@kitware.com'
class DummyInterface (Pluggable):
    """Minimal Pluggable interface used to exercise plugin discovery."""
    @abc.abstractmethod
    def inst_method(self, val):
        """
        dummy abstract function
        """
class TestGetPluginGeneric (unittest.TestCase):
    """
    Tests of ``get_plugins`` discovery: internal plugins plus external
    module paths supplied through an environment variable.
    """

    INTERNAL_PLUGIN_DIR = os.path.join(os.path.dirname(__file__),
                                       "test_plugin_dir",
                                       "internal_plugins")
    INTERNAL_PLUGIN_MOD_PATH = \
        'smqtk.tests.utils.test_plugin_dir.internal_plugins'
    ENV_VAR = "TEST_PLUGIN_MODULE_PATH"
    HELP_VAR = "TEST_PLUGIN_CLASS"
    EXT_MOD_1 = 'smqtk.tests.utils.test_plugin_dir.external_1'
    EXT_MOD_2 = 'smqtk.tests.utils.test_plugin_dir.external_2'

    @classmethod
    def get_dummy_plugins(cls):
        """Query the plugin map of DummyInterface implementations."""
        return get_plugins(cls.INTERNAL_PLUGIN_MOD_PATH,
                           cls.INTERNAL_PLUGIN_DIR, cls.ENV_VAR,
                           cls.HELP_VAR, DummyInterface)

    def _set_plugin_env(self, value):
        """
        Set (or, for a falsy value, unset) the plugin search-path environment
        variable, returning its previous value (None if it was unset).
        """
        prior = os.environ.get(self.ENV_VAR, None)
        if value:
            os.environ[self.ENV_VAR] = value
        else:
            os.environ.pop(self.ENV_VAR, None)
        return prior

    def _check_with_env(self, env_value, check):
        """
        Run ``check`` on the plugin map queried while ENV_VAR is set to
        ``env_value``, restoring the prior value even if ``check`` raises.

        FIX: the original tests restored the variable *after* the assertions
        with no try/finally, so a failing assertion leaked the modified
        environment into subsequent tests.
        """
        prior = self._set_plugin_env(env_value)
        try:
            m = self.test_get_internal_modules(True)
            check(m)
        finally:
            self._set_plugin_env(prior)

    # -- shared assertion helpers -----------------------------------------

    def _assert_external_1(self, m):
        """Implementations from EXT_MOD_1 are present and functional."""
        ntools.assert_in('ImplExternal1', m)
        ntools.assert_in('ImplExternal2', m)
        ntools.assert_equal(m['ImplExternal1']().inst_method('d'), 'external1d')
        ntools.assert_equal(m['ImplExternal2']().inst_method('e'), 'external2e')

    def _assert_external_2(self, m):
        """Implementation from EXT_MOD_2 is present and functional."""
        ntools.assert_in('ImplExternal3', m)
        ntools.assert_equal(m['ImplExternal3']().inst_method('f'), 'external3f')

    # -- tests -------------------------------------------------------------

    def test_get_internal_modules(self, do_return=False):
        """Internal plugins are discovered; unusable ones are excluded."""
        m = self.get_dummy_plugins()
        ntools.assert_in('ImplFoo', m)
        ntools.assert_in('ImplBar', m)
        ntools.assert_in('ImplDoExport', m)
        ntools.assert_equal(m['ImplFoo']().inst_method('a'), 'fooa')
        ntools.assert_equal(m['ImplBar']().inst_method('b'), 'barb')
        ntools.assert_equal(m['ImplDoExport']().inst_method('c'), 'doExportc')
        ntools.assert_not_in('ImplNotUsable', m)
        ntools.assert_not_in('SomethingElse', m)
        ntools.assert_not_in('ImplNoExport', m)
        ntools.assert_not_in('ImplSkipModule', m)
        if do_return:
            return m

    def test_external_1_only(self):
        def check(m):
            self._assert_external_1(m)
            ntools.assert_not_in('ImplExternal3', m)
        self._check_with_env(self.EXT_MOD_1, check)

    def test_external_1_with_trailing_sep(self):
        def check(m):
            self._assert_external_1(m)
            ntools.assert_not_in('ImplExternal3', m)
        self._check_with_env(self.EXT_MOD_1 + OS_ENV_PATH_SEP, check)

    def test_external_1_with_leading_sep(self):
        def check(m):
            self._assert_external_1(m)
            ntools.assert_not_in('ImplExternal3', m)
        self._check_with_env(OS_ENV_PATH_SEP + self.EXT_MOD_1, check)

    def test_external_2_only(self):
        def check(m):
            ntools.assert_not_in('ImplExternal1', m)
            ntools.assert_not_in('ImplExternal2', m)
            self._assert_external_2(m)
        self._check_with_env(self.EXT_MOD_2, check)

    def test_external_1_and_2(self):
        def check(m):
            self._assert_external_1(m)
            self._assert_external_2(m)
        self._check_with_env(
            OS_ENV_PATH_SEP.join([self.EXT_MOD_1, self.EXT_MOD_2]), check)

    def test_junk_external_mod(self):
        # An invalid module path is treated as a python module path that
        # cannot be imported; a warning is emitted but the internal plugin
        # query still succeeds as if the invalid chunk didn't exist.
        self._check_with_env("This is a junk string", lambda m: None)

    def test_external_1_and_2_and_garbage(self):
        def check(m):
            self._assert_external_1(m)
            self._assert_external_2(m)
        self._check_with_env(
            OS_ENV_PATH_SEP.join([self.EXT_MOD_1,
                                  self.EXT_MOD_2,
                                  'asdgasfhsadf',
                                  'some thing weird',
                                  'but still uses sep']), check)
|
"""empty message
Revision ID: 7b4eec5bf6a2
Revises:
Create Date: 2018-07-07 17:49:12.635921
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7b4eec5bf6a2'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial ``users`` and ``roles`` tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=80), nullable=False),
    sa.Column('email', sa.String(length=80), nullable=False),
    # NOTE(review): sa.Binary is deprecated (alias of LargeBinary in
    # SQLAlchemy 1.4+) -- do not edit a committed migration, but new
    # migrations should use sa.LargeBinary.
    sa.Column('password', sa.Binary(), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.Column('first_name', sa.String(length=30), nullable=True),
    sa.Column('last_name', sa.String(length=30), nullable=True),
    sa.Column('active', sa.Boolean(), nullable=True),
    sa.Column('is_admin', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('username')
    )
    op.create_table('roles',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=80), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``roles`` and ``users`` tables (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    # roles is dropped first because it holds the FK onto users.
    op.drop_table('roles')
    op.drop_table('users')
    # ### end Alembic commands ###
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @copyright © 2010 - 2021, Fraunhofer-Gesellschaft zur Foerderung der
# angewandten Forschung e.V. All rights reserved.
#
# BSD 3-Clause License
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# We kindly request you to use one or more of the following phrases to refer to
# foxBMS in your hardware, software, documentation or advertising materials:
#
# ″This product uses parts of foxBMS®″
#
# ″This product includes parts of foxBMS®″
#
# ″This product is derived from foxBMS®″
"""Testing 'qtpy' package"""
import logging
import argparse
# package to test
import qtpy # pylint: disable=unused-import
def main():
    """Testing 'qtpy' package: parse -v flags and configure logging."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-v",
        "--verbosity",
        dest="verbosity",
        action="count",
        default=0,
        help="set verbosity level",
    )
    args = parser.parse_args()
    # Map the repeat count of -v to a log level: 0 -> ERROR, 1 -> INFO,
    # 2+ -> DEBUG.
    if args.verbosity > 1:
        level = logging.DEBUG
    elif args.verbosity == 1:
        level = logging.INFO
    else:
        level = logging.ERROR
    logging.basicConfig(level=level)
# Script entry point: only run when executed directly.
if __name__ == "__main__":
    main()
|
import tensorflow as tf
from tensorflow.keras import layers, activations
class Shuffle(layers.Layer):
    """Randomly permutes the channels (last axis) of its input tensor."""

    def __init__(self, **kwargs):
        super(Shuffle, self).__init__(**kwargs)

    def call(self, x):
        # Draw a fresh random permutation of the channel indices and
        # reindex the last axis with it.
        perm = tf.random.shuffle(tf.range(0, tf.shape(x)[-1]))
        return tf.gather(x, perm, axis=-1)
class Silu(layers.Layer):
    """Thin layer wrapper around the SiLU (swish) activation."""

    def __init__(self, **kwargs):
        super(Silu, self).__init__(**kwargs)
        self.activation = tf.nn.silu

    def call(self, inputs):
        return self.activation(inputs)

    def compute_output_shape(self, input_shape):
        # Element-wise activation: the shape is unchanged.
        return input_shape

    def get_config(self):
        base_config = super(Silu, self).get_config()
        extra = {'activation': activations.serialize(self.activation)}
        return {**base_config, **extra}
def SSEBlock(filters, name, **kargs):
    """Skip-Squeeze-and-Excitation block: the batch-normalized (optionally
    channel-projected) input, gated element-wise by a sigmoid of its
    global-average-pooled 1x1 projection."""
    def wrapper(x):
        # if input and output of ParNet block have different number of
        # channels (cifar), pass the input through a 1x1 conv first so the
        # channel counts line up.
        if x.shape[-1] != filters:
            x = layers.Conv2D(filters,
                              (1, 1),
                              use_bias=False,
                              name=name + '_conv1x1')(x)
        normed = layers.BatchNormalization(name=name + '_bn')(x)
        gate = layers.GlobalAveragePooling2D(
            keepdims=True, name=name + '_se_gp')(normed)
        gate = layers.Conv2D(filters,
                             (1, 1),
                             use_bias=False,
                             name=name + '_se_conv1x1')(gate)
        gate = layers.Activation('sigmoid', name=name + '_se_sigmoid')(gate)
        return layers.Multiply(name=name + '_multiply')([normed, gate])
    return wrapper
def ParnetBlock(filters, name, train=True, **kwargs):
    """ParNet block: sum of an SSE branch, a 3x3 conv branch and -- during
    training only -- a 1x1 conv branch, followed by SiLU.

    # NOTE(review): with train=False the 3x3 conv gains a bias and loses its
    # BN, and the 1x1 branch disappears -- presumably the inference graph
    # expects the trained 1x1/BN weights to be fused into the 3x3 conv
    # (structural reparameterization); confirm the fusion step exists.
    """
    def wrapper(x):
        se = SSEBlock(filters, name + '_sse')(x)
        conv3 = layers.Conv2D(filters,
                              (3, 3),
                              padding='same',
                              use_bias=not train,
                              name=name + '_conv3x3_conv')(x)
        if train:
            conv3 = layers.BatchNormalization(name=name + '_conv3x3_bn')(conv3)
            conv1 = layers.Conv2D(filters,
                                  (1, 1),
                                  use_bias=False,
                                  name=name + '_conv1x1_conv')(x)
            conv1 = layers.BatchNormalization(name=name + '_conv1x1_bn')(conv1)
            output = layers.Add(name=name + '_add')([se, conv1, conv3])
        else:
            output = layers.Add(name=name + '_add')([se, conv3])
        output = Silu(name=name + '_silu')(output)
        return output
    return wrapper
def DownsamplingBlock(filters, name, strides=2, groups=1, **kwargs):
    """Downsampling block: average-pool branch + strided 3x3 conv branch are
    summed, gated by a sigmoid global-average-pooling branch, then SiLU.

    # NOTE(review): the average-pooling branch uses a fixed (2, 2) pool while
    # the conv branches use `strides`; for strides != 2 the branch shapes
    # would disagree at the Add -- presumably only strides=2 is ever used.
    # Confirm before generalizing.
    """
    def wrapper(x):
        pool_1 = layers.AveragePooling2D((2, 2),
                                         padding='same',
                                         name=name + '_pool2d_pool')(x)
        pool_1 = layers.Conv2D(filters,
                               (1, 1),
                               groups=groups,
                               use_bias=False,
                               name=name + '_pool2d_conv1')(pool_1)
        pool_1 = layers.BatchNormalization(name=name + '_pool2d_bn')(pool_1)
        conv = layers.Conv2D(filters,
                             (3, 3),
                             strides=strides,
                             padding='same',
                             groups=groups,
                             use_bias=False,
                             name=name + '_conv3_conv')(x)
        conv = layers.BatchNormalization(name=name + '_conv3_bn')(conv)
        # Global context gate (1x1 spatial output broadcast over the sum).
        global_pool = layers.GlobalAveragePooling2D(
            keepdims=True,
            name=name + '_gp_pool')(x)
        global_pool = layers.Conv2D(filters,
                                    (1, 1),
                                    strides=strides,
                                    groups=groups,
                                    use_bias=False,
                                    name=name + '_gp_conv')(global_pool)
        global_pool = layers.Activation('sigmoid',
                                        name=name + '_sigmoid')(global_pool)
        output = layers.Add(name=name + '_add')([pool_1, conv])
        output = layers.Multiply(name=name + '_multiply')([output, global_pool])
        output = Silu(name=name + '_silu')(output)
        return output
    return wrapper
def FusionBlock(filters, name, strides=2, groups=2, **kwargs):
    """Fuse two streams by channel-wise concatenation, then downsample."""
    def wrapper(x, y):
        fused = layers.Concatenate(axis=-1,
                                   name=name + '_concatenation')([x, y])
        # merged = Shuffle(name=name + '_shuffle')(merged)
        downsample = DownsamplingBlock(filters,
                                       name + '_downsampling',
                                       strides,
                                       groups)
        return downsample(fused)
    return wrapper
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class BigDataPoolsOperations(object):
    """BigDataPoolsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.synapse.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: this class is AutoRest-generated; edits will be lost on regeneration.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def get(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        big_data_pool_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.BigDataPoolResourceInfo"
        """Get Big Data pool.
        Get a Big Data pool.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param big_data_pool_name: Big Data pool name.
        :type big_data_pool_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BigDataPoolResourceInfo, or the result of cls(response)
        :rtype: ~azure.mgmt.synapse.models.BigDataPoolResourceInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BigDataPoolResourceInfo"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
            'bigDataPoolName': self._serialize.url("big_data_pool_name", big_data_pool_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # map_error raises a typed exception for mapped status codes; otherwise
            # fall through to the generic HttpResponseError with the ARM error body.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorContract, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('BigDataPoolResourceInfo', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}'}  # type: ignore
    def update(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        big_data_pool_name,  # type: str
        big_data_pool_patch_info,  # type: "_models.BigDataPoolPatchInfo"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.BigDataPoolResourceInfo"
        """Update a Big Data pool.
        Patch a Big Data pool.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param big_data_pool_name: Big Data pool name.
        :type big_data_pool_name: str
        :param big_data_pool_patch_info: The updated Big Data pool properties.
        :type big_data_pool_patch_info: ~azure.mgmt.synapse.models.BigDataPoolPatchInfo
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BigDataPoolResourceInfo, or the result of cls(response)
        :rtype: ~azure.mgmt.synapse.models.BigDataPoolResourceInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BigDataPoolResourceInfo"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
            'bigDataPoolName': self._serialize.url("big_data_pool_name", big_data_pool_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(big_data_pool_patch_info, 'BigDataPoolPatchInfo')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorContract, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('BigDataPoolResourceInfo', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}'}  # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        big_data_pool_name,  # type: str
        big_data_pool_info,  # type: "_models.BigDataPoolResourceInfo"
        force=False,  # type: Optional[bool]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.BigDataPoolResourceInfo"
        # Initial PUT of the long-running create/update operation; polled by
        # begin_create_or_update below.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BigDataPoolResourceInfo"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
            'bigDataPoolName': self._serialize.url("big_data_pool_name", big_data_pool_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if force is not None:
            query_parameters['force'] = self._serialize.query("force", force, 'bool')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(big_data_pool_info, 'BigDataPoolResourceInfo')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorContract, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # 200 = completed synchronously, 202 = accepted (LRO in progress);
        # both carry a BigDataPoolResourceInfo body.
        if response.status_code == 200:
            deserialized = self._deserialize('BigDataPoolResourceInfo', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('BigDataPoolResourceInfo', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        big_data_pool_name,  # type: str
        big_data_pool_info,  # type: "_models.BigDataPoolResourceInfo"
        force=False,  # type: Optional[bool]
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.BigDataPoolResourceInfo"]
        """Create a Big Data pool.
        Create a new Big Data pool.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param big_data_pool_name: Big Data pool name.
        :type big_data_pool_name: str
        :param big_data_pool_info: The Big Data pool to create.
        :type big_data_pool_info: ~azure.mgmt.synapse.models.BigDataPoolResourceInfo
        :param force: Whether to stop any running jobs in the Big Data pool.
        :type force: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either BigDataPoolResourceInfo or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.synapse.models.BigDataPoolResourceInfo]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BigDataPoolResourceInfo"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Issue the initial request only when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                big_data_pool_name=big_data_pool_name,
                big_data_pool_info=big_data_pool_info,
                force=force,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into the resource model.
            deserialized = self._deserialize('BigDataPoolResourceInfo', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
            'bigDataPoolName': self._serialize.url("big_data_pool_name", big_data_pool_name, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}'}  # type: ignore
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        big_data_pool_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional[object]
        # Initial DELETE of the long-running delete operation; polled by begin_delete.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional[object]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
            'bigDataPoolName': self._serialize.url("big_data_pool_name", big_data_pool_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorContract, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # 204 (no content) leaves deserialized as None.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('object', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('object', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        big_data_pool_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[object]
        """Delete a Big Data pool.
        Delete a Big Data pool from the workspace.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param big_data_pool_name: Big Data pool name.
        :type big_data_pool_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either object or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[object]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[object]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                big_data_pool_name=big_data_pool_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('object', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
            'bigDataPoolName': self._serialize.url("big_data_pool_name", big_data_pool_name, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}'}  # type: ignore
    def list_by_workspace(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.BigDataPoolResourceInfoListResult"]
        """List the Big Data pools in a workspace.
        List Big Data pools in a workspace.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BigDataPoolResourceInfoListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.synapse.models.BigDataPoolResourceInfoListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BigDataPoolResourceInfoListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page: build the URL from metadata; later pages: use the
            # server-supplied next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_workspace.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('BigDataPoolResourceInfoListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                # NOTE: unlike the other methods here, the error body is deserialized
                # before map_error; if map_error raises, `error` is discarded.
                error = self._deserialize(_models.ErrorContract, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/bigDataPools'}  # type: ignore
|
# Test cases: (description, operand x, operand y, expected result).
# The expected result may be a type-error message string for invalid inputs.
DATA = [
    ('测试两数相加', 10, 20, 30),
    ('测试两数相加', 10.0, 20, 'x type is not int')
]
# Test cases for absolute difference of two numbers.
DATA2 = [
    ('测试两数相减的绝对值', 10, 20, 15),
    ('测试两数相减的绝对值', 10, '2', 'y type is not int')
]
# Test case: replace the listed characters in a string with a replacement char.
DATA3 = [
    ('测试替换掉字符串中的某些字符', '--hell -world++', ['-', '+'], 't', 'tthellotttworldtt')
]
|
# Thank For CatUserBot
# Ported By @VckyouuBitch
# FROM Geez - Projects <https://github.com/Vckyou/GeezProjects>
from telethon.tl.functions.contacts import BlockRequest, UnblockRequest
from telethon.tl.types import (
MessageEntityMentionName,
)
from userbot import bot, BOTLOG, BOTLOG_CHATID, CMD_HELP
from userbot.events import geezbot_cmd
from userbot import CUSTOM_CMD as geez
async def get_full_user(event):
    """Resolve the target user of a command to a (user_obj, extra) pair.

    Resolution order: replied-to message sender, then the first argument of
    the command (numeric id, @username, or an inline mention entity).

    :param event: the Telethon command event
    :return: ``(user_obj, extra)`` where ``extra`` is the free-text remainder
        of the command (or None), or ``None`` after editing the event with an
        error message when resolution fails.
    """
    args = event.pattern_match.group(1).split(':', 1)
    extra = None
    user_obj = None  # was previously unbound when no branch matched (NameError)
    if event.reply_to_msg_id and not len(args) == 2:
        # Replied-to message: ban target is the replied sender, whole arg is the reason.
        previous_message = await event.get_reply_message()
        user_obj = await event.client.get_entity(previous_message.sender_id)
        extra = event.pattern_match.group(1)
    elif len(args[0]) > 0:
        user = args[0]
        if len(args) == 2:
            extra = args[1]
        if user.isnumeric():
            user = int(user)
        if not user:
            # fixed: the closing backtick of the monospace span was missing
            await event.edit("`User ID Is Required`")
            return
        if event.message.entities is not None:
            # An inline mention carries the user id directly.
            probable_user_mention_entity = event.message.entities[0]
            if isinstance(probable_user_mention_entity,
                          MessageEntityMentionName):
                user_id = probable_user_mention_entity.user_id
                user_obj = await event.client.get_entity(user_id)
                # fixed: callers unpack two values; the original returned a bare user_obj here
                return user_obj, extra
        try:
            user_obj = await event.client.get_entity(user)
        except Exception as err:
            # fixed: Message.edit takes one text argument; the original passed two positionals
            return await event.edit(f"Something Went Wrong {err}")
    return user_obj, extra
async def get_user_sender_id(user, event):
    """Resolve *user* (an id, or a numeric string) to a Telethon entity.

    On failure the event message is edited with the error text and None is
    returned.
    """
    target = int(user) if isinstance(user, str) else user
    try:
        return await event.client.get_entity(target)
    except (TypeError, ValueError) as err:
        await event.edit(str(err))
        return None
@bot.on(geezbot_cmd(outgoing=True, pattern=r"gban(?: |$)(.*)"))
async def gspider(userbot):
    """Globally ban a user: block them and revoke view permission in every
    group/channel dialog, then record the ban via gmute_sql and log it."""
    lol = userbot
    sender = await lol.get_sender()
    me = await lol.client.get_me()
    # Reply when the command came from a sudo user, edit when it is our own message.
    if not sender.id == me.id:
        friday = await lol.reply("Gbanning User..")
    else:
        friday = await lol.edit("Wait Processing.....")
    me = await userbot.client.get_me()
    await friday.edit("**Global Ban user..**")
    await userbot.get_chat()
    a = b = 0
    # Initialize so a failed lookup cannot leave these unbound (NameError in original).
    user = None
    reason = None
    if userbot.is_private:
        user = userbot.chat
        reason = userbot.pattern_match.group(1)
    else:
        try:
            user, reason = await get_full_user(userbot)
        except BaseException:
            pass
    if not reason:
        reason = "Private"
    if not user:
        # In the original this fell through and crashed on user.first_name below.
        return await friday.edit("**Reply to a user !!**")
    if user.id == 1803347744:
        return await friday.edit(
            "**Didn't , Your Father Teach You ? That You Cant Gban your creator🖕**"
        )
    try:
        from userbot.modules.sql_helper.gmute_sql import gmute
    except BaseException:
        pass
    try:
        # Block the user from our own account; best-effort.
        await userbot.client(BlockRequest(user))
    except BaseException:
        pass
    # All group/channel dialogs we participate in.
    group_ids = [
        d.entity.id
        for d in await userbot.client.get_dialogs()
        if (d.is_group or d.is_channel)
    ]
    for chat_id in group_ids:
        try:
            await userbot.client.edit_permissions(chat_id, user, view_messages=False)
            a += 1
            await friday.edit(f"**GBANNED // Total Affected Chats **: `{a}`")
        except BaseException:
            b += 1  # chats where we lack ban rights
    try:
        if gmute(user.id) is False:
            return await friday.edit("**Error! User telah di gbanned.**")
    except BaseException:
        pass
    # Log BEFORE the final edit/return; in the original this block sat after a
    # `return` and was unreachable. Tag fixed from #GMUTE to #GBAN, and
    # chat.title guarded for private chats.
    if BOTLOG:
        await userbot.client.send_message(
            BOTLOG_CHATID,
            "#GBAN\n"
            f"USER: [{user.first_name}](tg://user?id={user.id})\n"
            f"CHAT: {getattr(userbot.chat, 'title', 'Private')}(`{userbot.chat_id}`)",
        )
    return await friday.edit(
        f"**Gbanned [{user.first_name}](tg://user?id={user.id}) Dari : {a} Group**"
    )
# Globally un-ban: unblock the user and restore send permission in every
# group/channel dialog, then clear the gmute_sql record.
# NOTE(review): this handler reuses the name `gspider`, shadowing the gban
# handler at module level; harmless because the decorator registers each one,
# but confusing.
@bot.on(geezbot_cmd(outgoing=True, pattern=r"ungban(?: |$)(.*)"))
async def gspider(userbot):
    lol = userbot
    sender = await lol.get_sender()
    me = await lol.client.get_me()
    # Reply when triggered by a sudo user, edit when it is our own message.
    if not sender.id == me.id:
        friday = await lol.reply("`Wait Let Me Process`")
    else:
        friday = await lol.edit("Just a Second ")
    me = await userbot.client.get_me()
    await friday.edit(f"Trying To Ungban User !")
    # NOTE(review): my_mention and the conditional expression below are dead
    # code — the result is never used.
    my_mention = "[{}](tg://user?id={})".format(me.first_name, me.id)
    f"@{me.username}" if me.username else my_mention
    await userbot.get_chat()
    a = b = 0
    if userbot.is_private:
        user = userbot.chat
        reason = userbot.pattern_match.group(1)
    else:
        userbot.chat.title
        try:
            user, reason = await get_full_user(userbot)
        except BaseException:
            pass
    # `reason` may be unbound if get_full_user failed; the NameError is caught
    # here and reported as a generic error ("Terjadi Kesalahan" = "an error occurred").
    try:
        if not reason:
            reason = "Private"
    except BaseException:
        return await friday.edit("Terjadi Kesalahan!!")
    if user:
        if user.id == 1803347744:
            return await friday.edit("**You Cant gban him... as a result you can not ungban him... He is My Creator!**")
        try:
            from userbot.modules.sql_helper.gmute_sql import ungmute
        except BaseException:
            pass
        try:
            # Unblock from our own account; best-effort.
            await userbot.client(UnblockRequest(user))
        except BaseException:
            pass
        # All group/channel dialogs we participate in.
        testuserbot = [
            d.entity.id
            for d in await userbot.client.get_dialogs()
            if (d.is_group or d.is_channel)
        ]
        for i in testuserbot:
            try:
                await userbot.client.edit_permissions(i, user, send_messages=True)
                a += 1
                await friday.edit(f"**UNGBANNING // AFFECTED CHATS - {a} **")
            except BaseException:
                b += 1  # chats where we lack the needed rights
    else:
        await friday.edit("**Reply to a user !!**")
    try:
        if ungmute(user.id) is False:
            return await friday.edit("**Error! User probably already ungbanned.**")
    except BaseException:
        pass
    return await friday.edit(
        f"**UNGBANNED // USER - [{user.first_name}](tg://user?id={user.id}) CHATS : {a} **"
    )
# Register this module's help text with the userbot's help command.
# The backslash-newline continuations are inside the f-string literal, so the
# leading spaces of each continued line are part of the help text.
CMD_HELP.update({
    "gban": f"\
`{geez}gban reason`\
\nUsage: Globally Ban users from all the Group Administrations bots where you are SUDO.\
\n\n`{geez}ungban reason`\
\nUsage: Globally unBan users from all the Group Administrations bots where you are SUDO"
})
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-08 21:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create games.UserQuestionHistory: one row per (player, question) answer
    event, storing the boolean answer and an auto-set timestamp."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('games', '0005_auto_20170809_0130'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserQuestionHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('answer', models.BooleanField(default=False)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.Question')),
            ],
        ),
    ]
|
from template import kernel_gpu
kernel_meta = kernel_gpu.kernel_meta
kernel_initializer = kernel_gpu.kernel_initializer
kernel_query = '''\
static vsi_status _query_kernel
(
vsi_nn_kernel_t * kernel,
vsi_nn_tensor_t * const * const inputs,
vsi_nn_tensor_t * const * const outputs
/* Add extra params */
)
{
vsi_status status = VSI_FAILURE;
vsi_nn_kernel_dtype_e in_dtype;
vsi_nn_kernel_dtype_e out_dtype;
const _kernel_map_type * kernel_map = _%KERNEL_NAME%_kernel_map;
size_t kernel_map_size = _cnt_of_array( _%KERNEL_NAME%_kernel_map );
vx_param_description_t * param_def = _%KERNEL_NAME%_kernel_param_def;
size_t param_def_size = _cnt_of_array( _%KERNEL_NAME%_kernel_param_def );
vx_kernel_initialize_f initializer = _%KERNEL_NAME%_initializer;
uint32_t key;
int i;
in_dtype = vsi_nn_kernel_map_dtype( inputs[0]->attr.dtype.vx_type );
out_dtype = vsi_nn_kernel_map_dtype( outputs[0]->attr.dtype.vx_type );
key = %upper(KERNEL_NAME)%_HASH_KEY( in_dtype, out_dtype );
for( i = 0; i < kernel_map_size; i ++ )
{
if( kernel_map[i].key == key )
{
break;
}
}
if( i < kernel_map_size )
{
snprintf( kernel->info.name, VX_MAX_KERNEL_NAME, "%s", kernel_map[i].function_name );
kernel->info.parameters = param_def;
kernel->info.numParams = param_def_size;
kernel->info.initialize = initializer;
// Register code source
vsi_nn_kernel_add_source( kernel, VSI_NN_GPU_SOURCE_FMT_CODE, 2,
"vsi_nn_kernel_header",
kernel_map[i].source_name );
// Register binary source
vsi_nn_kernel_add_source( kernel, VSI_NN_GPU_SOURCE_FMT_EXECUTABLE, 1,
kernel_map[i].source_name );
status = VSI_SUCCESS;
}
return status;
} /* _query_kernel() */
'''
kernel_check = kernel_gpu.kernel_check
kernel_function = kernel_gpu.kernel_function
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020 Masahiko Hashimoto <hashimom@geeko.jp>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import time
import grpc
import egoisticlily.proto.egoisticlily_pb2_grpc
import egoisticlily.proto.egoisticlily_pb2
from concurrent import futures
from egoisticlily.converter import Converter
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class EgoisticLilyGateway(egoisticlily.proto.egoisticlily_pb2_grpc.EgoisticLilyServiceServicer):
    """gRPC servicer exposing the EgoisticLily converter.

    Wraps a :class:`Converter` instance and serves Convert RPCs.
    """
    def __init__(self, model):
        """Build the servicer.

        :param model: model path handed to the Converter
        """
        self.converter = Converter(model)
    def Convert(self, request, context):
        """Handle one Convert RPC.

        :param request: request message carrying ``in_str``
        :param context: gRPC servicer context (unused here)
        :return: ConvertResp with status 200 and the converted string
        """
        converted = self.converter(request.in_str)
        return egoisticlily.proto.egoisticlily_pb2.ConvertResp(
            status=200, out_str=converted)
def main():
    """Start the EgoisticLily gRPC server and block until interrupted.

    :return: None
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', help='model path', required=True)
    parser.add_argument('-p', '--port', help='server port number', default='50055')
    options = parser.parse_args()
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    egoisticlily.proto.egoisticlily_pb2_grpc.add_EgoisticLilyServiceServicer_to_server(
        EgoisticLilyGateway(options.model), server)
    # Bind to the requested port on all interfaces (IPv4 and IPv6).
    server.add_insecure_port('[::]:' + options.port)
    server.start()
    print("EgoisticLily Server Start! Port %d" % int(options.port))
    # gRPC serves on worker threads; keep the main thread alive until
    # the user interrupts, then shut down immediately.
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
if __name__ == "__main__":
    main()
|
# Sample input for the max-triplet-sum demo below.
arr=[1, 4, 20, 3, 10, 5]
n=len(arr)
# Max triplet sum in array
def func1(arr, n):
    """Print the maximum sum of any three elements of ``arr``.

    Single pass that tracks the three largest values seen so far
    (``third`` >= ``second`` >= ``first``) and prints their sum.

    :param arr: sequence of numbers; needs at least 3 elements for a
        meaningful result
    :param n: number of leading elements of ``arr`` to consider
    """
    # float('-inf') sentinels are correct for any input values. The
    # previous magic number (-99999999) silently produced a wrong sum
    # whenever every element was <= -99999999.
    first, second, third = float('-inf'), float('-inf'), float('-inf')
    for i in range(n):
        if arr[i] > third:
            # New overall maximum: demote the previous top two.
            first = second
            second = third
            third = arr[i]
        elif arr[i] > second:
            first = second
            second = arr[i]
        elif arr[i] > first:
            first = arr[i]
    print(first + second + third)
|
import turtle
wn = turtle.Screen()
tarta = turtle.Turtle()
tarta.pensize(3)
# Replay pen commands from a text file: each line is "UP", "DOWN",
# or an "x y" coordinate pair to move to.
# The context manager guarantees the file is closed even if a line is
# malformed and parsing raises; the original manual close() was
# skipped on any exception.
with open(r"C:\Users\CASSIO\Documents\Python\Turtle\desenho.txt") as arquivo:
    for linha in arquivo:
        item = linha.split()
        if not item:
            # Skip blank lines instead of crashing on item[0].
            continue
        if item[0] == 'UP':
            tarta.up()
        elif item[0] == 'DOWN':
            tarta.down()
        else:
            tarta.goto(int(item[0]), int(item[1]))
wn.exitonclick()
|
import random
import pytest
from fastapi import FastAPI, testclient
from fastapi_crudrouter import MemoryCRUDRouter
from tests import Potato
URL = "/potato"
def get_client(**kwargs):
    """Return a TestClient for a fresh app with a Potato memory router.

    Keyword arguments are forwarded to MemoryCRUDRouter (e.g. the
    ``*_route=False`` flags used by the tests below).
    """
    application = FastAPI()
    router = MemoryCRUDRouter(schema=Potato, prefix=URL, **kwargs)
    application.include_router(router)
    return testclient.TestClient(application)
@pytest.mark.parametrize("i", list(range(1, len(MemoryCRUDRouter.get_routes()) + 1)))
def test_exclude_internal(i):
    """Disabling i routes removes exactly i entries from the router."""
    chosen = random.sample(MemoryCRUDRouter.get_routes(), k=i)
    disabled = {name + "_route": False for name in chosen}
    router = MemoryCRUDRouter(schema=Potato, prefix=URL, **disabled)
    expected = len(MemoryCRUDRouter.get_routes()) - i
    assert len(router.routes) == expected
def test_exclude_delete_all():
    """With delete_all disabled, DELETE on the collection is rejected
    (405) while GET keeps working (200)."""
    api = get_client(delete_all_route=False)
    assert api.delete(URL).status_code == 405
    assert api.get(URL).status_code == 200
def test_exclude_all():
    """With every route disabled, all endpoints return 404."""
    disabled = {name + "_route": False for name in MemoryCRUDRouter.get_routes()}
    api = get_client(**disabled)
    # Collection endpoints, in the same verb order as before.
    for call in (api.delete, api.get, api.post, api.put):
        assert call(URL).status_code == 404
    # Item endpoints, across a few representative ids.
    for id_ in [-1, 1, 0, 14]:
        item_url = f"{URL}/{id_}"
        for call in (api.get, api.post, api.put, api.delete):
            assert call(item_url).status_code == 404
|
# -*- coding: utf-8 -*-
'''
Encapsulate the different transports available to Salt.
'''
from __future__ import absolute_import
# for backwards compatibility
class Channel(object):
    """Backwards-compatible facade for constructing a request channel."""
    @staticmethod
    def factory(opts, **kwargs):
        """Build a request channel from ``opts``; ZeroMQ is the default."""
        # Resolve the transport type: explicit opts win, then the
        # master's pillar config, then the zeromq default.
        if 'transport' in opts:
            ttype = opts['transport']
        else:
            master_opts = opts.get('pillar', {}).get('master', {})
            if 'transport' in master_opts:
                ttype = master_opts['transport']
            else:
                ttype = 'zeromq'
        # The raet ioflo implementation still uses this channel; keep
        # this branch for compatibility.
        if ttype == 'raet':
            import salt.transport.raet
            return salt.transport.raet.RAETReqChannel(opts, **kwargs)
        # TODO: deprecation warning, should use
        # salt.transport.channel.Channel.factory()
        from salt.transport.client import ReqChannel
        return ReqChannel.factory(opts, **kwargs)
|
import pygame
import random
WHITE = 255, 255, 255
class Asteroid:
    """A drifting square asteroid that wraps around the screen edges."""
    minimum_size = 20
    def __init__(self, x, y, size=200):
        self.x = x
        self.y = y
        self.size = size
        # Random velocity components in [-5, 5).
        self.vx = random.random() * 10 - 5
        self.vy = random.random() * 10 - 5
    def draw(self, screen):
        """Draw the asteroid as a white square outline, 5px thick."""
        pygame.draw.rect(screen, WHITE, (self.x, self.y, self.size, self.size), 5)
    def move(self, width, height):
        """Advance one step, wrap at the borders, and apply mild drag."""
        self.x = (self.x + self.vx) % width
        self.y = (self.y + self.vy) % height
        # Gentle deceleration, applied only while moving fast.
        if abs(self.vx) > 2:
            self.vx *= 0.999
        if abs(self.vy) > 2:
            self.vy *= 0.999
    def explode(self):
        """Halve this asteroid and return up to three new fragments.

        This asteroid survives as the fourth, half-size part; fragments
        are produced only while the halved size exceeds minimum_size.
        """
        self.size /= 2
        if self.size > self.minimum_size:
            return [Asteroid(self.x, self.y, self.size) for _ in range(3)]
        return []
    def inside(self, pos):
        """Return True if screen position ``pos`` lies inside the square."""
        return pygame.Rect(self.x, self.y, self.size, self.size).collidepoint(pos[0], pos[1])
|
""" Dictionary learning.
"""
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
import warnings
from math import ceil
import numpy as np
from scipy import linalg
from joblib import Parallel, effective_n_jobs
from ..base import BaseEstimator, TransformerMixin
from ..utils import deprecated
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches)
from ..utils.extmath import randomized_svd, row_norms, svd_flip
from ..utils.validation import check_is_fitted
from ..utils.fixes import delayed
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _check_positive_coding(method, positive):
if positive and method in ["omp", "lars"]:
raise ValueError(
"Positive constraint not supported for '{}' "
"coding method.".format(method)
)
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
                   regularization=None, copy_cov=True,
                   init=None, max_iter=1000, check_input=True, verbose=0,
                   positive=False):
    """Generic sparse coding.
    Each column of the result is the solution to a Lasso problem.
    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data matrix.
    dictionary : ndarray of shape (n_components, n_features)
        The dictionary matrix against which to solve the sparse coding of
        the data. Some of the algorithms assume normalized rows.
    gram : ndarray of shape (n_components, n_components) or None
        Precomputed Gram matrix, `dictionary * dictionary'`
        gram can be `None` if method is 'threshold'.
    cov : ndarray of shape (n_components, n_samples), default=None
        Precomputed covariance, `dictionary * X'`.
    algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
            default='lasso_lars'
        The algorithm used:
        * `'lars'`: uses the least angle regression method
          (`linear_model.lars_path`);
        * `'lasso_lars'`: uses Lars to compute the Lasso solution;
        * `'lasso_cd'`: uses the coordinate descent method to compute the
          Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
          the estimated components are sparse;
        * `'omp'`: uses orthogonal matching pursuit to estimate the sparse
          solution;
        * `'threshold'`: squashes to zero all coefficients less than
          regularization from the projection `dictionary * data'`.
    regularization : int or float, default=None
        The regularization parameter. It corresponds to alpha when
        algorithm is `'lasso_lars'`, `'lasso_cd'` or `'threshold'`.
        Otherwise it corresponds to `n_nonzero_coefs`.
    init : ndarray of shape (n_samples, n_components), default=None
        Initialization value of the sparse code. Only used if
        `algorithm='lasso_cd'`.
    max_iter : int, default=1000
        Maximum number of iterations to perform if `algorithm='lasso_cd'` or
        `'lasso_lars'`.
    copy_cov : bool, default=True
        Whether to copy the precomputed covariance matrix; if `False`, it may
        be overwritten.
    check_input : bool, default=True
        If `False`, the input arrays `X` and dictionary will not be checked.
    verbose : int, default=0
        Controls the verbosity; the higher, the more messages.
    positive: bool, default=False
        Whether to enforce a positivity constraint on the sparse code.
        .. versionadded:: 0.20
    Returns
    -------
    code : ndarray of shape (n_components, n_features)
        The sparse codes.
    See Also
    --------
    sklearn.linear_model.lars_path
    sklearn.linear_model.orthogonal_mp
    sklearn.linear_model.Lasso
    SparseCoder
    """
    # Ensure X is 2d before reading its shape.
    if X.ndim == 1:
        X = X[:, np.newaxis]
    n_samples, n_features = X.shape
    n_components = dictionary.shape[0]
    if dictionary.shape[1] != X.shape[1]:
        raise ValueError("Dictionary and X have different numbers of features:"
                         "dictionary.shape: {} X.shape{}".format(
                             dictionary.shape, X.shape))
    if cov is None and algorithm != 'lasso_cd':
        # overwriting cov is safe
        copy_cov = False
        cov = np.dot(dictionary, X.T)
    # Fail fast if positivity cannot be honoured by this algorithm.
    _check_positive_coding(algorithm, positive)
    if algorithm == 'lasso_lars':
        alpha = float(regularization) / n_features  # account for scaling
        # Silence numpy floating-point warnings while LARS runs;
        # the previous error state is restored in the finally clause.
        try:
            err_mgt = np.seterr(all='ignore')
            # Not passing in verbose=max(0, verbose-1) because Lars.fit already
            # corrects the verbosity level.
            lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
                                   verbose=verbose, normalize=False,
                                   precompute=gram, fit_path=False,
                                   positive=positive, max_iter=max_iter)
            lasso_lars.fit(dictionary.T, X.T, Xy=cov)
            new_code = lasso_lars.coef_
        finally:
            np.seterr(**err_mgt)
    elif algorithm == 'lasso_cd':
        alpha = float(regularization) / n_features  # account for scaling
        # TODO: Make verbosity argument for Lasso?
        # sklearn.linear_model.coordinate_descent.enet_path has a verbosity
        # argument that we could pass in from Lasso.
        clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
                    precompute=gram, max_iter=max_iter, warm_start=True,
                    positive=positive)
        if init is not None:
            clf.coef_ = init
        clf.fit(dictionary.T, X.T, check_input=check_input)
        new_code = clf.coef_
    elif algorithm == 'lars':
        try:
            err_mgt = np.seterr(all='ignore')
            # Not passing in verbose=max(0, verbose-1) because Lars.fit already
            # corrects the verbosity level.
            lars = Lars(fit_intercept=False, verbose=verbose, normalize=False,
                        precompute=gram, n_nonzero_coefs=int(regularization),
                        fit_path=False)
            lars.fit(dictionary.T, X.T, Xy=cov)
            new_code = lars.coef_
        finally:
            np.seterr(**err_mgt)
    elif algorithm == 'threshold':
        new_code = ((np.sign(cov) *
                    np.maximum(np.abs(cov) - regularization, 0)).T)
        if positive:
            np.clip(new_code, 0, None, out=new_code)
    elif algorithm == 'omp':
        new_code = orthogonal_mp_gram(
            Gram=gram, Xy=cov, n_nonzero_coefs=int(regularization),
            tol=None, norms_squared=row_norms(X, squared=True),
            copy_Xy=copy_cov).T
    else:
        raise ValueError('Sparse coding method must be "lasso_lars" '
                         '"lasso_cd", "lasso", "threshold" or "omp", got %s.'
                         % algorithm)
    # coef_ may come back 1d; reshape to (n_samples, n_components).
    if new_code.ndim != 2:
        return new_code.reshape(n_samples, n_components)
    return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, *, gram=None, cov=None,
                  algorithm='lasso_lars', n_nonzero_coefs=None, alpha=None,
                  copy_cov=True, init=None, max_iter=1000, n_jobs=None,
                  check_input=True, verbose=0, positive=False):
    """Sparse coding
    Each row of the result is the solution to a sparse coding problem.
    The goal is to find a sparse array `code` such that::
        X ~= code * dictionary
    Read more in the :ref:`User Guide <SparseCoder>`.
    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data matrix.
    dictionary : ndarray of shape (n_components, n_features)
        The dictionary matrix against which to solve the sparse coding of
        the data. Some of the algorithms assume normalized rows for meaningful
        output.
    gram : ndarray of shape (n_components, n_components), default=None
        Precomputed Gram matrix, `dictionary * dictionary'`.
    cov : ndarray of shape (n_components, n_samples), default=None
        Precomputed covariance, `dictionary' * X`.
    algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
            default='lasso_lars'
        The algorithm used:
        * `'lars'`: uses the least angle regression method
          (`linear_model.lars_path`);
        * `'lasso_lars'`: uses Lars to compute the Lasso solution;
        * `'lasso_cd'`: uses the coordinate descent method to compute the
          Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
          the estimated components are sparse;
        * `'omp'`: uses orthogonal matching pursuit to estimate the sparse
          solution;
        * `'threshold'`: squashes to zero all coefficients less than
          regularization from the projection `dictionary * data'`.
    n_nonzero_coefs : int, default=None
        Number of nonzero coefficients to target in each column of the
        solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
        and is overridden by `alpha` in the `omp` case. If `None`, then
        `n_nonzero_coefs=int(n_features / 10)`.
    alpha : float, default=None
        If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
        penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
        threshold below which coefficients will be squashed to zero.
        If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
        the reconstruction error targeted. In this case, it overrides
        `n_nonzero_coefs`.
        If `None`, default to 1.
    copy_cov : bool, default=True
        Whether to copy the precomputed covariance matrix; if `False`, it may
        be overwritten.
    init : ndarray of shape (n_samples, n_components), default=None
        Initialization value of the sparse codes. Only used if
        `algorithm='lasso_cd'`.
    max_iter : int, default=1000
        Maximum number of iterations to perform if `algorithm='lasso_cd'` or
        `'lasso_lars'`.
    n_jobs : int, default=None
        Number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    check_input : bool, default=True
        If `False`, the input arrays X and dictionary will not be checked.
    verbose : int, default=0
        Controls the verbosity; the higher, the more messages.
    positive : bool, default=False
        Whether to enforce positivity when finding the encoding.
        .. versionadded:: 0.20
    Returns
    -------
    code : ndarray of shape (n_samples, n_components)
        The sparse codes
    See Also
    --------
    sklearn.linear_model.lars_path
    sklearn.linear_model.orthogonal_mp
    sklearn.linear_model.Lasso
    SparseCoder
    """
    if check_input:
        if algorithm == 'lasso_cd':
            dictionary = check_array(dictionary, order='C', dtype='float64')
            X = check_array(X, order='C', dtype='float64')
        else:
            dictionary = check_array(dictionary)
            X = check_array(X)
    n_samples, n_features = X.shape
    n_components = dictionary.shape[0]
    if gram is None and algorithm != 'threshold':
        gram = np.dot(dictionary, dictionary.T)
    if cov is None and algorithm != 'lasso_cd':
        copy_cov = False
        cov = np.dot(dictionary, X.T)
    # lars/omp are parameterized by a target number of non-zero
    # coefficients; the other algorithms by the penalty alpha.
    if algorithm in ('lars', 'omp'):
        regularization = n_nonzero_coefs
        if regularization is None:
            regularization = min(max(n_features / 10, 1), n_components)
    else:
        regularization = alpha
        if regularization is None:
            regularization = 1.
    if effective_n_jobs(n_jobs) == 1 or algorithm == 'threshold':
        code = _sparse_encode(X,
                              dictionary, gram, cov=cov,
                              algorithm=algorithm,
                              regularization=regularization, copy_cov=copy_cov,
                              init=init,
                              max_iter=max_iter,
                              check_input=False,
                              verbose=verbose,
                              positive=positive)
        return code
    # Enter parallel code block
    # Samples are split into even slices; each worker encodes its slice
    # and the views are copied back into the preallocated result.
    code = np.empty((n_samples, n_components))
    slices = list(gen_even_slices(n_samples, effective_n_jobs(n_jobs)))
    code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_sparse_encode)(
            X[this_slice], dictionary, gram,
            cov[:, this_slice] if cov is not None else None,
            algorithm,
            regularization=regularization, copy_cov=copy_cov,
            init=init[this_slice] if init is not None else None,
            max_iter=max_iter,
            check_input=False,
            verbose=verbose,
            positive=positive)
        for this_slice in slices)
    for this_slice, this_view in zip(slices, code_views):
        code[this_slice] = this_view
    return code
def _update_dict(dictionary, Y, code, A=None, B=None, verbose=False,
                 random_state=None, positive=False):
    """Update the dense dictionary factor in place.
    Parameters
    ----------
    dictionary : ndarray of shape (n_components, n_features)
        Value of the dictionary at the previous iteration.
    Y : ndarray of shape (n_samples, n_features)
        Data matrix.
    code : ndarray of shape (n_samples, n_components)
        Sparse coding of the data against which to optimize the dictionary.
    A : ndarray of shape (n_components, n_components), default=None
        Together with `B`, sufficient stats of the online model to update the
        dictionary.
    B : ndarray of shape (n_features, n_components), default=None
        Together with `A`, sufficient stats of the online model to update the
        dictionary.
    verbose: bool, default=False
        Degree of output the procedure will print.
    random_state : int, RandomState instance or None, default=None
        Used for randomly initializing the dictionary. Pass an int for
        reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
    positive : bool, default=False
        Whether to enforce positivity when finding the dictionary.
        .. versionadded:: 0.20
    """
    n_samples, n_components = code.shape
    random_state = check_random_state(random_state)
    # Batch mode: if the sufficient statistics are not supplied,
    # derive them from the full code/data matrices.
    if A is None:
        A = code.T @ code
    if B is None:
        B = Y.T @ code
    n_unused = 0
    for k in range(n_components):
        if A[k, k] > 1e-6:
            # 1e-6 is arbitrary but consistent with the spams implementation
            # Update atom k in place from the sufficient statistics.
            dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k]
        else:
            # kth atom is almost never used -> sample a new one from the data
            newd = Y[random_state.choice(n_samples)]
            # add small noise to avoid making the sparse coding ill conditioned
            noise_level = 0.01 * (newd.std() or 1)  # avoid 0 std
            noise = random_state.normal(0, noise_level, size=len(newd))
            dictionary[k] = newd + noise
            code[:, k] = 0
            n_unused += 1
        if positive:
            np.clip(dictionary[k], 0, None, out=dictionary[k])
        # Projection on the constraint set ||V_k|| == 1
        dictionary[k] /= linalg.norm(dictionary[k])
    if verbose and n_unused > 0:
        print(f"{n_unused} unused atoms resampled.")
def dict_learning(X, n_components, *, alpha, max_iter=100, tol=1e-8,
                  method='lars', n_jobs=None, dict_init=None, code_init=None,
                  callback=None, verbose=False, random_state=None,
                  return_n_iter=False, positive_dict=False,
                  positive_code=False, method_max_iter=1000):
    """Solves a dictionary learning matrix factorization problem.
    Finds the best dictionary and the corresponding sparse code for
    approximating the data matrix X by solving::
        (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
                     (U,V)
                    with || V_k ||_2 = 1 for all  0 <= k < n_components
    where V is the dictionary and U is the sparse code. ||.||_Fro stands for
    the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
    which is the sum of the absolute values of all the entries in the matrix.
    Read more in the :ref:`User Guide <DictionaryLearning>`.
    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data matrix.
    n_components : int
        Number of dictionary atoms to extract.
    alpha : int
        Sparsity controlling parameter.
    max_iter : int, default=100
        Maximum number of iterations to perform.
    tol : float, default=1e-8
        Tolerance for the stopping condition.
    method : {'lars', 'cd'}, default='lars'
        The method used:
        * `'lars'`: uses the least angle regression method to solve the lasso
           problem (`linear_model.lars_path`);
        * `'cd'`: uses the coordinate descent method to compute the
          Lasso solution (`linear_model.Lasso`). Lars will be faster if
          the estimated components are sparse.
    n_jobs : int, default=None
        Number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    dict_init : ndarray of shape (n_components, n_features), default=None
        Initial value for the dictionary for warm restart scenarios. Only used
        if `code_init` and `dict_init` are not None.
    code_init : ndarray of shape (n_samples, n_components), default=None
        Initial value for the sparse code for warm restart scenarios. Only used
        if `code_init` and `dict_init` are not None.
    callback : callable, default=None
        Callable that gets invoked every five iterations
    verbose : bool, default=False
        To control the verbosity of the procedure.
    random_state : int, RandomState instance or None, default=None
        Used for randomly initializing the dictionary. Pass an int for
        reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.
    positive_dict : bool, default=False
        Whether to enforce positivity when finding the dictionary.
        .. versionadded:: 0.20
    positive_code : bool, default=False
        Whether to enforce positivity when finding the code.
        .. versionadded:: 0.20
    method_max_iter : int, default=1000
        Maximum number of iterations to perform.
        .. versionadded:: 0.22
    Returns
    -------
    code : ndarray of shape (n_samples, n_components)
        The sparse code factor in the matrix factorization.
    dictionary : ndarray of shape (n_components, n_features),
        The dictionary factor in the matrix factorization.
    errors : array
        Vector of errors at each iteration.
    n_iter : int
        Number of iterations run. Returned only if `return_n_iter` is
        set to True.
    See Also
    --------
    dict_learning_online
    DictionaryLearning
    MiniBatchDictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    """
    if method not in ('lars', 'cd'):
        raise ValueError('Coding method %r not supported as a fit algorithm.'
                         % method)
    _check_positive_coding(method, positive_code)
    # sparse_encode expects the 'lasso_lars'/'lasso_cd' spelling.
    method = 'lasso_' + method
    t0 = time.time()
    # Avoid integer division problems
    alpha = float(alpha)
    random_state = check_random_state(random_state)
    # Init the code and the dictionary with SVD of Y
    if code_init is not None and dict_init is not None:
        code = np.array(code_init, order='F')
        # Don't copy V, it will happen below
        dictionary = dict_init
    else:
        code, S, dictionary = linalg.svd(X, full_matrices=False)
        # flip the initial code's sign to enforce deterministic output
        code, dictionary = svd_flip(code, dictionary)
        # Fold the singular values into the dictionary rows so that
        # code @ dictionary approximates X.
        dictionary = S[:, np.newaxis] * dictionary
    r = len(dictionary)
    if n_components <= r:  # True even if n_components=None
        code = code[:, :n_components]
        dictionary = dictionary[:n_components, :]
    else:
        code = np.c_[code, np.zeros((len(code), n_components - r))]
        dictionary = np.r_[dictionary,
                           np.zeros((n_components - r, dictionary.shape[1]))]
    # Fortran-order dict better suited for the sparse coding which is the
    # bottleneck of this algorithm.
    dictionary = np.asfortranarray(dictionary)
    errors = []
    current_cost = np.nan
    if verbose == 1:
        print('[dict_learning]', end=' ')
    # If max_iter is 0, number of iterations returned should be zero
    ii = -1
    for ii in range(max_iter):
        dt = (time.time() - t0)
        if verbose == 1:
            sys.stdout.write(".")
            sys.stdout.flush()
        elif verbose:
            print("Iteration % 3i "
                  "(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
                  % (ii, dt, dt / 60, current_cost))
        # Update code
        code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
                             init=code, n_jobs=n_jobs, positive=positive_code,
                             max_iter=method_max_iter, verbose=verbose)
        # Update dictionary in place
        _update_dict(dictionary, X, code, verbose=verbose,
                     random_state=random_state, positive=positive_dict)
        # Cost function
        current_cost = (0.5 * np.sum((X - code @ dictionary)**2)
                        + alpha * np.sum(np.abs(code)))
        errors.append(current_cost)
        if ii > 0:
            # Stop once the cost improvement falls below the relative
            # tolerance.
            dE = errors[-2] - errors[-1]
            # assert(dE >= -tol * errors[-1])
            if dE < tol * errors[-1]:
                if verbose == 1:
                    # A line return
                    print("")
                elif verbose:
                    print("--- Convergence reached after %d iterations" % ii)
                break
        if ii % 5 == 0 and callback is not None:
            callback(locals())
    if return_n_iter:
        return code, dictionary, errors, ii + 1
    else:
        return code, dictionary, errors
def dict_learning_online(X, n_components=2, *, alpha=1, n_iter=100,
                         return_code=True, dict_init=None, callback=None,
                         batch_size=3, verbose=False, shuffle=True,
                         n_jobs=None, method='lars', iter_offset=0,
                         random_state=None, return_inner_stats=False,
                         inner_stats=None, return_n_iter=False,
                         positive_dict=False, positive_code=False,
                         method_max_iter=1000):
    """Solves a dictionary learning matrix factorization problem online.
    Finds the best dictionary and the corresponding sparse code for
    approximating the data matrix X by solving::
        (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
                     (U,V)
                     with || V_k ||_2 = 1 for all  0 <= k < n_components
    where V is the dictionary and U is the sparse code. ||.||_Fro stands for
    the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
    which is the sum of the absolute values of all the entries in the matrix.
    This is accomplished by repeatedly iterating over mini-batches by slicing
    the input data.
    Read more in the :ref:`User Guide <DictionaryLearning>`.
    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data matrix.
    n_components : int, default=2
        Number of dictionary atoms to extract.
    alpha : float, default=1
        Sparsity controlling parameter.
    n_iter : int, default=100
        Number of mini-batch iterations to perform.
    return_code : bool, default=True
        Whether to also return the code U or just the dictionary `V`.
    dict_init : ndarray of shape (n_components, n_features), default=None
        Initial value for the dictionary for warm restart scenarios.
    callback : callable, default=None
        callable that gets invoked every five iterations.
    batch_size : int, default=3
        The number of samples to take in each batch.
    verbose : bool, default=False
        To control the verbosity of the procedure.
    shuffle : bool, default=True
        Whether to shuffle the data before splitting it in batches.
    n_jobs : int, default=None
        Number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    method : {'lars', 'cd'}, default='lars'
        * `'lars'`: uses the least angle regression method to solve the lasso
          problem (`linear_model.lars_path`);
        * `'cd'`: uses the coordinate descent method to compute the
          Lasso solution (`linear_model.Lasso`). Lars will be faster if
          the estimated components are sparse.
    iter_offset : int, default=0
        Number of previous iterations completed on the dictionary used for
        initialization.
    random_state : int, RandomState instance or None, default=None
        Used for initializing the dictionary when ``dict_init`` is not
        specified, randomly shuffling the data when ``shuffle`` is set to
        ``True``, and updating the dictionary. Pass an int for reproducible
        results across multiple function calls.
        See :term:`Glossary <random_state>`.
    return_inner_stats : bool, default=False
        Return the inner statistics A (dictionary covariance) and B
        (data approximation). Useful to restart the algorithm in an
        online setting. If `return_inner_stats` is `True`, `return_code` is
        ignored.
    inner_stats : tuple of (A, B) ndarrays, default=None
        Inner sufficient statistics that are kept by the algorithm.
        Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
        `A` `(n_components, n_components)` is the dictionary covariance matrix.
        `B` `(n_features, n_components)` is the data approximation matrix.
    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.
    positive_dict : bool, default=False
        Whether to enforce positivity when finding the dictionary.
        .. versionadded:: 0.20
    positive_code : bool, default=False
        Whether to enforce positivity when finding the code.
        .. versionadded:: 0.20
    method_max_iter : int, default=1000
        Maximum number of iterations to perform when solving the lasso problem.
        .. versionadded:: 0.22
    Returns
    -------
    code : ndarray of shape (n_samples, n_components),
        The sparse code (only returned if `return_code=True`).
    dictionary : ndarray of shape (n_components, n_features),
        The solutions to the dictionary learning problem.
    n_iter : int
        Number of iterations run. Returned only if `return_n_iter` is
        set to `True`.
    See Also
    --------
    dict_learning
    DictionaryLearning
    MiniBatchDictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    """
    if n_components is None:
        n_components = X.shape[1]
    if method not in ('lars', 'cd'):
        raise ValueError('Coding method not supported as a fit algorithm.')
    _check_positive_coding(method, positive_code)
    method = 'lasso_' + method
    t0 = time.time()
    n_samples, n_features = X.shape
    # Avoid integer division problems
    alpha = float(alpha)
    random_state = check_random_state(random_state)
    # Init V with SVD of X
    if dict_init is not None:
        dictionary = dict_init
    else:
        _, S, dictionary = randomized_svd(X, n_components,
                                          random_state=random_state)
        dictionary = S[:, np.newaxis] * dictionary
    r = len(dictionary)
    if n_components <= r:
        dictionary = dictionary[:n_components, :]
    else:
        dictionary = np.r_[dictionary,
                           np.zeros((n_components - r, dictionary.shape[1]))]
    if verbose == 1:
        print('[dict_learning]', end=' ')
    if shuffle:
        X_train = X.copy()
        random_state.shuffle(X_train)
    else:
        X_train = X
    # Fortran-order dict better suited for the sparse coding which is the
    # bottleneck of this algorithm.
    dictionary = check_array(dictionary, order='F', dtype=np.float64,
                             copy=False)
    dictionary = np.require(dictionary, requirements='W')
    X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
    # Cycle the batches so n_iter can exceed one epoch over the data.
    batches = gen_batches(n_samples, batch_size)
    batches = itertools.cycle(batches)
    # The covariance of the dictionary
    if inner_stats is None:
        A = np.zeros((n_components, n_components))
        # The data approximation
        B = np.zeros((n_features, n_components))
    else:
        A = inner_stats[0].copy()
        B = inner_stats[1].copy()
    # If n_iter is zero, we need to return zero.
    ii = iter_offset - 1
    for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
        this_X = X_train[batch]
        dt = (time.time() - t0)
        if verbose == 1:
            sys.stdout.write(".")
            sys.stdout.flush()
        elif verbose:
            if verbose > 10 or ii % ceil(100. / verbose) == 0:
                print("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
                      % (ii, dt, dt / 60))
        this_code = sparse_encode(this_X, dictionary, algorithm=method,
                                  alpha=alpha, n_jobs=n_jobs,
                                  check_input=False,
                                  positive=positive_code,
                                  max_iter=method_max_iter, verbose=verbose)
        # Update the auxiliary variables
        # beta is a forgetting factor: the accumulated statistics A and
        # B are geometrically down-weighted as more batches are seen.
        if ii < batch_size - 1:
            theta = float((ii + 1) * batch_size)
        else:
            theta = float(batch_size ** 2 + ii + 1 - batch_size)
        beta = (theta + 1 - batch_size) / (theta + 1)
        A *= beta
        A += np.dot(this_code.T, this_code)
        B *= beta
        B += np.dot(this_X.T, this_code)
        # Update dictionary in place
        _update_dict(dictionary, this_X, this_code, A, B, verbose=verbose,
                     random_state=random_state, positive=positive_dict)
        # Maybe we need a stopping criteria based on the amount of
        # modification in the dictionary
        if callback is not None:
            callback(locals())
    if return_inner_stats:
        if return_n_iter:
            return dictionary, (A, B), ii - iter_offset + 1
        else:
            return dictionary, (A, B)
    if return_code:
        if verbose > 1:
            print('Learning code...', end=' ')
        elif verbose == 1:
            print('|', end=' ')
        code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
                             n_jobs=n_jobs, check_input=False,
                             positive=positive_code, max_iter=method_max_iter,
                             verbose=verbose)
        if verbose > 1:
            dt = (time.time() - t0)
            print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
        if return_n_iter:
            return code, dictionary, ii - iter_offset + 1
        else:
            return code, dictionary
    if return_n_iter:
        return dictionary, ii - iter_offset + 1
    else:
        return dictionary
class _BaseSparseCoding(TransformerMixin):
    """Base class from SparseCoder and DictionaryLearning algorithms.

    Stores the transform_* hyper-parameters and implements the shared
    encode-on-transform logic used by both estimator families.
    """
    def __init__(self, transform_algorithm, transform_n_nonzero_coefs,
                 transform_alpha, split_sign, n_jobs, positive_code,
                 transform_max_iter):
        self.transform_algorithm = transform_algorithm
        self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
        self.transform_alpha = transform_alpha
        self.transform_max_iter = transform_max_iter
        self.split_sign = split_sign
        self.n_jobs = n_jobs
        self.positive_code = positive_code
    def _transform(self, X, dictionary):
        """Private method allowing to accomodate both DictionaryLearning and
        SparseCoder."""
        X = self._validate_data(X, reset=False)
        # transform_alpha has to be changed in _transform
        # this is done for consistency with the value of alpha
        if (hasattr(self, "alpha") and self.alpha != 1. and
                self.transform_alpha is None):
            # BUG FIX: the concatenated literals previously produced the
            # run-together text "equal toalpha"; a space was missing.
            warnings.warn("By default transform_alpha will be equal to "
                          "alpha instead of 1.0 starting from version 1.2",
                          FutureWarning)
            transform_alpha = 1.  # TODO change to self.alpha in 1.2
        else:
            transform_alpha = self.transform_alpha
        code = sparse_encode(
            X, dictionary, algorithm=self.transform_algorithm,
            n_nonzero_coefs=self.transform_n_nonzero_coefs,
            alpha=transform_alpha, max_iter=self.transform_max_iter,
            n_jobs=self.n_jobs, positive=self.positive_code)
        if self.split_sign:
            # feature vector is split into a positive and negative side
            n_samples, n_features = code.shape
            split_code = np.empty((n_samples, 2 * n_features))
            split_code[:, :n_features] = np.maximum(code, 0)
            split_code[:, n_features:] = -np.minimum(code, 0)
            code = split_code
        return code
    def transform(self, X):
        """Encode the data as a sparse combination of the dictionary atoms.
        Coding method is determined by the object parameter
        `transform_algorithm`.
        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Test data to be transformed, must have the same number of
            features as the data used to train the model.
        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            Transformed data.
        """
        check_is_fitted(self)
        return self._transform(X, self.components_)
class SparseCoder(_BaseSparseCoding, BaseEstimator):
    """Sparse coding.

    Find a sparse representation of data against a fixed, precomputed
    dictionary: each row of the result solves the sparse coding problem
    ``X ~= code * dictionary``.

    Read more in the :ref:`User Guide <SparseCoder>`.

    Parameters
    ----------
    dictionary : ndarray of shape (n_components, n_features)
        The dictionary atoms used for sparse coding. Lines are assumed to be
        normalized to unit norm.

    transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
            'threshold'}, default='omp'
        Algorithm used to transform the data:

        - `'lars'`: least angle regression (`linear_model.lars_path`);
        - `'lasso_lars'`: Lars to compute the Lasso solution;
        - `'lasso_cd'`: coordinate descent (`linear_model.Lasso`) —
          `'lasso_lars'` will be faster if the estimated components are
          sparse;
        - `'omp'`: orthogonal matching pursuit;
        - `'threshold'`: squashes to zero all coefficients less than alpha
          from the projection ``dictionary * X'``.

    transform_n_nonzero_coefs : int, default=None
        Number of nonzero coefficients to target in each column of the
        solution; only used by the `'lars'` and `'omp'` algorithms, and
        overridden by `transform_alpha` in the `'omp'` case. If `None`, then
        `transform_n_nonzero_coefs=int(n_features / 10)`.

    transform_alpha : float, default=None
        L1 penalty (`'lasso_lars'`/`'lasso_cd'`), absolute threshold
        (`'threshold'`), or reconstruction tolerance overriding
        `n_nonzero_coefs` (`'omp'`). If `None`, defaults to 1.

    split_sign : bool, default=False
        Whether to split the sparse feature vector into the concatenation of
        its negative part and its positive part.

    n_jobs : int, default=None
        Number of parallel jobs to run. ``None`` means 1 unless in a
        :obj:`joblib.parallel_backend` context; ``-1`` means all processors.
        See :term:`Glossary <n_jobs>`.

    positive_code : bool, default=False
        Whether to enforce positivity when finding the code.

        .. versionadded:: 0.20

    transform_max_iter : int, default=1000
        Maximum number of iterations for the `'lasso_cd'` and `'lasso_lars'`
        algorithms.

        .. versionadded:: 0.22

    Attributes
    ----------
    components_ : ndarray of shape (n_components, n_features)
        The unchanged dictionary atoms.

        .. deprecated:: 0.24
           This attribute is deprecated in 0.24 and will be removed in
           1.1 (renaming of 0.26). Use `dictionary` instead.

    See Also
    --------
    DictionaryLearning
    MiniBatchDictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    sparse_encode
    """
    _required_parameters = ["dictionary"]

    def __init__(self, dictionary, *, transform_algorithm='omp',
                 transform_n_nonzero_coefs=None, transform_alpha=None,
                 split_sign=False, n_jobs=None, positive_code=False,
                 transform_max_iter=1000):
        super().__init__(
            transform_algorithm, transform_n_nonzero_coefs, transform_alpha,
            split_sign, n_jobs, positive_code, transform_max_iter)
        self.dictionary = dictionary

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        Present only so the usual fit/transform API (and pipelines) work.

        Parameters
        ----------
        X : Ignored
        y : Ignored

        Returns
        -------
        self : object
        """
        return self

    @deprecated("The attribute 'components_' is deprecated "  # type: ignore
                "in 0.24 and will be removed in 1.1 (renaming of 0.26). Use "
                "the 'dictionary' instead.")
    @property
    def components_(self):
        return self.dictionary

    def transform(self, X, y=None):
        """Encode the data as a sparse combination of the dictionary atoms.

        The coding method is determined by the `transform_algorithm`
        parameter.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Test data to be transformed, must have the same number of
            features as the data used to train the model.
        y : Ignored

        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            Transformed data.
        """
        return super()._transform(X, self.dictionary)

    def _more_tags(self):
        # No state is learned in fit, so a fitted check is unnecessary.
        return {"requires_fit": False}

    @property
    def n_components_(self):
        # Number of dictionary atoms.
        return self.dictionary.shape[0]

    @property
    def n_features_in_(self):
        # Dimensionality of the atoms (and of the expected input).
        return self.dictionary.shape[1]
class DictionaryLearning(_BaseSparseCoding, BaseEstimator):
    """Dictionary learning.

    Find a dictionary (a set of atoms) that can best be used to represent
    data using a sparse code, by solving::

        (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
                    (U,V)
                    with || V_k ||_2 = 1 for all 0 <= k < n_components

    where ||.||_Fro stands for the Frobenius norm and ||.||_1,1 for the
    entry-wise matrix norm (sum of the absolute values of all entries).

    Read more in the :ref:`User Guide <DictionaryLearning>`.

    Parameters
    ----------
    n_components : int, default=n_features
        Number of dictionary elements to extract.

    alpha : float, default=1.0
        Sparsity controlling parameter.

    max_iter : int, default=1000
        Maximum number of iterations to perform.

    tol : float, default=1e-8
        Tolerance for numerical error.

    fit_algorithm : {'lars', 'cd'}, default='lars'
        Lasso solver used while fitting: least angle regression (`'lars'`,
        :func:`~sklearn.linear_model.lars_path`) or coordinate descent
        (`'cd'`, :class:`~sklearn.linear_model.Lasso`). Lars will be faster
        if the estimated components are sparse.

        .. versionadded:: 0.17
           *cd* coordinate descent method to improve speed.

    transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
            'threshold'}, default='omp'
        Algorithm used to transform the data: `'lars'`, `'lasso_lars'`,
        `'lasso_cd'`, `'omp'` (orthogonal matching pursuit) or
        `'threshold'` (squashes to zero all coefficients less than alpha
        from the projection ``dictionary * X'``).

        .. versionadded:: 0.17
           *lasso_cd* coordinate descent method to improve speed.

    transform_n_nonzero_coefs : int, default=None
        Number of nonzero coefficients to target in each column of the
        solution; only used by the `'lars'` and `'omp'` algorithms. If
        `None`, then `transform_n_nonzero_coefs=int(n_features / 10)`.

    transform_alpha : float, default=None
        L1 penalty (`'lasso_lars'`/`'lasso_cd'`) or absolute threshold
        (`'threshold'`). If `None`, defaults to `alpha`.

    n_jobs : int or None, default=None
        Number of parallel jobs to run. ``None`` means 1 unless in a
        :obj:`joblib.parallel_backend` context; ``-1`` means all processors.
        See :term:`Glossary <n_jobs>`.

    code_init : ndarray of shape (n_samples, n_components), default=None
        Initial value for the code, for warm restart. Only used if both
        `code_init` and `dict_init` are not None.

    dict_init : ndarray of shape (n_components, n_features), default=None
        Initial values for the dictionary, for warm restart. Only used if
        both `code_init` and `dict_init` are not None.

    verbose : bool, default=False
        To control the verbosity of the procedure.

    split_sign : bool, default=False
        Whether to split the sparse feature vector into the concatenation of
        its negative part and its positive part.

    random_state : int, RandomState instance or None, default=None
        Used for initializing the dictionary when ``dict_init`` is not
        specified, randomly shuffling the data when ``shuffle`` is set to
        ``True``, and updating the dictionary. Pass an int for reproducible
        results across multiple function calls.
        See :term:`Glossary <random_state>`.

    positive_code : bool, default=False
        Whether to enforce positivity when finding the code.

        .. versionadded:: 0.20

    positive_dict : bool, default=False
        Whether to enforce positivity when finding the dictionary.

        .. versionadded:: 0.20

    transform_max_iter : int, default=1000
        Maximum number of iterations for the `'lasso_cd'` and
        `'lasso_lars'` algorithms.

        .. versionadded:: 0.22

    Attributes
    ----------
    components_ : ndarray of shape (n_components, n_features)
        dictionary atoms extracted from the data

    error_ : array
        vector of errors at each iteration

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    n_iter_ : int
        Number of iterations run.

    Notes
    -----
    **References:**

    J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
    for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf)

    See Also
    --------
    SparseCoder
    MiniBatchDictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    """

    def __init__(self, n_components=None, *, alpha=1, max_iter=1000, tol=1e-8,
                 fit_algorithm='lars', transform_algorithm='omp',
                 transform_n_nonzero_coefs=None, transform_alpha=None,
                 n_jobs=None, code_init=None, dict_init=None, verbose=False,
                 split_sign=False, random_state=None, positive_code=False,
                 positive_dict=False, transform_max_iter=1000):
        super().__init__(
            transform_algorithm, transform_n_nonzero_coefs, transform_alpha,
            split_sign, n_jobs, positive_code, transform_max_iter)
        self.n_components = n_components
        self.alpha = alpha
        self.max_iter = max_iter
        self.tol = tol
        self.fit_algorithm = fit_algorithm
        self.code_init = code_init
        self.dict_init = dict_init
        self.verbose = verbose
        self.random_state = random_state
        self.positive_dict = positive_dict

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        y : Ignored

        Returns
        -------
        self : object
            Returns the object itself.
        """
        rng = check_random_state(self.random_state)
        X = self._validate_data(X)

        # Default the number of atoms to the input dimensionality.
        n_components = (X.shape[1] if self.n_components is None
                        else self.n_components)

        # Only the dictionary, the error vector and the iteration count are
        # kept on the estimator; the first returned value is unused here.
        _, dictionary, errors, self.n_iter_ = dict_learning(
            X, n_components, alpha=self.alpha,
            tol=self.tol, max_iter=self.max_iter,
            method=self.fit_algorithm,
            method_max_iter=self.transform_max_iter,
            n_jobs=self.n_jobs,
            code_init=self.code_init,
            dict_init=self.dict_init,
            verbose=self.verbose,
            random_state=rng,
            return_n_iter=True,
            positive_dict=self.positive_dict,
            positive_code=self.positive_code)
        self.components_ = dictionary
        self.error_ = errors
        return self
class MiniBatchDictionaryLearning(_BaseSparseCoding, BaseEstimator):
    """Mini-batch dictionary learning.

    Find a dictionary (a set of atoms) that can best be used to represent
    data using a sparse code, optimizing over mini-batches::

        (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
                    (U,V)
                    with || V_k ||_2 = 1 for all 0 <= k < n_components

    where ||.||_Fro stands for the Frobenius norm and ||.||_1,1 for the
    entry-wise matrix norm (sum of the absolute values of all entries).

    Read more in the :ref:`User Guide <DictionaryLearning>`.

    Parameters
    ----------
    n_components : int, default=None
        Number of dictionary elements to extract.

    alpha : float, default=1
        Sparsity controlling parameter.

    n_iter : int, default=1000
        Total number of iterations to perform.

    fit_algorithm : {'lars', 'cd'}, default='lars'
        Lasso solver used while fitting: least angle regression (`'lars'`,
        `linear_model.lars_path`) or coordinate descent (`'cd'`,
        `linear_model.Lasso`). Lars will be faster if the estimated
        components are sparse.

    n_jobs : int, default=None
        Number of parallel jobs to run. ``None`` means 1 unless in a
        :obj:`joblib.parallel_backend` context; ``-1`` means all processors.
        See :term:`Glossary <n_jobs>`.

    batch_size : int, default=3
        Number of samples in each mini-batch.

    shuffle : bool, default=True
        Whether to shuffle the samples before forming batches.

    dict_init : ndarray of shape (n_components, n_features), default=None
        initial value of the dictionary for warm restart scenarios

    transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
            'threshold'}, default='omp'
        Algorithm used to transform the data: `'lars'`, `'lasso_lars'`,
        `'lasso_cd'`, `'omp'` (orthogonal matching pursuit) or
        `'threshold'` (squashes to zero all coefficients less than alpha
        from the projection ``dictionary * X'``).

    transform_n_nonzero_coefs : int, default=None
        Number of nonzero coefficients to target in each column of the
        solution; only used by the `'lars'` and `'omp'` algorithms. If
        `None`, then `transform_n_nonzero_coefs=int(n_features / 10)`.

    transform_alpha : float, default=None
        L1 penalty (`'lasso_lars'`/`'lasso_cd'`) or absolute threshold
        (`'threshold'`). If `None`, defaults to `alpha`.

    verbose : bool, default=False
        To control the verbosity of the procedure.

    split_sign : bool, default=False
        Whether to split the sparse feature vector into the concatenation of
        its negative part and its positive part.

    random_state : int, RandomState instance or None, default=None
        Used for initializing the dictionary when ``dict_init`` is not
        specified, randomly shuffling the data when ``shuffle`` is set to
        ``True``, and updating the dictionary. Pass an int for reproducible
        results across multiple function calls.
        See :term:`Glossary <random_state>`.

    positive_code : bool, default=False
        Whether to enforce positivity when finding the code.

        .. versionadded:: 0.20

    positive_dict : bool, default=False
        Whether to enforce positivity when finding the dictionary.

        .. versionadded:: 0.20

    transform_max_iter : int, default=1000
        Maximum number of iterations for the `'lasso_cd'` and
        `'lasso_lars'` algorithms.

        .. versionadded:: 0.22

    Attributes
    ----------
    components_ : ndarray of shape (n_components, n_features)
        Components extracted from the data.

    inner_stats_ : tuple of (A, B) ndarrays
        Internal sufficient statistics that are kept by the algorithm,
        useful in online settings to avoid losing the history of the
        evolution; they shouldn't have any use for the end user.
        `A` `(n_components, n_components)` is the dictionary covariance
        matrix; `B` `(n_features, n_components)` is the data approximation
        matrix.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    n_iter_ : int
        Number of iterations run.

    iter_offset_ : int
        The number of iteration on data batches that has been
        performed before.

    random_state_ : RandomState instance
        RandomState instance that is generated either from a seed, the
        random number generator or by `np.random`.

    Notes
    -----
    **References:**

    J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
    for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf)

    See Also
    --------
    SparseCoder
    DictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    """

    def __init__(self, n_components=None, *, alpha=1, n_iter=1000,
                 fit_algorithm='lars', n_jobs=None, batch_size=3, shuffle=True,
                 dict_init=None, transform_algorithm='omp',
                 transform_n_nonzero_coefs=None, transform_alpha=None,
                 verbose=False, split_sign=False, random_state=None,
                 positive_code=False, positive_dict=False,
                 transform_max_iter=1000):
        super().__init__(
            transform_algorithm, transform_n_nonzero_coefs, transform_alpha,
            split_sign, n_jobs, positive_code, transform_max_iter)
        self.n_components = n_components
        self.alpha = alpha
        self.n_iter = n_iter
        self.fit_algorithm = fit_algorithm
        self.dict_init = dict_init
        self.verbose = verbose
        self.shuffle = shuffle
        self.batch_size = batch_size
        # Redundant with the base-class assignment, kept for parity with
        # the original implementation.
        self.split_sign = split_sign
        self.random_state = random_state
        self.positive_dict = positive_dict

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.
        y : Ignored

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        rng = check_random_state(self.random_state)
        X = self._validate_data(X)

        dictionary, (A, B), self.n_iter_ = dict_learning_online(
            X, self.n_components, alpha=self.alpha,
            n_iter=self.n_iter, return_code=False,
            method=self.fit_algorithm,
            method_max_iter=self.transform_max_iter,
            n_jobs=self.n_jobs, dict_init=self.dict_init,
            batch_size=self.batch_size, shuffle=self.shuffle,
            verbose=self.verbose, random_state=rng,
            return_inner_stats=True,
            return_n_iter=True,
            positive_dict=self.positive_dict,
            positive_code=self.positive_code)
        self.components_ = dictionary

        # Remember the optimizer state so that partial_fit can resume
        # the online fitting where fit left off.
        self.inner_stats_ = (A, B)
        self.iter_offset_ = self.n_iter
        self.random_state_ = rng
        return self

    def partial_fit(self, X, y=None, iter_offset=None):
        """Updates the model using the data in X as a mini-batch.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.
        y : Ignored
        iter_offset : int, default=None
            The number of iteration on data batches that has been
            performed before this call to partial_fit. This is optional:
            if no number is passed, the memory of the object is used.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        if not hasattr(self, 'random_state_'):
            self.random_state_ = check_random_state(self.random_state)

        # Resume from a previous fit/partial_fit when possible.
        dict_init = (self.components_ if hasattr(self, 'components_')
                     else self.dict_init)
        inner_stats = getattr(self, 'inner_stats_', None)
        if iter_offset is None:
            iter_offset = getattr(self, 'iter_offset_', 0)

        X = self._validate_data(X, reset=(iter_offset == 0))
        dictionary, (A, B) = dict_learning_online(
            X, self.n_components, alpha=self.alpha,
            n_iter=1, method=self.fit_algorithm,
            method_max_iter=self.transform_max_iter,
            n_jobs=self.n_jobs, dict_init=dict_init,
            batch_size=len(X), shuffle=False,
            verbose=self.verbose, return_code=False,
            iter_offset=iter_offset, random_state=self.random_state_,
            return_inner_stats=True, inner_stats=inner_stats,
            positive_dict=self.positive_dict,
            positive_code=self.positive_code)
        self.components_ = dictionary

        # Remember the optimizer state so the next partial_fit can resume.
        self.inner_stats_ = (A, B)
        self.iter_offset_ = iter_offset + 1
        return self
|
#!/usr/bin/env python
from os import environ
from sys import argv
def main():
    """Run Django's command-line utility.

    Ensures ``DJANGO_SETTINGS_MODULE`` is set (defaulting to
    ``'web.settings'``) and dispatches to Django's command-line handler
    with the process arguments.

    Raises
    ------
    ImportError
        If Django cannot be imported; the original import error is chained
        as ``__cause__`` so the real failure is visible in the traceback.
    """
    environ.setdefault('DJANGO_SETTINGS_MODULE', 'web.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # BUG FIX: the original ``raise ... from ImportError`` chained the
        # exception *class*, discarding the caught exception; chain the
        # actual exception instance instead.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(argv)


if __name__ == '__main__':
    main()
|
# Read a single line from standard input; input() strips the trailing newline.
spam=input()
|
# -*- coding: utf-8 -*
import os
import random
import re
import six
import argparse
import io
import math
import sys
# Python 2 only: reload sys so setdefaultencoding becomes available again,
# then make UTF-8 the default for implicit str<->unicode conversions.
if six.PY2:
    reload(sys)
    sys.setdefaultencoding('utf-8')
# Matches every character that is NOT a lowercase ASCII letter or a space;
# used by text_strip() to delete such characters from the corpus text.
prog = re.compile("[^a-z ]", flags=0)
def parse_args():
    """Build the CLI argument parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(
        description="Paddle Fluid word2 vector preprocess")
    # Input/output locations.
    parser.add_argument('--build_dict_corpus_dir', type=str,
                        help="The dir of corpus")
    parser.add_argument('--input_corpus_dir', type=str,
                        help="The dir of input corpus")
    parser.add_argument('--output_corpus_dir', type=str,
                        help="The dir of output corpus")
    parser.add_argument('--dict_path', type=str, default='./dict',
                        help="The path of dictionary ")
    # Filtering knobs.
    parser.add_argument(
        '--min_count', type=int, default=5,
        help="If the word count is less then min_count, it will be removed from dict"
    )
    parser.add_argument('--downsample', type=float, default=0.001,
                        help="filter word by downsample")
    # Mode switches; the caller is expected to pass exactly one of them.
    parser.add_argument('--filter_corpus', action='store_true', default=False,
                        help='Filter corpus')
    parser.add_argument('--build_dict', action='store_true', default=False,
                        help='Build dict from corpus')
    return parser.parse_args()
def text_strip(text):
    """English preprocess rule: lowercase *text* and delete every character
    that is not a lowercase letter or a space (see the module-level ``prog``
    pattern)."""
    lowered = text.lower()
    return prog.sub("", lowered)
# Shameless copy from Tensorflow https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/text_encoder.py
# Unicode utility functions that work with Python 2 and 3
def native_to_unicode(s):
    """Return *s* as unicode text, falling back to a lossy decode."""
    if _is_unicode(s):
        return s
    try:
        return _to_unicode(s)
    except UnicodeDecodeError:
        # Retry while ignoring undecodable byte sequences.
        return _to_unicode(s, ignore_errors=True)
def _is_unicode(s):
    """Return True if *s* is a text (unicode) object on this Python."""
    if six.PY2:
        return isinstance(s, unicode)  # noqa: F821 -- Python 2 builtin
    return isinstance(s, str)
def _to_unicode(s, ignore_errors=False):
    """Decode *s* as UTF-8 text; optionally ignore undecodable bytes."""
    if _is_unicode(s):
        return s
    # Strict decoding raises UnicodeDecodeError unless ignore_errors is set.
    return s.decode("utf-8", errors="ignore" if ignore_errors else "strict")
def filter_corpus(args):
    """
    Filter the corpus and convert words to ids.

    Reads the dictionary from ``args.dict_path`` ("word count" per line),
    writes a "word id" mapping next to it, then rewrites every file in
    ``args.input_corpus_dir`` into ``args.output_corpus_dir`` as
    space-separated word ids, randomly down-sampling frequent words.
    """
    word_count = dict()     # word -> corpus count (from the dict file)
    word_to_id_ = dict()    # word -> integer id (order of the dict file)
    word_all_count = 0      # total token count over the whole corpus
    id_counts = []          # id -> count, parallel to word_to_id_
    word_id = 0
    # Read the dictionary produced by build_dict().
    with io.open(args.dict_path, 'r', encoding='utf-8') as f:
        for line in f:
            word, count = line.split()[0], int(line.split()[1])
            word_count[word] = count
            word_to_id_[word] = word_id
            word_id += 1
            id_counts.append(count)
            word_all_count += count
    # Persist the word -> id mapping alongside the dictionary file.
    print("write word2id file to : " + args.dict_path + "_word_to_id_")
    with io.open(
            args.dict_path + "_word_to_id_", 'w+', encoding='utf-8') as fid:
        for k, v in word_to_id_.items():
            fid.write(k + " " + str(v) + '\n')
    # Filter each corpus file and convert its words to ids.
    if not os.path.exists(args.output_corpus_dir):
        os.makedirs(args.output_corpus_dir)
    for file in os.listdir(args.input_corpus_dir):
        with io.open(
                os.path.join(args.output_corpus_dir, 'convert_' + file),
                "w",
                encoding='utf-8') as wf:
            with io.open(
                    os.path.join(args.input_corpus_dir, file),
                    encoding='utf-8') as rf:
                print(os.path.join(args.input_corpus_dir, file))
                for line in rf:
                    # signal tracks whether anything was written for the line,
                    # so empty (fully filtered) lines produce no output.
                    signal = False
                    line = text_strip(line)
                    words = line.split()
                    for item in words:
                        # Out-of-vocabulary words map to the <UNK> id.
                        if item in word_count:
                            idx = word_to_id_[item]
                        else:
                            idx = word_to_id_[native_to_unicode('<UNK>')]
                        count_w = id_counts[idx]
                        corpus_size = word_all_count
                        # Sub-sampling of frequent words: keep probability
                        # shrinks as the word frequency grows relative to
                        # args.downsample (word2vec-style down-sampling).
                        keep_prob = (
                            math.sqrt(count_w /
                                      (args.downsample * corpus_size)) + 1
                        ) * (args.downsample * corpus_size) / count_w
                        r_value = random.random()
                        if r_value > keep_prob:
                            continue
                        wf.write(_to_unicode(str(idx) + " "))
                        signal = True
                    if signal:
                        wf.write(_to_unicode("\n"))
def build_dict(args):
    """
    Preprocess the data, generate the dictionary and save it to dict_path.

    Reads every file under ``args.build_dict_corpus_dir``, counts word
    frequencies (after text_strip normalization), folds rare words into a
    single ``<UNK>`` entry, and writes "word count" lines sorted by
    descending count to ``args.dict_path``.

    :param args: parsed CLI arguments; uses build_dict_corpus_dir,
        min_count and dict_path.
    :return: None (writes the dictionary file as a side effect).
    """
    # word to count
    word_count = dict()
    for file in os.listdir(args.build_dict_corpus_dir):
        with io.open(
                args.build_dict_corpus_dir + "/" + file, encoding='utf-8') as f:
            print("build dict : ", args.build_dict_corpus_dir + "/" + file)
            for line in f:
                line = text_strip(line)
                words = line.split()
                for item in words:
                    if item in word_count:
                        word_count[item] = word_count[item] + 1
                    else:
                        word_count[item] = 1
    # Collect rare words to fold into <UNK>.
    # NOTE(review): uses <=, so words whose count EQUALS min_count are also
    # removed, while the --min_count help text says "less then min_count" --
    # confirm which behavior is intended.
    item_to_remove = []
    for item in word_count:
        if word_count[item] <= args.min_count:
            item_to_remove.append(item)
    unk_sum = 0
    for item in item_to_remove:
        unk_sum += word_count[item]
        del word_count[item]
    #sort by count
    word_count[native_to_unicode('<UNK>')] = unk_sum
    word_count = sorted(
        word_count.items(), key=lambda word_count: -word_count[1])
    with io.open(args.dict_path, 'w+', encoding='utf-8') as f:
        for k, v in word_count:
            f.write(k + " " + str(v) + '\n')
if __name__ == "__main__":
args = parse_args()
if args.build_dict:
build_dict(args)
elif args.filter_corpus:
filter_corpus(args)
else:
print(
"error command line, please choose --build_dict or --filter_corpus")
|
from Sailor import Sailor
from SensorsForTesting import Sensors
from Bearing import Bearing
# The triple-quoted strings below are bare string-literal statements used as
# test descriptions; they are left verbatim. English translations are given
# in the # comments ("Erwartungshaltung" = "expectation").
'''Test1
Erwartungshaltung: Schiff soll nichts ändern!
'''
# Test 1 -- expectation: the ship should change nothing.
sens = Sensors()
sailor = Sailor(sens)
print ("\nTest1: Erwartungshaltung: Schiff soll nichts ändern!")
desiredBearing = Bearing(90)
sens.setWindDirection(0)
sens.setCompassBearing(90)
sens.setWinkelgesch(0)
sens.setCourseMadeGood(90)
sens.setMeanCompassBearing(90)
sailor.sail(desiredBearing,1)
'''Test2
Erwartungshaltung: Schiff soll nichts ändern!
'''
# Test 2 -- expectation: the ship should change nothing
# (compass and mean compass bearing agree, even though off the target).
sens = Sensors()
sailor = Sailor(sens)
print ("\nTest2: Erwartungshaltung: Schiff soll nichts ändern!")
desiredBearing = Bearing(90)
sens.setWindDirection(0)
sens.setCompassBearing(100)
sens.setWinkelgesch(0)
sens.setCourseMadeGood(90)
sens.setMeanCompassBearing(100)
sailor.sail(desiredBearing,1)
'''Test3
Erwartungshaltung: Schiff soll weiter nach links (Backbord) fahren!
'''
# Test 3 -- expectation: the ship should keep turning left (to port).
sens = Sensors()
sailor = Sailor(sens)
print ("\nTest3: Erwartungshaltung: Schiff soll weiter nach links (Backbord) fahren!")
desiredBearing = Bearing(90)
sens.setWindDirection(0)
sens.setCompassBearing(100)
sens.setWinkelgesch(0)
sens.setCourseMadeGood(90)
sens.setMeanCompassBearing(95)
sailor.sail(desiredBearing,1)
'''Test4
Fahren hoch am Wind! Daher kann der Kurs nicht gefahren werden,
stattdessen wird 30 grad am Wind gefahren!
Erwartungshaltung: Schiff soll nichts ändern!
'''
# Test 4 -- sailing close-hauled: the desired course cannot be sailed,
# so 30 degrees off the wind is sailed instead.
# Expectation: the ship should change nothing.
sens = Sensors()
sailor = Sailor(sens)
print ("\nTest4: Erwartungshaltung: Schiff soll nichts ändern!")
desiredBearing = Bearing(0)
sens.setWindDirection(0)
sens.setCompassBearing(30)
sens.setWinkelgesch(0)
sens.setCourseMadeGood(30)
sens.setMeanCompassBearing(30)
sailor.sail(desiredBearing,1)
'''Test5
Wende machen!
'''
# Test 5 -- perform a tack.
# Expectation (per the print below): keep turning left (to port).
sens = Sensors()
sailor = Sailor(sens)
print ("\nTest5: Erwartungshaltung: Schiff soll weiter nach links (Backbord) fahren!")
desiredBearing = Bearing(330)
sens.setWindDirection(0)
sens.setCompassBearing(30)
sens.setWinkelgesch(0)
sens.setCourseMadeGood(30)
sens.setMeanCompassBearing(30)
sailor.sail(desiredBearing,1)
|
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
import os
import warnings
from dataclasses import dataclass
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from .activations import ACT2FN
from .configuration_gpt2 import GPT2Config
from .file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from .modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from .modeling_utils import (
Conv1D,
PreTrainedModel,
SequenceSummary,
find_pruneable_heads_and_indices,
prune_conv1d_layer,
)
from .utils import logging
# Module-level logger for this file.
logger = logging.get_logger(__name__)

# Names referenced by the docstring-generation decorators in this module.
_CONFIG_FOR_DOC = "GPT2Config"
_TOKENIZER_FOR_DOC = "GPT2Tokenizer"

# Shortcut identifiers of the public pretrained GPT-2 checkpoints.
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "gpt2",
    "gpt2-medium",
    "gpt2-large",
    "gpt2-xl",
    "distilgpt2",
    # See all GPT-2 models at https://huggingface.co/models?filter=gpt2
]
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Reads every variable from the TensorFlow checkpoint at
    ``gpt2_checkpoint_path``, maps each TF variable name to the
    corresponding attribute path on ``model``, and copies the values into
    the PyTorch parameters in place. Returns ``model``.

    Raises ImportError if TensorFlow is not installed, and AssertionError
    if a TF array's shape does not match the target parameter's shape.
    """
    try:
        import re
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        # Drop singleton dimensions so shapes line up with the PyTorch side.
        arrays.append(array.squeeze())
    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split("/")
        # Walk the attribute path segment by segment, starting at the model.
        pointer = model
        for m_name in name:
            # A trailing number (e.g. "h0") selects an index in a module list:
            # split it into the attribute name and the index.
            if re.fullmatch(r"[A-Za-z]+\d+", m_name):
                scope_names = re.split(r"(\d+)", m_name)
            else:
                scope_names = [m_name]
            # Map TF scope names onto PyTorch attribute names:
            # "w"/"g" -> weight, "b" -> bias, embeddings -> <name>.weight.
            if scope_names[0] == "w" or scope_names[0] == "g":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "b":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "wpe" or scope_names[0] == "wte":
                pointer = getattr(pointer, scope_names[0])
                pointer = getattr(pointer, "weight")
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        # Re-raise shape mismatches with both shapes attached for debugging.
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        # Copy the TF values into the PyTorch parameter in place.
        pointer.data = torch.from_numpy(array)
    return model
class Attention(nn.Module):
    """Multi-head (optionally cross-) attention block used by GPT-2.

    With ``is_cross_attention=True`` the layer attends from ``hidden_states``
    (queries) to ``encoder_hidden_states`` (keys/values); otherwise it is
    causal self-attention over ``hidden_states``.
    """

    def __init__(self, nx, n_ctx, config, scale=False, is_cross_attention=False):
        super().__init__()
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        # Lower-triangular causal mask covering the maximum context length.
        self.register_buffer(
            "bias", torch.tril(torch.ones((n_ctx, n_ctx), dtype=torch.uint8)).view(1, 1, n_ctx, n_ctx)
        )
        # Large negative value substituted at masked positions before softmax.
        self.register_buffer("masked_bias", torch.tensor(-1e4))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale
        self.is_cross_attention = is_cross_attention
        if self.is_cross_attention:
            # Cross-attention: k/v come from the encoder states, queries from
            # the decoder states, hence the separate q_attn projection.
            self.c_attn = Conv1D(2 * n_state, nx)
            self.q_attn = Conv1D(n_state, nx)
        else:
            # Self-attention: one projection producing q, k and v at once.
            self.c_attn = Conv1D(3 * n_state, nx)
        self.c_proj = Conv1D(n_state, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads and shrink the projections accordingly."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.n_head, self.split_size // self.n_head, self.pruned_heads
        )
        # c_attn packs q, k, v along dim 1, so the per-head index is repeated
        # at the three offsets.
        index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
        # k arrives pre-transposed by split_heads(k=True), so this matmul
        # directly yields the raw attention scores.
        w = torch.matmul(q, k)
        if self.scale:
            # Scaled dot-product: divide by sqrt(head_features).
            w = w / (float(v.size(-1)) ** 0.5)
        nd, ns = w.size(-2), w.size(-1)
        if not self.is_cross_attention:
            # if only "normal" attention layer implements causal mask
            mask = self.bias[:, :, ns - nd : ns, :ns]
            w = torch.where(mask.bool(), w, self.masked_bias.to(w.dtype))
        if attention_mask is not None:
            # Apply the attention mask
            w = w + attention_mask
        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)
        # Mask heads if we want to
        if head_mask is not None:
            w = w * head_mask
        outputs = [torch.matmul(w, v)]
        if output_attentions:
            outputs.append(w)
        return outputs

    def merge_heads(self, x):
        # (batch, head, seq_length, head_features) -> (batch, seq_length, n_state)
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        # Split the last dimension into per-head slices; keys are additionally
        # laid out transposed so _attn can matmul without another transpose.
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def forward(
        self,
        hidden_states,
        layer_past=None,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        use_cache=False,
        output_attentions=False,
    ):
        """Run attention; returns ``[output, present, (attention weights)]``."""
        if encoder_hidden_states is not None:
            assert hasattr(
                self, "q_attn"
            ), "If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `Attention(..., is_cross_attention=True)`."
            query = self.q_attn(hidden_states)
            key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
            attention_mask = encoder_attention_mask
        else:
            query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            # Cached keys were stored un-transposed (see `present` below);
            # restore the k layout expected by _attn before concatenating
            # along the sequence axis.
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]  # transpose back cf below
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        if use_cache is True:
            present = torch.stack((key.transpose(-2, -1), value))  # transpose to have same shapes for stacking
        else:
            present = (None,)
        attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
        a = attn_outputs[0]
        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)
        outputs = [a, present] + attn_outputs[1:]
        return outputs  # a, present, (attentions)
class MLP(nn.Module):
    """Position-wise feed-forward block: project up, apply the configured
    nonlinearity, project back down, then dropout."""

    def __init__(self, n_state, config):  # n_state is the inner width (typically 4 * n_embd)
        super().__init__()
        embed_dim = config.n_embd
        # Up-projection to the inner width, and back down to the embedding size.
        self.c_fc = Conv1D(n_state, embed_dim)
        self.c_proj = Conv1D(embed_dim, n_state)
        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        """Apply c_fc -> activation -> c_proj -> dropout to ``x``."""
        hidden = self.c_fc(x)
        hidden = self.act(hidden)
        hidden = self.c_proj(hidden)
        return self.dropout(hidden)
class Block(nn.Module):
    """One GPT-2 transformer layer: LN -> self-attention -> (optional
    cross-attention) -> LN -> MLP, each sub-layer wrapped in a residual
    connection (pre-norm arrangement)."""

    def __init__(self, n_ctx, config, scale=False):
        super().__init__()
        hidden_size = config.n_embd
        # MLP inner width defaults to 4x the hidden size when not configured.
        inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
        self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.attn = Attention(hidden_size, n_ctx, config, scale)
        self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        if config.add_cross_attention:
            self.crossattention = Attention(hidden_size, n_ctx, config, scale, is_cross_attention=True)
            self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.mlp = MLP(inner_dim, config)

    def forward(
        self,
        hidden_states,
        layer_past=None,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        use_cache=False,
        output_attentions=False,
    ):
        """Run one layer; returns ``[hidden_states, present, (attentions...)]``."""
        attn_outputs = self.attn(
            self.ln_1(hidden_states),
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
        outputs = attn_outputs[1:]
        # residual connection
        hidden_states = attn_output + hidden_states
        if encoder_hidden_states is not None:
            # add one self-attention block for cross-attention
            assert hasattr(
                self, "crossattention"
            ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
            cross_attn_outputs = self.crossattention(
                self.ln_cross_attn(hidden_states),
                attention_mask=attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                output_attentions=output_attentions,
            )
            attn_output = cross_attn_outputs[0]
            # residual connection
            hidden_states = hidden_states + attn_output
            outputs = outputs + cross_attn_outputs[1:]  # add cross attentions if we output attention weights
        feed_forward_hidden_states = self.mlp(self.ln_2(hidden_states))
        # residual connection
        hidden_states = hidden_states + feed_forward_hidden_states
        outputs = [hidden_states] + outputs
        return outputs  # hidden_states, present, (cross_attentions, attentions)
class GPT2PreTrainedModel(PreTrainedModel):
    """
    Abstract base class for the GPT-2 variants in this file: wires up the
    config class, the TF-checkpoint loader, and the weight-initialization
    scheme shared by all heads.
    """

    config_class = GPT2Config
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize one submodule's parameters in place."""
        if isinstance(module, nn.LayerNorm):
            # LayerNorm starts out as the identity: zero shift, unit scale.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
            return
        if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal
            # for initialization; cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
                module.bias.data.zero_()
@dataclass
class GPT2DoubleHeadsModelOutput(ModelOutput):
    """
    Base class for outputs of models predicting if two sentences are consecutive or not.
    Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
            Language modeling loss.
        mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`mc_labels` is provided):
            Multiple choice classification loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        mc_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
            Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
        past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
            List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
            batch_size, num_heads, sequence_length, embed_size_per_head)`).
            Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
            :obj:`past_key_values` input) to speed up sequential decoding.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    # Field semantics are documented in the class docstring above; ordering
    # matters because ModelOutput exposes fields positionally as a tuple.
    loss: Optional[torch.FloatTensor] = None
    mc_loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    mc_logits: torch.FloatTensor = None
    past_key_values: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
GPT2_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
GPT2_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`):
:obj:`input_ids_length` = ``sequence_length`` if :obj:`past_key_values` is ``None`` else
``past_key_values[0].shape[-2]`` (``sequence_length`` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If :obj:`past_key_values` is used, only ``input_ids`` that do not have their past calculated should be
passed as ``input_ids``.
Indices can be obtained using :class:`~transformers.GPT2Tokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
past_key_values (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
:obj:`past_key_values` output below). Can be used to speed up sequential decoding. The ``input_ids`` which
have their past given to this model should not be passed as ``input_ids`` as they have already been
computed.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
If :obj:`past_key_values` is used, optionally only the last :obj:`inputs_embeds` have to be input (see
:obj:`past_key_values`).
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
    GPT2_START_DOCSTRING,
)
class GPT2Model(GPT2PreTrainedModel):
    """Stack of GPT-2 transformer blocks without any task-specific head.

    Embeds tokens (and absolute positions, and optional token types), runs the
    configured number of `Block` layers, and applies a final LayerNorm.
    """

    def __init__(self, config):
        super().__init__(config)
        # Token and learned absolute-position embeddings.
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.wpe = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.init_weights()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="gpt2",
        output_type=BaseModelOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        # Backwards compatibility for the renamed `past` keyword argument.
        if "past" in kwargs:
            warnings.warn(
                "The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            past_key_values = kwargs.pop("past")
        assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
        # Per-call flags fall back to the config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        # BUGFIX: `device` used to be assigned only inside the
        # `position_ids is None` branch below, which made the default
        # encoder_attention_mask creation raise a NameError whenever callers
        # supplied explicit position_ids together with cross-attention.
        # Compute it unconditionally here instead.
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])
        if past_key_values is None:
            past_length = 0
            past_key_values = [None] * len(self.h)
        else:
            # Length of the cached prefix (per-layer key tensor's seq axis).
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            # New positions continue from the end of the cached prefix.
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
        # Attention mask.
        if attention_mask is not None:
            assert batch_size > 0, "batch_size has to be defined and > 0"
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, None, None, :]
            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * -10000.0
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.add_cross_attention and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)
        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = inputs_embeds + position_embeds
        if token_type_ids is not None:
            # Token-type embeddings share the token embedding table.
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds
        hidden_states = self.drop(hidden_states)
        output_shape = input_shape + (hidden_states.size(-1),)
        presents = () if use_cache else None
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
            if getattr(self.config, "gradient_checkpointing", False):

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # checkpointing only works with tuple returns, not with lists
                        return tuple(output for output in module(*inputs, use_cache, output_attentions))

                    return custom_forward

                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    layer_past,
                    attention_mask,
                    head_mask[i],
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                outputs = block(
                    hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )
            hidden_states, present = outputs[:2]
            if use_cache is True:
                presents = presents + (present,)
            if output_attentions:
                all_attentions = all_attentions + (outputs[2],)
        hidden_states = self.ln_f(hidden_states)
        hidden_states = hidden_states.view(*output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
        )
@add_start_docstrings(
    """
    The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input
    embeddings).
    """,
    GPT2_START_DOCSTRING,
)
class GPT2LMHeadModel(GPT2PreTrainedModel):
    # Checkpoint keys that may legitimately be absent: masked_bias is a
    # registered buffer and lm_head is tied to the input embeddings.
    authorized_missing_keys = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.init_weights()

    def get_output_embeddings(self):
        # Used by the base class to tie lm_head to the input embeddings.
        return self.lm_head

    def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
        """Assemble the model inputs for one generation step."""
        # only last token for inputs_ids if past is defined in kwargs
        if past:
            input_ids = input_ids[:, -1].unsqueeze(-1)
        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        else:
            position_ids = None
        return {
            "input_ids": input_ids,
            "past_key_values": past,
            "use_cache": kwargs.get("use_cache"),
            "position_ids": position_ids,
            "attention_mask": attention_mask,
        }

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="gpt2",
        output_type=CausalLMOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            ``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to
            ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``
        """
        # Backwards compatibility for the renamed `past` keyword argument.
        if "past" in kwargs:
            warnings.warn(
                "The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            past_key_values = kwargs.pop("past")
        assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
        if not return_dict:
            # Tuple output: (loss?, logits, presents, hidden_states?, attentions?)
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return CausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
@add_start_docstrings(
    """
    The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
    RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
    input embeddings, the classification head takes as input the input of a specified classification token index in the
    input sequence).
    """,
    GPT2_START_DOCSTRING,
)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # The multiple-choice head produces a single score per choice.
        config.num_labels = 1
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.multiple_choice_head = SequenceSummary(config)
        self.init_weights()

    def get_output_embeddings(self):
        # Used by the base class to tie lm_head to the input embeddings.
        return self.lm_head

    def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
        """Assemble the model inputs for one generation step."""
        # only last token for inputs_ids if past is defined in kwargs
        if past:
            input_ids = input_ids[:, -1].unsqueeze(-1)
        return {
            "input_ids": input_ids,
            "past_key_values": past,
            "use_cache": kwargs.get("use_cache"),
        }

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=GPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        mc_token_ids=None,
        labels=None,
        mc_labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        r"""
        mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input):
            Index of the classification token in each input sequence. Selected in the range ``[0, input_ids.size(-1) -
            1]``.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            ``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to
            ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``
        mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`):
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices]`` where `num_choices` is the size of the second dimension of the input tensors. (see
            `input_ids` above)
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.
        Return:
        Example::
            >>> import torch
            >>> from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel
            >>> tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
            >>> model = GPT2DoubleHeadsModel.from_pretrained('gpt2', return_dict=True)
            >>> # Add a [CLS] to the vocabulary (we should train it also!)
            >>> num_added_tokens = tokenizer.add_special_tokens({'cls_token': '[CLS]'})
            >>> embedding_layer = model.resize_token_embeddings(len(tokenizer))  # Update the model embeddings with the new vocabulary size
            >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
            >>> encoded_choices = [tokenizer.encode(s) for s in choices]
            >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
            >>> input_ids = torch.tensor(encoded_choices).unsqueeze(0)  # Batch size: 1, number of choices: 2
            >>> mc_token_ids = torch.tensor([cls_token_location])  # Batch size: 1
            >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
            >>> lm_logits = outputs.logits
            >>> mc_logits = outputs.mc_logits
        """
        # Backwards compatibility for the renamed `lm_labels`/`past` kwargs.
        if "lm_labels" in kwargs:
            warnings.warn(
                "The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("lm_labels")
        if "past" in kwargs:
            warnings.warn(
                "The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            past_key_values = kwargs.pop("past")
        assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)
        # Pool the hidden state at each choice's classification token.
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
        mc_loss = None
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
        lm_loss = None
        if labels is not None:
            # Shift so that tokens < n predict n.
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
        if not return_dict:
            # Tuple output: (lm_loss?, mc_loss?, lm_logits, mc_logits, ...)
            output = (lm_logits, mc_logits) + transformer_outputs[1:]
            if mc_loss is not None:
                output = (mc_loss,) + output
            return ((lm_loss,) + output) if lm_loss is not None else output
        return GPT2DoubleHeadsModelOutput(
            loss=lm_loss,
            mc_loss=mc_loss,
            logits=lm_logits,
            mc_logits=mc_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
@add_start_docstrings(
    """
    The GPT2 Model transformer with a sequence classification head on top (linear layer).
    :class:`~transformers.GPT2ForSequenceClassification` uses the last token in order to do the classification, as
    other causal models (e.g. GPT-1) do.
    Since it does classification on the last token, it requires to know the position of the last token. If a
    :obj:`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each
    row. If no :obj:`pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot
    guess the padding tokens when :obj:`inputs_embeds` are passed instead of :obj:`input_ids`, it does the same (take
    the last value in each row of the batch).
    """,
    GPT2_START_DOCSTRING,
)
class GPT2ForSequenceClassification(GPT2PreTrainedModel):
    # Keys that may legitimately be absent when loading a pretrained checkpoint.
    authorized_missing_keys = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPT2Model(config)
        # Classification head scores every position; the forward pass below
        # pools a single position (the last non-pad token) per row.
        self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
        self.init_weights()
    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="microsoft/dialogrpt",
        output_type=SequenceClassifierOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        # Per-position classification scores; pooled to one position below.
        logits = self.score(hidden_states)
        if input_ids is not None:
            batch_size, sequence_length = input_ids.shape[:2]
        else:
            batch_size, sequence_length = inputs_embeds.shape[:2]
        assert (
            self.config.pad_token_id is not None or batch_size == 1
        ), "Cannot handle batch sizes > 1 if no padding token is defined."
        # Find the index of the last non-pad token per row; without a pad token
        # (or with inputs_embeds) fall back to the last position (-1).
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
            else:
                sequence_lengths = -1
                logger.warning(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    f"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
                )
        # Pool one logit vector per row at the selected position.
        pooled_logits = logits[range(batch_size), sequence_lengths]
        loss = None
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(pooled_logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
|
#!/usr/bin/env python
"""utils.py: get utility functions."""
__author__ = "Chakraborty, S."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Chakraborty, S."
__email__ = "shibaji7@vt.edu"
__status__ = "Research"
import numpy as np
import sys
sys.path.extend(["code/", "code/rt/", "code/sd/"])
import pandas as pd
from math import radians, degrees, sin, cos, asin, acos, sqrt, atan
def read_eclipse_file(fname="dataset/August21Eclipse.csv"):
    """Load the eclipse dataset, parsing the ``Time`` column as datetimes."""
    return pd.read_csv(fname, parse_dates=["Time"])
def great_circle(lon1, lat1, lon2, lat2, R=6371):
    """Great-circle distance via the spherical law of cosines (km by default)."""
    lam1, phi1, lam2, phi2 = (radians(v) for v in (lon1, lat1, lon2, lat2))
    central_angle = acos(sin(phi1) * sin(phi2) + cos(phi1) * cos(phi2) * cos(lam1 - lam2))
    return R * central_angle
def haversine(lon1, lat1, lon2, lat2, R=6371):
    """Haversine distance between two lon/lat points (km by default)."""
    lam1, phi1, lam2, phi2 = [radians(v) for v in (lon1, lat1, lon2, lat2)]
    h = sin((phi2 - phi1) / 2) ** 2 + cos(phi1) * cos(phi2) * sin((lam2 - lam1) / 2) ** 2
    return 2 * R * asin(sqrt(h))
def vincenty_inverse(lon1, lat1, lon2, lat2, R=6371, maxIter=200, tol=1e-12):
    """Geodesic distance between two points via Vincenty's inverse formula.

    Args:
        lon1, lat1, lon2, lat2: coordinates in degrees.
        R: mean Earth radius in km; used as the ellipsoid semi-major axis.
        maxIter: maximum number of Lambda iterations.
        tol: convergence tolerance on Lambda (radians).

    Returns:
        Distance in km.

    NOTE(review): like the textbook formulation, this divides by
    cos^2(alpha) and therefore fails (ZeroDivisionError) when both points
    lie on the equator.
    """
    # `tan` and `atan2` are not in the module-level `from math import ...`,
    # so import them here to avoid a NameError.
    from math import tan, atan2
    a = R * 1e3            # semi-major axis in metres (was an undefined name `a` before)
    f = 1 / 298.257223563  # WGS-84 flattening
    b = (1 - f) * a        # semi-minor axis in metres
    # Reduced latitudes.
    u_1 = atan((1 - f) * tan(radians(lat1)))
    u_2 = atan((1 - f) * tan(radians(lat2)))
    L = radians(lon2 - lon1)
    Lambda = L
    sin_u1 = sin(u_1)
    cos_u1 = cos(u_1)
    sin_u2 = sin(u_2)
    cos_u2 = cos(u_2)
    # Iterate Lambda until convergence (or maxIter is exhausted).
    for _ in range(maxIter):
        cos_lambda = cos(Lambda)
        sin_lambda = sin(Lambda)
        sin_sigma = sqrt((cos_u2 * sin_lambda) ** 2
                         + (cos_u1 * sin_u2 - sin_u1 * cos_u2 * cos_lambda) ** 2)
        cos_sigma = sin_u1 * sin_u2 + cos_u1 * cos_u2 * cos_lambda
        sigma = atan2(sin_sigma, cos_sigma)
        sin_alpha = (cos_u1 * cos_u2 * sin_lambda) / sin_sigma
        cos_sq_alpha = 1 - sin_alpha ** 2
        cos2_sigma_m = cos_sigma - ((2 * sin_u1 * sin_u2) / cos_sq_alpha)
        C = (f / 16) * cos_sq_alpha * (4 + f * (4 - 3 * cos_sq_alpha))
        Lambda_prev = Lambda
        Lambda = L + (1 - C) * f * sin_alpha * (
            sigma + C * sin_sigma * (cos2_sigma_m + C * cos_sigma * (-1 + 2 * cos2_sigma_m ** 2)))
        # successful convergence
        if abs(Lambda_prev - Lambda) <= tol:
            break
    u_sq = cos_sq_alpha * ((a ** 2 - b ** 2) / b ** 2)
    A = 1 + (u_sq / 16384) * (4096 + u_sq * (-768 + u_sq * (320 - 175 * u_sq)))
    B = (u_sq / 1024) * (256 + u_sq * (-128 + u_sq * (74 - 47 * u_sq)))
    delta_sig = B * sin_sigma * (cos2_sigma_m + 0.25 * B * (
        cos_sigma * (-1 + 2 * cos2_sigma_m ** 2)
        - (1 / 6) * B * cos2_sigma_m * (-3 + 4 * sin_sigma ** 2) * (-3 + 4 * cos2_sigma_m ** 2)))
    # Convert back to km.
    return b * A * (sigma - delta_sig) / 1e3
def azimuthAngle( lon1, lat1, lon2, lat2):
    """Quadrant-based bearing (degrees) from (lon1, lat1) to (lon2, lat2).

    Treats lon/lat as planar x/y. Identical points give 0.0; due-east
    bearings fall through every branch and also return 0.0 (original
    behavior preserved).
    """
    # The module only does `from math import ...`, so the bare name `math`
    # used below was undefined (NameError); import it locally.
    import math
    angle = 0.0
    dx = lon2 - lon1
    dy = lat2 - lat1
    if lon2 == lon1:
        # Vertical line: due north (90), same point (0) or due south (270).
        angle = math.pi / 2.0
        if lat2 == lat1: angle = 0.0
        elif lat2 < lat1: angle = 3.0 * math.pi / 2.0
    elif lon2 > lon1 and lat2 > lat1: angle = math.atan(dx / dy)
    elif lon2 > lon1 and lat2 < lat1: angle = math.pi / 2 + math.atan(-dy / dx)
    elif lon2 < lon1 and lat2 < lat1: angle = math.pi + math.atan(dx / dy)
    elif lon2 < lon1 and lat2 > lat1: angle = 3.0 * math.pi / 2.0 + math.atan(dy / -dx)
    return (angle * 180 / math.pi)
def get_gridded_parameters(q, xparam="beam", yparam="slist", zparam="v", r=0, rounding=False):
    """
    Method converts scans to "beam" and "slist" or gate

    Groups duplicate (xparam, yparam) pairs by mean, pivots to a 2-D grid
    and returns meshgrid coordinates plus a masked value grid.

    Args:
        q: DataFrame holding at least xparam, yparam and zparam columns.
        xparam, yparam: grid axis columns.
        zparam: value column.
        r: decimals for optional rounding.
        rounding: round axis values to `r` decimals before gridding.

    Returns:
        (X, Y, Z): meshgrid arrays and a NaN-masked 2-D value array.
    """
    # .copy() keeps the .loc writes below off a view of `q`
    # (avoids SettingWithCopyWarning and accidental caller mutation).
    plotParamDF = q[ [xparam, yparam, zparam] ].copy()
    if rounding:
        plotParamDF.loc[:, xparam] = np.round(plotParamDF[xparam].tolist(), r)
        plotParamDF.loc[:, yparam] = np.round(plotParamDF[yparam].tolist(), r)
    # (The old else-branch reassigned the columns to themselves - a no-op.)
    plotParamDF = plotParamDF.groupby( [xparam, yparam] ).mean().reset_index()
    # Keyword arguments: DataFrame.pivot is keyword-only in pandas >= 2.0.
    plotParamDF = plotParamDF[ [xparam, yparam, zparam] ].pivot(index=xparam, columns=yparam)
    x = plotParamDF.index.values
    y = plotParamDF.columns.levels[1].values
    X, Y = np.meshgrid( x, y )
    # Mask the nan values! pcolormesh can't handle them well!
    Z = np.ma.masked_where(
            np.isnan(plotParamDF[zparam].values),
            plotParamDF[zparam].values)
    return X,Y,Z
|
"""
I/O for the STL format, cf.
<https://en.wikipedia.org/wiki/STL_(file_format)>.
"""
import logging
import os
import numpy
from ._exceptions import ReadError, WriteError
from ._files import open_file
from ._mesh import Mesh
def read(filename):
    """Read an STL mesh, auto-detecting binary vs ASCII encoding."""
    with open_file(filename, "rb") as f:
        # ASCII files conventionally start with "solid" in the 80-byte
        # header, but some binary files do too.  A more reliable check
        # (https://stackoverflow.com/a/7394842/353337): interpret the file
        # as binary and see whether the triangle count matches the size.
        f.read(80)  # skip the header
        num_triangles = numpy.fromfile(f, count=1, dtype=numpy.uint32)[0]
        # Binary layout per triangle: 3x float32 normal + 9x float32
        # vertices + 1x int16 attribute count = 50 bytes.
        if 84 + num_triangles * 50 == os.path.getsize(filename):
            return _read_binary(f, num_triangles)
        # ASCII: rewind, drop the "solid ..." line, parse the text body.
        f.seek(0)
        f.readline()
        return _read_ascii(f)
# numpy.loadtxt is super slow
# Code adapted from <https://stackoverflow.com/a/8964779/353337>.
def iter_loadtxt(
    infile, delimiter=" ", skiprows=0, comments=("#",), dtype=float, usecols=None
):
    """Fast replacement for numpy.loadtxt (cf. <https://stackoverflow.com/a/8964779/353337>).

    Args:
        infile: binary file object yielding utf-8 encoded lines.
        delimiter: field separator.
        skiprows: number of leading lines to skip.
        comments: str or tuple of str prefixes of lines to ignore.  The old
            list default raised TypeError in str.startswith and was a
            mutable default argument; it is a tuple now.
        dtype: conversion applied to every extracted field.
        usecols: column indices to keep; all columns when None.

    Returns:
        2-D numpy array with one row per parsed line.
    """
    def iter_func():
        for _ in range(skiprows):
            next(infile)
        for line in infile:
            line = line.decode("utf-8").strip()
            if line.startswith(comments):
                continue
            items = line.split(delimiter)
            usecols_ = range(len(items)) if usecols is None else usecols
            for idx in usecols_:
                yield dtype(items[idx])
        # Fields per row, not characters per line (the old `len(line)`
        # counted characters and produced a bogus reshape width).
        iter_loadtxt.rowlength = len(items) if usecols is None else len(usecols)

    data = numpy.fromiter(iter_func(), dtype=dtype)
    data = data.reshape((-1, iter_loadtxt.rowlength))
    return data
def _read_ascii(f):
    """Parse the body of an ASCII STL file into a Mesh.

    Expected layout::

        solid foo
          facet normal 0.45 -0.18 -0.87
           outer loop
            vertex 266.36 234.594 14.6145
            vertex 268.582 234.968 15.6956
            vertex 267.689 232.646 15.7283
           endloop
          endfacet
          # ... more facets ...
        endsolid

    For speed the format is not validated; keyword lines are skipped via
    the `comments` filter and only the three numeric columns are kept.
    """
    coords = iter_loadtxt(
        f,
        comments=("solid", "facet", "outer loop", "endloop", "endfacet", "endsolid"),
        usecols=(1, 2, 3),
    )
    # Every facet contributes exactly three vertex rows.
    if coords.shape[0] % 3 != 0:
        raise ReadError()
    facets = numpy.split(coords, coords.shape[0] // 3)
    points, cells = data_from_facets(facets)
    return Mesh(points, cells)
def data_from_facets(facets):
    """Deduplicate facet vertices into a point array and triangle cells.

    Facets carry raw vertex coordinates; identical vertices are merged so
    triangles can index a shared point list.  Points keep their
    first-appearance order.
    """
    stacked = numpy.concatenate(facets)
    # TODO equip `unique()` with a tolerance
    # unique() sorts its output; return_index + argsort restores the
    # first-seen ordering (<https://stackoverflow.com/a/15637512/353337>).
    _, first_idx, inverse = numpy.unique(
        stacked, axis=0, return_index=True, return_inverse=True
    )
    order = numpy.argsort(first_idx)
    points = stacked[first_idx[order]]
    remap = numpy.argsort(order)
    cells = {"triangle": remap[inverse].reshape(-1, 3)}
    return points, cells
def _read_binary(f, num_triangles):
    """Read `num_triangles` records of a binary STL body into a Mesh."""
    # Each 50-byte record: normal (3 x f4), 3 vertices (9 x f4) and an
    # int16 attribute count.
    record = numpy.dtype(
        [("normal", "f4", (3,)), ("facet", "f4", (3, 3)), ("attr count", "i2")]
    )
    triangles = numpy.fromfile(f, count=num_triangles, dtype=record)
    # Non-zero attribute counts are not supported; normals are recomputed
    # on write, so both are otherwise discarded.
    if not numpy.all(triangles["attr count"] == 0):
        raise ReadError()
    points, cells = data_from_facets(triangles["facet"])
    return Mesh(points, cells)
def write(filename, mesh, binary=False):
    """Write `mesh` to `filename` as STL (binary or ASCII).

    Only triangle cells are representable; other cell types are dropped
    with a warning, and 2-D points are padded with a zero z-component.
    """
    if "triangle" not in mesh.cells:
        raise WriteError(
            "STL can only write triangle cells (not {}).".format(
                ", ".join(list(mesh.cells.keys()))
            )
        )
    if len(mesh.cells) > 1:
        keys = set(mesh.cells.keys())
        keys.remove("triangle")
        logging.warning(
            "STL can only write triangle cells. Discarding {}.".format(", ".join(keys))
        )
    if mesh.points.shape[1] == 2:
        logging.warning(
            "STL requires 3D points, but 2D points given. "
            "Appending 0 third component."
        )
        mesh.points = numpy.column_stack(
            [mesh.points[:, 0], mesh.points[:, 1], numpy.zeros(mesh.points.shape[0])]
        )
    writer = _binary if binary else _write_ascii
    writer(filename, mesh.points, mesh.cells)
def _compute_normals(pts):
normals = numpy.cross(pts[:, 1] - pts[:, 0], pts[:, 2] - pts[:, 0])
nrm = numpy.sqrt(numpy.einsum("ij,ij->i", normals, normals))
normals = (normals.T / nrm).T
return normals
def _write_ascii(filename, points, cells):
    """Write triangles as an ASCII STL file (facet/outer loop/vertex blocks)."""
    tri = points[cells["triangle"]]
    normals = _compute_normals(tri)
    with open_file(filename, "wb") as fh:
        fh.write("solid\n".encode("utf-8"))
        for verts, nrm in zip(tri, normals):
            # One block per facet:
            #   facet normal nx ny nz
            #    outer loop
            #     vertex x y z   (three times)
            #    endloop
            #   endfacet
            lines = ["facet normal {} {} {}".format(*nrm), " outer loop"]
            lines.extend("  vertex {} {} {}".format(*v) for v in verts)
            lines.extend([" endloop", "endfacet"])
            fh.write(("\n".join(lines) + "\n").encode("utf-8"))
        fh.write("endsolid\n".encode("utf-8"))
def _binary(filename, points, cells):
    """Write triangles as a binary STL file."""
    tri = points[cells["triangle"]]
    normals = _compute_normals(tri)
    with open_file(filename, "wb") as fh:
        # Fixed 80-byte header, padded with 'X' and terminated by newline.
        header = "This file was generated by meshio."
        header += (79 - len(header)) * "X"
        header += "\n"
        fh.write(header.encode("utf-8"))
        fh.write(numpy.uint32(len(cells["triangle"])))
        # 50 bytes per record: normal, three vertices, zero attribute count.
        for verts, nrm in zip(tri, normals):
            fh.write(nrm.astype(numpy.float32))
            fh.write(verts.astype(numpy.float32))
            fh.write(numpy.uint16(0))
|
# coding=utf-8
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Tests for map_structure.py'''
import unittest
from cabby.geo.map_processing import map_structure
from cabby.geo import regions
class MapTest(unittest.TestCase):
    """Sanity checks for the processed Washington D.C. map."""

    @classmethod
    def setUpClass(cls):
        # Build the D.C. map once for the whole test case (expensive).
        cls.map = map_structure.Map(regions.get_region('DC'), 18)

    def testPOIInGraph(self):
        # Every POI osmid must appear as a node of the road graph.
        for poi_osmid in self.map.poi['osmid'].tolist():
            self.assertIn(poi_osmid, self.map.nx_graph.nodes)

    def testOSMNodes(self):
        # Node table and graph must describe the same osmid set.
        node_ids = set(self.map.nodes['osmid'])
        graph_ids = set(self.map.nx_graph)
        self.assertEqual(len(node_ids), len(graph_ids))
        self.assertEqual(len(node_ids.difference(graph_ids)), 0)

    def testEdgesTrueLength(self):
        # true_length must be populated for every edge.
        self.assertFalse(self.map.edges.true_length.isnull().values.any())

    def testAttributeInGraph(self):
        self.assertIn('1#1360050503', self.map.nx_graph.nodes)
        node = self.map.nx_graph.nodes['1#1360050503']
        self.assertEqual('primary', node['highway'])

    def testSingleOutput(self):
        # Verify that a known POI is present, exactly once.
        matches = self.map.poi[self.map.poi['name'] == 'Dental Dream']
        self.assertEqual(matches.shape[0], 1)
        # Check the cellid.
        cells = self.map.poi[self.map.poi['name'] == 'Dental Dream']['s2cellids'].tolist()[0]
        expected_ids = [9923620797002285056]
        found_ids = [cell.id() for cell in cells]
        for expected, found in zip(expected_ids, found_ids):
            self.assertEqual(expected, found)
if __name__ == "__main__":
    unittest.main()  # run the map tests when this module is executed directly
|
# Standard Library
import builtins
import json
import copy
import numpy as np
# Import from third library
from up.utils.general.log_helper import default_logger as logger
from up.utils.general.registry_factory import EVALUATOR_REGISTRY
from up.data.metrics.base_evaluator import Evaluator
from up.utils.general.petrel_helper import PetrelHelper
from up.data.metrics import Metric
# fix pycocotools py2-style bug
builtins.unicode = str  # pycocotools still references the py2-only `unicode` name
__all__ = ['KittiEvaluator']  # public API of this module
@EVALUATOR_REGISTRY.register('kitti')
class KittiEvaluator(Evaluator):
    """Evaluator for KITTI-style 3D detection: ROI/RCNN recall plus the
    official KITTI metrics from ``kitti_object_eval_python``."""
    def __init__(self, gt_file, recall_thresh_list=[0.3, 0.5, 0.7]):
        """
        Arguments:
            gt_file (str): directory or json file of annotations
            recall_thresh_list (list): IoU thresholds at which ROI/RCNN
                recall is accumulated and reported
        """
        super(KittiEvaluator, self).__init__()
        self.gt_file = gt_file
        # NOTE(review): mutable default argument; safe as long as the list
        # is never mutated in place -- it is only iterated here.
        self.recall_thresh_list = recall_thresh_list
    def load_dts(self, res_file, res):
        # Load detections from the in-memory `res` if given, otherwise from
        # a results file with one JSON object per line.
        out = []
        if res is not None:
            out = res
        else:
            logger.info(f'loading res from {res_file}')
            out = []
            with PetrelHelper.open(res_file) as f:
                for line in f:
                    out.append(json.loads(line))
        # Flatten the [gpu][batch][item] nesting into a flat list.
        out = [x for res_gpus in out for res_bs in res_gpus for x in res_bs]
        # Restore list fields to numpy arrays (JSON round-trip loses dtype).
        for idx in range(len(out)):
            for k, v in out[idx].items():
                if isinstance(v, list):
                    out[idx][k] = np.array(v)
        return out
    def get_metric(self, ret):
        """Accumulate per-frame recall counters and return recall ratios."""
        metric = {
            'gt_num': 0,
        }
        for cur_thresh in self.recall_thresh_list:
            metric['recall_roi_%s' % str(cur_thresh)] = 0
            metric['recall_rcnn_%s' % str(cur_thresh)] = 0
        for i in range(len(ret)):
            recall_dict = ret[i]['recall_dict']
            for cur_thresh in self.recall_thresh_list:
                metric['recall_roi_%s' % str(cur_thresh)] += recall_dict.get('roi_%s' % str(cur_thresh), 0)
                metric['recall_rcnn_%s' % str(cur_thresh)] += recall_dict.get('rcnn_%s' % str(cur_thresh), 0)
            metric['gt_num'] += recall_dict.get('gt', 0)
        disp_dict = {}
        min_thresh = self.recall_thresh_list[0]
        disp_dict['recall_%s' % str(min_thresh)] = \
            '(%d, %d) / %d' % (metric['recall_roi_%s' % str(min_thresh)],
                               metric['recall_rcnn_%s' % str(min_thresh)], metric['gt_num'])
        gt_num_cnt = metric['gt_num']
        recall_dict = {}
        for cur_thresh in self.recall_thresh_list:
            # max(gt_num_cnt, 1) guards against division by zero.
            cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
            cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
            logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall))
            logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall))
            recall_dict['recall/roi_%s' % str(cur_thresh)] = cur_roi_recall
            recall_dict['recall/rcnn_%s' % str(cur_thresh)] = cur_rcnn_recall
        return recall_dict
    def eval(self, res_file, class_names, kitti_infos, res, **kwargs):
        """Run the official KITTI evaluation; returns a Metric keyed on
        the mean 3d/moderate score."""
        # NOTE(review): this early return yields a (None, class) tuple while
        # the normal path returns a single Metric -- confirm callers handle both.
        if 'annos' not in kitti_infos[0].keys():
            return None, Metric
        det_annos = self.load_dts(res_file, res)
        from .kitti_object_eval_python import eval as kitti_eval
        eval_det_annos = copy.deepcopy(det_annos)
        # Align detections and ground truth by frame order.
        eval_det_annos = sorted(eval_det_annos, key=lambda e: e['frame_id'])
        eval_gt_annos = [copy.deepcopy(info['annos']) for info in sorted(kitti_infos,
                                                                         key=lambda e:e['point_cloud']['lidar_idx'])]
        recall_dict = self.get_metric(eval_det_annos)
        result, recall_dict = kitti_eval.get_official_eval_result(eval_gt_annos, eval_det_annos, class_names)
        # Average the "3d/moderate" scores across classes as the headline metric.
        ave_recall = []
        for k, v in recall_dict.items():
            if "3d/moderate" in k:
                ave_recall.append(v)
        recall_dict["AVE_3d/moderate"] = np.array(ave_recall).mean()
        recall_dict = Metric(recall_dict)
        recall_dict.set_cmp_key("AVE_3d/moderate")
        logger.info(json.dumps(result, indent=2))
        return recall_dict
    @staticmethod
    def add_subparser(name, subparsers):
        """Register the CLI subcommand for KITTI evaluation."""
        subparser = subparsers.add_parser(
            name,
            help='subcommand for kitty evaluation')
        subparser.add_argument(
            '--anno_dir',
            required=True,
            help='directory holding kitty annotations')
        subparser.add_argument(
            '--res_file',
            required=True,
            help='file with each line of a result in json string format')
        return subparser
|
from unittest import TestCase
from unittest.mock import MagicMock
import re
import bq_utils
import common
import resources
from retraction import retract_data_bq as rbq
class RetractDataBqTest(TestCase):
    """Unit tests for retraction.retract_data_bq query generation.

    All BigQuery access is mocked; the tests only inspect the generated
    SQL strings for the expected fully-qualified table names and ID
    constants.
    """
    @classmethod
    def setUpClass(cls):
        print('**************************************************************')
        print(cls.__name__)
        print('**************************************************************')
    def setUp(self):
        # Fake identifiers -- nothing here talks to a real project.
        self.hpo_id = 'fake'
        self.project_id = 'fake-project-id'
        self.ehr_dataset_id = 'ehr20190801_fake'
        self.unioned_dataset_id = 'unioned_ehr20190801'
        self.combined_dataset_id = 'combined20190801'
        self.deid_dataset_id = 'r2021q2r1_deid'
        self.sandbox_dataset_id = 'sandbox_dataset'
        self.client = MagicMock()
        self.client.list_tables = MagicMock()
        self.pid_table_id = 'pid_table'
        self.retraction_type_1 = 'rdr_and_ehr'
        self.retraction_type_2 = 'only_ehr'
        self.tables_to_retract_unioned = rbq.TABLES_FOR_RETRACTION | {
            common.FACT_RELATIONSHIP, common.PERSON
        }
        # Type 1 retraction should affect person table (rdr and ehr)
        self.tables_to_retract_combined_deid_type_1 = rbq.TABLES_FOR_RETRACTION | {
            common.FACT_RELATIONSHIP, common.PERSON
        }
        # Type 2 retraction should not affect person table (only ehr)
        self.tables_to_retract_combined_deid_type_2 = self.tables_to_retract_combined_deid_type_1 - {
            common.PERSON
        }
        self.existing_table_ids = resources.CDM_TABLES
        # mock existing tables for all tests except ehr
        mock_table_ids = []
        for table_id in self.existing_table_ids:
            mock_table_id = MagicMock()
            mock_table_id.table_id = table_id
            mock_table_ids.append(mock_table_id)
        self.client.list_tables.return_value = mock_table_ids
    def test_queries_to_retract_from_ehr_dataset(self):
        # HPO-prefixed person/death tables plus one table per retractable CDM table.
        hpo_person = bq_utils.get_table_id(self.hpo_id, common.PERSON)
        hpo_death = bq_utils.get_table_id(self.hpo_id, common.DEATH)
        # hpo tables
        existing_table_ids = [hpo_person, hpo_death]
        for table in self.tables_to_retract_unioned:
            table_id = bq_utils.get_table_id(self.hpo_id, table)
            existing_table_ids.append(table_id)
        # unioned tables
        ignored_tables = []
        for cdm_table in resources.CDM_TABLES:
            unioned_table_id = rbq.UNIONED_EHR + cdm_table
            existing_table_ids.append(unioned_table_id)
            if cdm_table not in self.tables_to_retract_unioned:
                ignored_tables.append(unioned_table_id)
        # mock existing tables
        mock_table_ids = []
        for table_id in existing_table_ids:
            mock_table_id = MagicMock()
            mock_table_id.table_id = table_id
            mock_table_ids.append(mock_table_id)
        self.client.list_tables.return_value = mock_table_ids
        person_id_query = rbq.JINJA_ENV.from_string(rbq.PERSON_ID_QUERY).render(
            person_research_id=rbq.PERSON_ID,
            pid_project=self.project_id,
            sandbox_dataset_id=self.sandbox_dataset_id,
            pid_table_id=self.pid_table_id)
        qs = rbq.queries_to_retract_from_ehr_dataset(self.client,
                                                     self.project_id,
                                                     self.ehr_dataset_id,
                                                     self.hpo_id,
                                                     person_id_query)
        expected_tables = set(existing_table_ids) - set(ignored_tables)
        actual_tables = set()
        for query in qs:
            # Extract the table name from the backtick-quoted FQN in each query.
            fq_table = re.search('`(.*)`', query)
            if fq_table:
                table = fq_table.group(1).split('.')[-1]
                actual_tables.add(table)
        self.assertSetEqual(expected_tables, actual_tables)
    def test_queries_to_retract_from_combined_dataset(self):
        ignored_tables = []
        for cdm_table in resources.CDM_TABLES:
            if cdm_table not in self.tables_to_retract_combined_deid_type_1:
                ignored_tables.append(cdm_table)
        person_id_query = rbq.JINJA_ENV.from_string(rbq.PERSON_ID_QUERY).render(
            person_research_id=rbq.PERSON_ID,
            pid_project=self.project_id,
            sandbox_dataset_id=self.sandbox_dataset_id,
            pid_table_id=self.pid_table_id)
        # Test type 1 retraction (rdr and ehr)
        qs = rbq.queries_to_retract_from_dataset(self.client, self.project_id,
                                                 self.combined_dataset_id,
                                                 person_id_query,
                                                 self.retraction_type_1)
        expected_tables = set(self.existing_table_ids) - set(ignored_tables)
        actual_tables = set()
        for query in qs:
            fq_table = re.search('`(.*)`', query)
            if fq_table:
                table = fq_table.group(1).split('.')[-1]
                actual_tables.add(table)
                if table not in [
                        common.PERSON, common.DEATH, common.FACT_RELATIONSHIP
                ]:
                    self.assertIn(str(0), query)
        self.assertSetEqual(expected_tables, actual_tables)
        # Test type 2 retraction (only ehr)
        qs = rbq.queries_to_retract_from_dataset(self.client, self.project_id,
                                                 self.deid_dataset_id,
                                                 person_id_query,
                                                 self.retraction_type_2)
        # Exclude person table
        expected_tables = set(
            self.existing_table_ids) - set(ignored_tables) - {common.PERSON}
        actual_tables = set()
        for query in qs:
            fq_table = re.search('`(.*)`', query)
            if fq_table:
                table = fq_table.group(1).split('.')[-1]
                actual_tables.add(table)
                if table not in [
                        common.PERSON, common.DEATH, common.FACT_RELATIONSHIP
                ]:
                    self.assertIn(str(2 * rbq.ID_CONSTANT_FACTOR), query)
        self.assertSetEqual(expected_tables, actual_tables)
    def test_queries_to_retract_from_deid_dataset(self):
        ignored_tables = []
        for cdm_table in resources.CDM_TABLES:
            if cdm_table not in self.tables_to_retract_combined_deid_type_1:
                ignored_tables.append(cdm_table)
        # Deid datasets key on research ids rather than person ids.
        research_id_query = rbq.JINJA_ENV.from_string(
            rbq.PERSON_ID_QUERY).render(
                person_research_id=rbq.RESEARCH_ID,
                pid_project=self.project_id,
                sandbox_dataset_id=self.sandbox_dataset_id,
                pid_table_id=self.pid_table_id)
        # Test type 1 retraction (rdr and ehr)
        qs = rbq.queries_to_retract_from_dataset(self.client, self.project_id,
                                                 self.deid_dataset_id,
                                                 research_id_query,
                                                 self.retraction_type_1)
        expected_tables = set(self.existing_table_ids) - set(ignored_tables)
        actual_tables = set()
        for query in qs:
            fq_table = re.search('`(.*)`', query)
            if fq_table:
                table = fq_table.group(1).split('.')[-1]
                actual_tables.add(table)
                if table not in [
                        common.PERSON, common.DEATH, common.FACT_RELATIONSHIP
                ]:
                    self.assertIn(str(0), query)
        self.assertSetEqual(expected_tables, actual_tables)
        # Test type 2 retraction (only ehr)
        qs = rbq.queries_to_retract_from_dataset(self.client, self.project_id,
                                                 self.deid_dataset_id,
                                                 research_id_query,
                                                 self.retraction_type_2)
        # Exclude person table
        expected_tables = set(
            self.existing_table_ids) - set(ignored_tables) - {common.PERSON}
        actual_tables = set()
        for query in qs:
            fq_table = re.search('`(.*)`', query)
            if fq_table:
                table = fq_table.group(1).split('.')[-1]
                actual_tables.add(table)
                if table not in [
                        common.PERSON, common.DEATH, common.FACT_RELATIONSHIP
                ]:
                    self.assertIn(str(2 * rbq.ID_CONSTANT_FACTOR), query)
        self.assertSetEqual(expected_tables, actual_tables)
    def test_queries_to_retract_from_unioned_dataset(self):
        ignored_tables = []
        for cdm_table in resources.CDM_TABLES:
            if cdm_table not in self.tables_to_retract_unioned:
                ignored_tables.append(cdm_table)
        person_id_query = rbq.JINJA_ENV.from_string(rbq.PERSON_ID_QUERY).render(
            person_research_id=rbq.PERSON_ID,
            pid_project=self.project_id,
            sandbox_dataset_id=self.sandbox_dataset_id,
            pid_table_id=self.pid_table_id)
        qs = rbq.queries_to_retract_from_dataset(self.client, self.project_id,
                                                 self.unioned_dataset_id,
                                                 person_id_query,
                                                 self.retraction_type_2)
        expected_tables = set(self.existing_table_ids) - set(ignored_tables)
        actual_tables = set()
        for query in qs:
            fq_table = re.search('`(.*)`', query)
            if fq_table:
                table = fq_table.group(1).split('.')[-1]
                actual_tables.add(table)
                if table not in [
                        common.PERSON, common.DEATH, common.FACT_RELATIONSHIP
                ]:
                    self.assertIn(str(0), query)
        self.assertSetEqual(expected_tables, actual_tables)
|
"""
Module for executing all of the GDAL tests. None
of these tests require the use of the database.
"""
from unittest import TestSuite, TextTestRunner
# Importing the GDAL test modules.
from django.contrib.gis.tests import \
test_gdal_driver, test_gdal_ds, test_gdal_envelope, \
test_gdal_geom, test_gdal_srs
# One sub-suite per GDAL test module; suite() combines them.
test_suites = [test_gdal_driver.suite(),
               test_gdal_ds.suite(),
               test_gdal_envelope.suite(),
               test_gdal_geom.suite(),
               test_gdal_srs.suite(),
               ]
def suite():
    "Builds a test suite for the GDAL tests."
    s = TestSuite()
    # `map` is lazy in Python 3, so the old `map(s.addTest, test_suites)`
    # never actually added anything; iterate explicitly instead.
    for gdal_suite in test_suites:
        s.addTest(gdal_suite)
    return s
def run(verbosity=1):
    "Runs the GDAL tests."
    runner = TextTestRunner(verbosity=verbosity)
    runner.run(suite())
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.job_policies import JobPolicies # noqa: E501
from isi_sdk_8_1_1.rest import ApiException
class TestJobPolicies(unittest.TestCase):
    """JobPolicies unit test stubs (swagger-codegen generated)."""
    def setUp(self):
        # No fixtures required for these generated stubs.
        pass
    def tearDown(self):
        pass
    def testJobPolicies(self):
        """Test JobPolicies"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_1_1.models.job_policies.JobPolicies()  # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()  # allow running this stub module directly
|
# AVL Mode Package
# Released under MIT License
# Copyright (c) 2020 TytusDb Team
# Developers: SG#16
from DataAccessLayer.handler import Handler
from Models.avl_tree import AVLTree
class TupleModule:
    """Row (tuple) CRUD operations for the AVL storage mode.

    Mutating methods return the project's integer status codes:
        0 = success, 1 = unexpected error, 2 = database not found,
        3 = table not found, 4 = primary-key clash / row not found,
        5 = column-count mismatch.
    """

    def __init__(self):
        self.handler = Handler()  # persistence layer for trees and metadata
        self.dbs = None           # cached root instance (list of databases)

    def insert(self, database: str, table: str, register: list) -> int:
        """Insert `register` into `database.table`; persists only on success."""
        try:
            if not isinstance(database, str) or not isinstance(table, str) or not isinstance(register, list):
                raise  # bare raise (no active exception) -> caught below as code 1
            filtro = False
            self.dbs = self.handler.rootinstance()
            for db in self.dbs:
                if db.name.upper() == database.upper():
                    if self.handler.exists(database, table):
                        filtro = True
                        break
                    else:
                        return 3
            if filtro:
                avl = self.handler.tableinstance(database, table)
                # Reuse __insert (the same logic loadCSV uses) instead of a
                # duplicated copy of the insertion algorithm.
                result = self.__insert(avl, register)
                if result == 0:
                    self.handler.tableupdate(avl)
                return result
            else:
                return 2
        except:
            return 1

    def loadCSV(self, file: str, database: str, table: str) -> list:
        """Bulk-insert rows from a CSV file; returns one status code per row
        (empty list when the db/table is missing or on error)."""
        try:
            if not isinstance(database, str) or not isinstance(table, str) or not file.endswith(".csv"):
                raise
            reader = self.handler.readcsv(file)
            self.dbs = self.handler.rootinstance()
            for db in self.dbs:
                if db.name.upper() == database.upper():
                    if self.handler.exists(database, table):
                        result = []
                        avl = self.handler.tableinstance(database, table)
                        for fila in reader:
                            result.append(self.__insert(avl, fila))
                        # Persist once after the whole batch.
                        self.handler.tableupdate(avl)
                        return result
                    else:
                        return []
            return []
        except:
            return []

    def extractRow(self, database: str, table: str, columns: list) -> list:
        """Return the row whose primary key matches `columns`, or []."""
        try:
            if not isinstance(database, str) or not isinstance(table, str) or not isinstance(columns, list):
                raise
            self.dbs = self.handler.rootinstance()
            for db in self.dbs:
                if db.name.upper() == database.upper():
                    if self.handler.exists(database, table):
                        avl = self.handler.tableinstance(database, table)
                        node = avl.search(self.__concat(columns))
                        if node:
                            return node
                        return []
                    else:
                        return []
            return []
        except:
            return []

    def update(self, database: str, table: str, register: dict, columns: list) -> int:
        """Update the row keyed by `columns` with `register` ({column-index: value})."""
        try:
            if not isinstance(database, str) or not isinstance(table, str) or not isinstance(columns, list):
                raise
            self.dbs = self.handler.rootinstance()
            existeDB = False
            for db in self.dbs:
                if db.name.upper() == database.upper():
                    existeDB = True
                    break
            if self.handler.exists(database, table) and existeDB:
                avl = self.handler.tableinstance(database, table)
                auxStr = self.__concat(columns)
                avltmp = avl.search(auxStr)
                if avltmp:
                    if len(register) <= avl.numberColumns:
                        # If any updated column belongs to the PK, the row
                        # must be re-keyed (delete + insert); otherwise the
                        # payload is updated in place.
                        simple = True
                        for key in register:
                            if key in avl.pklist:
                                simple = False
                        content = avltmp
                        oldcontent = avltmp[:]  # keep old values for re-keying
                        for key in register:
                            content[key] = register[key]
                        if simple:
                            avl.update(auxStr, content)
                            self.handler.tableupdate(avl)
                        else:
                            tmp = []
                            for key in avl.pklist:
                                tmp.append(oldcontent[key])
                            self.delete(database, table, tmp)
                            self.insert(database, table, content)
                        return 0
                    else:
                        return 1
                else:
                    return 4
            elif existeDB is False:
                return 2
            else:
                return 3
        except:
            return 1

    def delete(self, database: str, table: str, columns: list) -> int:
        """Delete the row keyed by `columns`."""
        try:
            if not isinstance(database, str) or not isinstance(table, str) or not isinstance(columns, list):
                raise
            self.dbs = self.handler.rootinstance()
            for db in self.dbs:
                if db.name.upper() == database.upper():
                    if self.handler.exists(database, table):
                        avl = self.handler.tableinstance(database, table)
                        pk = self.__concat(columns)
                        if avl.search(pk):
                            avl.delete(pk)
                            self.handler.tableupdate(avl)
                            return 0
                        return 4
                    else:
                        return 3
            return 2
        except:
            return 1

    def truncate(self, database: str, table: str) -> int:
        """Drop every row of `table` by replacing it with an empty tree."""
        try:
            if not isinstance(database, str) or not isinstance(table, str):
                raise
            self.dbs = self.handler.rootinstance()
            for base in self.dbs:
                if base.name.upper() == database.upper():
                    if self.handler.exists(database, table):
                        avl = self.handler.tableinstance(database, table)
                        # Fresh tree keeps the schema (columns, pklist).
                        newAvl = AVLTree(database, table, avl.numberColumns, avl.pklist)
                        self.handler.tableupdate(newAvl)
                        return 0
                    else:
                        return 3
            return 2
        except:
            return 1

    @staticmethod
    def __concat(keys) -> str:
        """Join key parts with '-' to build the AVL lookup key."""
        res = ""
        for pk in keys:
            res += "-" + str(pk)
        res = res[1:]
        return res

    @staticmethod
    def __insert(avl, register: list) -> int:
        """Insert into an AVL instance (no persistence); returns a status code."""
        try:
            if not isinstance(register, list):
                raise
            if len(register) != avl.numberColumns:
                return 5
            if not len(avl.pklist) == 0:
                # Key is the '-'-joined values of the PK columns.
                auxPk = ""
                for key in avl.pklist:
                    auxPk += "-" + str(register[key])
                auxPk = auxPk[1:]
                if avl.search(auxPk):
                    return 4
                else:
                    avl.add(auxPk, register)
            else:
                # No PK: key on the hidden auto-increment counter.
                index = avl.hidden
                while True:
                    if avl.search(str(index)):
                        index += 1
                        continue
                    avl.add(str(index), register)
                    break
                avl.hidden = index
            return 0
        except:
            return 1
|
"""
ML Engine
By: Mansour Ahmadi (mansourweb@gmail.com)
Yaohui Chen (yaohway@gmail.com)
Created Date: 3 Jun 2019
Last Modified Date: 16 June 2019
"""
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import SGDRegressor
from sklearn.svm import SVR
import sklearn
import os
import tempfile
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
from sklearn.model_selection import GridSearchCV
class MLEngine:
def __init__(self, model_dir=None, classifier='rf', dataset_path=None,
columns=list()):
model_file_name = 'reachability_model.pkl'
self.classifier = classifier
if len(columns) == 0:
self.columns = ['reachable label', 'reachable blocks','path length',
'undiscovered neighbours', 'new cov', 'size', 'cmp',
'indcall', 'extcall', 'reached labels', 'queue size',
'mem ops', 'edge difference']
else:
self.columns = columns
self.best_model_params = {}
self.features = pd.DataFrame(columns=self.columns)
self.labels = []
if self.classifier == 'rf':
if dataset_path is None:
self.clf = RandomForestRegressor(n_estimators=10, max_depth=4)
self.dataset_path = ''
else:
self.dataset_path = dataset_path
print 'Initializing model from: ', self.dataset_path
self.build_model()
elif self.classifier == 'svr':
if dataset_path is None:
self.clf = SVR(kernel='linear', C=1.0)
self.dataset_path = ''
else:
self.dataset_path = dataset_path
print 'Initializing model from: ', self.dataset_path
self.build_model()
else:
print 'Classifier is not supported'
if model_dir is not None:
self.model_file_path = os.path.join(model_dir, self.classifier + '_' + model_file_name)
else:
model_dir = tempfile.mkdtemp()
self.model_file_path = os.path.join(model_dir, model_file_name)
print 'Model is saved here: {}'.format(self.model_file_path)
# if self.classifier == 'sgd':
# self.clf = SGDRegressor(max_iter=1000, alpha=1, penalty='l1')
def load_model(self):
pass
def build_model(self):
if not os.path.exists(self.dataset_path):
print 'dataset does not exist'
exit(1)
self.find_optimal_param()
self.model_construction()
def model_construction(self):
self.clf = RandomForestRegressor(n_estimators=self.best_model_params['n_estimators'],
max_depth=self.best_model_params['max_depth'])
self.update_model(features=[], labels=[])
def predict(self, features):
try:
return self.clf.predict(np.array(features).reshape(1, -1))[0]
except sklearn.exceptions.NotFittedError:
print 'The model is not fitted yet.'
return sum(features[0])
def update_model(self, features, labels):
features_dict_list = list()
if len(self.labels) == 0:
self.labels = np.array(labels).reshape(-1, 1)
else:
if len(labels) > 0:
self.labels = np.concatenate((self.labels, np.array(labels).reshape(-1, 1)))
else:
self.labels = np.array(self.labels).reshape(-1, 1)
for feature in features:
features_dict_list.append(dict(zip(self.columns, feature)))
if self.features.shape[0] == 0:
self.features = pd.DataFrame(features_dict_list)
else:
self.features = pd.concat([self.features, pd.DataFrame(features_dict_list)])
# self.labels = np.array(self.labels).reshape(-1, 1)
if self.classifier == 'rf' or self.classifier == 'svr':
self.clf.fit(X=self.features, y=self.labels.ravel())
def remove_model(self, model_file_path=''):
if model_file_path == '':
os.remove(self.model_file_path)
else:
os.remove(model_file_path)
def save_model(self):
pickle.dump(self.clf, open(self.model_file_path, 'wb'))
def find_optimal_param(self):
dataset = pd.read_csv(self.dataset_path)
self.labels = dataset.label
dataset.drop('window', axis=1, inplace=True)
dataset.drop('label', axis=1, inplace=True)
dataset.drop('id', axis=1, inplace=True)
self.features = dataset
if self.classifier == 'rf':
grid = self.get_rfregressor_params()
elif self.classifier == 'svr':
grid = self.get_svregressor_params()
gd_sr = GridSearchCV(estimator=grid['clf'],
param_grid=grid['grid_param'],
scoring='neg_mean_squared_error',
cv=5,
n_jobs=-1)
gd_sr.fit(self.features, self.labels)
print grid['name'], gd_sr.best_params_, 'MSE: ', gd_sr.best_score_
self.best_model_params = gd_sr.best_params_
def get_rfregressor_params(self):
grid_param = {
'n_estimators': [10, 20, 50, 70],
'max_depth': [3, 4, 5, 6]
}
clf = RandomForestRegressor()
return {'clf': clf, 'grid_param': grid_param, 'name': 'rfreg'}
def get_svregressor_params(self):
grid_param = {
'kernel': ['linear', 'rbf', 'poly'],
'C': [0.01, 0.1, 1, 10, 100]
}
clf = SVR()
return {'clf': clf, 'grid_param': grid_param, 'name': 'svreg'}
def get_corr(self, output='/tmp/corr_mat.pdf'):
dataset = pd.read_csv(self.dataset_path)
plt.figure(figsize=(25, 20))
sb.heatmap(dataset.corr(), annot=True, cmap=sb.diverging_palette(20, 220, n=200))
plt.savefig(output, pad_inches=0)
@staticmethod
def get_feature_importance(dataset_dir='', boxplot_path=''):
list_of_files = list()
for (dir_path, dir_names, file_names) in os.walk(dataset_dir):
list_of_files += [os.path.join(dir_path, file_name) for file_name in file_names
if file_name.endswith('rf_data.csv')]
clf = RandomForestRegressor(n_estimators=100, max_depth=4)
features_importance_all = []
for file_path in list_of_files:
dataset = pd.read_csv(file_path)
y = dataset.label
dataset.drop('label', axis=1, inplace=True)
dataset.drop('id', axis=1, inplace=True)
x = dataset
clf.fit(X=x, y=y)
features_name = dataset.columns.values
features_importance = {}
for index, feature_importance in enumerate(clf.feature_importances_):
features_importance[features_name[index].title()] = feature_importance
features_importance_all.append(features_importance)
feature_importance_data = pd.DataFrame(features_importance_all)
median = feature_importance_data.median()
median.sort_values(ascending=False, inplace=True)
feature_importance_data = feature_importance_data[median.index]
# plt.figure(figsize=(20, 10))
fig = plt.figure()
fig.subplots_adjust(bottom=0.3)
plt.grid()
plt.xticks(rotation=45, horizontalalignment='right')
plt.xlabel('Feature', fontsize=14)
plt.ylabel('Gini Importance', fontsize=14)
feature_importance_data.boxplot(rot=45)
plt.savefig(boxplot_path, bbox_inches='tight', pad_inches=0)
def testRandomForest():
    """Smoke test: default in-memory RF engine, alternating predict/update."""
    print "TEST RF"
    mlengine = MLEngine()
    predicted_value = mlengine.predict([[0,2,1,5,4,3,5,5,3,2,2,2,2]])
    print "predicted value0 : ", predicted_value
    mlengine.update_model([[0,2,1,6,3,4,2,4,6,4,4,4,5]], [[200]])
    predicted_value = mlengine.predict([[0,2,1,8,7,5,5,3,6,7,5,5,5]])
    print "predicted value1 : ", predicted_value
    mlengine.update_model([[1,3,2,8,6,5,4,6,5,2,2,2,5]], [[143]])
    predicted_value = mlengine.predict([[2,3,2,7,4,5,6,9,0,6,5,5,5]])
    print "predicted value2: ", predicted_value
def testRandomForestInit():
    """Smoke test: RF engine pre-tuned from a local CSV (hard-coded path).

    NOTE(review): some vectors below have 14 entries while the default
    feature set has 13 -- confirm the intended dimensionality.
    """
    print "TEST RF INIT"
    mlengine = MLEngine(dataset_path='/home/eric/work/savior/newtcpdump_data.csv')
    predicted_value = mlengine.predict([[0,2,1,3,2,4,2,4,5,1,5,5,5]])
    print "predicted value0 : ", predicted_value
    mlengine.update_model([[2,2,1,6,8,7,8,4,5,3,5,5,5,5]], [[200]])
    predicted_value = mlengine.predict([[0,2,1,7,8,4,4,5,2,3,1,6,6,6]])
    print "predicted value1 : ", predicted_value
    mlengine.update_model([[6,6,1,3,2,6,4,8,5,3,1,5,5]], [[143]])
    predicted_value = mlengine.predict([[5,6,2,6,3,2,5,6,3,4,1,6,6,6]])
    print "predicted value2: ", predicted_value
def testSVM():
    """Smoke test: default SVR engine with 3-element toy feature vectors."""
    print "TEST SVM"
    mlengine = MLEngine(classifier='svr')
    predicted_value = mlengine.predict([[0, 2, 1]])
    print "predicted value0 : ", predicted_value
    mlengine.update_model([[0, 2, 1]], [[200]])
    predicted_value = mlengine.predict([[0, 2, 1]])
    print "predicted value1 : ", predicted_value
    mlengine.update_model([[1, 3, 2]], [[143]])
    predicted_value = mlengine.predict([[2, 3, 2]])
    print "predicted value2: ", predicted_value
def testSVMInit():
    """Smoke test: SVR engine pre-tuned from a local CSV (hard-coded path)."""
    print "TEST SVM INIT"
    mlengine = MLEngine(classifier='svr',
                        dataset_path='/home/eric/work/savior/newtcpdump_data.csv')
    predicted_value = mlengine.predict([[0, 2, 1]])
    print "predicted value0 : ", predicted_value
    mlengine.update_model([[0, 2, 1]], [[200]])
    predicted_value = mlengine.predict([[0, 2, 1]])
    print "predicted value1 : ", predicted_value
    mlengine.update_model([[1, 3, 2]], [[143]])
    predicted_value = mlengine.predict([[2, 3, 2]])
    print "predicted value2: ", predicted_value
def test_corr():
    """Render the feature-correlation heatmap for a local dataset."""
    mlengine = MLEngine(dataset_path='/tmp/tcpdump_data.csv')
    mlengine.get_corr("/tmp/corr_tcpdump.pdf")
def test_feature_importance():
    """Box-plot Gini importances across all rf_data.csv files (local paths)."""
    MLEngine.get_feature_importance('/Users/mansourahmadi/Bank/Work/NEU/MEUZZ/meuzz-learning-data',
                                    '/tmp/feature_importance.pdf')
if __name__ == "__main__":
    # Only the in-memory RF smoke test runs by default; the commented-out
    # drivers need dataset files at the hard-coded local paths above.
    testRandomForest()
    # testRandomForestInit()
    # testSVM()
    # testSVMInit()
    # test_corr()
    # test_feature_importance()
|
from collections import OrderedDict
from functools import partial
import sympy
import numpy as np
from psutil import virtual_memory
from devito.cgen_utils import INT, cast_mapper
from devito.data import Data, default_allocator, first_touch
from devito.dimension import Dimension, DefaultDimension
from devito.equation import Eq, Inc
from devito.exceptions import InvalidArgument
from devito.finite_difference import (centered, cross_derivative,
first_derivative, left, right,
second_derivative, generic_derivative,
second_cross_derivative)
from devito.logger import debug, error, warning
from devito.parameters import configuration
from devito.symbolics import indexify, retrieve_indexed
from devito.types import AbstractCachedFunction, AbstractCachedSymbol
from devito.tools import ReducerMap, prod
__all__ = ['Constant', 'Function', 'TimeFunction', 'SparseFunction',
'SparseTimeFunction']
class Constant(AbstractCachedSymbol):
    """
    Symbol representing constant values in symbolic equations.
    .. note::
        The parameters must always be given as keyword arguments, since
        SymPy uses ``*args`` to (re-)create the dimension arguments of the
        symbolic function.
    """
    is_Input = True
    is_Constant = True
    is_Scalar = True
    def __init__(self, *args, **kwargs):
        if self._cached():
            # Already initialised through the symbol cache.
            return
        self.dtype = kwargs.get('dtype', np.float32)
        self._value = kwargs.get('value')
    @property
    def data(self):
        """The value of the data object, as a scalar (int, float, ...)."""
        return self._value
    @data.setter
    def data(self, val):
        self._value = val
    @property
    def base(self):
        return self
    @property
    def _arg_names(self):
        """Return a tuple of argument names introduced by this symbol."""
        return (self.name,)
    def _arg_defaults(self, alias=None):
        """
        Returns a map of default argument values defined by this symbol,
        optionally keyed under ``alias``'s name instead of our own.
        """
        target = alias or self
        return {target.name: self.data}
    def _arg_values(self, **kwargs):
        """
        Returns a map of argument values after evaluating user input. If no
        user input is provided, return a default value.
        :param kwargs: Dictionary of user-provided argument overrides.
        """
        if self.name not in kwargs:
            return self._arg_defaults()
        override = kwargs.pop(self.name)
        if isinstance(override, Constant):
            # Another Constant was supplied: adopt its value under our name.
            return override._arg_defaults(alias=self)
        return {self.name: override}
    def _arg_check(self, args, dspace):
        """
        Check that ``args`` contains legal runtime values bound to ``self``.
        """
        if self.name not in args:
            raise InvalidArgument("No runtime value for %s" % self.name)
        value = args[self.name]
        try:
            runtime_dtype = value.dtype
        except AttributeError:
            # Might be a plain number, w/o a dtype field
            return
        if runtime_dtype != self.dtype:
            warning("Data type %s of runtime value `%s` does not match the "
                    "Constant data type %s" % (runtime_dtype, self.name, self.dtype))
class TensorFunction(AbstractCachedFunction):
    """
    Utility class to encapsulate all symbolic types that represent
    tensor (array) data.
    .. note::
        Users should not instantiate this class. Use :class:`Function` or
        :class:`SparseFunction` (or their subclasses) instead.
    """
    # Required by SymPy, otherwise the presence of __getitem__ will make SymPy
    # think that a TensorFunction is actually iterable, thus breaking many of
    # its key routines (e.g., solve)
    _iterable = False
    is_Input = True
    is_TensorFunction = True
    is_Tensor = True
    def __init__(self, *args, **kwargs):
        """Set up staggering mask and data-allocation properties.
        Only runs once; cached instances skip re-initialisation."""
        if not self._cached():
            # Staggered mask
            self._staggered = kwargs.get('staggered', tuple(0 for _ in self.indices))
            if len(self.staggered) != len(self.indices):
                raise ValueError("'staggered' needs %s entries for indices %s"
                                 % (len(self.indices), self.indices))
            # Data-related properties
            self.initializer = kwargs.get('initializer')
            if self.initializer is not None:
                assert(callable(self.initializer))
            self._first_touch = kwargs.get('first_touch', configuration['first_touch'])
            # Data is allocated lazily, on first access (see _allocate_memory)
            self._data = None
            self._allocator = kwargs.get('allocator', default_allocator())
    def __getitem__(self, index):
        """Shortcut for ``self.indexed[index]``."""
        return self.indexed[index]
    def _allocate_memory(func):
        """Allocate memory as a :class:`Data`."""
        # Decorator for data-accessor properties: allocates the buffer on
        # first access, then applies first-touch and/or the user-provided
        # initializer (or zero-fills) before delegating to the wrapped getter.
        def wrapper(self):
            if self._data is None:
                debug("Allocating memory for %s%s" % (self.name, self.shape_allocated))
                self._data = Data(self.shape_allocated, self.indices, self.dtype,
                                  allocator=self._allocator)
                if self._first_touch:
                    first_touch(self)
                if self.initializer is not None:
                    if self._first_touch:
                        warning("`first touch` together with `initializer` causing "
                                "redundant data initialization")
                    try:
                        self.initializer(self._data)
                    except ValueError:
                        # Perhaps user only wants to initialise the physical domain
                        self.initializer(self._data[self._mask_domain])
                else:
                    self._data.fill(0)
            return func(self)
        return wrapper
    @property
    def _data_buffer(self):
        """Reference to the data. Unlike ``data, data_with_halo, data_allocated``,
        this *never* returns a view of the data. This method is for internal use only."""
        return self.data_allocated
    @property
    def _mem_external(self):
        return True
    @property
    def shape(self):
        """
        Shape of the domain associated with this :class:`TensorFunction`.
        The domain constitutes the area of the data written to in a
        stencil update.
        """
        return self.shape_domain
    @property
    def shape_domain(self):
        """
        Shape of the domain associated with this :class:`TensorFunction`.
        The domain constitutes the area of the data written to in a
        stencil update.
        .. note::
            Alias to ``self.shape``.
        """
        # Each staggered dimension loses one point of domain extent.
        return tuple(i - j for i, j in zip(self._shape, self.staggered))
    @property
    def shape_with_halo(self):
        """
        Shape of the domain plus the read-only stencil boundary associated
        with this :class:`Function`.
        """
        return tuple(j + i + k for i, (j, k) in zip(self.shape_domain, self._halo))
    @property
    def shape_allocated(self):
        """
        Shape of the allocated data associated with this :class:`Function`.
        It includes the domain and halo regions, as well as any additional
        padding outside of the halo.
        """
        return tuple(j + i + k for i, (j, k) in zip(self.shape_with_halo, self._padding))
    @property
    def data(self):
        """
        The function data values, as a :class:`numpy.ndarray`.
        Elements are stored in row-major format.
        """
        return self.data_domain
    @property
    @_allocate_memory
    def data_domain(self):
        """
        The domain data values.
        Elements are stored in row-major format.
        .. note::
            Alias to ``self.data``.
        """
        return self._data[self._mask_domain]
    @property
    @_allocate_memory
    def data_with_halo(self):
        """
        The domain+halo data values.
        Elements are stored in row-major format.
        """
        return self._data[self._mask_with_halo]
    @property
    @_allocate_memory
    def data_allocated(self):
        """
        The allocated data values, that is domain+halo+padding.
        Elements are stored in row-major format.
        """
        return self._data
    @property
    def space_dimensions(self):
        """Tuple of :class:`Dimension`s that define physical space."""
        return tuple(d for d in self.indices if d.is_Space)
    @property
    def staggered(self):
        """The staggering mask, one 0/1 entry per index."""
        return self._staggered
    @property
    def symbolic_shape(self):
        """
        Return the symbolic shape of the object. This includes: ::
            * the padding, halo, and domain regions. While halo and padding are
              known quantities (integers), the domain size is represented by a symbol.
            * the shifting induced by the ``staggered`` mask
        """
        symbolic_shape = super(TensorFunction, self).symbolic_shape
        return tuple(sympy.Add(i, -j, evaluate=False)
                     for i, j in zip(symbolic_shape, self.staggered))
    @property
    def _arg_names(self):
        """Return a tuple of argument names introduced by this function."""
        return (self.name,)
    def _arg_defaults(self, alias=None):
        """
        Returns a map of default argument values defined by this symbol.
        :param alias: (Optional) name under which to store values.
        """
        key = alias or self
        args = ReducerMap({key.name: self._data_buffer})
        # Collect default dimension arguments from all indices
        # (default size is the domain extent plus the staggering offset).
        for i, s, o, k in zip(self.indices, self.shape, self.staggered, key.indices):
            args.update(i._arg_defaults(start=0, size=s+o, alias=k))
        return args
    def _arg_values(self, **kwargs):
        """
        Returns a map of argument values after evaluating user input. If no
        user input is provided, return a default value.
        :param kwargs: Dictionary of user-provided argument overrides.
        """
        # Add value override for own data if it is provided, otherwise
        # use defaults
        if self.name in kwargs:
            new = kwargs.pop(self.name)
            if isinstance(new, TensorFunction):
                # Set new values and re-derive defaults
                values = new._arg_defaults(alias=self).reduce_all()
            else:
                # We've been provided a pure-data replacement (array)
                values = {self.name: new}
                # Add value overrides for all associated dimensions
                # NOTE(review): dimension sizes are shrunk by the offsets in
                # ``_offset_domain`` -- confirm these match the array's layout.
                for i, s, o in zip(self.indices, new.shape, self.staggered):
                    values.update(i._arg_defaults(size=s+o-sum(self._offset_domain[i])))
        else:
            values = self._arg_defaults(alias=self).reduce_all()
        return values
    def _arg_check(self, args, intervals):
        """
        Check that ``args`` contains legal runtime values bound to ``self``.
        :raises InvalidArgument: If, given the runtime arguments ``args``, an
                                 out-of-bounds access will be performed.
        """
        if self.name not in args:
            raise InvalidArgument("No runtime value for `%s`" % self.name)
        key = args[self.name]
        if len(key.shape) != self.ndim:
            raise InvalidArgument("Shape %s of runtime value `%s` does not match "
                                  "dimensions %s" % (key.shape, self.name, self.indices))
        if key.dtype != self.dtype:
            warning("Data type %s of runtime value `%s` does not match the "
                    "Function data type %s" % (key.dtype, self.name, self.dtype))
        # Delegate per-dimension bounds checking to each index.
        for i, s in zip(self.indices, key.shape):
            i._arg_check(args, s, intervals[i])
class Function(TensorFunction):
    """A :class:`TensorFunction` providing operations to express
    finite-difference approximation. A ``Function`` encapsulates
    space-varying data; for time-varying data, use :class:`TimeFunction`.
    :param name: Name of the symbol
    :param grid: :class:`Grid` object from which to infer the data shape
                 and :class:`Dimension` indices.
    :param space_order: Discretisation order for space derivatives. By default,
                        ``space_order`` points are available on both sides of
                        a generic point of interest, including those on the grid
                        border. Sometimes, fewer points may be necessary; in
                        other cases, depending on the PDE being approximated,
                        more points may be necessary. In such cases, one
                        can pass a 3-tuple ``(o, lp, rp)`` instead of a single
                        integer representing the discretization order. Here,
                        ``o`` is the discretization order, while ``lp`` and ``rp``
                        indicate how many points are expected on left (``lp``)
                        and right (``rp``) of a point of interest.
    :param shape: (Optional) shape of the domain region in grid points.
    :param dimensions: (Optional) symbolic dimensions that define the
                       data layout and function indices of this symbol.
    :param dtype: (Optional) data type of the buffered data.
    :param staggered: (Optional) tuple containing staggering offsets.
    :param padding: (Optional) allocate extra grid points at a space dimension
                    boundary. These may be used for data alignment. Defaults to 0.
                    In alternative to an integer, a tuple, indicating the padding
                    in each dimension, may be passed; in this case, an error is
                    raised if such tuple has fewer entries then the number of space
                    dimensions.
    :param initializer: (Optional) A callable to initialize the data
    :param allocator: (Optional) An object of type :class:`MemoryAllocator` to
                      specify where to allocate the function data when running
                      on a NUMA architecture. Refer to ``default_allocator()``'s
                      __doc__ for more information about possible allocators.
    .. note::
        The parameters must always be given as keyword arguments, since
        SymPy uses ``*args`` to (re-)create the dimension arguments of the
        symbolic function.
    .. note::
        If the parameter ``grid`` is provided, the values for ``shape``,
        ``dimensions`` and ``dtype`` will be derived from it.
    .. note::
       :class:`Function` objects are assumed to be constant in time
       and therefore do not support time derivatives. Use
       :class:`TimeFunction` for time-varying grid data.
    .. note::
       The tuple :param staggered: contains a ``1`` in each dimension
       entry that should be staggered, and ``0`` otherwise. For example,
       ``staggered=(1, 0, 0)`` entails discretization on horizontal edges,
       ``staggered=(0, 0, 1)`` entails discretization on vertical edges,
       ``staggered=(0, 1, 1)`` entails discretization side facets and
       ``staggered=(1, 1, 1)`` entails discretization on cells.
    """
    is_Function = True
    def __init__(self, *args, **kwargs):
        """Derive dtype, halo and padding regions, then install derivative
        shortcuts. Only runs once; cached instances skip it."""
        if not self._cached():
            super(Function, self).__init__(*args, **kwargs)
            # Grid
            self.grid = kwargs.get('grid')
            # Data type (provided or inferred)
            if self.grid is None:
                self.dtype = kwargs.get('dtype', np.float32)
            else:
                self.dtype = kwargs.get('dtype', self.grid.dtype)
            # Halo region
            space_order = kwargs.get('space_order', 1)
            if isinstance(space_order, int):
                self.space_order = space_order
                halo = (space_order, space_order)
            elif isinstance(space_order, tuple) and len(space_order) == 3:
                # (order, left points, right points) form
                self.space_order, left_points, right_points = space_order
                halo = (left_points, right_points)
            else:
                raise ValueError("'space_order' must be int or 3-tuple of ints")
            # Only dimensions in _halo_indices receive a halo; others get (0, 0).
            self._halo = tuple(halo if i in self._halo_indices else (0, 0)
                               for i in self.indices)
            # Padding region
            padding = kwargs.get('padding', 0)
            if isinstance(padding, int):
                padding = tuple((padding,)*2 for i in range(self.ndim))
            elif isinstance(padding, tuple) and len(padding) == self.ndim:
                padding = tuple((i,)*2 if isinstance(i, int) else i for i in padding)
            else:
                raise ValueError("'padding' must be int or %d-tuple of ints" % self.ndim)
            self._padding = padding
            # Dynamically add derivative short-cuts
            self._initialize_derivatives()
    def _initialize_derivatives(self):
        """
        Dynamically create notational shortcuts for space derivatives.
        """
        # NOTE(review): properties are installed on ``self.__class__`` using
        # *this* instance's ``space_order``; instances of the same class with
        # different orders share the last-initialised shortcuts -- confirm
        # this is the intended behaviour.
        for dim in self.space_dimensions:
            # First derivative, centred
            dx = partial(first_derivative, order=self.space_order,
                         dim=dim, side=centered)
            setattr(self.__class__, 'd%s' % dim.name,
                    property(dx, 'Return the symbolic expression for '
                             'the centered first derivative wrt. '
                             'the %s dimension' % dim.name))
            # First derivative, left
            dxl = partial(first_derivative, order=self.space_order,
                          dim=dim, side=left)
            setattr(self.__class__, 'd%sl' % dim.name,
                    property(dxl, 'Return the symbolic expression for '
                             'the left-sided first derivative wrt. '
                             'the %s dimension' % dim.name))
            # First derivative, right
            dxr = partial(first_derivative, order=self.space_order,
                          dim=dim, side=right)
            setattr(self.__class__, 'd%sr' % dim.name,
                    property(dxr, 'Return the symbolic expression for '
                             'the right-sided first derivative wrt. '
                             'the %s dimension' % dim.name))
            # Second derivative
            dx2 = partial(generic_derivative, deriv_order=2, dim=dim,
                          fd_order=int(self.space_order / 2))
            setattr(self.__class__, 'd%s2' % dim.name,
                    property(dx2, 'Return the symbolic expression for '
                             'the second derivative wrt. the '
                             '%s dimension' % dim.name))
            # Fourth derivative
            dx4 = partial(generic_derivative, deriv_order=4, dim=dim,
                          fd_order=max(int(self.space_order / 2), 2))
            setattr(self.__class__, 'd%s4' % dim.name,
                    property(dx4, 'Return the symbolic expression for '
                             'the fourth derivative wrt. the '
                             '%s dimension' % dim.name))
            for dim2 in self.space_dimensions:
                # First cross derivative
                dxy = partial(cross_derivative, order=self.space_order,
                              dims=(dim, dim2))
                setattr(self.__class__, 'd%s%s' % (dim.name, dim2.name),
                        property(dxy, 'Return the symbolic expression for '
                                 'the first cross derivative wrt. the '
                                 '%s and %s dimensions' %
                                 (dim.name, dim2.name)))
                # Second cross derivative
                dx2y2 = partial(second_cross_derivative, dims=(dim, dim2),
                                order=self.space_order)
                setattr(self.__class__, 'd%s2%s2' % (dim.name, dim2.name),
                        property(dx2y2, 'Return the symbolic expression for '
                                 'the second cross derivative wrt. the '
                                 '%s and %s dimensions' %
                                 (dim.name, dim2.name)))
    @classmethod
    def __indices_setup__(cls, **kwargs):
        """Derive the function indices from 'grid' or, failing that,
        from an explicit 'dimensions' argument."""
        grid = kwargs.get('grid')
        dimensions = kwargs.get('dimensions')
        if grid is None:
            if dimensions is None:
                error("Creating a Function object requries either "
                      "a 'grid' or the 'dimensions' argument.")
                raise ValueError("Unknown symbol dimensions or shape")
        else:
            if dimensions is not None:
                warning("Creating Function with 'grid' and 'dimensions' "
                        "argument; ignoring the 'dimensions' and using 'grid'.")
            dimensions = grid.dimensions
        return dimensions
    @classmethod
    def __shape_setup__(cls, **kwargs):
        """Derive the data shape from 'grid' or an explicit 'shape'."""
        grid = kwargs.get('grid')
        if grid is None:
            shape = kwargs.get('shape')
            if shape is None:
                raise ValueError("Function needs either 'shape' or 'grid' argument")
        else:
            shape = grid.shape_domain
        return shape
    @property
    def _halo_indices(self):
        """Return the function indices for which a halo region is defined."""
        return self.indices
    @property
    def laplace(self):
        """
        Generates a symbolic expression for the Laplacian, the second
        derivative wrt. all spatial dimensions.
        """
        derivs = tuple('d%s2' % d.name for d in self.space_dimensions)
        return sum([getattr(self, d) for d in derivs[:self.ndim]])
    def laplace2(self, weight=1):
        """
        Generates a symbolic expression for the double Laplacian
        wrt. all spatial dimensions.
        """
        # NOTE(review): under Python 3 ``self.space_order/2`` is a float,
        # unlike the ``int(self.space_order / 2)`` used elsewhere in this
        # class -- confirm whether ``//`` was intended here.
        order = self.space_order/2
        first = sum([second_derivative(self, dim=d, order=order)
                     for d in self.space_dimensions])
        return sum([second_derivative(first * weight, dim=d, order=order)
                    for d in self.space_dimensions])
class TimeFunction(Function):
"""
A special :class:`Function` encapsulating time-varying data.
:param name: Name of the resulting :class:`sympy.Function` symbol
:param grid: :class:`Grid` object from which to infer the data shape
and :class:`Dimension` indices.
:param space_order: Discretisation order for space derivatives. By default,
``space_order`` points are available on both sides of
a generic point of interest, including those on the grid
border. Sometimes, fewer points may be necessary; in
other cases, depending on the PDE being approximated,
more points may be necessary. In such cases, one
can pass a 3-tuple ``(o, lp, rp)`` instead of a single
integer representing the discretization order. Here,
``o`` is the discretization order, while ``lp`` and ``rp``
indicate how many points are expected on left (``lp``)
and right (``rp``) of a point of interest.
:param time_order: Discretization order for time derivatives.
:param shape: (Optional) shape of the domain region in grid points.
:param dimensions: (Optional) symbolic dimensions that define the
data layout and function indices of this symbol.
:param dtype: (Optional) data type of the buffered data
:param save: (Optional) Save the intermediate results to the data buffer.
Defaults to `None`, indicating the use of alternating buffers.
If intermediate results are required, the value of save must be
set to the required size of the time dimension.
:param time_dim: (Optional) The :class:`Dimension` object to use to represent
time in this symbol. Defaults to the time dimension provided
by the :class:`Grid`.
:param staggered: (Optional) tuple containing staggering offsets.
:param padding: (Optional) allocate extra grid points at a space dimension
boundary. These may be used for data alignment. Defaults to 0.
In alternative to an integer, a tuple, indicating the padding
in each dimension, may be passed; in this case, an error is
raised if such tuple has fewer entries then the number of
space dimensions.
:param initializer: (Optional) A callable to initialize the data
:param allocator: (Optional) An object of type :class:`MemoryAllocator` to
specify where to allocate the function data when running
on a NUMA architecture. Refer to ``default_allocator()``'s
__doc__ for more information about possible allocators.
.. note::
The parameters must always be given as keyword arguments, since
SymPy uses ``*args`` to (re-)create the dimension arguments of the
symbolic function.
.. note::
If the parameter ``grid`` is provided, the values for ``shape``,
``dimensions`` and ``dtype`` will be derived from it.
The parameter ``shape`` should only define the spatial shape of
the grid. The temporal dimension will be inserted automatically
as the leading dimension, according to the ``time_dim``,
``time_order`` and whether we want to write intermediate
timesteps in the buffer. The same is true for explicitly
provided dimensions, which will be added to the automatically
derived time dimensions symbol. For example:
.. code-block:: python
In []: TimeFunction(name="a", dimensions=(x, y, z))
Out[]: a(t, x, y, z)
In []: TimeFunction(name="a", shape=(20, 30))
Out[]: a(t, x, y)
"""
is_TimeFunction = True
_time_position = 0
"""Position of time index among the function indices."""
def __init__(self, *args, **kwargs):
if not self._cached():
super(TimeFunction, self).__init__(*args, **kwargs)
# Check we won't allocate too much memory for the system
available_mem = virtual_memory().available
if np.dtype(self.dtype).itemsize * self.size > available_mem:
warning("Trying to allocate more memory for symbol %s " % self.name +
"than available on physical device, this will start swapping")
self.time_dim = kwargs.get('time_dim', self.indices[self._time_position])
self.time_order = kwargs.get('time_order', 1)
self.save = type(kwargs.get('save', None) or None)
if not isinstance(self.time_order, int):
raise ValueError("'time_order' must be int")
@classmethod
def __indices_setup__(cls, **kwargs):
save = kwargs.get('save')
grid = kwargs.get('grid')
time_dim = kwargs.get('time_dim')
if grid is None:
error('TimeFunction objects require a grid parameter.')
raise ValueError('No grid provided for TimeFunction.')
if time_dim is None:
time_dim = grid.time_dim if save else grid.stepping_dim
elif not (isinstance(time_dim, Dimension) and time_dim.is_Time):
raise ValueError("'time_dim' must be a time dimension")
indices = list(Function.__indices_setup__(**kwargs))
indices.insert(cls._time_position, time_dim)
return tuple(indices)
@classmethod
def __shape_setup__(cls, **kwargs):
grid = kwargs.get('grid')
save = kwargs.get('save') or None # Force to None if 0/False/None/...
shape = kwargs.get('shape')
time_order = kwargs.get('time_order', 1)
if grid is None:
if shape is None:
raise ValueError("TimeFunction needs either 'shape' or 'grid' argument")
if save is not None:
raise ValueError("Ambiguity detected: provide either 'grid' and 'save' "
"or 'shape', where 'shape[0] == save'")
else:
if save is not None:
if not isinstance(save, int):
raise ValueError("save must be an int indicating the number of " +
"timesteps to be saved (is %s)" % type(save))
shape = (save,) + grid.shape_domain
else:
shape = (time_order + 1,) + grid.shape_domain
return shape
@property
def _halo_indices(self):
return tuple(i for i in self.indices if not i.is_Time)
@property
def forward(self):
"""Symbol for the time-forward state of the function"""
i = int(self.time_order / 2) if self.time_order >= 2 else 1
_t = self.indices[0]
return self.subs(_t, _t + i * _t.spacing)
@property
def backward(self):
    """Symbol for the time-backward state of the function"""
    # Mirror image of `forward`: step back by half the order (min 1).
    step = 1 if self.time_order < 2 else int(self.time_order / 2)
    t = self.indices[0]
    return self.subs(t, t - step * t.spacing)
@property
def dt(self):
    """Symbol for the first derivative wrt the time dimension"""
    t = self.indices[0]
    if self.time_order == 1:
        # This hack is needed for the first-order diffusion test
        stencil = [t, t + t.spacing]
    else:
        half = int(self.time_order / 2)
        stencil = [t + k * t.spacing for k in range(-half, half + 1)]
    return self.diff(t).as_finite_difference(stencil)
@property
def dt2(self):
    """Symbol for the second derivative wrt the t dimension"""
    t = self.indices[0]
    half = int(self.time_order / 2)
    # Symmetric stencil of time_order + 1 points around t.
    stencil = [t + k * t.spacing for k in range(-half, half + 1)]
    return self.diff(t, t).as_finite_difference(stencil)
class AbstractSparseFunction(TensorFunction):
    """
    An abstract class to define behaviours common to any kind of sparse
    functions, whether using precomputed coefficients or computing them
    on the fly. This is an internal class only and should never be
    instantiated.
    """
    # Symbols that are encapsulated within this symbol (e.g. coordinates)
    _child_functions = []

    def __init__(self, *args, **kwargs):
        if not self._cached():
            super(AbstractSparseFunction, self).__init__(*args, **kwargs)
            npoint = kwargs.get('npoint')
            # BUG FIX: the original `not isinstance(npoint, int) and
            # npoint > 0` accepted npoint <= 0 and raised TypeError for
            # npoint=None; validate "int and strictly positive" properly.
            if not isinstance(npoint, int) or npoint <= 0:
                raise ValueError('SparseFunction requires parameter `npoint` (> 0)')
            self.npoint = npoint
            # Grid must be provided
            grid = kwargs.get('grid')
            if grid is None:
                raise ValueError('SparseFunction objects require a grid parameter')
            self.grid = grid
            self.dtype = kwargs.get('dtype', self.grid.dtype)
            self.space_order = kwargs.get('space_order', 0)
            # Halo region: sparse data carries no ghost points.
            self._halo = tuple((0, 0) for i in range(self.ndim))
            # Padding region: likewise empty.
            self._padding = tuple((0, 0) for i in range(self.ndim))

    @classmethod
    def __indices_setup__(cls, **kwargs):
        """
        Return the default dimension indices for a given data shape.
        """
        dimensions = kwargs.get('dimensions')
        if dimensions is not None:
            return dimensions
        else:
            # Default: a single "point" dimension named after the function.
            return (Dimension(name='p_%s' % kwargs["name"]),)

    @classmethod
    def __shape_setup__(cls, **kwargs):
        # Default shape is one entry per sparse point.
        return kwargs.get('shape', (kwargs.get('npoint'),))

    def _arg_defaults(self, alias=None):
        """
        Returns a map of default argument values defined by this symbol,
        including those of any encapsulated child functions.
        :param alias: (Optional) name under which to store values.
        """
        key = alias or self
        args = super(AbstractSparseFunction, self)._arg_defaults(alias=alias)
        for child_name in self._child_functions:
            child = getattr(self, child_name)
            args.update(child._arg_defaults(alias=getattr(key, child_name)))
        return args

    @property
    def _arg_names(self):
        """Return a tuple of argument names introduced by this function."""
        return tuple([self.name] + [x for x in self._child_functions])
class SparseFunction(AbstractSparseFunction):
    """
    A special :class:`TensorFunction` representing a set of sparse point
    objects that are not aligned with the computational grid.
    A :class:`SparseFunction` provides symbolic interpolation routines
    to convert between grid-aligned :class:`Function` objects and sparse
    data points.
    :param name: Name of the function.
    :param npoint: Number of points to sample.
    :param grid: :class:`Grid` object defining the computational domain.
    :param shape: (Optional) shape of the function. Defaults to ``(npoints,)``.
    :param dimensions: (Optional) symbolic dimensions that define the
                       data layout and function indices of this symbol.
    :param coordinates: (Optional) coordinate data for the sparse points.
    :param space_order: Discretisation order for space derivatives.
    :param dtype: Data type of the buffered data.
    :param initializer: (Optional) A callable to initialize the data
    :param allocator: (Optional) An object of type :class:`MemoryAllocator` to
                      specify where to allocate the function data when running
                      on a NUMA architecture. Refer to ``default_allocator()``'s
                      __doc__ for more information about possible allocators.
    .. note::
        The parameters must always be given as keyword arguments, since
        SymPy uses `*args` to (re-)create the dimension arguments of the
        symbolic function.
    """
    is_SparseFunction = True
    _child_functions = ['coordinates']

    def __init__(self, *args, **kwargs):
        if not self._cached():
            super(SparseFunction, self).__init__(*args, **kwargs)
            # Set up coordinates of sparse points as a child Function of
            # shape (npoint, grid.dim).
            coordinates = Function(name='%s_coords' % self.name, dtype=self.dtype,
                                   dimensions=(self.indices[-1], Dimension(name='d')),
                                   shape=(self.npoint, self.grid.dim), space_order=0)
            coordinate_data = kwargs.get('coordinates')
            if coordinate_data is not None:
                coordinates.data[:] = coordinate_data[:]
            self.coordinates = coordinates

    @property
    def coefficients(self):
        """Symbolic expression for the coefficients for sparse point
        interpolation according to:
        https://en.wikipedia.org/wiki/Bilinear_interpolation.
        :returns: List of coefficients, eg. [b_11, b_12, b_21, b_22]
        """
        # Grid indices corresponding to the corners of the cell
        x1, y1, z1, x2, y2, z2 = sympy.symbols('x1, y1, z1, x2, y2, z2')
        # Coordinate values of the sparse point
        px, py, pz = self.point_symbols
        if self.grid.dim == 2:
            A = sympy.Matrix([[1, x1, y1, x1*y1],
                              [1, x1, y2, x1*y2],
                              [1, x2, y1, x2*y1],
                              [1, x2, y2, x2*y2]])
            p = sympy.Matrix([[1],
                              [px],
                              [py],
                              [px*py]])
            # Map to reference cell
            x, y = self.grid.dimensions
            reference_cell = {x1: 0, y1: 0, x2: x.spacing, y2: y.spacing}
        elif self.grid.dim == 3:
            # Rows follow the corner order of `point_increments`. BUG FIX:
            # the third row is the corner (x2, y1, z1), so its y*z term
            # must be y1*z1 (it previously read y2*z1, corrupting the
            # trilinear interpolation weights).
            A = sympy.Matrix([[1, x1, y1, z1, x1*y1, x1*z1, y1*z1, x1*y1*z1],
                              [1, x1, y2, z1, x1*y2, x1*z1, y2*z1, x1*y2*z1],
                              [1, x2, y1, z1, x2*y1, x2*z1, y1*z1, x2*y1*z1],
                              [1, x1, y1, z2, x1*y1, x1*z2, y1*z2, x1*y1*z2],
                              [1, x2, y2, z1, x2*y2, x2*z1, y2*z1, x2*y2*z1],
                              [1, x1, y2, z2, x1*y2, x1*z2, y2*z2, x1*y2*z2],
                              [1, x2, y1, z2, x2*y1, x2*z2, y1*z2, x2*y1*z2],
                              [1, x2, y2, z2, x2*y2, x2*z2, y2*z2, x2*y2*z2]])
            p = sympy.Matrix([[1],
                              [px],
                              [py],
                              [pz],
                              [px*py],
                              [px*pz],
                              [py*pz],
                              [px*py*pz]])
            # Map to reference cell
            x, y, z = self.grid.dimensions
            reference_cell = {x1: 0, y1: 0, z1: 0, x2: x.spacing,
                              y2: y.spacing, z2: z.spacing}
        else:
            raise NotImplementedError('Interpolation coefficients not implemented '
                                      'for %d dimensions.' % self.grid.dim)
        A = A.subs(reference_cell)
        return A.inv().T.dot(p)

    @property
    def point_symbols(self):
        """Symbol for coordinate value in each dimension of the point"""
        return sympy.symbols('px, py, pz')

    @property
    def point_increments(self):
        """Index increments in each dimension for each point symbol"""
        if self.grid.dim == 2:
            return ((0, 0), (0, 1), (1, 0), (1, 1))
        elif self.grid.dim == 3:
            return ((0, 0, 0), (0, 1, 0), (1, 0, 0), (0, 0, 1),
                    (1, 1, 0), (0, 1, 1), (1, 0, 1), (1, 1, 1))
        else:
            raise NotImplementedError('Point increments not defined '
                                      'for %d dimensions.' % self.grid.dim)

    @property
    def coordinate_symbols(self):
        """Symbol representing the coordinate values in each dimension"""
        p_dim = self.indices[-1]
        return tuple([self.coordinates.indexify((p_dim, i))
                      for i in range(self.grid.dim)])

    @property
    def coordinate_indices(self):
        """Symbol for each grid index according to the coordinates"""
        indices = self.grid.dimensions
        # floor((coordinate - origin) / spacing) gives the reference cell.
        return tuple([INT(sympy.Function('floor')((c - o) / i.spacing))
                      for c, o, i in zip(self.coordinate_symbols, self.grid.origin,
                                         indices[:self.grid.dim])])

    @property
    def coordinate_bases(self):
        """Symbol for the base coordinates of the reference grid point"""
        indices = self.grid.dimensions
        return tuple([cast_mapper[self.dtype](c - o - idx * i.spacing)
                      for c, o, idx, i in zip(self.coordinate_symbols,
                                              self.grid.origin,
                                              self.coordinate_indices,
                                              indices[:self.grid.dim])])

    def interpolate(self, expr, offset=0, u_t=None, p_t=None, cummulative=False):
        """Creates a :class:`sympy.Eq` equation for the interpolation
        of an expression onto this sparse point collection.
        :param expr: The expression to interpolate.
        :param offset: Additional offset from the boundary for
                       absorbing boundary conditions.
        :param u_t: (Optional) time index to use for indexing into
                    field data in `expr`.
        :param p_t: (Optional) time index to use for indexing into
                    the sparse point data.
        :param cummulative: (Optional) If True, perform an increment rather
                            than an assignment. Defaults to False.
        """
        expr = indexify(expr)
        # Apply optional time symbol substitutions to expr
        if u_t is not None:
            time = self.grid.time_dim
            t = self.grid.stepping_dim
            expr = expr.subs(t, u_t).subs(time, u_t)
        variables = list(retrieve_indexed(expr))
        # List of indirection indices for all adjacent grid points
        index_matrix = [tuple(idx + ii + offset for ii, idx
                              in zip(inc, self.coordinate_indices))
                        for inc in self.point_increments]
        # Generate index substitutions for all grid variables
        idx_subs = []
        for i, idx in enumerate(index_matrix):
            v_subs = [(v, v.base[v.indices[:-self.grid.dim] + idx])
                      for v in variables]
            idx_subs += [OrderedDict(v_subs)]
        # Substitute coordinate base symbols into the coefficients
        subs = OrderedDict(zip(self.point_symbols, self.coordinate_bases))
        rhs = sum([expr.subs(vsub) * b.subs(subs)
                   for b, vsub in zip(self.coefficients, idx_subs)])
        # Apply optional time symbol substitutions to lhs of assignment
        lhs = self if p_t is None else self.subs(self.indices[0], p_t)
        rhs = rhs + lhs if cummulative is True else rhs
        return [Eq(lhs, rhs)]

    def inject(self, field, expr, offset=0, u_t=None, p_t=None):
        """Symbol for injection of an expression onto a grid
        :param field: The grid field into which we inject.
        :param expr: The expression to inject.
        :param offset: Additional offset from the boundary for
                       absorbing boundary conditions.
        :param u_t: (Optional) time index to use for indexing into `field`.
        :param p_t: (Optional) time index to use for indexing into `expr`.
        """
        expr = indexify(expr)
        field = indexify(field)
        variables = list(retrieve_indexed(expr)) + [field]
        # Apply optional time symbol substitutions to field and expr
        if u_t is not None:
            field = field.subs(field.indices[0], u_t)
        if p_t is not None:
            expr = expr.subs(self.indices[0], p_t)
        # List of indirection indices for all adjacent grid points
        index_matrix = [tuple(idx + ii + offset for ii, idx
                              in zip(inc, self.coordinate_indices))
                        for inc in self.point_increments]
        # Generate index substitutions for all grid variables except
        # the `SparseFunction` types
        idx_subs = []
        for i, idx in enumerate(index_matrix):
            v_subs = [(v, v.base[v.indices[:-self.grid.dim] + idx])
                      for v in variables if not v.base.function.is_SparseFunction]
            idx_subs += [OrderedDict(v_subs)]
        # Substitute coordinate base symbols into the coefficients
        subs = OrderedDict(zip(self.point_symbols, self.coordinate_bases))
        # One increment per cell corner, weighted by its coefficient.
        return [Inc(field.subs(vsub),
                    field.subs(vsub) + expr.subs(subs).subs(vsub) * b.subs(subs))
                for b, vsub in zip(self.coefficients, idx_subs)]
class SparseTimeFunction(SparseFunction):
    """
    A time-dependent :class:`SparseFunction`.
    :param name: Name of the function.
    :param nt: Size of the time dimension for point data.
    :param npoint: Number of points to sample.
    :param grid: :class:`Grid` object defining the computational domain.
    :param shape: (Optional) shape of the function. Defaults to ``(nt, npoints,)``.
    :param dimensions: (Optional) symbolic dimensions that define the
                       data layout and function indices of this symbol.
    :param coordinates: (Optional) coordinate data for the sparse points.
    :param space_order: (Optional) Discretisation order for space derivatives.
                        Default to 0.
    :param time_order: (Optional) Discretisation order for time derivatives.
                       Default to 1.
    :param dtype: (Optional) Data type of the buffered data.
    :param initializer: (Optional) A callable to initialize the data
    :param allocator: (Optional) An object of type :class:`MemoryAllocator` to
                      specify where to allocate the function data when running
                      on a NUMA architecture. Refer to ``default_allocator()``'s
                      __doc__ for more information about possible allocators.
    .. note::
        The parameters must always be given as keyword arguments, since
        SymPy uses `*args` to (re-)create the dimension arguments of the
        symbolic function.
    """
    is_SparseTimeFunction = True
    _time_position = 0
    """Position of time index among the function indices."""

    def __init__(self, *args, **kwargs):
        if not self._cached():
            super(SparseTimeFunction, self).__init__(*args, **kwargs)
            nt = kwargs.get('nt')
            # BUG FIX: `not isinstance(nt, int) and nt > 0` only raised for
            # positive non-ints; nt=None hit a TypeError and nt <= 0 slipped
            # through silently. Validate "int and strictly positive".
            if not isinstance(nt, int) or nt <= 0:
                raise ValueError('SparseTimeFunction requires int parameter `nt`')
            self.nt = nt
            self.time_dim = self.indices[self._time_position]
            self.time_order = kwargs.get('time_order', 1)
            if not isinstance(self.time_order, int):
                raise ValueError("'time_order' must be int")

    @classmethod
    def __indices_setup__(cls, **kwargs):
        """
        Return the default dimension indices for a given data shape.
        """
        dimensions = kwargs.get('dimensions')
        if dimensions is not None:
            return dimensions
        else:
            # Default: (time, point) layout using the grid's time dimension.
            return (kwargs['grid'].time_dim, Dimension(name='p_%s' % kwargs["name"]))

    @classmethod
    def __shape_setup__(cls, **kwargs):
        # Default shape: one row of points per saved timestep.
        return kwargs.get('shape', (kwargs.get('nt'), kwargs.get('npoint'),))
class PrecomputedSparseFunction(AbstractSparseFunction):
    """
    A specialised type of SparseFunction where the interpolation is externally defined.
    Currently, this means that the grid points and associated coefficients for each
    sparse point is precomputed at the time this object is being created.
    :param name: Name of the function.
    :param nt: Size of the time dimension for point data.
    :param npoint: Number of points to sample.
    :param grid: :class:`Grid` object defining the computational domain.
    :param r: The number of gridpoints in each dimension to interpolate a single sparse
              point to. e.g. 2 for linear interpolation.
    :param gridpoints: The *reference* grid point corresponding to each sparse point.
                       Of all the gridpoints that one sparse point would be interpolated
                       to, this is the grid point closest to the origin, i.e. the one
                       with the lowest value of each coordinate dimension. Must be a
                       two-dimensional array of shape [npoint][grid.ndim].
    :param coefficients: An array containing the coefficient for each of the r^2 (2D) or
                         r^3 (3D) gridpoints that each sparsefunction will be interpolated
                         to. The coefficient is split across the n dimensions such that
                         the contribution of the point (i, j, k) will be multiplied by
                         coefficients[..., i]*coefficients[..., j]*coefficients[...,k]. So
                         for r=6, we will store 18 coefficients per sparse point (instead
                         of potentially 216). Shape must be [npoint][grid.ndim][r].
    :param shape: (Optional) shape of the function. Defaults to ``(nt, npoints,)``.
    :param dimensions: (Optional) symbolic dimensions that define the
                       data layout and function indices of this symbol.
    :param space_order: (Optional) Discretisation order for space derivatives.
                        Default to 0.
    :param time_order: (Optional) Discretisation order for time derivatives.
                       Default to 1.
    :param dtype: (Optional) Data type of the buffered data.
    :param initializer: (Optional) A callable to initialize the data
    .. note::
        The parameters must always be given as keyword arguments, since
        SymPy uses `*args` to (re-)create the dimension arguments of the
        symbolic function.
    """
    is_PrecomputedSparseFunction = True
    _child_functions = ['gridpoints', 'coefficients']

    def __init__(self, *args, **kwargs):
        if not self._cached():
            super(PrecomputedSparseFunction, self).__init__(*args, **kwargs)
            # Grid points per sparse point (2 in the case of bilinear and trilinear)
            r = kwargs.get('r')
            # BUG FIX: the original `not isinstance(r, int) and r > 0` let
            # r <= 0 through and raised TypeError for r=None.
            if not isinstance(r, int) or r <= 0:
                raise ValueError('Interpolation requires parameter `r` (>0)')
            self.r = r
            gridpoints = Function(name="%s_gridpoints" % self.name, dtype=np.int32,
                                  dimensions=(self.indices[-1], Dimension(name='d')),
                                  shape=(self.npoint, self.grid.dim), space_order=0)
            gridpoints_data = kwargs.get('gridpoints', None)
            # Explicit raise instead of `assert`: asserts vanish under -O.
            if gridpoints_data is None:
                raise ValueError('PrecomputedSparseFunction requires `gridpoints`')
            gridpoints.data[:] = gridpoints_data[:]
            self.gridpoints = gridpoints
            coefficients = Function(name="%s_coefficients" % self.name, dtype=self.dtype,
                                    dimensions=(self.indices[-1], Dimension(name='d'),
                                                Dimension(name='i')),
                                    shape=(self.npoint, self.grid.dim, self.r),
                                    space_order=0)
            coefficients_data = kwargs.get('coefficients', None)
            if coefficients_data is None:
                raise ValueError('PrecomputedSparseFunction requires `coefficients`')
            coefficients.data[:] = coefficients_data[:]
            self.coefficients = coefficients
            warning("Ensure that the provided coefficient and grid point values are " +
                    "computed on the final grid that will be used for other " +
                    "computations.")

    def interpolate(self, expr, offset=0, u_t=None, p_t=None, cummulative=False):
        """Creates a :class:`sympy.Eq` equation for the interpolation
        of an expression onto this sparse point collection.
        :param expr: The expression to interpolate.
        :param offset: Additional offset from the boundary for
                       absorbing boundary conditions.
        :param u_t: (Optional) time index to use for indexing into
                    field data in `expr`.
        :param p_t: (Optional) time index to use for indexing into
                    the sparse point data.
        :param cummulative: (Optional) If True, perform an increment rather
                            than an assignment. Defaults to False.
        """
        expr = indexify(expr)
        # Apply optional time symbol substitutions to expr
        if u_t is not None:
            time = self.grid.time_dim
            t = self.grid.stepping_dim
            expr = expr.subs(t, u_t).subs(time, u_t)
        coefficients = self.coefficients.indexed
        gridpoints = self.gridpoints.indexed
        p, _, _ = self.coefficients.indices
        # For each grid dimension, redirect the index to the precomputed
        # reference gridpoint plus an interpolation offset dimension `rd`,
        # and collect the matching per-dimension coefficient.
        dim_subs = []
        coeffs = []
        for i, d in enumerate(self.grid.dimensions):
            rd = DefaultDimension(name="r%s" % d.name, default_value=self.r)
            dim_subs.append((d, INT(rd + gridpoints[p, i])))
            coeffs.append(coefficients[p, i, rd])
        # Apply optional time symbol substitutions to lhs of assignment
        lhs = self if p_t is None else self.subs(self.indices[0], p_t)
        rhs = prod(coeffs) * expr.subs(dim_subs)
        return [Eq(lhs, lhs + rhs)]

    def inject(self, field, expr, offset=0, u_t=None, p_t=None):
        """Symbol for injection of an expression onto a grid
        :param field: The grid field into which we inject.
        :param expr: The expression to inject.
        :param offset: Additional offset from the boundary for
                       absorbing boundary conditions.
        :param u_t: (Optional) time index to use for indexing into `field`.
        :param p_t: (Optional) time index to use for indexing into `expr`.
        """
        expr = indexify(expr)
        field = indexify(field)
        # Apply optional time symbol substitutions to field and expr
        if u_t is not None:
            field = field.subs(field.indices[0], u_t)
        if p_t is not None:
            expr = expr.subs(self.indices[0], p_t)
        gridpoints = self.gridpoints.indexed
        coefficients = self.coefficients.indexed
        p, _ = self.gridpoints.indices
        # Same indirection as in `interpolate`, applied in reverse.
        dim_subs = []
        coeffs = []
        for i, d in enumerate(self.grid.dimensions):
            rd = DefaultDimension(name="r%s" % d.name, default_value=self.r)
            dim_subs.append((d, INT(rd + gridpoints[p, i])))
            coeffs.append(coefficients[p, i, rd])
        rhs = prod(coeffs) * expr
        field = field.subs(dim_subs)
        return [Eq(field, field + rhs.subs(dim_subs))]
|
"""
Django settings for devhub project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%sl=sr@84$rrenr8=!p@7733st0ggv4&6(#ipbq()1yvbd2rjv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'account.apps.AccountConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'devhub.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'devhub.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# Custom User model
# https://docs.djangoproject.com/en/dev/topics/auth/customizing/#using-a-custom-user-model-when-starting-a-project
AUTH_USER_MODEL = 'account.Account'
|
from typing import Optional, Union, List, Callable
import logging
import torch
from torch.distributions import Poisson, Gamma, Bernoulli, Normal
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
from scvi.inference import Posterior
from . import UnsupervisedTrainer
from scvi.dataset import GeneExpressionDataset
from scvi.models import TOTALVI, Classifier
from scvi.models.utils import one_hot
logger = logging.getLogger(__name__)
class TotalPosterior(Posterior):
r"""The functional data unit for totalVI. A `TotalPosterior` instance is instantiated with a model and
a `gene_dataset`, and as well as additional arguments that for Pytorch's `DataLoader`. A subset of indices
can be specified, for purposes such as splitting the data into train/test/validation. Each trainer instance of the `TotalTrainer` class can therefore have multiple
`TotalPosterior` instances to train a model. A `TotalPosterior` instance also comes with many methods or
utilities for its corresponding data.
:param model: A model instance from class ``TOTALVI``
:param gene_dataset: A gene_dataset instance like ``CbmcDataset()`` with attribute ``protein_expression``
:param shuffle: Specifies if a `RandomSampler` or a `SequentialSampler` should be used
:param indices: Specifies how the data should be split with regards to train/test or labelled/unlabelled
:param use_cuda: Default: ``True``
:param data_loader_kwargs: Keyword arguments to be passed into the `DataLoader`
Examples:
Let us instantiate a `trainer`, with a gene_dataset and a model
>>> gene_dataset = CbmcDataset()
>>> totalvi = TOTALVI(gene_dataset.nb_genes, len(gene_dataset.protein_names),
... n_batch=gene_dataset.n_batches, use_cuda=True)
>>> trainer = TotalTrainer(totalvi, gene_dataset)
>>> trainer.train(n_epochs=400)
"""
def __init__(
    self,
    model: TOTALVI,
    gene_dataset: GeneExpressionDataset,
    shuffle: bool = False,
    indices: Optional[np.ndarray] = None,
    use_cuda: bool = True,
    data_loader_kwargs=None,
):
    """Build a posterior over `gene_dataset`, wiring a collate_fn that also
    loads the protein expression tensor.

    BUG FIX: the default was previously the mutable literal ``dict()``;
    since ``self.data_loader_kwargs`` is updated below (collate_fn), the
    shared default dict would leak state across instances.
    """
    if data_loader_kwargs is None:
        data_loader_kwargs = dict()
    super().__init__(
        model,
        gene_dataset,
        shuffle=shuffle,
        indices=indices,
        use_cuda=use_cuda,
        data_loader_kwargs=data_loader_kwargs,
    )
    # Add protein tensor as another tensor to be loaded
    self.data_loader_kwargs.update(
        {
            "collate_fn": gene_dataset.collate_fn_builder(
                {"protein_expression": np.float32}
            )
        }
    )
    self.data_loader = DataLoader(gene_dataset, **self.data_loader_kwargs)
def corrupted(self):
    """Return a posterior whose loader serves the corrupted data variant."""
    collate = self.gene_dataset.collate_fn_builder(
        {"protein_expression": np.float32}, corrupted=True
    )
    return self.update({"collate_fn": collate})
def uncorrupted(self):
    """Return a posterior whose loader serves the original (clean) data."""
    collate = self.gene_dataset.collate_fn_builder(
        {"protein_expression": np.float32}
    )
    return self.update({"collate_fn": collate})
@torch.no_grad()
def elbo(self):
    """Mean ELBO of the model over this posterior's cells."""
    return self.compute_elbo(self.model)

# Early-stopping direction: a lower ELBO value is better.
elbo.mode = "min"
@torch.no_grad()
def reconstruction_error(self, mode="total"):
    """Reconstruction error; `mode` selects "total", "gene", or protein-only."""
    gene_ll, protein_ll = self.compute_reconstruction_error(self.model)
    if mode == "gene":
        return gene_ll
    if mode == "total":
        return gene_ll + protein_ll
    # Any other mode falls back to the protein term (original behaviour).
    return protein_ll

# Early-stopping direction: lower is better.
reconstruction_error.mode = "min"
@torch.no_grad()
def marginal_ll(self, n_mc_samples=1000):
    """Estimate the marginal log-likelihood via Monte Carlo sampling.

    :param n_mc_samples: number of Monte Carlo samples for the estimator.
        BUG FIX: this argument was previously accepted but silently
        ignored — `compute_marginal_log_likelihood` always ran with its
        default of 100 samples.
    """
    return self.compute_marginal_log_likelihood(n_samples_mc=n_mc_samples)
@torch.no_grad()
def get_protein_background_mean(self):
    """Per-cell protein background rate, concatenated over minibatches."""
    means = []
    for tensors in self:
        x, _, _, batch_index, label, y = tensors
        inference = self.model.inference(
            x, y, batch_index=batch_index, label=label, n_samples=1
        )
        means.append(np.array(inference["py_"]["rate_back"].cpu()))
    return np.concatenate(means)
def compute_elbo(self, vae: TOTALVI, **kwargs):
    """ Computes the ELBO.
    The ELBO is the reconstruction error + the KL divergences
    between the variational distributions and the priors.
    It differs from the marginal log likelihood.
    Specifically, it is a lower bound on the marginal log likelihood
    plus a term that is constant with respect to the variational distribution.
    It still gives good insights on the modeling of the data, and is fast to compute.

    :param vae: TOTALVI model whose forward pass returns the per-cell loss terms.
    :param kwargs: forwarded verbatim to ``vae.__call__``.
    :return: total ELBO divided by the number of cells in this posterior.
    """
    # Iterate once over the posterior and computes the total log_likelihood
    elbo = 0
    for i_batch, tensors in enumerate(self):
        # Tensor order is fixed by the collate_fn configured in __init__.
        x, local_l_mean, local_l_var, batch_index, labels, y = tensors
        (
            reconst_loss_gene,
            reconst_loss_protein,
            kl_div_z,
            kl_div_gene_l,
            kl_div_back_pro,
        ) = vae(
            x,
            y,
            local_l_mean,
            local_l_var,
            batch_index=batch_index,
            label=labels,
            **kwargs,
        )
        # All five per-cell terms enter the bound; accumulate as a scalar.
        elbo += torch.sum(
            reconst_loss_gene
            + reconst_loss_protein
            + kl_div_z
            + kl_div_gene_l
            + kl_div_back_pro
        ).item()
    n_samples = len(self.indices)
    return elbo / n_samples
def compute_reconstruction_error(self, vae: TOTALVI, **kwargs):
    r""" Computes log p(x|z), which is the reconstruction error.
    Differs from the marginal log likelihood, but still gives good
    insights on the modeling of the data, and is fast to compute.
    This is really a helper function to self.ll, self.ll_protein, etc.

    :param vae: TOTALVI model whose forward pass returns the per-cell loss terms.
    :return: pair (mean gene reconstruction loss, mean protein reconstruction loss).
    """
    # Iterate once over the posterior and computes the total log_likelihood
    log_lkl_gene = 0
    log_lkl_protein = 0
    for i_batch, tensors in enumerate(self):
        x, local_l_mean, local_l_var, batch_index, labels, y = tensors
        # Only the two reconstruction terms are kept; KL terms are discarded.
        (
            reconst_loss_gene,
            reconst_loss_protein,
            kl_div_z,
            kl_div_l_gene,
            kl_div_back_pro,
        ) = vae(
            x,
            y,
            local_l_mean,
            local_l_var,
            batch_index=batch_index,
            label=labels,
            **kwargs,
        )
        log_lkl_gene += torch.sum(reconst_loss_gene).item()
        log_lkl_protein += torch.sum(reconst_loss_protein).item()
    n_samples = len(self.indices)
    return log_lkl_gene / n_samples, log_lkl_protein / n_samples
def compute_marginal_log_likelihood(
    self, n_samples_mc: int = 100, batch_size: int = 96
):
    """ Computes a biased estimator for log p(x, y), which is the marginal log likelihood.
    Despite its bias, the estimator still converges to the real value
    of log p(x, y) when n_samples_mc (for Monte Carlo) goes to infinity
    (a fairly high value like 100 should be enough). 5000 is the standard in machine learning publications.
    Due to the Monte Carlo sampling, this method is not as computationally efficient
    as computing only the reconstruction loss

    :param n_samples_mc: number of Monte Carlo samples per cell.
    :param batch_size: minibatch size used while iterating the posterior.
    :return: negative mean marginal log-likelihood per cell.
    """
    # Uses MC sampling to compute a tighter lower bound on log p(x)
    log_lkl = 0
    for i_batch, tensors in enumerate(self.update({"batch_size": batch_size})):
        x, local_l_mean, local_l_var, batch_index, labels, y = tensors
        # One importance-weight column per MC sample.
        to_sum = torch.zeros(x.size()[0], n_samples_mc)
        for i in range(n_samples_mc):
            # Distribution parameters and sampled variables
            outputs = self.model.inference(x, y, batch_index, labels)
            qz_m = outputs["qz_m"]
            qz_v = outputs["qz_v"]
            ql_m = outputs["ql_m"]
            ql_v = outputs["ql_v"]
            px_ = outputs["px_"]
            py_ = outputs["py_"]
            log_library = outputs["untran_l"]
            # really need not softmax transformed random variable
            z = outputs["untran_z"]
            log_pro_back_mean = outputs["log_pro_back_mean"]
            # Reconstruction Loss
            (
                reconst_loss_gene,
                reconst_loss_protein,
            ) = self.model.get_reconstruction_loss(x, y, px_, py_)
            # Log-probabilities under the priors.
            p_l_gene = (
                Normal(local_l_mean, local_l_var.sqrt())
                .log_prob(log_library)
                .sum(dim=-1)
            )
            p_z = Normal(0, 1).log_prob(z).sum(dim=-1)
            p_mu_back = self.model.back_mean_prior.log_prob(log_pro_back_mean).sum(
                dim=-1
            )
            p_xy_zl = -(reconst_loss_gene + reconst_loss_protein)
            # Log-probabilities under the variational posteriors.
            q_z_x = Normal(qz_m, qz_v.sqrt()).log_prob(z).sum(dim=-1)
            q_l_x = Normal(ql_m, ql_v.sqrt()).log_prob(log_library).sum(dim=-1)
            q_mu_back = (
                Normal(py_["back_alpha"], py_["back_beta"])
                .log_prob(log_pro_back_mean)
                .sum(dim=-1)
            )
            # Importance weight: joint log-density minus variational log-density.
            to_sum[:, i] = (
                p_z + p_l_gene + p_mu_back + p_xy_zl - q_z_x - q_l_x - q_mu_back
            )
        # log-mean-exp over the MC samples gives the per-cell estimate.
        batch_log_lkl = torch.logsumexp(to_sum, dim=-1) - np.log(n_samples_mc)
        log_lkl += torch.sum(batch_log_lkl).item()
    n_samples = len(self.indices)
    # The minus sign is there because we actually look at the negative log likelihood
    return -log_lkl / n_samples
@torch.no_grad()
def get_latent(self, sample: bool = False):
    """
    Output posterior z mean or sample, batch index, and label
    :param sample: z mean or z sample
    :return: 4-tuple of np.ndarrays, latent, batch_indices, labels, library_gene
    """
    give_mean = not sample
    zs, batches, labs, libs = [], [], [], []
    for tensors in self:
        x, _, _, batch_index, label, y = tensors
        zs.append(
            self.model.sample_from_posterior_z(
                x, y, batch_index, give_mean=give_mean
            ).cpu()
        )
        batches.append(batch_index.cpu())
        labs.append(label.cpu())
        libs.append(
            self.model.sample_from_posterior_l(
                x, y, batch_index, give_mean=give_mean
            ).cpu()
        )
    return (
        np.array(torch.cat(zs)),
        np.array(torch.cat(batches)),
        np.array(torch.cat(labs)).ravel(),
        np.array(torch.cat(libs)).ravel(),
    )
@torch.no_grad()
def differential_expression_stats(self, M_sampling: int = 100):
    # Not supported for totalVI posteriors.
    raise NotImplementedError
@torch.no_grad()
def generate(
    self, n_samples: int = 100, batch_size: int = 64
):  # with n_samples>1 return original list/ otherwise sequential
    """
    Return samples from posterior predictive. Proteins are concatenated to genes.
    :param n_samples: Number of posterior predictive samples
    :param batch_size: minibatch size used while iterating the posterior.
    :return: Tuple of posterior samples, original data
    """
    original_list = []
    posterior_list = []
    for tensors in self.update({"batch_size": batch_size}):
        x, _, _, batch_index, labels, y = tensors
        with torch.no_grad():
            outputs = self.model.inference(
                x, y, batch_index=batch_index, label=labels, n_samples=n_samples
            )
        px_ = outputs["px_"]
        py_ = outputs["py_"]
        # Mixture assignment: sigmoid of the mixing logits gives the
        # probability of the background component.
        pi = 1 / (1 + torch.exp(-py_["mixing"]))
        mixing_sample = Bernoulli(pi).sample()
        protein_rate = (
            py_["rate_fore"] * (1 - mixing_sample)
            + py_["rate_back"] * mixing_sample
        )
        rate = torch.cat((px_["rate"], protein_rate), dim=-1)
        # Broadcast scalar/vector dispersions to full feature shape.
        if len(px_["r"].size()) == 2:
            px_dispersion = px_["r"]
        else:
            px_dispersion = torch.ones_like(x) * px_["r"]
        if len(py_["r"].size()) == 2:
            py_dispersion = py_["r"]
        else:
            py_dispersion = torch.ones_like(y) * py_["r"]
        dispersion = torch.cat((px_dispersion, py_dispersion), dim=-1)
        # This gamma is really l*w using scVI manuscript notation
        # (negative binomial sampled as a Gamma-Poisson mixture).
        p = rate / (rate + dispersion)
        r = dispersion
        l_train = Gamma(r, (1 - p) / p).sample()
        data = Poisson(l_train).sample().cpu().numpy()
        # NOTE: in numpy (shape, scale) => (concentration, rate), with
        # scale = p / (1 - p); pytorch uses rate = (1 - p) / p = 1/scale.
        original_list += [np.array(torch.cat((x, y), dim=-1).cpu())]
        posterior_list += [data]
        # Reorder sample axis last: (cells, features, samples).
        posterior_list[-1] = np.transpose(posterior_list[-1], (1, 2, 0))
    return (
        np.concatenate(posterior_list, axis=0),
        np.concatenate(original_list, axis=0),
    )
@torch.no_grad()
def get_sample_dropout(self, n_samples: int = 1, give_mean: bool = True):
    """ Zero-inflation mixing component for genes

    :param n_samples: number of posterior samples per cell.
    :param give_mean: if True and n_samples > 1, average over samples.
    :return: np.ndarray of dropout probabilities.
    """
    px_dropouts = []
    for tensors in self:
        x, _, _, batch_index, label, y = tensors
        outputs = self.model.inference(
            x, y, batch_index=batch_index, label=label, n_samples=n_samples
        )
        # Dropout logits -> probabilities.
        px_dropout = torch.sigmoid(outputs["px_"]["dropout"])
        px_dropouts += [px_dropout.cpu()]
    if n_samples > 1:
        # concatenate along batch dimension -> result shape = (samples, cells, features)
        px_dropouts = torch.cat(px_dropouts, dim=1)
        # (cells, features, samples)
        px_dropouts = px_dropouts.permute(1, 2, 0)
    else:
        px_dropouts = torch.cat(px_dropouts, dim=0)
    if give_mean is True and n_samples > 1:
        px_dropouts = torch.mean(px_dropouts, dim=-1)
    px_dropouts = px_dropouts.cpu().numpy()
    return px_dropouts
@torch.no_grad()
def get_sample_mixing(
    self,
    n_samples: int = 1,
    give_mean: bool = True,
    transform_batch: Optional[int] = None,
):
    """Mixing Bernoulli parameter for the protein negative-binomial mixture
    (probability that expression is background).

    :param n_samples: number of samples from the posterior distribution
    :param give_mean: whether to return samples along the first axis or
        average over samples
    :param transform_batch: batch to condition on, as an integer
        (None keeps the observed batch)
    :return: array of probability background
    :rtype: :py:class:`np.ndarray`
    """
    per_minibatch = []
    for tensors in self:
        x, _, _, batch_index, label, y = tensors
        inference_out = self.model.inference(
            x,
            y,
            batch_index=batch_index,
            label=label,
            n_samples=n_samples,
            transform_batch=transform_batch,
        )
        # mixing logits -> probabilities
        per_minibatch.append(torch.sigmoid(inference_out["py_"]["mixing"]).cpu())
    if n_samples > 1:
        # (samples, cells, proteins) -> (cells, proteins, samples)
        mixings = torch.cat(per_minibatch, dim=1).permute(1, 2, 0)
    else:
        mixings = torch.cat(per_minibatch, dim=0)
    if give_mean is True and n_samples > 1:
        mixings = torch.mean(mixings, dim=-1)
    return mixings.cpu().numpy()
@torch.no_grad()
def get_sample_scale(
    self,
    transform_batch=None,
    eps=0.5,
    normalize_pro=False,
    sample_bern=True,
    include_bg=False,
):
    """Helper function providing normalized expression for DE testing.

    For normalized, denoised expression, please use
    `get_normalized_denoised_expression()`.

    :param transform_batch: Int of batch to "transform" all cells into
    :param eps: Prior count to add to protein normalized expression
    :param normalize_pro: bool, whether to make protein expression sum to one in a cell
    :param sample_bern: bool, whether to sample the mixing Bernoulli
    :param include_bg: bool, whether to include the background component of expression
    :rtype: :py:class:`np.ndarray`
    """
    # the model yields per-modality scales; genes and proteins are
    # concatenated on the last axis before stacking over mini-batches
    per_minibatch = [
        torch.cat(
            self.model.get_sample_scale(
                x,
                y,
                batch_index=batch_index,
                label=label,
                n_samples=1,
                transform_batch=transform_batch,
                eps=eps,
                normalize_pro=normalize_pro,
                sample_bern=sample_bern,
                include_bg=include_bg,
            ),
            dim=-1,
        )
        .cpu()
        .numpy()
        for (x, _, _, batch_index, label, y) in self
    ]
    return np.concatenate(per_minibatch)
@torch.no_grad()
def get_normalized_denoised_expression(
    self,
    n_samples: int = 1,
    give_mean: bool = True,
    transform_batch: Optional[Union[int, List[int]]] = None,
    sample_protein_mixing: bool = True,
):
    """Returns the tensors of denoised normalized gene and protein expression
    :param n_samples: number of samples from posterior distribution
    :param sample_protein_mixing: Sample mixing bernoulli, setting background to zero
    :param give_mean: bool, whether to return samples along first axis or average over samples
    :param transform_batch: Batches to condition on.
    If transform_batch is:
        - None, then real observed batch is used
        - int, then batch transform_batch is used
        - list of int, then values are averaged over provided batches.
    :return: Denoised genes, denoised proteins
    :rtype: 2-tuple of :py:class:`np.ndarray`
    """
    scale_list_gene = []
    scale_list_pro = []
    # normalize to a one-element list so the averaging loop below is uniform
    if (transform_batch is None) or (isinstance(transform_batch, int)):
        transform_batch = [transform_batch]
    for tensors in self:
        x, _, _, batch_index, label, y = tensors
        # accumulators for averaging the scales over the requested batches
        px_scale = torch.zeros_like(x)
        py_scale = torch.zeros_like(y)
        if n_samples > 1:
            # add a leading sample dimension to match inference output
            px_scale = torch.stack(n_samples * [px_scale])
            py_scale = torch.stack(n_samples * [py_scale])
        for b in transform_batch:
            outputs = self.model.inference(
                x,
                y,
                batch_index=batch_index,
                label=label,
                n_samples=n_samples,
                transform_batch=b,
            )
            px_scale += outputs["px_"]["scale"]
            py_ = outputs["py_"]
            # probability of background
            protein_mixing = 1 / (1 + torch.exp(-py_["mixing"]))
            if sample_protein_mixing is True:
                protein_mixing = Bernoulli(protein_mixing).sample()
            # keep only the foreground contribution, weighted by the mixing
            py_scale += py_["rate_fore"] * (1 - protein_mixing)
        # average over the conditioning batches
        px_scale /= len(transform_batch)
        py_scale /= len(transform_batch)
        scale_list_gene.append(px_scale.cpu())
        scale_list_pro.append(py_scale.cpu())
    if n_samples > 1:
        # concatenate along batch dimension -> result shape = (samples, cells, features)
        scale_list_gene = torch.cat(scale_list_gene, dim=1)
        scale_list_pro = torch.cat(scale_list_pro, dim=1)
        # (cells, features, samples)
        scale_list_gene = scale_list_gene.permute(1, 2, 0)
        scale_list_pro = scale_list_pro.permute(1, 2, 0)
    else:
        scale_list_gene = torch.cat(scale_list_gene, dim=0)
        scale_list_pro = torch.cat(scale_list_pro, dim=0)
    if give_mean is True and n_samples > 1:
        scale_list_gene = torch.mean(scale_list_gene, dim=-1)
        scale_list_pro = torch.mean(scale_list_pro, dim=-1)
    scale_list_gene = scale_list_gene.cpu().numpy()
    scale_list_pro = scale_list_pro.cpu().numpy()
    return scale_list_gene, scale_list_pro
@torch.no_grad()
def get_protein_mean(
    self,
    n_samples: int = 1,
    give_mean: bool = True,
    transform_batch: Optional[Union[int, List[int]]] = None,
):
    """Returns the tensors of protein mean (with foreground and background)
    :param n_samples: number of samples from posterior distribution
    :param give_mean: bool, whether to return samples along first axis or average over samples
    :param transform_batch: Batches to condition on.
    If transform_batch is:
        - None, then real observed batch is used
        - int, then batch transform_batch is used
        - list of int, then values are averaged over provided batches.
    :rtype: :py:class:`np.ndarray`
    """
    # normalize to a one-element list so the averaging loop below is uniform
    if (transform_batch is None) or (isinstance(transform_batch, int)):
        transform_batch = [transform_batch]
    rate_list_pro = []
    for tensors in self:
        x, _, _, batch_index, label, y = tensors
        # accumulator for averaging the mean over the requested batches
        protein_rate = torch.zeros_like(y)
        if n_samples > 1:
            # add a leading sample dimension to match inference output
            protein_rate = torch.stack(n_samples * [protein_rate])
        for b in transform_batch:
            outputs = self.model.inference(
                x,
                y,
                batch_index=batch_index,
                label=label,
                n_samples=n_samples,
                transform_batch=b,
            )
            py_ = outputs["py_"]
            # probability of background for each protein
            pi = 1 / (1 + torch.exp(-py_["mixing"]))
            # total mean: foreground and background rates weighted by mixing
            protein_rate += py_["rate_fore"] * (1 - pi) + py_["rate_back"] * pi
        protein_rate /= len(transform_batch)
        rate_list_pro.append(protein_rate.cpu())
    if n_samples > 1:
        # concatenate along batch dimension -> result shape = (samples, cells, features)
        rate_list_pro = torch.cat(rate_list_pro, dim=1)
        # (cells, features, samples)
        rate_list_pro = rate_list_pro.permute(1, 2, 0)
    else:
        rate_list_pro = torch.cat(rate_list_pro, dim=0)
    if give_mean is True and n_samples > 1:
        rate_list_pro = torch.mean(rate_list_pro, dim=-1)
    rate_list_pro = rate_list_pro.cpu().numpy()
    return rate_list_pro
@torch.no_grad()
def generate_denoised_samples(
    self,
    n_samples: int = 25,
    batch_size: int = 64,
    rna_size_factor: int = 1,
    transform_batch: Optional[int] = None,
):
    """ Return samples from an adjusted posterior predictive. Proteins are concatenated to genes.
    :param n_samples: How may samples per cell
    :param batch_size: Mini-batch size for sampling. Lower means less GPU memory footprint
    :param rna_size_factor: size factor for RNA prior to sampling gamma distribution
    :param transform_batch: int of which batch to condition on for all cells
    :return: denoised samples as a numpy array
    """
    posterior_list = []
    for tensors in self.update({"batch_size": batch_size}):
        x, _, _, batch_index, labels, y = tensors
        with torch.no_grad():  # redundant with the decorator, but harmless
            outputs = self.model.inference(
                x,
                y,
                batch_index=batch_index,
                label=labels,
                n_samples=n_samples,
                transform_batch=transform_batch,
            )
        px_ = outputs["px_"]
        py_ = outputs["py_"]
        # probability of background for each protein
        pi = 1 / (1 + torch.exp(-py_["mixing"]))
        mixing_sample = Bernoulli(pi).sample()
        protein_rate = py_["rate_fore"]
        # genes use the scale rescaled by rna_size_factor; proteins use
        # the foreground rate (background is zeroed out below)
        rate = torch.cat((rna_size_factor * px_["scale"], protein_rate), dim=-1)
        # broadcast a shared dispersion to a per-cell shape when needed
        if len(px_["r"].size()) == 2:
            px_dispersion = px_["r"]
        else:
            px_dispersion = torch.ones_like(x) * px_["r"]
        if len(py_["r"].size()) == 2:
            py_dispersion = py_["r"]
        else:
            py_dispersion = torch.ones_like(y) * py_["r"]
        dispersion = torch.cat((px_dispersion, py_dispersion), dim=-1)
        # This gamma is really l*w using scVI manuscript notation
        p = rate / (rate + dispersion)
        r = dispersion
        l_train = Gamma(r, (1 - p) / p).sample()
        data = l_train.cpu().numpy()
        # make background 0
        data[:, :, self.gene_dataset.nb_genes :] = (
            data[:, :, self.gene_dataset.nb_genes :]
            * (1 - mixing_sample).cpu().numpy()
        )
        posterior_list += [data]
        # (samples, cells, features) -> (cells, features, samples)
        # NOTE(review): the 3-D indexing above assumes n_samples > 1 — confirm
        posterior_list[-1] = np.transpose(posterior_list[-1], (1, 2, 0))
    return np.concatenate(posterior_list, axis=0)
@torch.no_grad()
def generate_feature_correlation_matrix(
    self,
    n_samples: int = 25,
    batch_size: int = 64,
    rna_size_factor: int = 1000,
    transform_batch: Optional[Union[int, List[int]]] = None,
    correlation_mode: str = "spearman",
):
    """Wrapper of `generate_denoised_samples()` building a gene-protein x
    gene-protein correlation matrix.

    :param n_samples: How may samples per cell
    :param batch_size: Mini-batch size for sampling. Lower means less GPU memory footprint
    :param rna_size_factor: size factor for RNA prior to sampling gamma distribution
    :param transform_batch: Batches to condition on.
    If transform_batch is:
        - None, then real observed batch is used
        - int, then batch transform_batch is used
        - list of int, then values are averaged over provided batches.
    :param correlation_mode: "pearson" or (default) "spearman"
    :return: square correlation matrix over concatenated gene/protein features
    """
    # normalize to a one-element list so a single loop handles all cases
    if (transform_batch is None) or (isinstance(transform_batch, int)):
        transform_batch = [transform_batch]
    corr_mats = []
    for batch in transform_batch:
        denoised = self.generate_denoised_samples(
            n_samples=n_samples,
            batch_size=batch_size,
            rna_size_factor=rna_size_factor,
            transform_batch=batch,
        )
        n_cells = denoised.shape[0]
        # stack the per-cell samples on top of each other:
        # (cells, features, samples) -> (cells * samples, features)
        flattened = np.zeros((n_cells * n_samples, denoised.shape[1]))
        for sample_idx in range(n_samples):
            start = n_cells * sample_idx
            flattened[start : start + n_cells] = denoised[:, :, sample_idx]
        if correlation_mode == "pearson":
            corr_mats.append(np.corrcoef(flattened, rowvar=False))
        else:
            corr_mats.append(spearmanr(flattened, axis=0)[0])
    # average the matrices over the conditioning batches
    return np.mean(np.stack(corr_mats), axis=0)
@torch.no_grad()
def imputation(self, n_samples: int = 1):
    """Gene imputation: expected sample rate for each cell.

    :param n_samples: number of posterior samples
    :return: imputed values with singleton dimensions squeezed out
    """
    per_minibatch = [
        np.array(
            self.model.get_sample_rate(
                x, y, batch_index=batch_index, label=label, n_samples=n_samples
            ).cpu()
        )
        for (x, _, _, batch_index, label, y) in self
    ]
    return np.concatenate(per_minibatch).squeeze()
@torch.no_grad()
def imputation_list(self, n_samples: int = 1):
    """ This code is identical to same function in posterior.py
    Except, we use the totalVI definition of `model.get_sample_rate`
    """
    original_list = []
    imputed_list = []
    # shrink the mini-batch so n_samples expansions still fit in memory
    batch_size = self.data_loader_kwargs["batch_size"] // n_samples
    for tensors, corrupted_tensors in zip(
        self.uncorrupted().sequential(batch_size=batch_size),
        self.corrupted().sequential(batch_size=batch_size),
    ):
        batch = tensors[0]
        actual_batch_size = batch.size(0)
        dropout_x, _, _, batch_index, labels, y = corrupted_tensors
        px_rate = self.model.get_sample_rate(
            dropout_x, y, batch_index=batch_index, label=labels, n_samples=n_samples
        )
        # keep the gene block only (proteins are concatenated after genes)
        px_rate = px_rate[:, : self.gene_dataset.nb_genes]
        # entries where corruption changed the original counts
        indices_dropout = torch.nonzero(batch - dropout_x)
        if indices_dropout.size() != torch.Size([0]):
            i = indices_dropout[:, 0]  # cell indices of corrupted entries
            j = indices_dropout[:, 1]  # gene indices of corrupted entries
            batch = batch.unsqueeze(0).expand(
                (n_samples, batch.size(0), batch.size(1))
            )
            original = np.array(batch[:, i, j].view(-1).cpu())
            imputed = np.array(px_rate[..., i, j].view(-1).cpu())
            # repeat cell indices so they align with the flattened samples
            cells_index = np.tile(np.array(i.cpu()), n_samples)
            # regroup the flattened values per cell of this mini-batch
            original_list += [
                original[cells_index == i] for i in range(actual_batch_size)
            ]
            imputed_list += [
                imputed[cells_index == i] for i in range(actual_batch_size)
            ]
        else:
            # NOTE(review): this REPLACES (rather than appends to) results
            # accumulated from earlier mini-batches whenever one mini-batch
            # has no corrupted entries — verify against posterior.py
            original_list = np.array([])
            imputed_list = np.array([])
    return original_list, imputed_list
@torch.no_grad()
def differential_expression_score(
    self,
    idx1: Union[List[bool], np.ndarray],
    idx2: Union[List[bool], np.ndarray],
    mode: Optional[str] = "vanilla",
    batchid1: Optional[Union[List[int], np.ndarray]] = None,
    batchid2: Optional[Union[List[int], np.ndarray]] = None,
    use_observed_batches: Optional[bool] = False,
    n_samples: int = 5000,
    use_permutation: bool = True,
    M_permutation: int = 10000,
    all_stats: bool = True,
    change_fn: Optional[Union[str, Callable]] = None,
    m1_domain_fn: Optional[Callable] = None,
    delta: Optional[float] = 0.5,
    **kwargs,
) -> pd.DataFrame:
    r"""
    Unified method for differential expression inference.
    This function is an extension of the `get_bayes_factors` method
    providing additional genes information to the user

    # FUNCTIONING
    Two modes coexist:
    - the "vanilla" mode follows protocol described in arXiv:1709.02082
    In this case, we perform hypothesis testing based on:
        M_1: h_1 > h_2
        M_2: h_1 <= h_2
    DE can then be based on the study of the Bayes factors:
    log (p(M_1 | x_1, x_2) / p(M_2 | x_1, x_2)

    - the "change" mode (described in bioRxiv, 794289)
    consists in estimating an effect size random variable (e.g., log fold-change) and
    performing Bayesian hypothesis testing on this variable.
    The `change_fn` function computes the effect size variable r based two inputs
    corresponding to the normalized means in both populations
    Hypotheses:
        M_1: r \in R_0 (effect size r in region inducing differential expression)
        M_2: r not \in R_0 (no differential expression)
    To characterize the region R_0, the user has two choices.
    1. A common case is when the region [-delta, delta] does not induce differential
    expression.
    If the user specifies a threshold delta,
    we suppose that R_0 = \mathbb{R} \ [-delta, delta]
    2. specify an specific indicator function f: \mathbb{R} -> {0, 1} s.t.
        r \in R_0 iff f(r) = 1
    Decision-making can then be based on the estimates of
        p(M_1 | x_1, x_2)

    # POSTERIOR SAMPLING
    Both modes require to sample the normalized means posteriors
    To that purpose we sample the Posterior in the following way:
    1. The posterior is sampled n_samples times for each subpopulation
    2. For computation efficiency (posterior sampling is quite expensive), instead of
        comparing the obtained samples element-wise, we can permute posterior samples.
        Remember that computing the Bayes Factor requires sampling
        q(z_A | x_A) and q(z_B | x_B)

    # BATCH HANDLING
    Currently, the code covers several batch handling configurations:
    1. If `use_observed_batches`=True, then batch are considered as observations
    and cells' normalized means are conditioned on real batch observations
    2. If case (cell group 1) and control (cell group 2) are conditioned on the same
    batch ids.
        set(batchid1) = set(batchid2):
        e.g. batchid1 = batchid2 = None
    3. If case and control are conditioned on different batch ids that do not intersect
        i.e., set(batchid1) != set(batchid2)
        and intersection(set(batchid1), set(batchid2)) = \emptyset
    This function does not cover other cases yet and will warn users in such cases.

    # PARAMETERS
    # Mode parameters
    :param mode: one of ["vanilla", "change"]

    ## Genes/cells/batches selection parameters
    :param idx1: bool array masking subpopulation cells 1. Should be True where cell is
    from associated population
    :param idx2: bool array masking subpopulation cells 2. Should be True where cell is
    from associated population
    :param batchid1: List of batch ids for which you want to perform DE Analysis for
    subpopulation 1. By default, all ids are taken into account
    :param batchid2: List of batch ids for which you want to perform DE Analysis for
    subpopulation 2. By default, all ids are taken into account
    :param use_observed_batches: Whether normalized means are conditioned on observed
    batches

    ## Sampling parameters
    :param n_samples: Number of posterior samples
    :param use_permutation: Activates step 2 described above.
    Simply formulated, pairs obtained from posterior sampling (when calling
    `sample_scale_from_batch`) will be randomly permuted so that the number of
    pairs used to compute Bayes Factors becomes M_permutation.
    :param M_permutation: Number of times we will "mix" posterior samples in step 2.
    Only makes sense when use_permutation=True
    :param change_fn: function computing effect size based on both normalized means
    :param m1_domain_fn: custom indicator function of effect size regions
    inducing differential expression
    :param delta: specific case of region inducing differential expression.
    In this case, we suppose that R \ [-delta, delta] does not induce differential expression
    (LFC case)
    :param all_stats: whether additional metrics should be provided
    :\**kwargs: Other keywords arguments for `get_sample_scale()`
    :return: Differential expression properties
    """
    all_info = self.get_bayes_factors(
        idx1=idx1,
        idx2=idx2,
        mode=mode,
        batchid1=batchid1,
        batchid2=batchid2,
        use_observed_batches=use_observed_batches,
        n_samples=n_samples,
        use_permutation=use_permutation,
        M_permutation=M_permutation,
        change_fn=change_fn,
        m1_domain_fn=m1_domain_fn,
        delta=delta,
        **kwargs,
    )
    # result rows are genes followed by proteins (model's feature order)
    col_names = np.concatenate(
        [self.gene_dataset.gene_names, self.gene_dataset.protein_names]
    )
    if all_stats is True:
        # normalized-mean stats are not computed for proteins; pad with NaN
        nan = np.array([np.nan] * len(self.gene_dataset.protein_names))
        (
            mean1,
            mean2,
            nonz1,
            nonz2,
            norm_mean1,
            norm_mean2,
        ) = self.gene_dataset.raw_counts_properties(idx1, idx2)
        # protein raw-count summaries computed directly from the dataset
        mean1_pro = self.gene_dataset.protein_expression[idx1, :].mean(0)
        mean2_pro = self.gene_dataset.protein_expression[idx2, :].mean(0)
        nonz1_pro = (self.gene_dataset.protein_expression[idx1, :] > 0).mean(0)
        nonz2_pro = (self.gene_dataset.protein_expression[idx2, :] > 0).mean(0)
        # TODO implement properties for proteins
        genes_properties_dict = dict(
            raw_mean1=np.concatenate([mean1, mean1_pro]),
            raw_mean2=np.concatenate([mean2, mean2_pro]),
            non_zeros_proportion1=np.concatenate([nonz1, nonz1_pro]),
            non_zeros_proportion2=np.concatenate([nonz2, nonz2_pro]),
            raw_normalized_mean1=np.concatenate([norm_mean1, nan]),
            raw_normalized_mean2=np.concatenate([norm_mean2, nan]),
        )
        all_info = {**all_info, **genes_properties_dict}
    res = pd.DataFrame(all_info, index=col_names)
    # rank the most differentially expressed features first
    sort_key = "proba_de" if mode == "change" else "bayes_factor"
    res = res.sort_values(by=sort_key, ascending=False)
    return res
@torch.no_grad()
def generate_parameters(self):
    """Not implemented for this posterior."""
    raise NotImplementedError
# Default early-stopping configuration for TotalTrainer: monitor the ELBO,
# keep the best state by ELBO, and reduce the learning rate on plateau.
default_early_stopping_kwargs = {
    "early_stopping_metric": "elbo",
    "save_best_state_metric": "elbo",
    "patience": 45,  # epochs without improvement before stopping
    "threshold": 0,
    "reduce_lr_on_plateau": True,
    "lr_patience": 30,  # epochs without improvement before lowering the LR
    "lr_factor": 0.6,  # multiplicative LR decay factor
    "posterior_class": TotalPosterior,
}
class TotalTrainer(UnsupervisedTrainer):
    r"""The VariationalInference class for the unsupervised training of an autoencoder.

    Args:
        :model: A model instance from class ``TOTALVI``
        :gene_dataset: A gene_dataset instance like ``CbmcDataset()`` with attribute ``protein_expression``
        :train_size: The train size, either a float between 0 and 1 or and integer for the number of training samples
         to use Default: ``0.90``.
        :test_size: The test size, either a float between 0 and 1 or and integer for the number of training samples
         to use Default: ``0.10``. Note that if train and test do not add to 1 the remainder is placed in a validation set
        :pro_recons_weight: Scaling factor on the reconstruction loss for proteins. Default: ``1.0``.
        :n_epochs_kl_warmup: Number of epochs for annealing the KL terms for `z` and `mu` of the ELBO (from 0 to 1). If None, no warmup performed, unless
            `n_iter_kl_warmup` is set.
        :n_iter_kl_warmup: Number of minibatches for annealing the KL terms for `z` and `mu` of the ELBO (from 0 to 1). If set to "auto",
            the number of iterations is equal to 75% of the number of cells. `n_epochs_kl_warmup` takes precedence if it is not None. If both are None,
            then no warmup is performed.
        :\*\*kwargs: Other keywords arguments from the general Trainer class.
    """
    default_metrics_to_monitor = ["elbo"]

    def __init__(
        self,
        model,
        dataset,
        train_size=0.90,
        test_size=0.10,
        pro_recons_weight=1.0,
        n_epochs_kl_warmup=None,
        n_iter_kl_warmup="auto",
        early_stopping_kwargs=default_early_stopping_kwargs,
        discriminator=None,
        use_adversarial_loss=False,
        kappa=None,
        **kwargs,
    ):
        self.n_genes = dataset.nb_genes
        self.n_proteins = model.n_input_proteins
        # adversarial batch-mixing configuration
        self.use_adversarial_loss = use_adversarial_loss
        self.kappa = kappa
        self.pro_recons_weight = pro_recons_weight
        super().__init__(
            model,
            dataset,
            n_epochs_kl_warmup=n_epochs_kl_warmup,
            # "auto" warms up over 75% of the cells, per the class docstring
            n_iter_kl_warmup=0.75 * len(dataset)
            if n_iter_kl_warmup == "auto"
            else n_iter_kl_warmup,
            early_stopping_kwargs=early_stopping_kwargs,
            **kwargs,
        )
        if use_adversarial_loss is True and discriminator is None:
            # default discriminator predicts the batch from the latent space
            discriminator = Classifier(
                n_input=self.model.n_latent,
                n_hidden=32,
                n_labels=self.gene_dataset.n_batches,
                n_layers=2,
                logits=True,
            )
        self.discriminator = discriminator
        if self.use_cuda and self.discriminator is not None:
            self.discriminator.cuda()
        # only split the data when not constructed as a subclass
        if type(self) is TotalTrainer:
            (
                self.train_set,
                self.test_set,
                self.validation_set,
            ) = self.train_test_validation(
                model, dataset, train_size, test_size, type_class=TotalPosterior
            )
            self.train_set.to_monitor = []
            self.test_set.to_monitor = ["elbo"]
            self.validation_set.to_monitor = ["elbo"]

    def loss(self, tensors):
        """Mean totalVI ELBO over the mini-batch (KL terms warmup-weighted)."""
        (
            sample_batch_X,
            local_l_mean,
            local_l_var,
            batch_index,
            label,
            sample_batch_Y,
        ) = tensors
        (
            reconst_loss_gene,
            reconst_loss_protein,
            kl_div_z,
            kl_div_l_gene,
            kl_div_back_pro,
        ) = self.model(
            sample_batch_X,
            sample_batch_Y,
            local_l_mean,
            local_l_var,
            batch_index,
            label,
        )
        loss = torch.mean(
            reconst_loss_gene
            + self.pro_recons_weight * reconst_loss_protein
            + self.kl_weight * kl_div_z
            + kl_div_l_gene
            + self.kl_weight * kl_div_back_pro
        )
        return loss

    def loss_discriminator(
        self, z, batch_index, predict_true_class=True, return_details=True
    ):
        """Cross-entropy loss for the batch discriminator on latent codes.

        When predict_true_class is False the target is the uniform
        distribution over the *other* batches (used to fool the discriminator).
        """
        n_classes = self.gene_dataset.n_batches
        cls_logits = torch.nn.LogSoftmax(dim=1)(self.discriminator(z))
        if predict_true_class:
            cls_target = one_hot(batch_index, n_classes)
        else:
            one_hot_batch = one_hot(batch_index, n_classes)
            cls_target = torch.zeros_like(one_hot_batch)
            # place zeroes where true label is
            cls_target.masked_scatter_(
                ~one_hot_batch.bool(), torch.ones_like(one_hot_batch) / (n_classes - 1)
            )
        l_soft = cls_logits * cls_target
        loss = -l_soft.sum(dim=1).mean()
        return loss

    def _get_z(self, tensors):
        """Sample latent z for the mini-batch (no mean substitution)."""
        (
            sample_batch_X,
            local_l_mean,
            local_l_var,
            batch_index,
            label,
            sample_batch_Y,
        ) = tensors
        z = self.model.sample_from_posterior_z(
            sample_batch_X, sample_batch_Y, batch_index, give_mean=False
        )
        return z

    def train(self, n_epochs=500, lr=4e-3, eps=0.01, params=None):
        super().train(n_epochs=n_epochs, lr=lr, eps=eps, params=params)

    def on_training_loop(self, tensors_list):
        """One optimization step; interleaves the adversarial discriminator
        update with the generative update when adversarial loss is enabled."""
        if self.use_adversarial_loss:
            # by default the adversarial weight anneals opposite to the KL
            if self.kappa is None:
                kappa = 1 - self.kl_weight
            else:
                kappa = self.kappa
            batch_index = tensors_list[0][3]
            if kappa > 0:
                z = self._get_z(*tensors_list)
                # Train discriminator
                d_loss = self.loss_discriminator(z.detach(), batch_index, True)
                d_loss *= kappa
                self.d_optimizer.zero_grad()
                d_loss.backward()
                self.d_optimizer.step()

                # Train generative model to fool discriminator
                fool_loss = self.loss_discriminator(z, batch_index, False)
                fool_loss *= kappa

            # Train generative model
            self.optimizer.zero_grad()
            self.current_loss = loss = self.loss(*tensors_list)
            if kappa > 0:
                (loss + fool_loss).backward()
            else:
                loss.backward()
            self.optimizer.step()
        else:
            self.current_loss = loss = self.loss(*tensors_list)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

    def training_extras_init(self, lr_d=1e-3, eps=0.01):
        """Create the discriminator optimizer before training starts."""
        if self.discriminator is not None:
            self.discriminator.train()
            d_params = filter(
                lambda p: p.requires_grad, self.discriminator.parameters()
            )
            self.d_optimizer = torch.optim.Adam(d_params, lr=lr_d, eps=eps)

    def training_extras_end(self):
        """Switch the discriminator back to eval mode after training."""
        if self.discriminator is not None:
            self.discriminator.eval()
|
#!/usr/bin/env python
# Packaging script for the jet-python distribution.
import os
import sys

try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils when setuptools is unavailable.
    from distutils.core import setup

# Shortcut: `python setup.py publish` uploads a source distribution.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

# Long description: README + docs pointer + changelog.
readme = open('README.rst').read()
doclink = """
Documentation
-------------
The full documentation is at http://jet-python.rtfd.org."""
history = open('HISTORY.rst').read().replace('.. :changelog:', '')

setup(
    name='jet-python',
    version='0.1',
    description='Python Jet.com API Client',
    long_description=readme + '\n\n' + doclink + '\n\n' + history,
    author='Fulfil.IO Inc.',
    author_email='tech-support@fulfil.io',
    url='https://github.com/fulfilio/jet-python',
    packages=[
        'jet',
    ],
    package_dir={'jet': 'jet'},
    include_package_data=True,
    install_requires=[
        'python-dateutil',
        'requests',
    ],
    license='MIT',
    zip_safe=False,
    keywords='jet jet.com python',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
)
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# GYP build definition for the IPC fuzzer replay executable.
{
  'variables': {
    'chromium_code': 1,
  },
  'targets': [
    {
      'target_name': 'ipc_fuzzer_replay',
      'type': 'executable',
      # Links the shared IPC message library and the mojo IPC layer.
      'dependencies': [
        '../message_lib/message_lib.gyp:ipc_message_lib',
        '../../../ipc/mojo/ipc_mojo.gyp:ipc_mojo'
      ],
      'sources': [
        'replay.cc',
        'replay_process.cc',
        'replay_process.h',
      ],
      'include_dirs': [
        '../../..',
      ],
      'defines': [
        'USE_CUPS',
      ],
    },
  ],
}
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions often receive an image, perform some visualization on the image.
The functions do not return a value, instead they modify the image itself.
"""
import abc
import collections
import functools
# Set headless-friendly backend.
import matplotlib;
matplotlib.use('Agg') # pylint: disable=multiple-statements
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
import tensorflow as tf
import cv2 # mohammad added
import os # mohammad added
import pandas as pd # mohammad added
from datetime import datetime # mohammad added
# import xlsxwriter # mohammad added
import datetime as dt # mohammad added
import csv # mohammad added
from object_detection.core import standard_fields as fields
from object_detection.utils import shape_utils
# from twilio.rest import Client # mohammad added
# Pixel margins used when placing figure titles.
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
# Named colors understood by PIL's ImageColor, used when drawing
# detection boxes/masks so different classes get distinct colors.
STANDARD_COLORS = [
    'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
    'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
    'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
    'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
    'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
    'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
    'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
    'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
    'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
    'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
    'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
    'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
    'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
    'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
    'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
    'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
    'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
    'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
    'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
    'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
    'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
    'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
    'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def save_image_array_as_png(image, output_path):
  """Saves an image (represented as a numpy array) to PNG.

  Args:
    image: a numpy array with shape [height, width, 3].
    output_path: path to which image should be written.
  """
  image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
  # PNG data is binary: open the file in 'wb' mode. Text mode ('w') raises
  # a TypeError under Python 3 when PIL writes bytes to the handle.
  with tf.gfile.Open(output_path, 'wb') as fid:
    image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
  """Encodes a numpy array into a PNG string.

  Args:
    image: a numpy array with shape [height, width, 3].

  Returns:
    PNG encoded image string.
  """
  pil_image = Image.fromarray(np.uint8(image))
  # The context manager closes the in-memory buffer on exit.
  with six.BytesIO() as output:
    pil_image.save(output, format='PNG')
    return output.getvalue()
def draw_bounding_box_on_image_array(image,
                                     ymin,
                                     xmin,
                                     ymax,
                                     xmax,
                                     color='red',
                                     thickness=4,
                                     display_str_list=(),
                                     use_normalized_coordinates=True):
  """Adds a bounding box to an image (numpy array).

  Bounding box coordinates can be specified in either absolute (pixel) or
  normalized coordinates by setting the use_normalized_coordinates argument.

  Args:
    image: a numpy array with shape [height, width, 3].
    ymin: ymin of bounding box.
    xmin: xmin of bounding box.
    ymax: ymax of bounding box.
    xmax: xmax of bounding box.
    color: color to draw bounding box. Default is red.
    thickness: line thickness. Default value is 4.
    display_str_list: list of strings to display in box
                      (each to be shown on its own line).
    use_normalized_coordinates: If True (default), treat coordinates
      ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
      coordinates as absolute.
  """
  # Draw on a PIL copy, then write the pixels back into the input array.
  pil_image = Image.fromarray(np.uint8(image)).convert('RGB')
  draw_bounding_box_on_image(
      pil_image,
      ymin,
      xmin,
      ymax,
      xmax,
      color,
      thickness,
      display_str_list,
      use_normalized_coordinates)
  np.copyto(image, np.array(pil_image))
def draw_bounding_box_on_image(image,
                               ymin,
                               xmin,
                               ymax,
                               xmax,
                               color='red',
                               thickness=4,
                               display_str_list=(),
                               use_normalized_coordinates=True):
  """Adds a bounding box to an image.

  Bounding box coordinates can be specified in either absolute (pixel) or
  normalized coordinates by setting the use_normalized_coordinates argument.

  Each string in display_str_list is displayed on a separate line above the
  bounding box in black text on a rectangle filled with the input 'color'.
  If the top of the bounding box extends to the edge of the image, the strings
  are displayed below the bounding box.

  Args:
    image: a PIL.Image object.
    ymin: ymin of bounding box.
    xmin: xmin of bounding box.
    ymax: ymax of bounding box.
    xmax: xmax of bounding box.
    color: color to draw bounding box. Default is red.
    thickness: line thickness. Default value is 4.
    display_str_list: list of strings to display in box
      (each to be shown on its own line).
    use_normalized_coordinates: If True (default), treat coordinates
      ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
      coordinates as absolute.
  """
  draw = ImageDraw.Draw(image)
  im_width, im_height = image.size
  # Convert normalized coordinates into absolute pixel coordinates.
  if use_normalized_coordinates:
    (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
                                  ymin * im_height, ymax * im_height)
  else:
    (left, right, top, bottom) = (xmin, xmax, ymin, ymax)
  # Outline the box as a closed polyline back to the starting corner.
  draw.line([(left, top), (left, bottom), (right, bottom),
             (right, top), (left, top)], width=thickness, fill=color)
  try:
    font = ImageFont.truetype('arial.ttf', 12)
  except IOError:
    # Fall back to PIL's built-in bitmap font when arial.ttf is unavailable.
    font = ImageFont.load_default()
  # If the total height of the display strings added to the top of the bounding
  # box exceeds the top of the image, stack the strings below the bounding box
  # instead of above.
  display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
  # Each display_str has a top and bottom margin of 0.05x.
  total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
  if top > total_display_str_height:
    text_bottom = top
  else:
    text_bottom = bottom + total_display_str_height
  # Reverse list and print from bottom to top.
  for display_str in display_str_list[::-1]:
    text_width, text_height = font.getsize(display_str)
    margin = np.ceil(0.05 * text_height)
    # Filled background rectangle behind the label text.
    draw.rectangle(
        [(left, text_bottom - text_height - 2 * margin), (left + text_width,
                                                          text_bottom)],
        fill=color)
    draw.text(
        (left + margin, text_bottom - text_height - margin),
        display_str,
        fill='black',
        font=font)
    # NOTE(review): subtracting `text_height - 2 * margin` (instead of
    # `text_height + 2 * margin`) makes consecutive labels overlap by
    # 2 * margin; this matches the code as written -- confirm the intended
    # line spacing before changing.
    text_bottom -= text_height - 2 * margin
def draw_bounding_boxes_on_image_array(image,
                                       boxes,
                                       color='red',
                                       thickness=4,
                                       display_str_list_list=()):
  """Draws bounding boxes on an image given as a numpy array, in place.

  Args:
    image: a numpy array object.
    boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
      The coordinates are in normalized format between [0, 1].
    color: color to draw bounding box. Default is red.
    thickness: line thickness. Default value is 4.
    display_str_list_list: list of list of strings.
      a list of strings for each bounding box.
      The reason to pass a list of strings for a
      bounding box is that it might contain
      multiple labels.

  Raises:
    ValueError: if boxes is not a [N, 4] array
  """
  # Render with PIL, then write the pixels back into the caller's array.
  pil_image = Image.fromarray(image)
  draw_bounding_boxes_on_image(pil_image, boxes, color, thickness,
                               display_str_list_list)
  np.copyto(image, np.array(pil_image))
def draw_bounding_boxes_on_image(image,
                                 boxes,
                                 color='red',
                                 thickness=4,
                                 display_str_list_list=()):
  """Draws bounding boxes on a PIL image.

  Args:
    image: a PIL.Image object.
    boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
      The coordinates are in normalized format between [0, 1].
    color: color to draw bounding box. Default is red.
    thickness: line thickness. Default value is 4.
    display_str_list_list: list of list of strings.
      a list of strings for each bounding box.
      The reason to pass a list of strings for a
      bounding box is that it might contain
      multiple labels.

  Raises:
    ValueError: if boxes is not a [N, 4] array
  """
  shape = boxes.shape
  # A 0-d array carries no boxes at all; silently do nothing.
  if not shape:
    return
  if len(shape) != 2 or shape[1] != 4:
    raise ValueError('Input must be of size [N, 4]')
  for idx in range(shape[0]):
    strs = display_str_list_list[idx] if display_str_list_list else ()
    draw_bounding_box_on_image(image, boxes[idx, 0], boxes[idx, 1],
                               boxes[idx, 2], boxes[idx, 3], color,
                               thickness, strs)
def _visualize_boxes(image, boxes, classes, scores, category_index, **kwargs):
  """py_func-compatible wrapper: boxes only (no masks, no keypoints)."""
  return visualize_boxes_and_labels_on_image_array(
      image,
      boxes,
      classes,
      scores,
      category_index=category_index,
      **kwargs)
def _visualize_boxes_and_masks(image, boxes, classes, scores, masks,
                               category_index, **kwargs):
  """py_func-compatible wrapper: boxes plus instance masks."""
  return visualize_boxes_and_labels_on_image_array(
      image, boxes, classes, scores,
      category_index=category_index,
      instance_masks=masks,
      **kwargs)
def _visualize_boxes_and_keypoints(image, boxes, classes, scores, keypoints,
                                   category_index, **kwargs):
  """py_func-compatible wrapper: boxes plus keypoints."""
  return visualize_boxes_and_labels_on_image_array(
      image, boxes, classes, scores,
      category_index=category_index,
      keypoints=keypoints,
      **kwargs)
def _visualize_boxes_and_masks_and_keypoints(
    image, boxes, classes, scores, masks, keypoints, category_index, **kwargs):
  """py_func-compatible wrapper: boxes plus both masks and keypoints."""
  return visualize_boxes_and_labels_on_image_array(
      image, boxes, classes, scores,
      category_index=category_index,
      instance_masks=masks,
      keypoints=keypoints,
      **kwargs)
def _resize_original_image(image, image_shape):
  """Resizes a single uint8 image tensor to `image_shape` (nearest neighbor)."""
  # resize_images expects a batch, so add and later strip a batch dimension.
  batched = tf.expand_dims(image, 0)
  resized = tf.image.resize_images(
      batched,
      image_shape,
      method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
      align_corners=True)
  return tf.cast(tf.squeeze(resized, 0), tf.uint8)
def draw_bounding_boxes_on_image_tensors(images,
                                         boxes,
                                         classes,
                                         scores,
                                         category_index,
                                         original_image_spatial_shape=None,
                                         true_image_shape=None,
                                         instance_masks=None,
                                         keypoints=None,
                                         max_boxes_to_draw=20,
                                         min_score_thresh=0.2,
                                         use_normalized_coordinates=True):
  """Draws bounding boxes, masks, and keypoints on batch of image tensors.

  Args:
    images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
      channels will be ignored. If C = 1, then we convert the images to RGB
      images.
    boxes: [N, max_detections, 4] float32 tensor of detection boxes.
    classes: [N, max_detections] int tensor of detection classes. Note that
      classes are 1-indexed.
    scores: [N, max_detections] float32 tensor of detection scores.
    category_index: a dict that maps integer ids to category dicts. e.g.
      {1: {1: 'dog'}, 2: {2: 'cat'}, ...}
    original_image_spatial_shape: [N, 2] tensor containing the spatial size of
      the original image.
    true_image_shape: [N, 3] tensor containing the spatial size of unpadded
      original_image.
    instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
      instance masks.
    keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
      with keypoints.
    max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
    min_score_thresh: Minimum score threshold for visualization. Default 0.2.
    use_normalized_coordinates: Whether to assume boxes and kepoints are in
      normalized coordinates (as opposed to absolute coordiantes).
      Default is True.

  Returns:
    4D image tensor of type uint8, with boxes drawn on top.
  """
  # Additional channels are being ignored.
  if images.shape[3] > 3:
    images = images[:, :, :, 0:3]
  elif images.shape[3] == 1:
    images = tf.image.grayscale_to_rgb(images)
  # Keyword arguments forwarded to every per-image visualization callback.
  visualization_keyword_args = {
      'use_normalized_coordinates': use_normalized_coordinates,
      'max_boxes_to_draw': max_boxes_to_draw,
      'min_score_thresh': min_score_thresh,
      'agnostic_mode': False,
      'line_thickness': 4
  }
  # -1 sentinel shapes stand in when the true/original shapes are not given.
  if true_image_shape is None:
    true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])
  else:
    true_shapes = true_image_shape
  if original_image_spatial_shape is None:
    original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])
  else:
    original_shapes = original_image_spatial_shape
  # Select the py_func wrapper matching the optional inputs supplied; `elems`
  # is the parallel list of per-image tensors handed to tf.map_fn.
  if instance_masks is not None and keypoints is None:
    visualize_boxes_fn = functools.partial(
        _visualize_boxes_and_masks,
        category_index=category_index,
        **visualization_keyword_args)
    elems = [
        true_shapes, original_shapes, images, boxes, classes, scores,
        instance_masks
    ]
  elif instance_masks is None and keypoints is not None:
    visualize_boxes_fn = functools.partial(
        _visualize_boxes_and_keypoints,
        category_index=category_index,
        **visualization_keyword_args)
    elems = [
        true_shapes, original_shapes, images, boxes, classes, scores, keypoints
    ]
  elif instance_masks is not None and keypoints is not None:
    visualize_boxes_fn = functools.partial(
        _visualize_boxes_and_masks_and_keypoints,
        category_index=category_index,
        **visualization_keyword_args)
    elems = [
        true_shapes, original_shapes, images, boxes, classes, scores,
        instance_masks, keypoints
    ]
  else:
    visualize_boxes_fn = functools.partial(
        _visualize_boxes,
        category_index=category_index,
        **visualization_keyword_args)
    elems = [
        true_shapes, original_shapes, images, boxes, classes, scores
    ]
  def draw_boxes(image_and_detections):
    """Draws boxes on image."""
    true_shape = image_and_detections[0]
    original_shape = image_and_detections[1]
    if true_image_shape is not None:
      # Strip padding so only the valid region of the image is visualized.
      image = shape_utils.pad_or_clip_nd(image_and_detections[2],
                                         [true_shape[0], true_shape[1], 3])
    if original_image_spatial_shape is not None:
      # NOTE(review): `image` is only bound when true_image_shape is not None;
      # if original_image_spatial_shape is given without true_image_shape this
      # raises NameError -- confirm callers always pass both or neither.
      image_and_detections[2] = _resize_original_image(image, original_shape)
    image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections[2:],
                                  tf.uint8)
    return image_with_boxes
  images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
  return images
def draw_side_by_side_evaluation_image(eval_dict,
                                       category_index,
                                       max_boxes_to_draw=20,
                                       min_score_thresh=0.2,
                                       use_normalized_coordinates=True):
  """Creates a side-by-side image with detections and groundtruth.

  Bounding boxes (and instance masks, if available) are visualized on both
  subimages.

  Args:
    eval_dict: The evaluation dictionary returned by
      eval_util.result_dict_for_batched_example() or
      eval_util.result_dict_for_single_example().
    category_index: A category index (dictionary) produced from a labelmap.
    max_boxes_to_draw: The maximum number of boxes to draw for detections.
    min_score_thresh: The minimum score threshold for showing detections.
    use_normalized_coordinates: Whether to assume boxes and kepoints are in
      normalized coordinates (as opposed to absolute coordiantes).
      Default is True.

  Returns:
    A list of [1, H, 2 * W, C] uint8 tensor. The subimage on the left
    corresponds to detections, while the subimage on the right corresponds to
    groundtruth.
  """
  detection_fields = fields.DetectionResultFields()
  input_data_fields = fields.InputDataFields()
  images_with_detections_list = []
  # Add the batch dimension if the eval_dict is for single example.
  # NOTE: this mutates eval_dict in place (every entry except original_image).
  if len(eval_dict[detection_fields.detection_classes].shape) == 1:
    for key in eval_dict:
      if key != input_data_fields.original_image:
        eval_dict[key] = tf.expand_dims(eval_dict[key], 0)
  # Produce one side-by-side image per example in the batch.
  for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
    instance_masks = None
    if detection_fields.detection_masks in eval_dict:
      instance_masks = tf.cast(
          tf.expand_dims(
              eval_dict[detection_fields.detection_masks][indx], axis=0),
          tf.uint8)
    keypoints = None
    if detection_fields.detection_keypoints in eval_dict:
      keypoints = tf.expand_dims(
          eval_dict[detection_fields.detection_keypoints][indx], axis=0)
    groundtruth_instance_masks = None
    if input_data_fields.groundtruth_instance_masks in eval_dict:
      groundtruth_instance_masks = tf.cast(
          tf.expand_dims(
              eval_dict[input_data_fields.groundtruth_instance_masks][indx],
              axis=0), tf.uint8)
    # Left subimage: model detections filtered by score threshold.
    images_with_detections = draw_bounding_boxes_on_image_tensors(
        tf.expand_dims(
            eval_dict[input_data_fields.original_image][indx], axis=0),
        tf.expand_dims(
            eval_dict[detection_fields.detection_boxes][indx], axis=0),
        tf.expand_dims(
            eval_dict[detection_fields.detection_classes][indx], axis=0),
        tf.expand_dims(
            eval_dict[detection_fields.detection_scores][indx], axis=0),
        category_index,
        original_image_spatial_shape=tf.expand_dims(
            eval_dict[input_data_fields.original_image_spatial_shape][indx],
            axis=0),
        true_image_shape=tf.expand_dims(
            eval_dict[input_data_fields.true_image_shape][indx], axis=0),
        instance_masks=instance_masks,
        keypoints=keypoints,
        max_boxes_to_draw=max_boxes_to_draw,
        min_score_thresh=min_score_thresh,
        use_normalized_coordinates=use_normalized_coordinates)
    # Right subimage: groundtruth boxes, drawn with all-ones scores and no
    # score threshold so every box is shown.
    images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
        tf.expand_dims(
            eval_dict[input_data_fields.original_image][indx], axis=0),
        tf.expand_dims(
            eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
        tf.expand_dims(
            eval_dict[input_data_fields.groundtruth_classes][indx], axis=0),
        tf.expand_dims(
            tf.ones_like(
                eval_dict[input_data_fields.groundtruth_classes][indx],
                dtype=tf.float32),
            axis=0),
        category_index,
        original_image_spatial_shape=tf.expand_dims(
            eval_dict[input_data_fields.original_image_spatial_shape][indx],
            axis=0),
        true_image_shape=tf.expand_dims(
            eval_dict[input_data_fields.true_image_shape][indx], axis=0),
        instance_masks=groundtruth_instance_masks,
        keypoints=None,
        max_boxes_to_draw=None,
        min_score_thresh=0.0,
        use_normalized_coordinates=use_normalized_coordinates)
    # Concatenate along the width axis: detections on the left,
    # groundtruth on the right.
    images_with_detections_list.append(
        tf.concat([images_with_detections, images_with_groundtruth], axis=2))
  return images_with_detections_list
def draw_keypoints_on_image_array(image,
                                  keypoints,
                                  color='red',
                                  radius=2,
                                  use_normalized_coordinates=True):
  """Draws keypoints on an image given as a numpy array, in place.

  Args:
    image: a numpy array with shape [height, width, 3].
    keypoints: a numpy array with shape [num_keypoints, 2].
    color: color to draw the keypoints with. Default is red.
    radius: keypoint radius. Default value is 2.
    use_normalized_coordinates: if True (default), treat keypoint values as
      relative to the image. Otherwise treat them as absolute.
  """
  # Render with PIL, then copy the pixels back into the caller's array.
  pil_image = Image.fromarray(np.uint8(image)).convert('RGB')
  draw_keypoints_on_image(pil_image, keypoints, color, radius,
                          use_normalized_coordinates)
  np.copyto(image, np.array(pil_image))
def draw_keypoints_on_image(image,
                            keypoints,
                            color='red',
                            radius=2,
                            use_normalized_coordinates=True):
  """Draws keypoints on a PIL image as small filled circles.

  Args:
    image: a PIL.Image object.
    keypoints: a numpy array with shape [num_keypoints, 2].
    color: color to draw the keypoints with. Default is red.
    radius: keypoint radius. Default value is 2.
    use_normalized_coordinates: if True (default), treat keypoint values as
      relative to the image. Otherwise treat them as absolute.
  """
  draw = ImageDraw.Draw(image)
  im_width, im_height = image.size
  # Keypoints are stored (y, x); split into separate coordinate lists.
  xs = [k[1] for k in keypoints]
  ys = [k[0] for k in keypoints]
  if use_normalized_coordinates:
    xs = tuple(im_width * x for x in xs)
    ys = tuple(im_height * y for y in ys)
  for x, y in zip(xs, ys):
    draw.ellipse([(x - radius, y - radius), (x + radius, y + radius)],
                 outline=color, fill=color)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
  """Draws a binary mask on an image, blending it in place with `alpha`.

  Args:
    image: uint8 numpy array with shape (img_height, img_height, 3)
    mask: a uint8 numpy array of shape (img_height, img_height) with
      values between either 0 or 1.
    color: color to draw the keypoints with. Default is red.
    alpha: transparency value between 0 and 1. (default: 0.4)

  Raises:
    ValueError: On incorrect data type for image or masks.
  """
  # Validate inputs before touching PIL.
  if image.dtype != np.uint8:
    raise ValueError('`image` not of type np.uint8')
  if mask.dtype != np.uint8:
    raise ValueError('`mask` not of type np.uint8')
  if np.any(np.logical_and(mask != 1, mask != 0)):
    raise ValueError('`mask` elements should be in [0, 1]')
  if image.shape[:2] != mask.shape:
    raise ValueError('The image has spatial dimensions %s but the mask has '
                     'dimensions %s' % (image.shape[:2], mask.shape))
  rgb = ImageColor.getrgb(color)
  pil_image = Image.fromarray(image)
  # Build a solid-color layer and use the (scaled) mask as its alpha channel.
  solid_color = np.expand_dims(
      np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
  color_layer = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
  alpha_layer = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')
  composited = Image.composite(color_layer, pil_image, alpha_layer)
  np.copyto(image, np.array(composited.convert('RGB')))
def visualize_boxes_and_labels_on_image_array(
    image,
    boxes,
    classes,
    scores,
    category_index,
    instance_masks=None,
    instance_boundaries=None,
    keypoints=None,
    use_normalized_coordinates=False,
    max_boxes_to_draw=100,
    min_score_thresh=.5,
    agnostic_mode=False,
    line_thickness=4,
    groundtruth_box_visualization_color='black',
    skip_scores=False,
    skip_labels=False):
  """Overlay labeled boxes on an image with formatted scores and label names.

  This function groups boxes that correspond to the same location
  and creates a display string for each detection and overlays these
  on the image. Note that this function modifies the image in place, and returns
  that same image.

  Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    boxes: a numpy array of shape [N, 4]
    classes: a numpy array of shape [N]. Note that class indices are 1-based,
      and match the keys in the label map.
    scores: a numpy array of shape [N] or None. If scores=None, then
      this function assumes that the boxes to be plotted are groundtruth
      boxes and plot all boxes as black with no classes or scores.
    category_index: a dict containing category dictionaries (each holding
      category index `id` and category name `name`) keyed by category indices.
    instance_masks: a numpy array of shape [N, image_height, image_width] with
      values ranging between 0 and 1, can be None.
    instance_boundaries: a numpy array of shape [N, image_height, image_width]
      with values ranging between 0 and 1, can be None.
    keypoints: a numpy array of shape [N, num_keypoints, 2], can
      be None
    use_normalized_coordinates: whether boxes is to be interpreted as
      normalized coordinates or not.
    max_boxes_to_draw: maximum number of boxes to visualize. If None, draw
      all boxes.
    min_score_thresh: minimum score threshold for a box to be visualized
    agnostic_mode: boolean (default: False) controlling whether to evaluate in
      class-agnostic mode or not. This mode will display scores but ignore
      classes.
    line_thickness: integer (default: 4) controlling line width of the boxes.
    groundtruth_box_visualization_color: box color for visualizing groundtruth
      boxes
    skip_scores: whether to skip score when drawing a single detection
    skip_labels: whether to skip label when drawing a single detection

  Returns:
    uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
  """
  # Create a display string (and color) for every box location, group any boxes
  # that correspond to the same location.
  box_to_display_str_map = collections.defaultdict(list)
  box_to_color_map = collections.defaultdict(str)
  box_to_instance_masks_map = {}
  box_to_instance_boundaries_map = {}
  box_to_keypoints_map = collections.defaultdict(list)
  if not max_boxes_to_draw:
    max_boxes_to_draw = boxes.shape[0]
  for i in range(min(max_boxes_to_draw, boxes.shape[0])):
    if scores is None or scores[i] > min_score_thresh:
      box = tuple(boxes[i].tolist())
      if instance_masks is not None:
        box_to_instance_masks_map[box] = instance_masks[i]
      if instance_boundaries is not None:
        box_to_instance_boundaries_map[box] = instance_boundaries[i]
      if keypoints is not None:
        box_to_keypoints_map[box].extend(keypoints[i])
      if scores is None:
        # No scores: treat every box as groundtruth with a fixed color.
        box_to_color_map[box] = groundtruth_box_visualization_color
      else:
        display_str = ''
        if not skip_labels:
          if not agnostic_mode:
            if classes[i] in category_index.keys():
              class_name = category_index[classes[i]]['name']
            else:
              class_name = 'N/A'
            display_str = str(class_name)
        if not skip_scores:
          if not display_str:
            display_str = '{}%'.format(int(100 * scores[i]))
          else:
            display_str = '{}: {}%'.format(display_str, int(100 * scores[i]))
        box_to_display_str_map[box].append(display_str)
        if agnostic_mode:
          box_to_color_map[box] = 'DarkOrange'
        else:
          # Deterministic per-class color.
          box_to_color_map[box] = STANDARD_COLORS[
              classes[i] % len(STANDARD_COLORS)]
  # Draw all boxes onto image.
  for box, color in box_to_color_map.items():
    ymin, xmin, ymax, xmax = box
    if instance_masks is not None:
      draw_mask_on_image_array(
          image,
          box_to_instance_masks_map[box],
          color=color
      )
    if instance_boundaries is not None:
      draw_mask_on_image_array(
          image,
          box_to_instance_boundaries_map[box],
          color='red',
          alpha=1.0
      )
    draw_bounding_box_on_image_array(
        image,
        ymin,
        xmin,
        ymax,
        xmax,
        color=color,
        thickness=line_thickness,
        display_str_list=box_to_display_str_map[box],
        use_normalized_coordinates=use_normalized_coordinates)
    if keypoints is not None:
      draw_keypoints_on_image_array(
          image,
          box_to_keypoints_map[box],
          color=color,
          radius=line_thickness / 2,
          use_normalized_coordinates=use_normalized_coordinates)
  return image
# --- Local additions below (not part of the upstream TF Object Detection API visualization utils) ---
def visualize_boxes_and_labels_on_image_array_over_mohammad(
    image,
    frame_number,
    boxes,
    classes,
    scores,
    category_index,
    instance_masks=None,
    instance_boundaries=None,
    keypoints=None,
    use_normalized_coordinates=False,
    max_boxes_to_draw=20,
    min_score_thresh=.5,
    agnostic_mode=False,
    line_thickness=4,
    groundtruth_box_visualization_color='black',
    skip_scores=False,
    skip_labels=False):
  """Overlay labeled boxes on an image with formatted scores and label names.

  This function groups boxes that correspond to the same location
  and creates a display string for each detection and overlays these
  on the image. Note that this function modifies the image in place, and returns
  that same image.

  Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    frame_number: frame counter supplied by the caller; currently unused,
      kept for interface compatibility.
    boxes: a numpy array of shape [N, 4]
    classes: a numpy array of shape [N]. Note that class indices are 1-based,
      and match the keys in the label map.
    scores: a numpy array of shape [N] or None. If scores=None, then
      this function assumes that the boxes to be plotted are groundtruth
      boxes and plot all boxes as black with no classes or scores.
    category_index: a dict containing category dictionaries (each holding
      category index `id` and category name `name`) keyed by category indices.
    instance_masks: a numpy array of shape [N, image_height, image_width] with
      values ranging between 0 and 1, can be None.
    instance_boundaries: a numpy array of shape [N, image_height, image_width]
      with values ranging between 0 and 1, can be None.
    keypoints: a numpy array of shape [N, num_keypoints, 2], can
      be None
    use_normalized_coordinates: whether boxes is to be interpreted as
      normalized coordinates or not.
    max_boxes_to_draw: maximum number of boxes to visualize. If None, draw
      all boxes.
    min_score_thresh: minimum score threshold for a box to be visualized
    agnostic_mode: boolean (default: False) controlling whether to evaluate in
      class-agnostic mode or not. This mode will display scores but ignore
      classes.
    line_thickness: integer (default: 4) controlling line width of the boxes.
    groundtruth_box_visualization_color: box color for visualizing groundtruth
      boxes
    skip_scores: whether to skip score when drawing a single detection
    skip_labels: whether to skip label when drawing a single detection

  Returns:
    uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
  """
  # Create a display string (and color) for every box location, group any boxes
  # that correspond to the same location.
  box_to_display_str_map = collections.defaultdict(list)
  box_to_color_map = collections.defaultdict(str)
  box_to_instance_masks_map = {}
  box_to_instance_boundaries_map = {}
  box_to_keypoints_map = collections.defaultdict(list)
  if not max_boxes_to_draw:
    max_boxes_to_draw = boxes.shape[0]
  for i in range(min(max_boxes_to_draw, boxes.shape[0])):
    if scores is None or scores[i] > min_score_thresh:
      box = tuple(boxes[i].tolist())
      if instance_masks is not None:
        box_to_instance_masks_map[box] = instance_masks[i]
      if instance_boundaries is not None:
        box_to_instance_boundaries_map[box] = instance_boundaries[i]
      if keypoints is not None:
        box_to_keypoints_map[box].extend(keypoints[i])
      if scores is None:
        box_to_color_map[box] = groundtruth_box_visualization_color
      else:
        display_str = ''
        if not skip_labels:
          if not agnostic_mode:
            if classes[i] in category_index.keys():
              class_name = category_index[classes[i]]['name']
            else:
              class_name = 'N/A'
            display_str = str(class_name)
        if not skip_scores:
          if not display_str:
            display_str = '{}%'.format(int(100 * scores[i]))
          else:
            display_str = '{}: {}%'.format(display_str, int(100 * scores[i]))
        box_to_display_str_map[box].append(display_str)
        if agnostic_mode:
          box_to_color_map[box] = 'DarkOrange'
        else:
          box_to_color_map[box] = STANDARD_COLORS[
              classes[i] % len(STANDARD_COLORS)]
  # Draw all boxes onto image.
  for box, color in box_to_color_map.items():
    ymin, xmin, ymax, xmax = box
    if instance_masks is not None:
      draw_mask_on_image_array(
          image,
          box_to_instance_masks_map[box],
          color=color
      )
    if instance_boundaries is not None:
      draw_mask_on_image_array(
          image,
          box_to_instance_boundaries_map[box],
          color='red',
          alpha=1.0
      )
    draw_bounding_box_on_image_array(
        image,
        ymin,
        xmin,
        ymax,
        xmax,
        color=color,
        thickness=line_thickness,
        display_str_list=box_to_display_str_map[box],
        use_normalized_coordinates=use_normalized_coordinates)
    if keypoints is not None:
      draw_keypoints_on_image_array(
          image,
          box_to_keypoints_map[box],
          color=color,
          radius=line_thickness / 2,
          use_normalized_coordinates=use_normalized_coordinates)
  # Fixed: the docstring promises the (in-place modified) image is returned;
  # the previous version fell off the end and returned None. Also removed a
  # leftover debug `print("h")` that fired for every box without keypoints.
  return image
# Local addition: extracts absolute pixel coordinates for detections instead of drawing them.
def return_coordinates(
    image,
    count,
    boxes,
    classes,
    scores,
    category_index,
    instance_masks=None,
    instance_boundaries=None,
    keypoints=None,
    use_normalized_coordinates=False,
    max_boxes_to_draw=20,
    min_score_thresh=.5,
    agnostic_mode=False,
    line_thickness=4,
    groundtruth_box_visualization_color='black',
    skip_scores=False,
    skip_labels=False):
  """Returns pixel coordinates and labels for the boxes that pass filtering.

  Uses the same selection logic as visualize_boxes_and_labels_on_image_array
  but, instead of drawing, converts each kept box to absolute pixel
  coordinates.

  Args:
    image: uint8 numpy array with shape (img_height, img_width, 3); only its
      shape is used, it is not modified.
    count: frame/box counter supplied by the caller; currently unused, kept
      for interface compatibility.
    boxes: a numpy array of shape [N, 4] with normalized
      (ymin, xmin, ymax, xmax).
    classes: a numpy array of shape [N] of 1-based class indices.
    scores: a numpy array of shape [N] or None.
    category_index: a dict containing category dictionaries (each holding
      category index `id` and category name `name`) keyed by category indices.
    instance_masks: optional [N, image_height, image_width] array, recorded
      per box but not used in the returned list.
    instance_boundaries: optional [N, image_height, image_width] array,
      recorded per box but not used in the returned list.
    keypoints: optional [N, num_keypoints, 2] array, recorded per box but not
      used in the returned list.
    use_normalized_coordinates: unused; kept for interface compatibility.
    max_boxes_to_draw: maximum number of boxes to consider. If None, consider
      all boxes.
    min_score_thresh: minimum score threshold for a box to be included.
    agnostic_mode: if True, label every box 'DarkOrange' style (score only).
    line_thickness: unused; kept for interface compatibility.
    groundtruth_box_visualization_color: color recorded for groundtruth boxes
      (scores is None).
    skip_scores: whether to omit the score from the label string.
    skip_labels: whether to omit the class name from the label string.

  Returns:
    A list of [ymin, ymax, xmin, xmax, score_percent, label] entries, one per
    kept box, with coordinates in absolute pixels.
  """
  # Group selection metadata by box location, mirroring the drawing helpers.
  box_to_display_str_map = collections.defaultdict(list)
  box_to_color_map = collections.defaultdict(str)
  box_to_instance_masks_map = {}
  box_to_instance_boundaries_map = {}
  box_to_score_map = {}
  box_to_keypoints_map = collections.defaultdict(list)
  if not max_boxes_to_draw:
    max_boxes_to_draw = boxes.shape[0]
  for i in range(min(max_boxes_to_draw, boxes.shape[0])):
    if scores is None or scores[i] > min_score_thresh:
      box = tuple(boxes[i].tolist())
      if instance_masks is not None:
        box_to_instance_masks_map[box] = instance_masks[i]
      if instance_boundaries is not None:
        box_to_instance_boundaries_map[box] = instance_boundaries[i]
      if keypoints is not None:
        box_to_keypoints_map[box].extend(keypoints[i])
      if scores is None:
        box_to_color_map[box] = groundtruth_box_visualization_color
      else:
        display_str = ''
        if not skip_labels:
          if not agnostic_mode:
            if classes[i] in category_index.keys():
              class_name = category_index[classes[i]]['name']
            else:
              class_name = 'N/A'
            display_str = str(class_name)
        if not skip_scores:
          if not display_str:
            display_str = '{}%'.format(int(100 * scores[i]))
          else:
            display_str = '{}: {}%'.format(display_str, int(100 * scores[i]))
        box_to_display_str_map[box].append(display_str)
        box_to_score_map[box] = scores[i]
        if agnostic_mode:
          box_to_color_map[box] = 'DarkOrange'
        else:
          box_to_color_map[box] = STANDARD_COLORS[
              classes[i] % len(STANDARD_COLORS)]
  # Convert every kept box to absolute pixel coordinates.
  # NOTE(review): when scores is None, box_to_display_str_map/box_to_score_map
  # have no entry for the box and this loop raises -- this matches the prior
  # behavior; callers are expected to pass scores.
  coordinates_list = []
  for box in box_to_color_map:
    ymin, xmin, ymax, xmax = box
    height, width, channels = image.shape
    ymin = int(ymin * height)
    ymax = int(ymax * height)
    xmin = int(xmin * width)
    xmax = int(xmax * width)
    classification = box_to_display_str_map[box][0]
    coordinates_list.append(
        [ymin, ymax, xmin, xmax, (box_to_score_map[box] * 100),
         classification])
  return coordinates_list
def add_cdf_image_summary(values, name):
  """Adds a tf.summary.image for a CDF plot of the values.

  Normalizes `values` such that they sum to 1, plots the cumulative distribution
  function and creates a tf image summary.

  Args:
    values: a 1-D float32 tensor containing the values.
    name: name for the image summary.
  """
  def cdf_plot(values):
    """Numpy function to plot CDF."""
    normalized_values = values / np.sum(values)
    sorted_values = np.sort(normalized_values)
    cumulative_values = np.cumsum(sorted_values)
    fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)
                            / cumulative_values.size)
    fig = plt.figure(frameon=False)
    ax = fig.add_subplot('111')
    ax.plot(fraction_of_examples, cumulative_values)
    ax.set_ylabel('cumulative normalized values')
    ax.set_xlabel('fraction of examples')
    fig.canvas.draw()
    width, height = fig.get_size_inches() * fig.get_dpi()
    # Rasterize the matplotlib canvas into a [1, H, W, 3] uint8 array.
    # NOTE(review): np.fromstring is deprecated in newer numpy releases
    # (np.frombuffer is the replacement) -- confirm the pinned numpy version.
    image = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape(
        1, int(height), int(width), 3)
    return image
  # Rebind the name to the tensor produced by wrapping the numpy plot
  # function as a graph op.
  cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)
  tf.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
  """Adds a tf.summary.image for a histogram plot of the values.

  Plots the histogram of values and creates a tf image summary.

  Args:
    values: a 1-D float32 tensor containing the values.
    bins: bin edges which will be directly passed to np.histogram.
    name: name for the image summary.
  """
  def hist_plot(values, bins):
    """Numpy function to plot hist."""
    fig = plt.figure(frameon=False)
    ax = fig.add_subplot('111')
    y, x = np.histogram(values, bins=bins)
    # np.histogram returns len(bins) edges and len(bins)-1 counts; drop the
    # last edge so the two line up.
    ax.plot(x[:-1], y)
    ax.set_ylabel('count')
    ax.set_xlabel('value')
    fig.canvas.draw()
    width, height = fig.get_size_inches() * fig.get_dpi()
    # Rasterize the matplotlib canvas into a [1, H, W, 3] uint8 array.
    # NOTE(review): np.fromstring is deprecated in newer numpy releases
    # (np.frombuffer is the replacement) -- confirm the pinned numpy version.
    image = np.fromstring(
        fig.canvas.tostring_rgb(), dtype='uint8').reshape(
            1, int(height), int(width), 3)
    return image
  # Rebind the name to the tensor produced by wrapping the numpy plot
  # function as a graph op.
  hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)
  tf.summary.image(name, hist_plot)
class EvalMetricOpsVisualization(object):
  """Abstract base class responsible for visualizations during evaluation.

  Currently, summary images are not run during evaluation. One way to produce
  evaluation images in Tensorboard is to provide tf.summary.image strings as
  `value_ops` in tf.estimator.EstimatorSpec's `eval_metric_ops`. This class is
  responsible for accruing images (with overlaid detections and groundtruth)
  and returning a dictionary that can be passed to `eval_metric_ops`.
  """
  # Python 2 style ABC declaration; has no effect on Python 3, where
  # `class EvalMetricOpsVisualization(object, metaclass=abc.ABCMeta)` would
  # be needed instead.
  __metaclass__ = abc.ABCMeta

  def __init__(self,
               category_index,
               max_examples_to_draw=5,
               max_boxes_to_draw=20,
               min_score_thresh=0.2,
               use_normalized_coordinates=True,
               summary_name_prefix='evaluation_image'):
    """Creates an EvalMetricOpsVisualization.

    Args:
      category_index: A category index (dictionary) produced from a labelmap.
      max_examples_to_draw: The maximum number of example summaries to produce.
      max_boxes_to_draw: The maximum number of boxes to draw for detections.
      min_score_thresh: The minimum score threshold for showing detections.
      use_normalized_coordinates: Whether to assume boxes and keypoints are in
        normalized coordinates (as opposed to absolute coordinates).
        Default is True.
      summary_name_prefix: A string prefix for each image summary.
    """
    self._category_index = category_index
    self._max_examples_to_draw = max_examples_to_draw
    self._max_boxes_to_draw = max_boxes_to_draw
    self._min_score_thresh = min_score_thresh
    self._use_normalized_coordinates = use_normalized_coordinates
    self._summary_name_prefix = summary_name_prefix
    # Accrued list of [1, H, W, C] uint8 image arrays, appended to by the
    # update_op via add_images() and drained by get_images().
    self._images = []

  def clear(self):
    # Drop all accrued images so the next evaluation starts fresh.
    self._images = []

  def add_images(self, images):
    """Store a list of images, each with shape [1, H, W, C]."""
    if len(self._images) >= self._max_examples_to_draw:
      return
    # Store images and clip list if necessary.
    self._images.extend(images)
    if len(self._images) > self._max_examples_to_draw:
      self._images[self._max_examples_to_draw:] = []

  def get_estimator_eval_metric_ops(self, eval_dict):
    """Returns metric ops for use in tf.estimator.EstimatorSpec.

    Args:
      eval_dict: A dictionary that holds an image, groundtruth, and detections
        for a batched example. Note that, we use only the first example for
        visualization. See eval_util.result_dict_for_batched_example() for a
        convenient method for constructing such a dictionary. The dictionary
        contains
        fields.InputDataFields.original_image: [batch_size, H, W, 3] image.
        fields.InputDataFields.original_image_spatial_shape: [batch_size, 2]
          tensor containing the size of the original image.
        fields.InputDataFields.true_image_shape: [batch_size, 3]
          tensor containing the spatial size of the unpadded original image.
        fields.InputDataFields.groundtruth_boxes - [batch_size, num_boxes, 4]
          float32 tensor with groundtruth boxes in range [0.0, 1.0].
        fields.InputDataFields.groundtruth_classes - [batch_size, num_boxes]
          int64 tensor with 1-indexed groundtruth classes.
        fields.InputDataFields.groundtruth_instance_masks - (optional)
          [batch_size, num_boxes, H, W] int64 tensor with instance masks.
        fields.DetectionResultFields.detection_boxes - [batch_size,
          max_num_boxes, 4] float32 tensor with detection boxes in range [0.0,
          1.0].
        fields.DetectionResultFields.detection_classes - [batch_size,
          max_num_boxes] int64 tensor with 1-indexed detection classes.
        fields.DetectionResultFields.detection_scores - [batch_size,
          max_num_boxes] float32 tensor with detection scores.
        fields.DetectionResultFields.detection_masks - (optional) [batch_size,
          max_num_boxes, H, W] float32 tensor of binarized masks.
        fields.DetectionResultFields.detection_keypoints - (optional)
          [batch_size, max_num_boxes, num_keypoints, 2] float32 tensor with
          keypoints.

    Returns:
      A dictionary of image summary names to tuple of (value_op, update_op). The
      `update_op` is the same for all items in the dictionary, and is
      responsible for saving a single side-by-side image with detections and
      groundtruth. Each `value_op` holds the tf.summary.image string for a given
      image.
    """
    if self._max_examples_to_draw == 0:
      return {}
    images = self.images_from_evaluation_dict(eval_dict)

    def get_images():
      """Returns a list of images, padded to self._max_images_to_draw."""
      images = self._images
      while len(images) < self._max_examples_to_draw:
        # Pad with rank-0 placeholders so the py_func below always returns
        # exactly _max_examples_to_draw outputs.
        images.append(np.array(0, dtype=np.uint8))
      self.clear()
      return images

    def image_summary_or_default_string(summary_name, image):
      """Returns image summaries for non-padded elements."""
      # Rank 4 means a real [1, H, W, C] image; the rank-0 padding values
      # produce an empty-string summary instead.
      return tf.cond(
          tf.equal(tf.size(tf.shape(image)), 4),
          lambda: tf.summary.image(summary_name, image),
          lambda: tf.constant(''))

    # Only the first example of the batch is accrued for visualization.
    update_op = tf.py_func(self.add_images, [[images[0]]], [])
    image_tensors = tf.py_func(
        get_images, [], [tf.uint8] * self._max_examples_to_draw)
    eval_metric_ops = {}
    for i, image in enumerate(image_tensors):
      summary_name = self._summary_name_prefix + '/' + str(i)
      value_op = image_summary_or_default_string(summary_name, image)
      # Every entry shares the single update_op.
      eval_metric_ops[summary_name] = (value_op, update_op)
    return eval_metric_ops

  @abc.abstractmethod
  def images_from_evaluation_dict(self, eval_dict):
    """Converts evaluation dictionary into a list of image tensors.

    To be overridden by implementations.

    Args:
      eval_dict: A dictionary with all the necessary information for producing
        visualizations.

    Returns:
      A list of [1, H, W, C] uint8 tensors.
    """
    raise NotImplementedError
class VisualizeSingleFrameDetections(EvalMetricOpsVisualization):
  """Class responsible for single-frame object detection visualizations."""

  def __init__(self,
               category_index,
               max_examples_to_draw=5,
               max_boxes_to_draw=20,
               min_score_thresh=0.2,
               use_normalized_coordinates=True,
               summary_name_prefix='Detections_Left_Groundtruth_Right'):
    """Creates the visualizer; see EvalMetricOpsVisualization for arguments.

    Only the default summary name prefix differs from the base class.
    """
    # Forward every argument positionally; the base-class parameter order is
    # identical to this constructor's.
    super(VisualizeSingleFrameDetections, self).__init__(
        category_index, max_examples_to_draw, max_boxes_to_draw,
        min_score_thresh, use_normalized_coordinates, summary_name_prefix)

  def images_from_evaluation_dict(self, eval_dict):
    """Renders one side-by-side detections/groundtruth image per example."""
    side_by_side = draw_side_by_side_evaluation_image(
        eval_dict, self._category_index, self._max_boxes_to_draw,
        self._min_score_thresh, self._use_normalized_coordinates)
    return side_by_side
|
# -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..one_drive_object_base import OneDriveObjectBase
class SingleValueLegacyExtendedProperty(OneDriveObjectBase):
    """Graph model for a single-value legacy extended property.

    Wraps a property dictionary (as deserialized from the service) and
    exposes its ``value`` entry.
    """

    def __init__(self, prop_dict=None):
        """Initialize from an optional property dictionary.

        Args:
            prop_dict: backing dict of property values. Defaults to a fresh
                empty dict per instance.
        """
        # Bug fix: the original used a mutable default argument
        # (``prop_dict={}``), so every instance constructed without arguments
        # shared — and mutated — the same dict.
        self._prop_dict = {} if prop_dict is None else prop_dict

    @property
    def value(self):
        """
        Gets and sets the value

        Returns:
            str:
                The value
        """
        if "value" in self._prop_dict:
            return self._prop_dict["value"]
        else:
            return None

    @value.setter
    def value(self, val):
        self._prop_dict["value"] = val
|
from rpython.jit.backend.llsupport.descr import get_size_descr,\
get_field_descr, get_array_descr, ArrayDescr, FieldDescr,\
SizeDescrWithVTable, get_interiorfield_descr
from rpython.jit.backend.llsupport.gc import GcLLDescr_boehm,\
GcLLDescr_framework
from rpython.jit.backend.llsupport import jitframe
from rpython.jit.metainterp.gc import get_description
from rpython.jit.tool.oparser import parse
from rpython.jit.metainterp.optimizeopt.util import equaloplists
from rpython.jit.codewriter.heaptracker import register_known_gctype
from rpython.jit.metainterp.history import JitCellToken, FLOAT
from rpython.jit.metainterp.history import AbstractFailDescr
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper import rclass
from rpython.jit.backend.x86.arch import WORD
class Evaluator(object):
    """Dict-like object whose lookups evaluate the key as an expression.

    Used as the right-hand side of ``template % Evaluator(ns)`` so that
    ``%(some.python + expr)d`` placeholders in expected-operation templates
    are computed against ``ns``.

    NOTE(review): keys are passed straight to ``eval`` — only use with
    trusted, test-local templates.
    """

    def __init__(self, scope):
        self.scope = scope

    def __getitem__(self, key):
        # The key is an arbitrary Python expression; evaluate it with the
        # stored scope as globals.
        return eval(key, self.scope)
class FakeLoopToken(object):
    """Bare stand-in for a compiled loop token; attributes (``frame_info``,
    ``_ll_initial_locs``, ...) are attached ad hoc by the tests."""
    pass
class RewriteTests(object):
    """Shared harness: assert that the GC rewriter turns one trace into another.

    Subclasses provide ``self.gc_ll_descr`` and ``self.cpu`` in their
    ``setup_method``.
    """

    def check_rewrite(self, frm_operations, to_operations, **namespace):
        """Parse ``frm_operations``, run rewrite_assembler, and compare the
        result against ``to_operations`` (a %-template evaluated against the
        local descriptors below plus any extra ``namespace`` entries)."""
        # Build the menagerie of GC types/descriptors that the operation
        # strings refer to by name.  NOTE: every local defined here becomes
        # part of the template namespace via ``namespace.update(locals())``.
        S = lltype.GcStruct('S', ('x', lltype.Signed),
                            ('y', lltype.Signed))
        sdescr = get_size_descr(self.gc_ll_descr, S)
        sdescr.tid = 1234
        #
        T = lltype.GcStruct('T', ('y', lltype.Signed),
                            ('z', lltype.Ptr(S)),
                            ('t', lltype.Signed))
        tdescr = get_size_descr(self.gc_ll_descr, T)
        tdescr.tid = 5678
        tzdescr = get_field_descr(self.gc_ll_descr, T, 'z')
        #
        A = lltype.GcArray(lltype.Signed)
        adescr = get_array_descr(self.gc_ll_descr, A)
        adescr.tid = 4321
        alendescr = adescr.lendescr
        #
        B = lltype.GcArray(lltype.Char)
        bdescr = get_array_descr(self.gc_ll_descr, B)
        bdescr.tid = 8765
        blendescr = bdescr.lendescr
        #
        C = lltype.GcArray(lltype.Ptr(S))
        cdescr = get_array_descr(self.gc_ll_descr, C)
        cdescr.tid = 8111
        clendescr = cdescr.lendescr
        #
        E = lltype.GcStruct('Empty')
        edescr = get_size_descr(self.gc_ll_descr, E)
        edescr.tid = 9000
        #
        vtable_descr = self.gc_ll_descr.fielddescr_vtable
        O = lltype.GcStruct('O', ('parent', rclass.OBJECT),
                            ('x', lltype.Signed))
        o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True)
        register_known_gctype(self.cpu, o_vtable, O)
        #
        tiddescr = self.gc_ll_descr.fielddescr_tid
        wbdescr = self.gc_ll_descr.write_barrier_descr
        # Re-expose the module-level constant as a local so the templates
        # can reference WORD.
        WORD = globals()['WORD']
        #
        strdescr = self.gc_ll_descr.str_descr
        unicodedescr = self.gc_ll_descr.unicode_descr
        strlendescr = strdescr.lendescr
        unicodelendescr = unicodedescr.lendescr
        strhashdescr = self.gc_ll_descr.str_hash_descr
        unicodehashdescr = self.gc_ll_descr.unicode_hash_descr
        # Fake call_assembler target: a loop token with a raw frame-info
        # record (freed at the end of this method).
        casmdescr = JitCellToken()
        clt = FakeLoopToken()
        clt._ll_initial_locs = [0, 8]
        frame_info = lltype.malloc(jitframe.JITFRAMEINFO, flavor='raw')
        clt.frame_info = frame_info
        frame_info.jfi_frame_depth = 13
        frame_info.jfi_frame_size = 255
        framedescrs = self.gc_ll_descr.getframedescrs(self.cpu)
        framelendescr = framedescrs.arraydescr.lendescr
        jfi_frame_depth = framedescrs.jfi_frame_depth
        jfi_frame_size = framedescrs.jfi_frame_size
        jf_frame_info = framedescrs.jf_frame_info
        jf_savedata = framedescrs.jf_savedata
        jf_force_descr = framedescrs.jf_force_descr
        jf_descr = framedescrs.jf_descr
        jf_guard_exc = framedescrs.jf_guard_exc
        jf_forward = framedescrs.jf_forward
        jf_extra_stack_depth = framedescrs.jf_extra_stack_depth
        signedframedescr = self.cpu.signedframedescr
        floatframedescr = self.cpu.floatframedescr
        casmdescr.compiled_loop_token = clt
        #
        guarddescr = AbstractFailDescr()
        #
        namespace.update(locals())
        #
        # Expose the generated malloc functions and their descrs by name
        # (e.g. malloc_fixedsize / malloc_fixedsize_descr).
        for funcname in self.gc_ll_descr._generated_functions:
            namespace[funcname] = self.gc_ll_descr.get_malloc_fn(funcname)
            namespace[funcname + '_descr'] = getattr(self.gc_ll_descr,
                                                     '%s_descr' % funcname)
        #
        ops = parse(frm_operations, namespace=namespace)
        expected = parse(to_operations % Evaluator(namespace),
                         namespace=namespace)
        operations = self.gc_ll_descr.rewrite_assembler(self.cpu,
                                                        ops.operations,
                                                        [])
        equaloplists(operations, expected.operations)
        lltype.free(frame_info, flavor='raw')
class FakeTracker(object):
    """Empty placeholder for the CPU's tracker attribute."""
    pass
class BaseFakeCPU(object):
    """Minimal CPU stand-in exposing just the descr hooks the rewriter uses."""
    JITFRAME_FIXED_SIZE = 0

    def __init__(self):
        self.tracker = FakeTracker()
        # Single memo table for both array descrs (keyed by ARRAY type) and
        # field descrs (keyed by (STRUCT, fname) tuples).
        self._cache = {}
        self.signedframedescr = ArrayDescr(3, 8, FieldDescr('len', 0, 0, 0), 0)
        self.floatframedescr = ArrayDescr(5, 8, FieldDescr('len', 0, 0, 0), 0)

    def getarraydescr_for_frame(self, tp):
        # Floats get their own frame-array descriptor; everything else is
        # stored in the signed frame array.
        return self.floatframedescr if tp == FLOAT else self.signedframedescr

    def unpack_arraydescr_size(self, d):
        """Return (basesize, itemsize, sign-flag) for an array descr."""
        return 0, d.itemsize, 0

    def unpack_fielddescr(self, d):
        """Return the byte offset of a field descr."""
        return d.offset

    def arraydescrof(self, ARRAY):
        # Memoize so repeated lookups of the same array type share one descr.
        if ARRAY not in self._cache:
            self._cache[ARRAY] = ArrayDescr(1, 2, FieldDescr('len', 0, 0, 0),
                                            0)
        return self._cache[ARRAY]

    def fielddescrof(self, STRUCT, fname):
        # Memoize per (struct, field-name) pair.
        key = (STRUCT, fname)
        if key not in self._cache:
            self._cache[key] = FieldDescr(fname, 1, 1, 1)
        return self._cache[key]
class TestBoehm(RewriteTests):
    """Rewrite tests against the Boehm GC: no nursery, so every allocation
    becomes a direct call_malloc_gc."""

    def setup_method(self, meth):
        class FakeCPU(BaseFakeCPU):
            def sizeof(self, STRUCT):
                # Fixed fake instance size used by new_with_vtable tests.
                return SizeDescrWithVTable(102, gc_fielddescrs=[])
        self.cpu = FakeCPU()
        self.gc_ll_descr = GcLLDescr_boehm(None, None, None)

    def test_new(self):
        self.check_rewrite("""
            []
            p0 = new(descr=sdescr)
            jump()
        """, """
            [p1]
            p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\
                                descr=malloc_fixedsize_descr)
            jump()
        """)

    def test_no_collapsing(self):
        # Boehm has no nursery, so consecutive news are NOT merged into one
        # allocation.
        self.check_rewrite("""
            []
            p0 = new(descr=sdescr)
            p1 = new(descr=sdescr)
            jump()
        """, """
            []
            p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\
                                descr=malloc_fixedsize_descr)
            p1 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\
                                descr=malloc_fixedsize_descr)
            jump()
        """)

    def test_new_array_fixed(self):
        self.check_rewrite("""
            []
            p0 = new_array(10, descr=adescr)
            jump()
        """, """
            []
            p0 = call_malloc_gc(ConstClass(malloc_array), \
                                %(adescr.basesize)d, \
                                10, \
                                %(adescr.itemsize)d, \
                                %(adescr.lendescr.offset)d, \
                                descr=malloc_array_descr)
            jump()
        """)
        ## should ideally be:
        ## p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \
        ##                     %(adescr.basesize + 10 * adescr.itemsize)d, \
        ##                     descr=malloc_fixedsize_descr)
        ## setfield_gc(p0, 10, descr=alendescr)

    def test_new_array_variable(self):
        self.check_rewrite("""
            [i1]
            p0 = new_array(i1, descr=adescr)
            jump()
        """, """
            [i1]
            p0 = call_malloc_gc(ConstClass(malloc_array), \
                                %(adescr.basesize)d, \
                                i1, \
                                %(adescr.itemsize)d, \
                                %(adescr.lendescr.offset)d, \
                                descr=malloc_array_descr)
            jump()
        """)

    def test_new_with_vtable(self):
        self.check_rewrite("""
            []
            p0 = new_with_vtable(ConstClass(o_vtable))
            jump()
        """, """
            [p1]
            p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \
                                descr=malloc_fixedsize_descr)
            setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr)
            jump()
        """)

    def test_newstr(self):
        self.check_rewrite("""
            [i1]
            p0 = newstr(i1)
            jump()
        """, """
            [i1]
            p0 = call_malloc_gc(ConstClass(malloc_array), \
                                %(strdescr.basesize)d, \
                                i1, \
                                %(strdescr.itemsize)d, \
                                %(strlendescr.offset)d, \
                                descr=malloc_array_descr)
            jump()
        """)

    def test_newunicode(self):
        self.check_rewrite("""
            [i1]
            p0 = newunicode(10)
            jump()
        """, """
            [i1]
            p0 = call_malloc_gc(ConstClass(malloc_array), \
                                %(unicodedescr.basesize)d, \
                                10, \
                                %(unicodedescr.itemsize)d, \
                                %(unicodelendescr.offset)d, \
                                descr=malloc_array_descr)
            jump()
        """)
        ## should ideally be:
        ## p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \
        ##                %(unicodedescr.basesize + \
        ##                  10 * unicodedescr.itemsize)d, \
        ##                descr=malloc_fixedsize_descr)
        ## setfield_gc(p0, 10, descr=unicodelendescr)
class TestFramework(RewriteTests):
def setup_method(self, meth):
class config_(object):
class translation(object):
gc = 'minimark'
gcrootfinder = 'asmgcc'
gctransformer = 'framework'
gcremovetypeptr = False
gcdescr = get_description(config_)
self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None,
really_not_translated=True)
self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = (
lambda cpu: True)
self.gc_ll_descr.malloc_zero_filled = False
#
class FakeCPU(BaseFakeCPU):
def sizeof(self, STRUCT):
descr = SizeDescrWithVTable(104, gc_fielddescrs=[])
descr.tid = 9315
return descr
self.cpu = FakeCPU()
def test_rewrite_assembler_new_to_malloc(self):
self.check_rewrite("""
[p1]
p0 = new(descr=sdescr)
jump()
""", """
[p1]
p0 = call_malloc_nursery(%(sdescr.size)d)
setfield_gc(p0, 1234, descr=tiddescr)
jump()
""")
def test_rewrite_assembler_new3_to_malloc(self):
self.check_rewrite("""
[]
p0 = new(descr=sdescr)
p1 = new(descr=tdescr)
p2 = new(descr=sdescr)
jump()
""", """
[]
p0 = call_malloc_nursery( \
%(sdescr.size + tdescr.size + sdescr.size)d)
setfield_gc(p0, 1234, descr=tiddescr)
p1 = int_add(p0, %(sdescr.size)d)
setfield_gc(p1, 5678, descr=tiddescr)
p2 = int_add(p1, %(tdescr.size)d)
setfield_gc(p2, 1234, descr=tiddescr)
zero_ptr_field(p1, %(tdescr.gc_fielddescrs[0].offset)s)
jump()
""")
def test_rewrite_assembler_new_array_fixed_to_malloc(self):
self.check_rewrite("""
[]
p0 = new_array(10, descr=adescr)
jump()
""", """
[]
p0 = call_malloc_nursery( \
%(adescr.basesize + 10 * adescr.itemsize)d)
setfield_gc(p0, 4321, descr=tiddescr)
setfield_gc(p0, 10, descr=alendescr)
jump()
""")
def test_rewrite_assembler_new_and_new_array_fixed_to_malloc(self):
self.check_rewrite("""
[]
p0 = new(descr=sdescr)
p1 = new_array(10, descr=adescr)
jump()
""", """
[]
p0 = call_malloc_nursery( \
%(sdescr.size + \
adescr.basesize + 10 * adescr.itemsize)d)
setfield_gc(p0, 1234, descr=tiddescr)
p1 = int_add(p0, %(sdescr.size)d)
setfield_gc(p1, 4321, descr=tiddescr)
setfield_gc(p1, 10, descr=alendescr)
jump()
""")
def test_rewrite_assembler_round_up(self):
self.check_rewrite("""
[]
p0 = new_array(6, descr=bdescr)
jump()
""", """
[]
p0 = call_malloc_nursery(%(bdescr.basesize + 8)d)
setfield_gc(p0, 8765, descr=tiddescr)
setfield_gc(p0, 6, descr=blendescr)
jump()
""")
def test_rewrite_assembler_round_up_always(self):
self.check_rewrite("""
[]
p0 = new_array(5, descr=bdescr)
p1 = new_array(5, descr=bdescr)
p2 = new_array(5, descr=bdescr)
p3 = new_array(5, descr=bdescr)
jump()
""", """
[]
p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d)
setfield_gc(p0, 8765, descr=tiddescr)
setfield_gc(p0, 5, descr=blendescr)
p1 = int_add(p0, %(bdescr.basesize + 8)d)
setfield_gc(p1, 8765, descr=tiddescr)
setfield_gc(p1, 5, descr=blendescr)
p2 = int_add(p1, %(bdescr.basesize + 8)d)
setfield_gc(p2, 8765, descr=tiddescr)
setfield_gc(p2, 5, descr=blendescr)
p3 = int_add(p2, %(bdescr.basesize + 8)d)
setfield_gc(p3, 8765, descr=tiddescr)
setfield_gc(p3, 5, descr=blendescr)
jump()
""")
def test_rewrite_assembler_minimal_size(self):
self.check_rewrite("""
[]
p0 = new(descr=edescr)
p1 = new(descr=edescr)
jump()
""", """
[]
p0 = call_malloc_nursery(%(4*WORD)d)
setfield_gc(p0, 9000, descr=tiddescr)
p1 = int_add(p0, %(2*WORD)d)
setfield_gc(p1, 9000, descr=tiddescr)
jump()
""")
def test_rewrite_assembler_variable_size(self):
self.check_rewrite("""
[i0]
p0 = new_array(i0, descr=bdescr)
jump(i0)
""", """
[i0]
p0 = call_malloc_nursery_varsize(0, 1, i0, descr=bdescr)
setfield_gc(p0, i0, descr=blendescr)
jump(i0)
""")
def test_rewrite_new_string(self):
self.check_rewrite("""
[i0]
p0 = newstr(i0)
jump(i0)
""", """
[i0]
p0 = call_malloc_nursery_varsize(1, 1, i0, descr=strdescr)
setfield_gc(p0, i0, descr=strlendescr)
setfield_gc(p0, 0, descr=strhashdescr)
jump(i0)
""")
def test_rewrite_assembler_nonstandard_array(self):
# a non-standard array is a bit hard to get; e.g. GcArray(Float)
# is like that on Win32, but not on Linux. Build one manually...
NONSTD = lltype.GcArray(lltype.Float)
nonstd_descr = get_array_descr(self.gc_ll_descr, NONSTD)
nonstd_descr.tid = 6464
nonstd_descr.basesize = 64 # <= hacked
nonstd_descr.itemsize = 8
nonstd_descr_gcref = 123
self.check_rewrite("""
[i0, p1]
p0 = new_array(i0, descr=nonstd_descr)
setarrayitem_gc(p0, i0, p1)
jump(i0)
""", """
[i0, p1]
p0 = call_malloc_gc(ConstClass(malloc_array_nonstandard), \
64, 8, \
%(nonstd_descr.lendescr.offset)d, \
6464, i0, \
descr=malloc_array_nonstandard_descr)
cond_call_gc_wb_array(p0, i0, descr=wbdescr)
setarrayitem_gc(p0, i0, p1)
jump(i0)
""", nonstd_descr=nonstd_descr)
def test_rewrite_assembler_maximal_size_1(self):
self.gc_ll_descr.max_size_of_young_obj = 100
self.check_rewrite("""
[]
p0 = new_array(103, descr=bdescr)
jump()
""", """
[]
p0 = call_malloc_gc(ConstClass(malloc_array), 1, \
%(bdescr.tid)d, 103, \
descr=malloc_array_descr)
jump()
""")
def test_rewrite_assembler_maximal_size_2(self):
self.gc_ll_descr.max_size_of_young_obj = 300
self.check_rewrite("""
[]
p0 = new_array(101, descr=bdescr)
p1 = new_array(102, descr=bdescr) # two new_arrays can be combined
p2 = new_array(103, descr=bdescr) # but not all three
jump()
""", """
[]
p0 = call_malloc_nursery( \
%(2 * (bdescr.basesize + 104))d)
setfield_gc(p0, 8765, descr=tiddescr)
setfield_gc(p0, 101, descr=blendescr)
p1 = int_add(p0, %(bdescr.basesize + 104)d)
setfield_gc(p1, 8765, descr=tiddescr)
setfield_gc(p1, 102, descr=blendescr)
p2 = call_malloc_nursery( \
%(bdescr.basesize + 104)d)
setfield_gc(p2, 8765, descr=tiddescr)
setfield_gc(p2, 103, descr=blendescr)
jump()
""")
def test_rewrite_assembler_huge_size(self):
# "huge" is defined as "larger than 0xffffff bytes, or 16MB"
self.check_rewrite("""
[]
p0 = new_array(20000000, descr=bdescr)
jump()
""", """
[]
p0 = call_malloc_gc(ConstClass(malloc_array), 1, \
%(bdescr.tid)d, 20000000, \
descr=malloc_array_descr)
jump()
""")
def test_new_with_vtable(self):
self.check_rewrite("""
[]
p0 = new_with_vtable(ConstClass(o_vtable))
jump()
""", """
[p1]
p0 = call_malloc_nursery(104) # rounded up
setfield_gc(p0, 9315, descr=tiddescr)
setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr)
jump()
""")
def test_new_with_vtable_too_big(self):
self.gc_ll_descr.max_size_of_young_obj = 100
self.check_rewrite("""
[]
p0 = new_with_vtable(ConstClass(o_vtable))
jump()
""", """
[p1]
p0 = call_malloc_gc(ConstClass(malloc_big_fixedsize), 104, 9315, \
descr=malloc_big_fixedsize_descr)
setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr)
jump()
""")
def test_rewrite_assembler_newstr_newunicode(self):
self.check_rewrite("""
[i2]
p0 = newstr(14)
p1 = newunicode(10)
p2 = newunicode(i2)
p3 = newstr(i2)
jump()
""", """
[i2]
p0 = call_malloc_nursery( \
%(strdescr.basesize + 16 * strdescr.itemsize + \
unicodedescr.basesize + 10 * unicodedescr.itemsize)d)
setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr)
setfield_gc(p0, 14, descr=strlendescr)
setfield_gc(p0, 0, descr=strhashdescr)
p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d)
setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr)
setfield_gc(p1, 10, descr=unicodelendescr)
setfield_gc(p1, 0, descr=unicodehashdescr)
p2 = call_malloc_nursery_varsize(2, %(unicodedescr.itemsize)d, i2,\
descr=unicodedescr)
setfield_gc(p2, i2, descr=unicodelendescr)
setfield_gc(p2, 0, descr=unicodehashdescr)
p3 = call_malloc_nursery_varsize(1, 1, i2, \
descr=strdescr)
setfield_gc(p3, i2, descr=strlendescr)
setfield_gc(p3, 0, descr=strhashdescr)
jump()
""")
def test_write_barrier_before_setfield_gc(self):
self.check_rewrite("""
[p1, p2]
setfield_gc(p1, p2, descr=tzdescr)
jump()
""", """
[p1, p2]
cond_call_gc_wb(p1, descr=wbdescr)
setfield_gc(p1, p2, descr=tzdescr)
jump()
""")
def test_write_barrier_before_array_without_from_array(self):
self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = (
lambda cpu: False)
self.check_rewrite("""
[p1, i2, p3]
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""", """
[p1, i2, p3]
cond_call_gc_wb(p1, descr=wbdescr)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""")
def test_write_barrier_before_short_array(self):
self.gc_ll_descr.max_size_of_young_obj = 2000
self.check_rewrite("""
[i2, p3]
p1 = new_array_clear(129, descr=cdescr)
call(123456)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""", """
[i2, p3]
p1 = call_malloc_nursery( \
%(cdescr.basesize + 129 * cdescr.itemsize)d)
setfield_gc(p1, 8111, descr=tiddescr)
setfield_gc(p1, 129, descr=clendescr)
zero_array(p1, 0, 129, descr=cdescr)
call(123456)
cond_call_gc_wb(p1, descr=wbdescr)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""")
def test_write_barrier_before_long_array(self):
# the limit of "being too long" is fixed, arbitrarily, at 130
self.gc_ll_descr.max_size_of_young_obj = 2000
self.check_rewrite("""
[i2, p3]
p1 = new_array_clear(130, descr=cdescr)
call(123456)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""", """
[i2, p3]
p1 = call_malloc_nursery( \
%(cdescr.basesize + 130 * cdescr.itemsize)d)
setfield_gc(p1, 8111, descr=tiddescr)
setfield_gc(p1, 130, descr=clendescr)
zero_array(p1, 0, 130, descr=cdescr)
call(123456)
cond_call_gc_wb_array(p1, i2, descr=wbdescr)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""")
def test_write_barrier_before_unknown_array(self):
self.check_rewrite("""
[p1, i2, p3]
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""", """
[p1, i2, p3]
cond_call_gc_wb_array(p1, i2, descr=wbdescr)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""")
def test_label_makes_size_unknown(self):
self.check_rewrite("""
[i2, p3]
p1 = new_array_clear(5, descr=cdescr)
label(p1, i2, p3)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""", """
[i2, p3]
p1 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p1, 8111, descr=tiddescr)
setfield_gc(p1, 5, descr=clendescr)
zero_array(p1, 0, 5, descr=cdescr)
label(p1, i2, p3)
cond_call_gc_wb_array(p1, i2, descr=wbdescr)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""")
def test_write_barrier_before_setinteriorfield_gc(self):
S1 = lltype.GcStruct('S1')
INTERIOR = lltype.GcArray(('z', lltype.Ptr(S1)))
interiordescr = get_array_descr(self.gc_ll_descr, INTERIOR)
interiordescr.tid = 1291
interiorlendescr = interiordescr.lendescr
interiorzdescr = get_interiorfield_descr(self.gc_ll_descr,
INTERIOR, 'z')
self.check_rewrite("""
[p1, p2]
setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr)
jump(p1, p2)
""", """
[p1, p2]
cond_call_gc_wb_array(p1, 0, descr=wbdescr)
setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr)
jump(p1, p2)
""", interiorzdescr=interiorzdescr)
def test_initialization_store(self):
self.check_rewrite("""
[p1]
p0 = new(descr=tdescr)
setfield_gc(p0, p1, descr=tzdescr)
jump()
""", """
[p1]
p0 = call_malloc_nursery(%(tdescr.size)d)
setfield_gc(p0, 5678, descr=tiddescr)
setfield_gc(p0, p1, descr=tzdescr)
jump()
""")
def test_initialization_store_2(self):
self.check_rewrite("""
[]
p0 = new(descr=tdescr)
p1 = new(descr=sdescr)
setfield_gc(p0, p1, descr=tzdescr)
jump()
""", """
[]
p0 = call_malloc_nursery(%(tdescr.size + sdescr.size)d)
setfield_gc(p0, 5678, descr=tiddescr)
p1 = int_add(p0, %(tdescr.size)d)
setfield_gc(p1, 1234, descr=tiddescr)
# <<<no cond_call_gc_wb here>>>
setfield_gc(p0, p1, descr=tzdescr)
jump()
""")
def test_initialization_store_array(self):
self.check_rewrite("""
[p1, i2]
p0 = new_array_clear(5, descr=cdescr)
setarrayitem_gc(p0, i2, p1, descr=cdescr)
jump()
""", """
[p1, i2]
p0 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p0, 8111, descr=tiddescr)
setfield_gc(p0, 5, descr=clendescr)
zero_array(p0, 0, 5, descr=cdescr)
setarrayitem_gc(p0, i2, p1, descr=cdescr)
jump()
""")
def test_zero_array_reduced_left(self):
self.check_rewrite("""
[p1, p2]
p0 = new_array_clear(5, descr=cdescr)
setarrayitem_gc(p0, 1, p1, descr=cdescr)
setarrayitem_gc(p0, 0, p2, descr=cdescr)
jump()
""", """
[p1, p2]
p0 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p0, 8111, descr=tiddescr)
setfield_gc(p0, 5, descr=clendescr)
zero_array(p0, 2, 3, descr=cdescr)
setarrayitem_gc(p0, 1, p1, descr=cdescr)
setarrayitem_gc(p0, 0, p2, descr=cdescr)
jump()
""")
def test_zero_array_reduced_right(self):
self.check_rewrite("""
[p1, p2]
p0 = new_array_clear(5, descr=cdescr)
setarrayitem_gc(p0, 3, p1, descr=cdescr)
setarrayitem_gc(p0, 4, p2, descr=cdescr)
jump()
""", """
[p1, p2]
p0 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p0, 8111, descr=tiddescr)
setfield_gc(p0, 5, descr=clendescr)
zero_array(p0, 0, 3, descr=cdescr)
setarrayitem_gc(p0, 3, p1, descr=cdescr)
setarrayitem_gc(p0, 4, p2, descr=cdescr)
jump()
""")
def test_zero_array_not_reduced_at_all(self):
self.check_rewrite("""
[p1, p2]
p0 = new_array_clear(5, descr=cdescr)
setarrayitem_gc(p0, 3, p1, descr=cdescr)
setarrayitem_gc(p0, 2, p2, descr=cdescr)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""", """
[p1, p2]
p0 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p0, 8111, descr=tiddescr)
setfield_gc(p0, 5, descr=clendescr)
zero_array(p0, 0, 5, descr=cdescr)
setarrayitem_gc(p0, 3, p1, descr=cdescr)
setarrayitem_gc(p0, 2, p2, descr=cdescr)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""")
def test_zero_array_reduced_completely(self):
self.check_rewrite("""
[p1, p2]
p0 = new_array_clear(5, descr=cdescr)
setarrayitem_gc(p0, 3, p1, descr=cdescr)
setarrayitem_gc(p0, 4, p2, descr=cdescr)
setarrayitem_gc(p0, 0, p1, descr=cdescr)
setarrayitem_gc(p0, 2, p2, descr=cdescr)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""", """
[p1, p2]
p0 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p0, 8111, descr=tiddescr)
setfield_gc(p0, 5, descr=clendescr)
zero_array(p0, 5, 0, descr=cdescr)
setarrayitem_gc(p0, 3, p1, descr=cdescr)
setarrayitem_gc(p0, 4, p2, descr=cdescr)
setarrayitem_gc(p0, 0, p1, descr=cdescr)
setarrayitem_gc(p0, 2, p2, descr=cdescr)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""")
def test_zero_array_reduced_left_with_call(self):
self.check_rewrite("""
[p1, p2]
p0 = new_array_clear(5, descr=cdescr)
setarrayitem_gc(p0, 0, p1, descr=cdescr)
call(321321)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""", """
[p1, p2]
p0 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p0, 8111, descr=tiddescr)
setfield_gc(p0, 5, descr=clendescr)
zero_array(p0, 1, 4, descr=cdescr)
setarrayitem_gc(p0, 0, p1, descr=cdescr)
call(321321)
cond_call_gc_wb(p0, descr=wbdescr)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""")
def test_zero_array_reduced_left_with_label(self):
self.check_rewrite("""
[p1, p2]
p0 = new_array_clear(5, descr=cdescr)
setarrayitem_gc(p0, 0, p1, descr=cdescr)
label(p0, p2)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""", """
[p1, p2]
p0 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p0, 8111, descr=tiddescr)
setfield_gc(p0, 5, descr=clendescr)
zero_array(p0, 1, 4, descr=cdescr)
setarrayitem_gc(p0, 0, p1, descr=cdescr)
label(p0, p2)
cond_call_gc_wb_array(p0, 1, descr=wbdescr)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""")
def test_zero_array_varsize(self):
self.check_rewrite("""
[p1, p2, i3]
p0 = new_array_clear(i3, descr=bdescr)
jump()
""", """
[p1, p2, i3]
p0 = call_malloc_nursery_varsize(0, 1, i3, descr=bdescr)
setfield_gc(p0, i3, descr=blendescr)
zero_array(p0, 0, i3, descr=bdescr)
jump()
""")
def test_zero_array_varsize_cannot_reduce(self):
self.check_rewrite("""
[p1, p2, i3]
p0 = new_array_clear(i3, descr=bdescr)
setarrayitem_gc(p0, 0, p1, descr=bdescr)
jump()
""", """
[p1, p2, i3]
p0 = call_malloc_nursery_varsize(0, 1, i3, descr=bdescr)
setfield_gc(p0, i3, descr=blendescr)
zero_array(p0, 0, i3, descr=bdescr)
cond_call_gc_wb_array(p0, 0, descr=wbdescr)
setarrayitem_gc(p0, 0, p1, descr=bdescr)
jump()
""")
def test_initialization_store_potentially_large_array(self):
# the write barrier cannot be omitted, because we might get
# an array with cards and the GC assumes that the write
# barrier is always called, even on young (but large) arrays
self.check_rewrite("""
[i0, p1, i2]
p0 = new_array(i0, descr=bdescr)
setarrayitem_gc(p0, i2, p1, descr=bdescr)
jump()
""", """
[i0, p1, i2]
p0 = call_malloc_nursery_varsize(0, 1, i0, descr=bdescr)
setfield_gc(p0, i0, descr=blendescr)
cond_call_gc_wb_array(p0, i2, descr=wbdescr)
setarrayitem_gc(p0, i2, p1, descr=bdescr)
jump()
""")
def test_non_initialization_store(self):
self.check_rewrite("""
[i0]
p0 = new(descr=tdescr)
p1 = newstr(i0)
setfield_gc(p0, p1, descr=tzdescr)
jump()
""", """
[i0]
p0 = call_malloc_nursery(%(tdescr.size)d)
setfield_gc(p0, 5678, descr=tiddescr)
zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s)
p1 = call_malloc_nursery_varsize(1, 1, i0, \
descr=strdescr)
setfield_gc(p1, i0, descr=strlendescr)
setfield_gc(p1, 0, descr=strhashdescr)
cond_call_gc_wb(p0, descr=wbdescr)
setfield_gc(p0, p1, descr=tzdescr)
jump()
""")
def test_non_initialization_store_label(self):
self.check_rewrite("""
[p1]
p0 = new(descr=tdescr)
label(p0, p1)
setfield_gc(p0, p1, descr=tzdescr)
jump()
""", """
[p1]
p0 = call_malloc_nursery(%(tdescr.size)d)
setfield_gc(p0, 5678, descr=tiddescr)
zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s)
label(p0, p1)
cond_call_gc_wb(p0, descr=wbdescr)
setfield_gc(p0, p1, descr=tzdescr)
jump()
""")
def test_multiple_writes(self):
self.check_rewrite("""
[p0, p1, p2]
setfield_gc(p0, p1, descr=tzdescr)
setfield_gc(p0, p2, descr=tzdescr)
jump(p1, p2, p0)
""", """
[p0, p1, p2]
cond_call_gc_wb(p0, descr=wbdescr)
setfield_gc(p0, p1, descr=tzdescr)
setfield_gc(p0, p2, descr=tzdescr)
jump(p1, p2, p0)
""")
def test_rewrite_call_assembler(self):
self.check_rewrite("""
[i0, f0]
i2 = call_assembler(i0, f0, descr=casmdescr)
""", """
[i0, f0]
i1 = getfield_raw(ConstClass(frame_info), descr=jfi_frame_size)
p1 = call_malloc_nursery_varsize_frame(i1)
setfield_gc(p1, 0, descr=tiddescr)
i2 = getfield_raw(ConstClass(frame_info), descr=jfi_frame_depth)
setfield_gc(p1, 0, descr=jf_extra_stack_depth)
setfield_gc(p1, NULL, descr=jf_savedata)
setfield_gc(p1, NULL, descr=jf_force_descr)
setfield_gc(p1, NULL, descr=jf_descr)
setfield_gc(p1, NULL, descr=jf_guard_exc)
setfield_gc(p1, NULL, descr=jf_forward)
setfield_gc(p1, i2, descr=framelendescr)
setfield_gc(p1, ConstClass(frame_info), descr=jf_frame_info)
setarrayitem_gc(p1, 0, i0, descr=signedframedescr)
setarrayitem_gc(p1, 1, f0, descr=floatframedescr)
i3 = call_assembler(p1, descr=casmdescr)
""")
    def test_int_add_ovf(self):
        """An overflow-checked int op and its guard pass through the rewrite
        untouched; only the preceding new() is expanded into the malloc +
        tid-store + field-zeroing sequence."""
        self.check_rewrite("""
            [i0]
            p0 = new(descr=tdescr)
            i1 = int_add_ovf(i0, 123)
            guard_overflow(descr=guarddescr) []
            jump()
        """, """
            [i0]
            p0 = call_malloc_nursery(%(tdescr.size)d)
            setfield_gc(p0, 5678, descr=tiddescr)
            zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s)
            i1 = int_add_ovf(i0, 123)
            guard_overflow(descr=guarddescr) []
            jump()
        """)
    def test_int_gt(self):
        """A plain comparison plus guard passes through the rewrite
        untouched; only the preceding new() is expanded."""
        self.check_rewrite("""
            [i0]
            p0 = new(descr=tdescr)
            i1 = int_gt(i0, 123)
            guard_false(i1, descr=guarddescr) []
            jump()
        """, """
            [i0]
            p0 = call_malloc_nursery(%(tdescr.size)d)
            setfield_gc(p0, 5678, descr=tiddescr)
            zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s)
            i1 = int_gt(i0, 123)
            guard_false(i1, descr=guarddescr) []
            jump()
        """)
    def test_zero_ptr_field_before_getfield(self):
        """The zero_ptr_field emitted for a fresh allocation must appear
        before any getfield_gc reading from that object."""
        # This case may need to be fixed in the metainterp/optimizeopt
        # already so that it no longer occurs for rewrite.py.  But anyway
        # it's a good idea to make sure rewrite.py is correct on its own.
        self.check_rewrite("""
            []
            p0 = new(descr=tdescr)
            p1 = getfield_gc(p0, descr=tdescr)
            jump(p1)
        """, """
            []
            p0 = call_malloc_nursery(%(tdescr.size)d)
            setfield_gc(p0, 5678, descr=tiddescr)
            zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s)
            p1 = getfield_gc(p0, descr=tdescr)
            jump(p1)
        """)
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: atomix/leader/latch.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import AsyncGenerator, List, Optional
import betterproto
from atomix.proto import headers
class EventResponseType(betterproto.Enum):
    """Kind of change reported in an EventResponse (generated from proto)."""

    CHANGED = 0
@dataclass
class CreateRequest(betterproto.Message):
    """Request message for LeaderLatchService/Create."""

    # Common Atomix request metadata.
    header: headers.RequestHeader = betterproto.message_field(1)
@dataclass
class CreateResponse(betterproto.Message):
    """Response message for LeaderLatchService/Create."""

    header: headers.ResponseHeader = betterproto.message_field(1)
@dataclass
class CloseRequest(betterproto.Message):
    """Request message for LeaderLatchService/Close."""

    header: headers.RequestHeader = betterproto.message_field(1)
    # When true, also delete the latch primitive instead of only closing it.
    delete: bool = betterproto.bool_field(2)
@dataclass
class CloseResponse(betterproto.Message):
    """Response message for LeaderLatchService/Close."""

    header: headers.ResponseHeader = betterproto.message_field(1)
@dataclass
class LatchRequest(betterproto.Message):
    """Request message for LeaderLatchService/Latch (acquire attempt)."""

    header: headers.RequestHeader = betterproto.message_field(1)
    # Identifier of the participant attempting to acquire the latch.
    participant_id: str = betterproto.string_field(2)
@dataclass
class LatchResponse(betterproto.Message):
    """Response message for LeaderLatchService/Latch."""

    header: headers.ResponseHeader = betterproto.message_field(1)
    # Current latch state after the acquire attempt.
    latch: "Latch" = betterproto.message_field(2)
@dataclass
class GetRequest(betterproto.Message):
    """Request message for LeaderLatchService/Get."""

    header: headers.RequestHeader = betterproto.message_field(1)
@dataclass
class GetResponse(betterproto.Message):
    """Response message for LeaderLatchService/Get."""

    header: headers.ResponseHeader = betterproto.message_field(1)
    # Current latch state, including the current leader.
    latch: "Latch" = betterproto.message_field(2)
@dataclass
class EventRequest(betterproto.Message):
    """Request message for the LeaderLatchService/Events stream."""

    header: headers.RequestHeader = betterproto.message_field(1)
@dataclass
class EventResponse(betterproto.Message):
    """One event on the LeaderLatchService/Events stream."""

    header: headers.ResponseHeader = betterproto.message_field(1)
    type: "EventResponseType" = betterproto.enum_field(2)
    # Latch state at the time of the event.
    latch: "Latch" = betterproto.message_field(3)
@dataclass
class Latch(betterproto.Message):
    """State of a leader latch: its id, current leader and participants."""

    id: int = betterproto.uint64_field(1)
    leader: str = betterproto.string_field(2)
    participants: List[str] = betterproto.string_field(3)
class LeaderLatchServiceStub(betterproto.ServiceStub):
    """Leader latch service.

    Generated gRPC client stub (python-betterproto): each method builds the
    request message, copies any supplied keyword arguments into it, and
    issues the RPC through the stub's channel.
    """

    async def create(
        self, *, header: Optional[headers.RequestHeader] = None
    ) -> CreateResponse:
        """Create creates a leader latch"""
        request = CreateRequest()
        if header is not None:
            request.header = header

        return await self._unary_unary(
            "/atomix.leader.LeaderLatchService/Create", request, CreateResponse,
        )

    async def close(
        self, *, header: Optional[headers.RequestHeader] = None, delete: bool = False
    ) -> CloseResponse:
        """Close closes a leader latch"""
        request = CloseRequest()
        if header is not None:
            request.header = header
        request.delete = delete

        return await self._unary_unary(
            "/atomix.leader.LeaderLatchService/Close", request, CloseResponse,
        )

    async def latch(
        self,
        *,
        header: Optional[headers.RequestHeader] = None,
        participant_id: str = "",
    ) -> LatchResponse:
        """Latch attempts to acquire the leader latch"""
        request = LatchRequest()
        if header is not None:
            request.header = header
        request.participant_id = participant_id

        return await self._unary_unary(
            "/atomix.leader.LeaderLatchService/Latch", request, LatchResponse,
        )

    async def get(
        self, *, header: Optional[headers.RequestHeader] = None
    ) -> GetResponse:
        """Get gets the current leader"""
        request = GetRequest()
        if header is not None:
            request.header = header

        return await self._unary_unary(
            "/atomix.leader.LeaderLatchService/Get", request, GetResponse,
        )

    async def events(
        self, *, header: Optional[headers.RequestHeader] = None
    ) -> AsyncGenerator[EventResponse, None]:
        """Events listens for leader change events"""
        request = EventRequest()
        if header is not None:
            request.header = header

        # Server-streaming RPC: yields one EventResponse per leader change.
        async for response in self._unary_stream(
            "/atomix.leader.LeaderLatchService/Events", request, EventResponse,
        ):
            yield response
|
from __future__ import absolute_import, annotations
import json
import logging
import math
import os
from collections import OrderedDict
from typing import Dict, List, Set, Tuple
from ..libs.ndcg import ndcg
from ..models.graph import Graph
from ..models.result import Result
from ..services import utils
logger = logging.getLogger("recap")
config = utils.Config.get_instance()
class Evaluation(object):
    """Calculate and store evaluation measures for a single query.

    The user's ground-truth candidates are fetched automatically from a
    file in ``config["candidates_folder"]``.  The order of the candidates
    is not relevant for the unranked calculations.
    """

    # Ground truth read from the candidates file.
    user_candidates: List[str]
    user_rankings: Dict[str, int]
    # System output: the insertion order of ``system_rankings`` is the rank.
    system_candidates: List[str]
    system_rankings: Dict[str, int]
    # Unranked measures.
    precision: float
    recall: float
    # Ranked measures.
    average_precision: float
    correctness: float
    completeness: float
    ndcg: float

    def __init__(
        self, case_base: Dict[str, Graph], results: List[Result], query: Graph
    ) -> None:
        """Load the ground truth for ``query`` and evaluate ``results``."""
        self._get_candidates(case_base, query.filename)

        self.system_rankings = OrderedDict()
        for i, res in enumerate(results):
            self.system_rankings[res.graph.filename] = i + 1  # ranks are 1-based
        self.system_candidates = list(self.system_rankings.keys())

        self._calculate_metrics(case_base, results)

    def as_dict(self):
        """Return all measures as a nested dict, grouped by ranked/unranked."""
        return {
            "unranked": {
                "Precision": self.precision,
                "Recall": self.recall,
                "F1 Score": self.f_score(1),
                "F2 Score": self.f_score(2),
            },
            "ranked": {
                "Average Precision": self.average_precision,
                "NDCG": self.ndcg,
                "Correctness": self.correctness,
                "Completeness": self.completeness,
            },
        }

    def _get_candidates(self, case_base: Dict[str, Graph], filename: str) -> None:
        """Read the user's candidates and rankings for ``filename``.

        Falls back to an empty ground truth (and logs a warning) when the
        candidates file is missing or malformed.
        """
        filepath = os.path.join(config["candidates_folder"], filename)

        try:
            with open(filepath) as file:
                data = json.load(file)
                self.user_candidates = data["candidates"]
                self.user_rankings = data["rankings"]
        except Exception:
            # Previously this failure was swallowed silently; log it so a
            # missing ground-truth file does not go unnoticed.
            logger.warning("Could not load candidates from '%s'.", filepath)
            self.user_candidates = []
            self.user_rankings = {}

    def _calculate_metrics(
        self, case_base: Dict[str, Graph], results: List[Result]
    ) -> None:
        """Compute precision/recall and trigger all ranked measures."""
        relevant_keys = set(self.user_candidates)
        retrieved_keys = set(self.system_candidates)  # hoisted: build once
        not_relevant_keys = set(case_base.keys()) - relevant_keys

        tp = relevant_keys & retrieved_keys
        fp = not_relevant_keys & retrieved_keys
        fn = relevant_keys - retrieved_keys

        # Guard the denominators: with no retrieved or no relevant items the
        # original raised ZeroDivisionError; define the measures as 0 instead.
        retrieved_count = len(tp) + len(fp)
        relevant_count = len(tp) + len(fn)
        self.precision = len(tp) / retrieved_count if retrieved_count else 0.0
        self.recall = len(tp) / relevant_count if relevant_count else 0.0

        self._average_precision()
        self._correctness_completeness(case_base, results)
        self._ndcg()

    def f_score(self, beta: int):
        """Return the F-beta score, or 0 when precision + recall is 0."""
        if self.precision + self.recall == 0:
            return 0

        return ((1 + math.pow(beta, 2)) * self.precision * self.recall) / (
            math.pow(beta, 2) * self.precision + self.recall
        )

    def _average_precision(self) -> None:
        """Compute the average precision between the two candidate lists.

        https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/average_precision.py
        """
        # Without ground truth the measure is undefined; report 0 instead of
        # raising ZeroDivisionError (happened when the candidates file was
        # absent and ``user_candidates`` was set to []).
        if not self.user_candidates:
            self.average_precision = 0.0
            return

        score = 0.0
        num_hits = 0.0
        seen: Set[str] = set()  # replaces the O(n^2) `not in list[:i]` scan

        for i, result in enumerate(self.system_candidates):
            if result in self.user_candidates and result not in seen:
                num_hits += 1.0
                score += num_hits / (i + 1.0)
            seen.add(result)

        self.average_precision = score / len(self.user_candidates)

    def _correctness_completeness(
        self, case_base: Dict[str, Graph], results: List[Result]
    ) -> None:
        """Compare the user ordering with the system ordering.

        ``correctness`` is the signed fraction of concordant ordered pairs
        among the pairs the system ranked; ``completeness`` is the fraction
        of user-ordered pairs the system ranked at all.  Both default to 1
        when no pair information is available.
        """
        orders = 0
        concordances = 0
        disconcordances = 0

        self.correctness = 1
        self.completeness = 1

        for user_key_1, user_rank_1 in self.user_rankings.items():
            for user_key_2, user_rank_2 in self.user_rankings.items():
                if user_key_1 != user_key_2:
                    system_rank_1 = self.system_rankings.get(user_key_1)
                    system_rank_2 = self.system_rankings.get(user_key_2)

                    if user_rank_1 > user_rank_2:
                        orders += 1

                        if system_rank_1 is not None and system_rank_2 is not None:
                            if system_rank_1 > system_rank_2:
                                concordances += 1
                            elif system_rank_1 < system_rank_2:
                                disconcordances += 1

        if concordances + disconcordances > 0:
            self.correctness = (concordances - disconcordances) / (
                concordances + disconcordances
            )

        if orders > 0:
            self.completeness = (concordances + disconcordances) / orders

    def _ndcg(self) -> None:
        """Compute NDCG by inverting the user ranks into relevance ratings."""
        # Rank 1 (best) becomes the highest rating; unranked results get 0.
        ranking_inv = {
            name: config["candidates_max_rank"] + 1 - rank
            for name, rank in self.user_rankings.items()
        }
        results_ratings = [
            ranking_inv.get(result, 0) for result in self.system_rankings.keys()
        ]

        # NOTE(review): behavior of ``ndcg`` for an empty ranking depends on
        # the library implementation -- confirm it handles len == 0.
        self.ndcg = ndcg(results_ratings, len(results_ratings))
|
# -*- coding: utf-8 -*-
import re
# Compiled once at import time: a path segment consisting solely of digits,
# e.g. the '/123/' in '/users/123/edit'.
_digit_segment = re.compile(r'(\/\d+?\/)')


def path_sub(url):
    """Replace each '/<digits>/' path segment in *url* with '/modify/'.

    ``re.sub`` returns the string unchanged when there is no match, so the
    original ``re.search`` pre-check was redundant and has been removed.
    """
    return _digit_segment.sub('/modify/', url)
# Patterns are compiled once at import time instead of calling re.compile
# inside a lambda on every invocation.  Each public name keeps the original
# interface: a callable taking a string and returning a match object or None.
_digits_pat = re.compile(r'^\d+$')
_english_pat = re.compile(r'^[\.\_\-A-Za-z0-9_]+$')
_english2_pat = re.compile(r'^[A-Za-z_]+$')
_email_pat = re.compile(r'^(\w|[-+=.])+@\w+([-.]\w+)*\.(\w+)$')
_ip_pat = re.compile(r'^(25[0-5]|2[0-4]\d|[01]?\d\d?)\.(25[0-5]|2[0-4]\d|[01]?\d\d?)\.(25[0-5]|2[0-4]\d|[01]?\d\d?)\.(25[0-5]|2[0-4]\d|[01]?\d\d?)$')
_ipaddr_pat = re.compile(r'^(25[0-5]|2[0-4]\d|[01]?\d\d?)\.(25[0-5]|2[0-4]\d|[01]?\d\d?)\.(25[0-5]|2[0-4]\d|[01]?\d\d?)\.(25[0-5]|2[0-4]\d|[01]?\d\d?)\/(\d+)$')
_tel_pat = re.compile(r'^1[3456789]\d{9}$')
_upper_pat = re.compile(r'^[A-Z]+$')
_lower_pat = re.compile(r'^[a-z]+$')
_any_upper_pat = re.compile(r'[A-Z]+')
_any_lower_pat = re.compile(r'[a-z]+')
_any_digits_pat = re.compile(r'\d+')

# Anchored (full-string) validators: digits only, filename-safe ASCII,
# letters/underscore, e-mail address, dotted-quad IP, IP/prefix (CIDR-like),
# mainland-China mobile number, all upper-case, all lower-case.
pure_digits_regex = _digits_pat.match
pure_english_regex = _english_pat.match
pure_english_regex2 = _english2_pat.match
pure_email_regex = _email_pat.match
pure_ip_regex = _ip_pat.match
pure_ipaddr_regex = _ipaddr_pat.match
pure_tel_regex = _tel_pat.match
pure_upper_regex = _upper_pat.match
pure_lower_regex = _lower_pat.match

# Unanchored checks: does the string contain at least one upper-case
# letter / lower-case letter / digit anywhere (original used .search).
pure_upper_regex2 = _any_upper_pat.search
pure_lower_regex2 = _any_lower_pat.search
pure_digits_regex2 = _any_digits_pat.search
if __name__ == "__main__":
    # Demo of the mobile-number validator.  print() calls work on both
    # Python 2 and 3; the original used Python 2 print statements, which
    # are a SyntaxError under Python 3.
    print(pure_tel_regex("19829799823"))
    print(pure_tel_regex("19929799823"))
|
"""
A simple Configuration file for training and/or predicting algae cells.
"""
import torch
import os
""" base path of the dataset """
ROOT = '/content/drive/MyDrive/algae-dataset'
""" define the path to the tiles and annotations dataset """
IMAGE_DATASET_PATH = os.path.join(ROOT, "tiles")
MASK_DATASET_PATH = os.path.join(ROOT, "annotations")
""" determine the algae species that we need """
ALGAE_SPECIES = ['Pp', 'Cr', 'Cv']
""" define the train/test split """
TEST_SPLIT = 0.2
""" determine the device to be used for training and evaluation """
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
""" determine if we will be pinning memory during data loading """
PIN_MEMORY = True if DEVICE == "cuda" else False
""" define the number of channels in the input, number of classes,
and number of levels in the U-Net model """
NUM_CHANNELS = 1
NUM_CLASSES = 1
NUM_LEVELS = 3
""" initialize learning rate, number of epochs to train for, and the
batch size """
INIT_LR = 0.001
NUM_EPOCHS = 40
BATCH_SIZE = 16
""" define the input image dimensions """
INPUT_IMAGE_WIDTH = 512
INPUT_IMAGE_HEIGHT = 512
""" define threshold to filter weak predictions """
THRESHOLD = 0.5
""" Early stopping patience """
PATIENCE = 5
""" define the path to the base output directory """
BASE_OUTPUT = os.path.join(ROOT, "output")
if not os.path.exists(BASE_OUTPUT):
os.mkdir(BASE_OUTPUT)
""" define the path to the output serialized model, model training
plot, and testing image paths """
MODEL_PATH = os.path.join(BASE_OUTPUT, "unet.ptm")
TEST_PATHS = os.path.sep.join([BASE_OUTPUT, "test_paths.txt"])
|
import firebase_admin
from firebase_admin import credentials, db
import math

# Authenticate against Firebase with the service-account key file and point
# the Admin SDK at the realtime database.
cred = credentials.Certificate("test-ddf2c-firebase-adminsdk-5tva7-6b546e57b4.json")
firebase_admin.initialize_app(cred, {
    'databaseURL' : 'https://test-ddf2c.firebaseio.com/'
})
root = db.reference()

# Query point entered by the user.
lat = float(input("Enter Lat: "))
longi = float(input("Enter Long: "))

people = db.reference('people').get()
if people is not None:
    for p in people.values():
        # NOTE(review): this is a straight-line distance in degree space,
        # not a geodesic distance -- confirm that is intended.
        distance = math.hypot(lat - float(p['lat']), longi - float(p['long']))
        # Fixed typo in the user-facing message: "degress" -> "degrees".
        print("You are {distance} degrees from {name}.".format(distance = distance, name = p['name']))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.