input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
self.config.version:
warn_msg = (
"Found a different version {other_version} of dataset {name} in "
"cache_dir {cache_dir}. Using currently defined version "
"{cur_version}.".format(
other_version=str(other_version),
name=self.name,
cache_dir=self._cache_dir_root,
cur_version=str(self.config.version),
)
)
logger.warning(warn_msg)
return version_data_dir
@abc.abstractmethod
def _info(self) -> DatasetInfo:
    """Build the ``DatasetInfo`` object describing this dataset.

    Note: invoked exactly once; the returned object is cached and served
    by every subsequent ``.info()`` access.

    Returns:
        DatasetInfo: the dataset metadata.
    """
    raise NotImplementedError
@classmethod
def get_imported_module_dir(cls):
"""Return the path of the module of this class or subclass."""
return os.path.dirname(inspect.getfile(inspect.getmodule(cls)))
def download_and_prepare(
    self,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[GenerateMode] = None,
    ignore_verifications: bool = False,
    try_from_hf_gcs: bool = True,
    dl_manager: Optional[DownloadManager] = None,
    **download_and_prepare_kwargs,
):
    """Downloads and prepares dataset for reading.

    Args:
        download_config (Optional ``nlp.DownloadConfig``): specific download configuration parameters.
        download_mode (Optional `nlp.GenerateMode`): select the download/generate mode - Default to REUSE_DATASET_IF_EXISTS
        ignore_verifications (bool): Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...)
        try_from_hf_gcs (bool): If True, it will try to download the already prepared dataset from the Hf google cloud storage
        dl_manager (Optional ``nlp.DownloadManager``): specific Download Manger to use
    """
    download_mode = GenerateMode(download_mode or GenerateMode.REUSE_DATASET_IF_EXISTS)
    verify_infos = not ignore_verifications

    # Build a default download manager when the caller did not supply one.
    if dl_manager is None:
        if download_config is None:
            download_config = DownloadConfig()
        download_config.cache_dir = os.path.join(self._cache_dir_root, "downloads")
        # FIX: was the bare name `FORCE_REDOWNLOAD`; qualify it on the GenerateMode enum.
        download_config.force_download = download_mode == GenerateMode.FORCE_REDOWNLOAD
        dl_manager = DownloadManager(
            dataset_name=self.name, download_config=download_config, data_dir=self.config.data_dir
        )

    data_exists = os.path.exists(self._cache_dir)
    if data_exists and download_mode == GenerateMode.REUSE_DATASET_IF_EXISTS:
        logger.info("Reusing dataset %s (%s)", self.name, self._cache_dir)
        # Post-processing resources can still be missing even when the data is cached.
        self.download_post_processing_resources(dl_manager)
        return

    # Currently it's not possible to overwrite the data because it would
    # conflict with versioning: If the last version has already been generated,
    # it will always be reloaded and cache_dir will be set at construction.
    if data_exists and download_mode != GenerateMode.REUSE_CACHE_IF_EXISTS:
        raise ValueError(
            "Trying to overwrite an existing dataset {} at {}. A dataset with "
            "the same version {} already exists. If the dataset has changed, "
            "please update the version number.".format(self.name, self._cache_dir, self.config.version)
        )

    logger.info("Generating dataset %s (%s)", self.name, self._cache_dir)
    if not is_remote_url(self._cache_dir):  # if cache dir is local, check for available space
        os.makedirs(self._cache_dir_root, exist_ok=True)
        if not utils.has_sufficient_disk_space(self.info.size_in_bytes or 0, directory=self._cache_dir_root):
            raise IOError(
                "Not enough disk space. Needed: {} (download: {}, generated: {}, post-processed: {})".format(
                    utils.size_str(self.info.size_in_bytes or 0),
                    utils.size_str(self.info.download_size or 0),
                    utils.size_str(self.info.dataset_size or 0),
                    utils.size_str(self.info.post_processing_size or 0),
                )
            )

    @contextlib.contextmanager
    def incomplete_dir(dirname):
        """Create temporary dir for dirname and rename on exit."""
        if is_remote_url(dirname):
            yield dirname
        else:
            tmp_dir = dirname + ".incomplete"
            os.makedirs(tmp_dir)
            try:
                yield tmp_dir
                # Success path: swap the finished tmp dir into place.
                if os.path.isdir(dirname):
                    shutil.rmtree(dirname)
                os.rename(tmp_dir, dirname)
            finally:
                # Failure path (or post-rename): drop any leftover incomplete dir.
                if os.path.exists(tmp_dir):
                    shutil.rmtree(tmp_dir)

    # Print is intentional: we want this to always go to stdout so user has
    # information needed to cancel download/preparation if needed.
    # This comes right before the progress bar.
    # FIX: the old f-string was missing ", " before "total:" (and the sizes were
    # printed run together: "...post-processed: 5 MBtotal: ...").
    print(
        f"Downloading and preparing dataset {self.info.builder_name}/{self.info.config_name} "
        f"(download: {utils.size_str(self.info.download_size)}, generated: {utils.size_str(self.info.dataset_size)}, post-processed: {utils.size_str(self.info.post_processing_size)}, "
        f"total: {utils.size_str(self.info.size_in_bytes)}) to {self._cache_dir}..."
    )

    if self.manual_download_instructions is not None:
        # NOTE(review): `assert` is stripped under `python -O`; an explicit raise would
        # be more robust, but callers may rely on AssertionError here — left unchanged.
        assert (
            dl_manager.manual_dir is not None
        ), "The dataset {} with config {} requires manual data. \n Please follow the manual download instructions: {}. \n Manual data can be loaded with `nlp.load_dataset({}, data_dir='<path/to/manual/data>')".format(
            self.name, self.config.name, self.manual_download_instructions, self.name
        )

    # Create a tmp dir and rename to self._cache_dir on successful exit.
    with incomplete_dir(self._cache_dir) as tmp_data_dir:
        # Temporarily assign _cache_dir to tmp_data_dir to avoid having to forward
        # it to every sub function.
        with utils.temporary_assignment(self, "_cache_dir", tmp_data_dir):
            # Try to download the already prepared dataset files
            downloaded_from_gcs = False
            if try_from_hf_gcs:
                try:
                    self._download_prepared_from_hf_gcs()
                    downloaded_from_gcs = True
                except (DatasetNotOnHfGcs, MissingFilesOnHfGcs):
                    logger.info("Dataset not on Hf google storage. Downloading and preparing it from source")
            if not downloaded_from_gcs:
                self._download_and_prepare(
                    dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
                )
            # Sync info
            self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())
            self.info.download_checksums = dl_manager.get_recorded_sizes_checksums()
            self.info.size_in_bytes = self.info.dataset_size + self.info.download_size
            # Save info
            self._save_info()

    # Download post processing resources
    self.download_post_processing_resources(dl_manager)

    print(
        f"Dataset {self.name} downloaded and prepared to {self._cache_dir}. "
        f"Subsequent calls will reuse this data."
    )
def _download_prepared_from_hf_gcs(self):
    """Download already-prepared arrow files (and post-processing resources)
    for this dataset from the HF GCS mirror into the cache directory.

    Raises:
        DatasetNotOnHfGcs / MissingFilesOnHfGcs: propagated from the reader
            when the dataset (or some of its files) is not mirrored.
    """
    relative_data_dir = self._relative_data_dir(with_version=True, with_hash=False)
    reader = ArrowReader(self._cache_dir, self.info)
    # use reader instructions to download the right files
    reader.download_from_hf_gcs(self._cache_dir, relative_data_dir)
    downloaded_info = DatasetInfo.from_directory(self._cache_dir)
    self.info.update(downloaded_info)

    # download post processing resources
    remote_cache_dir = os.path.join(HF_GCP_BASE_URL, relative_data_dir)
    for split in self.info.splits:
        for resource_file_name in self._post_processing_resources(split).values():
            if "/" in resource_file_name:
                raise ValueError("Resources shouldn't be in a sub-directory: {}".format(resource_file_name))
            try:
                resource_path = utils.cached_path(os.path.join(remote_cache_dir, resource_file_name))
                shutil.move(resource_path, os.path.join(self._cache_dir, resource_file_name))
            except ConnectionError:
                # Best-effort: a missing resource is logged, not fatal.
                # FIX: corrected "resourse" typo in the log message.
                logger.info(
                    "Couldn't download resource file {} from Hf google storage.".format(resource_file_name)
                )
    logger.info("Dataset downloaded from Hf google storage.")
def _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs):
    """Downloads and prepares dataset for reading.

    This is the internal implementation to overwrite called when user calls
    `download_and_prepare`. It should download all required data and generate
    the pre-processed datasets files.

    Args:
        dl_manager: (DownloadManager) `DownloadManager` used to download and cache
            data.
        verify_infos: bool, if True, perform the checksums and size tests.
            (FIX: the previous docstring said the opposite of what the code does.)
        prepare_split_kwargs: Additional options forwarded to `_prepare_split`.
    """
    # Generating data for all splits
    split_dict = SplitDict(dataset_name=self.name)
    split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs)
    split_generators = self._split_generators(dl_manager, **split_generators_kwargs)

    # Checksums verification
    if verify_infos:
        verify_checksums(
            self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), "dataset source files"
        )

    for split_generator in split_generators:
        if str(split_generator.split_info.name).lower() == "all":
            raise ValueError(
                "`all` is a special split keyword corresponding to the "
                "union of all splits, so cannot be used as key in "
                "._split_generator()."
            )
        logger.info("Generating split %s", split_generator.split_info.name)
        split_dict.add(split_generator.split_info)

        try:
            # Prepare split will record examples associated to the split
            self._prepare_split(split_generator, **prepare_split_kwargs)
        except OSError as e:
            # FIX: chain the original exception so the real cause is preserved.
            raise OSError("Cannot find data file. " + (self.manual_download_instructions or "")) from e

    if verify_infos:
        verify_splits(self.info.splits, split_dict)

    # Update the info object with the splits.
    self.info.splits = split_dict
    self.info.download_size = dl_manager.downloaded_size
def download_post_processing_resources(self, dl_manager):
    """Fetch every post-processing resource file that is not already cached."""
    for split in self.info.splits:
        for resource_name, resource_file_name in self._post_processing_resources(split).items():
            if "/" in resource_file_name:
                raise ValueError("Resources shouldn't be in a sub-directory: {}".format(resource_file_name))
            resource_path = os.path.join(self._cache_dir, resource_file_name)
            if os.path.exists(resource_path):
                continue  # already present in the cache
            downloaded = self._download_post_processing_resources(split, resource_name, dl_manager)
            if not downloaded:
                continue  # nothing to fetch for this resource
            logger.info(
                "Downloaded post-processing resource {} as {}".format(resource_name, resource_file_name)
            )
            shutil.move(downloaded, resource_path)
def _save_info(self):
    """Persist ``self.info`` (as JSON files) inside the dataset cache directory."""
    self.info.write_to_directory(self._cache_dir)
def _save_infos(self):
    """Write a ``DatasetInfosDict`` for the current config next to the dataset module."""
    DatasetInfosDict(**{self.config.name: self.info}).write_to_directory(self.get_imported_module_dir())
def _make_split_generators_kwargs(self, prepare_split_kwargs):
"""Get kwargs for `self._split_generators()` from `prepare_split_kwargs`."""
del prepare_split_kwargs
return {}
def as_dataset(
    self, split: Optional[Split] = None, run_post_process=True, ignore_verifications=False
) -> Union[Dataset, DatasetDict]:
    """ Return a Dataset for the specified split.

    Args:
        split (Optional ``nlp.Split``): which split to load; when None every
            available split is built and returned in a ``DatasetDict`` keyed
            by split name.
        run_post_process (bool): whether each built split is run through
            `_post_process` with its cached resource files.
        ignore_verifications (bool): skip checksum verification of
            post-processing resources.

    Returns:
        ``Dataset`` for a single split, or ``DatasetDict`` when several
        splits are requested.

    Raises:
        AssertionError: if the cached data directory does not exist, i.e.
            ``download_and_prepare()`` has not been run yet.
    """
    if not os.path.exists(self._cache_dir):
        raise AssertionError(
            (
                "Dataset %s: could not find data in %s. Please make sure to call "
                "builder.download_and_prepare(), or pass download=True to "
                "nlp.load_dataset() before trying to access the Dataset object."
            )
            % (self.name, self._cache_dir_root)
        )

    logger.info(
        "Constructing Dataset for split %s, from %s", split or ", ".join(self.info.splits), self._cache_dir
    )

    # By default, return all splits
    if split is None:
        split = {s: s for s in self.info.splits}

    # Create a dataset for each of the given splits
    datasets = utils.map_nested(
        partial(
            self._build_single_dataset,
            run_post_process=run_post_process,
            ignore_verifications=ignore_verifications,
        ),
        split,
        map_tuple=True,
    )
    # map_nested yields a plain dict when several splits were requested; wrap
    # it so callers get the DatasetDict convenience type.
    if isinstance(datasets, dict):
        datasets = DatasetDict(datasets)
    return datasets
def _build_single_dataset(self, split: Union[str, Split], run_post_process: bool, ignore_verifications: bool):
    """as_dataset for a single split.

    Builds the base ``Dataset``, optionally runs `_post_process` with the
    cached resource files, verifies/records the resource checksums, and keeps
    the info object's size fields in sync.

    Args:
        split: split name (str) or ``Split`` to build.
        run_post_process: whether to post-process the built dataset.
        ignore_verifications: skip checksum verification of the
            post-processing resources.

    Returns:
        The built (and possibly post-processed) ``Dataset``.
    """
    verify_infos = not ignore_verifications
    if isinstance(split, str):
        split = Split(split)

    # Build base dataset
    ds = self._as_dataset(split=split,)
    if run_post_process:
        # Resource files must live directly inside the cache dir.
        for resource_file_name in self._post_processing_resources(split).values():
            if "/" in resource_file_name:
                raise ValueError("Resources shouldn't be in a sub-directory: {}".format(resource_file_name))
        resources_paths = {
            resource_name: os.path.join(self._cache_dir, resource_file_name)
            for resource_name, resource_file_name in self._post_processing_resources(split).items()
        }
        ds = self._post_process(ds, resources_paths)
        if self.info.post_processed is not None and self.info.post_processed.features is not None:
            if self.info.post_processed.features.type != ds.features.type:
                raise ValueError(
                    "Post-processed features info don't match the dataset:\nGot\n{}\nbut expected something like\n{}".format(
                        self.info.post_processed.features, ds.features
                    )
                )
            else:
                ds.info.features = self.info.post_processed.features

        # Record the size/checksum of each produced resource file.
        recorded_checksums = {}
        for resource_name, resource_path in resources_paths.items():
            size_checksum = get_size_checksum_dict(resource_path)
            recorded_checksums[resource_name] = size_checksum
        if verify_infos:
            if self.info.post_processed is None or self.info.post_processed.resources_checksums is None:
                expected_checksums = None
            else:
                expected_checksums = self.info.post_processed.resources_checksums.get(split)
            verify_checksums(expected_checksums, recorded_checksums, "post processing resources")
        if self.info.post_processed is None:
            self.info.post_processed = PostProcessedInfo()
        if self.info.post_processed.resources_checksums is None:
            self.info.post_processed.resources_checksums = {}
        self.info.post_processed.resources_checksums[str(split)] = recorded_checksums
        # Aggregate resource sizes across every split recorded so far.
        self.info.post_processing_size = sum(
            checksums_dict["num_bytes"]
            for split_checksums_dicts in self.info.post_processed.resources_checksums.values()
            for checksums_dict in split_checksums_dicts.values()
        )
        if self.info.dataset_size is not None and self.info.download_size is not None:
            self.info.size_in_bytes = (
                self.info.dataset_size + self.info.download_size + self.info.post_processing_size
            )
        self._save_info()
    return ds
def _as_dataset(self, split: Split = Split.TRAIN) -> Dataset:
"""Constructs a `Dataset`.
This is the internal implementation to overwrite called when user calls
`as_dataset`. It should read the pre-processed datasets files and | |
update_mutations)
return res['mutate_response'][0]['id']
@utils.enforce_id_param
def delete_playlist(self, playlist_id):
    """Deletes a playlist and returns its id.

    :param playlist_id: the id to delete.
    """
    # TODO accept multiple?

    call = mobileclient.BatchMutatePlaylists
    mutations = call.build_playlist_deletes([playlist_id])
    response = self._make_call(call, mutations)
    return response['mutate_response'][0]['id']
def get_all_user_playlist_contents(self):
    """
    Retrieves the contents of *all* user-created playlists
    -- the Mobileclient does not support retrieving
    only the contents of one
    playlist.

    This will not return results for public playlists
    that the user is subscribed to; use :func:`get_shared_playlist_contents`
    instead.

    The same structure as :func:`get_all_playlists`
    will be returned, but
    with the addition of a ``'tracks'`` key in each dict
    set to a list of properly-ordered playlist entry dicts.

    Here is an example playlist entry::

        {
            'kind': 'sj#playlistEntry',
            'deleted': False,
            'trackId': '2bb0ab1c-ce1a-3c0f-9217-a06da207b7a7',
            'lastModifiedTimestamp': '1325285553655027',
            'playlistId': '3d72c9b5-baad-4ff7-815d-cdef717e5d61',
            'absolutePosition': '01729382256910287871',  # denotes playlist ordering
            'source': '1',  # ??
            'creationTimestamp': '1325285553655027',
            'id': 'c9f1aff5-f93d-4b98-b13a-429cc7972fea'
        }
    """
    all_entries = self._get_all_items(mobileclient.ListPlaylistEntries,
                                      incremental=True, include_deleted=False,
                                      updated_after=None)

    # PERF FIX: index the entries by playlist id in one pass. The old code
    # rescanned the entire flat entry list once per playlist, which was
    # O(playlists * entries) on large libraries. Relative entry order within
    # each playlist is preserved, and the subsequent sort is unchanged, so
    # results are identical.
    entries_by_playlist = {}
    for chunk in all_entries:
        for entry in chunk:
            entries_by_playlist.setdefault(entry['playlistId'], []).append(entry)

    user_playlists = []
    for chunk in self.get_all_playlists(incremental=True):
        for playlist in chunk:
            if playlist.get('type') == 'SHARED':
                # Subscribed playlists don't carry entries; fetch them separately.
                playlist['tracks'] = self.get_shared_playlist_contents(playlist['shareToken'])
            else:
                entries = entries_by_playlist.get(playlist['id'], [])
                entries.sort(key=itemgetter('absolutePosition'))
                playlist['tracks'] = entries
            user_playlists.append(playlist)

    return user_playlists
def get_shared_playlist_contents(self, share_token):
    """
    Retrieves the contents of a public All Access playlist.

    :param share_token: from ``playlist['shareToken']``, or a playlist share
      url (``https://play.google.com/music/playlist/<token>``).
      Note that tokens from urls will need to be url-decoded,
      eg ``AM...%3D%3D`` becomes ``AM...==``.

    For example, to retrieve the contents of a playlist that the user is
    subscribed to::

        subscribed_to = [p for p in mc.get_all_playlists() if p.get('type') == 'SHARED']
        share_tok = subscribed_to[0]['shareToken']
        tracks = mc.get_shared_playlist_contents(share_tok)

    The user need not be subscribed to a playlist to list its tracks.

    Returns a list of playlist entries
    with structure the same as those
    returned by :func:`get_all_user_playlist_contents`,
    but without the ``'clientId'`` or ``'playlistId'`` keys.
    """
    response = self._make_call(mobileclient.ListSharedPlaylistEntries,
                               updated_after=None, share_token=share_token)
    shared_entries = response['entries'][0]['playlistEntry']
    return sorted(shared_entries, key=itemgetter('absolutePosition'))
@utils.accept_singleton(basestring, 2)
@utils.enforce_id_param
@utils.enforce_ids_param(position=2)
@utils.empty_arg_shortcircuit(position=2)
def add_songs_to_playlist(self, playlist_id, song_ids):
    """Appends songs to the end of a playlist.

    Returns a list of playlist entry ids that were added.

    :param playlist_id: the id of the playlist to add to.
    :param song_ids: a list of song ids, or a single song id.

    Playlists have a maximum size of 1000 songs.
    Calls may fail before that point (presumably) due to
    an error on Google's end (see `#239
    <https://github.com/simon-weber/Unofficial-Google-Music-API/issues/239>`__).
    """
    call = mobileclient.BatchMutatePlaylistEntries
    mutations = call.build_plentry_adds(playlist_id, song_ids)
    response = self._make_call(call, mutations)
    return [entry['id'] for entry in response['mutate_response']]
@utils.accept_singleton(basestring, 1)
@utils.enforce_ids_param(position=1)
@utils.empty_arg_shortcircuit(position=1)
def remove_entries_from_playlist(self, entry_ids):
    """Removes specific entries from a playlist.

    Returns a list of entry ids that were removed.

    :param entry_ids: a list of entry ids, or a single entry id.
    """
    call = mobileclient.BatchMutatePlaylistEntries
    mutations = call.build_plentry_deletes(entry_ids)
    response = self._make_call(call, mutations)
    return [entry['id'] for entry in response['mutate_response']]
def reorder_playlist_entry(self, entry, to_follow_entry=None, to_precede_entry=None):
    """Reorders a single entry in a playlist and returns its id.

    Read ``reorder_playlist_entry(foo, bar, gaz)`` as
    "reorder playlist entry *foo* to follow entry *bar*
    and precede entry *gaz*."

    :param entry: the playlist entry to move.
    :param to_follow_entry: the playlist entry
      that will come before *entry* in the resulting playlist,
      or None if *entry* is to be the first entry in the playlist.
    :param to_precede_entry: the playlist entry
      that will come after *entry* in the resulting playlist
      or None if *entry* is to be the last entry in the playlist.

    ``reorder_playlist_entry(foo)`` is invalid and will raise ValueError;
    provide at least one of *to_follow_entry* or *to_precede_entry*.

    Leaving *to_follow_entry* or *to_precede_entry* as None when
    *entry* is not to be the first or last entry in the playlist
    is undefined.

    All params are dicts returned by
    :func:`get_all_user_playlist_contents` or
    :func:`get_shared_playlist_contents`.
    """
    if to_follow_entry is None and to_precede_entry is None:
        raise ValueError('either to_follow_entry or to_precede_entry must be provided')

    call = mobileclient.BatchMutatePlaylistEntries
    preceding_cid = to_follow_entry['clientId'] if to_follow_entry else None
    following_cid = to_precede_entry['clientId'] if to_precede_entry else None
    mutation = call.build_plentry_reorder(entry, preceding_cid, following_cid)
    response = self._make_call(call, [mutation])
    return [e['id'] for e in response['mutate_response']]
# WIP, see issue #179
# def reorder_playlist(self, reordered_playlist, orig_playlist=None):
# """TODO"""
# if not reordered_playlist['tracks']:
# #TODO what to return?
# return
# if orig_playlist is None:
# #TODO get pl from server
# pass
# if len(reordered_playlist['tracks']) != len(orig_playlist['tracks']):
# raise ValueError('the original playlist does not have the same number of'
# ' tracks as the reordered playlist')
# # find the minimum number of mutations to match the orig playlist
# orig_tracks = orig_playlist['tracks']
# orig_tracks_id_to_idx = dict([(t['id'], i) for (i, t) in enumerate(orig_tracks)])
# re_tracks = reordered_playlist['tracks']
# re_tracks_id_to_idx = dict([(t['id'], i) for (i, t) in enumerate(re_tracks)])
# translated_re_tracks = [orig_tracks_id_to_idx[t['id']] for t in re_tracks]
# lis = utils.longest_increasing_subseq(translated_re_tracks)
# idx_to_move = set(range(len(orig_tracks))) - set(lis)
# idx_pos_pairs = [(i, re_tracks_id_to_idx[orig_tracks[i]['id']])
# for i in idx_to_move]
# #TODO build out mutations
# return idx_pos_pairs
# @staticmethod
# def _create_ple_reorder_mutations(tracks, from_to_idx_pairs):
# """
# Return a list of mutations.
# :param tracks: orig_playlist['tracks']
# :param from_to_idx_pairs: [(from_index, to_index)]
# """
# for from_idx, to_idx in sorted(key=itemgetter(1)
# playlist_len = len(self.plentry_ids)
# for from_pos, to_pos in [pair for pair in
# itertools.product(range(playlist_len), repeat=2)
# if pair[0] < pair[1]]:
# pl = self.mc_get_playlist_songs(self.playlist_id)
# from_e = pl[from_pos]
# e_before_new_pos, e_after_new_pos = None, None
# if to_pos - 1 >= 0:
# e_before_new_pos = pl[to_pos]
# if to_pos + 1 < playlist_len:
# e_after_new_pos = pl[to_pos + 1]
# self.mc.reorder_playlist_entry(from_e,
# to_follow_entry=e_before_new_pos,
# to_precede_entry=e_after_new_pos)
# self._mc_assert_ple_position(from_e, to_pos)
# if e_before_new_pos:
# self._mc_assert_ple_position(e_before_new_pos, to_pos - 1)
# if e_after_new_pos:
# self._mc_assert_ple_position(e_after_new_pos, to_pos + 1)
def get_registered_devices(self):
    """
    Returns a list of dictionaries representing devices associated with the account.

    Performing the :class:`Musicmanager` OAuth flow will register a device
    of type ``'DESKTOP_APP'``.

    Installing the Android or iOS Google Music app and logging into it will
    register a device of type ``'ANDROID'`` or ``'IOS'`` respectively, which is
    required for streaming with the :class:`Mobileclient`.

    Here is an example response::

        [
            {
                u'kind': u'sj#devicemanagementinfo',
                u'friendlyName': u'my-hostname',
                u'id': u'AA:BB:CC:11:22:33',
                u'lastAccessedTimeMs': u'1394138679694',
                u'type': u'DESKTOP_APP'
            },
            {
                u"kind": u"sj#devicemanagementinfo",
                u'friendlyName': u'Nexus 7',
                u'id': u'0x00112233aabbccdd',  # remove 0x when streaming
                u'lastAccessedTimeMs': u'1344808742774',
                u'type': u'ANDROID',
                u'smartPhone': True
            },
            {
                u"kind": u"sj#devicemanagementinfo",
                u'friendlyName': u'iPhone 6',
                u'id': u'ios:01234567-0123-0123-0123-0123456789AB',
                u'lastAccessedTimeMs': 1394138679694,
                u'type': u'IOS',
                u'smartPhone': True
            },
            {
                u'kind': u'sj#devicemanagementinfo',
                u'friendlyName': u'Google Play Music for Chrome on Windows',
                u'id': u'rt2qfkh0qjhos4bxrgc0oae...',  # 64 characters, alphanumeric
                u'lastAccessedTimeMs': u'1425602805052',
                u'type': u'DESKTOP_APP'
            },
        ]
    """
    res = self._make_call(mobileclient.GetDeviceManagementInfo)
    # The 'data' envelope is omitted entirely when no devices are registered.
    return res['data']['items'] if 'data' in res else []
def get_promoted_songs(self):
    """Returns a list of dictionaries that each represent a track.

    Only All Access tracks will be returned.

    Promoted tracks are determined in an unknown fashion,
    but positively-rated library tracks are common.

    See :func:`get_track_info` for the format of a track dictionary.
    """
    return self._get_all_items(
        mobileclient.ListPromotedTracks,
        incremental=False,
        include_deleted=False,
        updated_after=None,
    )
def create_station(self, name,
                   track_id=None, artist_id=None, album_id=None,
                   genre_id=None):
    """Creates an All Access radio station and returns its id.

    :param name: the name of the station to create
    :param \*_id: the id of an item to seed the station from.

    Exactly one of these params must be provided, or ValueError
    will be raised.
    """
    # TODO could expose include_tracks

    seed = {}
    if track_id is not None:
        # All Access track ids start with 'T'; locker tracks use a different key.
        if track_id[0] == 'T':
            seed['trackId'] = track_id
            seed['seedType'] = 2
        else:
            seed['trackLockerId'] = track_id
            seed['seedType'] = 1
    if artist_id is not None:
        seed['artistId'] = artist_id
        seed['seedType'] = 3
    if album_id is not None:
        seed['albumId'] = album_id
        seed['seedType'] = 4
    if genre_id is not None:
        seed['genreId'] = genre_id
        seed['seedType'] = 5

    # Each seed branch stores exactly two keys ('seedType' plus one id key),
    # so a valid call leaves len(seed) == 2; a second seed can only grow it.
    # FIX: the old check (len(seed) > 2) silently accepted *zero* seeds,
    # contradicting the documented "exactly one" contract.
    if len(seed) != 2:
        raise ValueError('exactly one {track,artist,album,genre}_id must be provided')

    mutate_call = mobileclient.BatchMutateStations
    add_mutation = mutate_call.build_add(name, seed, include_tracks=False, num_tracks=0)
    res = self._make_call(mutate_call, [add_mutation])
    return res['mutate_response'][0]['id']
@utils.accept_singleton(basestring)
@utils.enforce_ids_param
@utils.empty_arg_shortcircuit
def delete_stations(self, station_ids):
    """Deletes All Access radio stations and returns their ids.

    :param station_ids: a single id, or a list of ids to delete
    """
    call = mobileclient.BatchMutateStations
    mutations = call.build_deletes(station_ids)
    response = self._make_call(call, mutations)
    return [station['id'] for station in response['mutate_response']]
def get_all_stations(self, incremental=False, include_deleted=False, updated_after=None):
"""Returns a list of dictionaries | |
import pickle
from collections import defaultdict, namedtuple
import numpy as np
import argparse
import os
import model.config as config
import preprocessing.util as util
from termcolor import colored
import tensorflow as tf
class VocabularyCounter(object):
    """counts the frequency of each word and each character in the corpus. With each
    file that it processes it increases the counters. So one frequency vocab for all the files
    that it processes."""

    def __init__(self, lowercase_emb=False):
        """
        Args:
            lowercase_emb: if True then we lowercase the word for counting of
                frequencies and hence for finding the pretrained embedding.
        """
        # FIX: the explanation above used to sit *after* the model load as a
        # bare string statement, where it was a no-op rather than a docstring.
        import gensim
        self.model = gensim.models.KeyedVectors.load_word2vec_format(
            config.base_folder+"data/basic_data/wordEmbeddings/Word2Vec/GoogleNews-vectors-negative300.bin", binary=True)
        self.word_freq = defaultdict(int)
        self.char_freq = defaultdict(int)  # how many times each character is encountered
        self.lowercase_emb = lowercase_emb
        self.not_in_word2vec_cnt = 0  # tokens skipped because absent from word2vec
        self.all_words_cnt = 0  # total tokens considered

    def add(self, filepath):
        """the file must be in the new dataset format."""
        with open(filepath) as fin:
            for line in fin:
                # Skip structural markers (document/mention boundaries, paragraph breaks).
                if line.startswith("DOCSTART_") or line.startswith("DOCEND") or\
                        line.startswith("MMSTART_") or line.startswith("MMEND") or \
                        line.startswith("*NL*"):
                    continue
                line = line.rstrip()  # omit the '\n' character
                word = line.lower() if self.lowercase_emb else line
                self.all_words_cnt += 1
                if word not in self.model:
                    self.not_in_word2vec_cnt += 1
                else:
                    self.word_freq[word] += 1
                # Character counts always use the original (non-lowercased) token.
                for c in line:
                    self.char_freq[c] += 1

    def print_statistics(self, word_edges=None,
                         char_edges=None):
        """Print some statistics about word and char frequency."""
        if word_edges is None:
            word_edges = [1, 2, 3, 6, 11, 21, 31, 51, 76, 101, 201, np.inf]
        if char_edges is None:
            char_edges = [1, 6, 11, 21, 51, 101, 201, 501, 1001, 2001, np.inf]
        print("not_in_word2vec_cnt = ", self.not_in_word2vec_cnt)
        print("all_words_cnt = ", self.all_words_cnt)
        print("some frequency statistics. The bins are [...) ")
        for d, name, edges in zip([self.word_freq, self.char_freq], ["word", "character"], [word_edges, char_edges]):
            hist_values, _ = np.histogram(list(d.values()), edges)
            cum_sum = np.cumsum(hist_values[::-1])
            print(name, " frequency histogram, edges: ", edges)
            print("absolute values: ", hist_values)
            print("absolute cumulative (right to left): ", cum_sum[::-1])
            print("probabilites cumulative (right to left):", (cum_sum / np.sum(hist_values))[::-1])

    def serialize(self, folder=None, name="vocab_freq.pickle"):
        """Pickle (word_freq, char_freq) to folder/name (defaults to the vocabulary dir)."""
        if folder is None:
            folder = config.base_folder+"data/vocabulary/"
        if not os.path.exists(folder):
            os.makedirs(folder)
        with open(folder+name, 'wb') as handle:
            pickle.dump((self.word_freq, self.char_freq), handle)

    def count_datasets_vocabulary(self):
        """Count word/char frequencies over every dataset file, print stats, serialize."""
        # **YD** change the directory location
        new_dataset_folder = config.base_folder+"data/new_datasets/"
        # new_dataset_folder = config.base_folder + "data/yd_datasets/"
        # Typical contents: aida_train/dev/test.txt, ace2004.txt, aquaint.txt,
        # clueweb.txt, msnbc.txt, wikipedia.txt (FIX: this list used to be a
        # no-op triple-quoted string statement).
        for dataset in util.get_immediate_files(new_dataset_folder):
            dataset = os.path.basename(os.path.normpath(dataset))
            print("Processing dataset: ", dataset)
            self.add(new_dataset_folder+dataset)
        self.print_statistics()
        self.serialize(folder=config.base_folder+"data/vocabulary/",
                       name="vocab_freq.pickle")
def build_word_char_maps():
    """Build word->id / char->id maps from the serialized frequency vocab,
    pickle them to the experiment folder, and dump a word2vec embedding
    matrix aligned with the word ids (id 0 = unknown, zero vector)."""
    output_folder = config.base_folder + "data/tfrecords/" + args.experiment_name + "/"
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    with open(config.base_folder + "data/vocabulary/vocab_freq.pickle", 'rb') as handle:
        word_freq, char_freq = pickle.load(handle)

    # Id 0 is reserved for the unknown word / unknown character.
    word2id, id2word = {"<wunk>": 0}, {0: "<wunk>"}
    char2id, id2char = {"<u>": 0}, {0: "<u>"}
    wcnt = 1
    ccnt = 1

    # Words were already filtered to those present in word2vec during counting;
    # keep only the ones above the frequency threshold.
    for word, freq in word_freq.items():
        if freq >= args.word_freq_thr:
            word2id[word] = wcnt
            id2word[wcnt] = word
            wcnt += 1
    for ch, freq in char_freq.items():
        if freq >= args.char_freq_thr:
            char2id[ch] = ccnt
            id2char[ccnt] = ch
            ccnt += 1
    assert len(word2id) == wcnt
    assert len(char2id) == ccnt
    print("words in vocabulary: ", wcnt)
    print("characters in vocabulary: ", ccnt)

    with open(output_folder + "word_char_maps.pickle", 'wb') as handle:
        pickle.dump((word2id, id2word, char2id, id2char, args.word_freq_thr,
                     args.char_freq_thr), handle)

    import gensim
    model = gensim.models.KeyedVectors.load_word2vec_format(
        config.base_folder+"data/basic_data/wordEmbeddings/Word2Vec/GoogleNews-vectors-negative300.bin", binary=True)
    embedding_dim = len(model['queen'])
    embeddings_array = np.empty((wcnt, embedding_dim))  # id2emb
    embeddings_array[0] = np.zeros(embedding_dim)  # <wunk> maps to the zero vector
    for idx in range(1, wcnt):
        embeddings_array[idx] = model[id2word[idx]]
    np.save(output_folder + 'embeddings_array.npy', embeddings_array)
    return word2id, char2id
def build_word_char_maps_restore():
    """Reload previously serialized word2id / char2id maps for this experiment."""
    maps_path = (config.base_folder + "data/tfrecords/" + args.experiment_name + "/"
                 + "word_char_maps.pickle")
    with open(maps_path, 'rb') as handle:
        word2id, _, char2id, _, _, _ = pickle.load(handle)
    return word2id, char2id
class Chunker(object):
    """Streams a dataset file and yields it chunk-by-chunk.

    A chunk is a whole document, a paragraph, or a sentence depending on
    ``args.chunking`` ("per_paragraph" / "per_sentence" / otherwise per document).
    """

    def __init__(self):
        # Which marker tokens terminate a chunk depends on the granularity.
        self.separator = args.chunking
        self.chunk_ending = {'DOCEND'}
        if self.separator == "per_paragraph":
            self.chunk_ending.add('*NL*')
        if self.separator == "per_sentence":
            self.chunk_ending.add('.')
            self.chunk_ending.add('*NL*')
        self.parsing_errors = 0

    def new_chunk(self):
        """Reset the per-chunk accumulators."""
        self.chunk_words = []
        self.begin_gm = []  # the starting positions of gold mentions
        self.end_gm = []  # the end positions of gold mentions
        self.ground_truth = []  # list with the correct entity ids

    def compute_result(self, docid):
        """Package the current chunk as a tuple and reset state.

        Returns (chunk_id, chunk_words, begin_gm, end_gm, ground_truth), or
        None (and bumps ``parsing_errors``) if the mention spans are malformed.
        """
        chunk_id = docid
        # Suffix the document id with paragraph/sentence counters so chunk ids stay unique.
        if self.separator == "per_paragraph":
            chunk_id = chunk_id + "&*" + str(self.par_cnt)
        if self.separator == "per_sentence":
            chunk_id = chunk_id + "&*" + str(self.par_cnt) + "&*" + str(self.sent_cnt)
        result = (chunk_id, self.chunk_words, self.begin_gm, self.end_gm, self.ground_truth)

        # correctness checks. not necessary
        no_errors_flag = True
        if len(self.begin_gm) != len(self.end_gm) or \
                len(self.begin_gm) != len(self.ground_truth):
            no_errors_flag = False
        for b, e in zip(self.begin_gm, self.end_gm):
            # Every mention span must be non-empty and inside the chunk.
            if e <= b or b >= len(self.chunk_words) or e > len(self.chunk_words):
                no_errors_flag = False

        self.new_chunk()
        if no_errors_flag == False:
            self.parsing_errors += 1
            print("chunker parse error: ", result)
            return None
        else:
            return result

    def process(self, filepath):
        """Generator over the file's chunks; see ``compute_result`` for the tuple shape."""
        with open(filepath) as fin:
            self.new_chunk()
            docid = ""
            # paragraph and sentence counter are not actually useful. only for debugging purposes.
            self.par_cnt = 0  # paragraph counter (useful if we work per paragraph)
            self.sent_cnt = 0  # sentence counter (useful if we work per sentence)
            for line in fin:
                line = line.rstrip()  # omit the '\n' character
                if line in self.chunk_ending:
                    if len(self.chunk_words) > 0:  # if we have continues *NL* *NL* do not return empty chunks
                        temp = self.compute_result(docid)
                        if temp is not None:
                            yield temp
                    # do not add the chunk separator, no use
                    if line == '*NL*':
                        self.par_cnt += 1
                        self.sent_cnt = 0
                    if line == '.':
                        self.sent_cnt += 1
                elif line == '*NL*':
                    # Paragraph break in a mode where it does not end the chunk.
                    self.par_cnt += 1
                    self.sent_cnt = 0
                    # do not add this in our words list
                elif line == '.':
                    # Sentence end in a mode where it does not end the chunk;
                    # the period itself is kept as a word.
                    self.sent_cnt += 1
                    self.chunk_words.append(line)
                elif line.startswith('MMSTART_'):
                    ent_id = line[8:]  # assert that ent_id in wiki_name_id_map
                    self.ground_truth.append(ent_id)
                    self.begin_gm.append(len(self.chunk_words))
                elif line == 'MMEND':
                    self.end_gm.append(len(self.chunk_words))
                elif line.startswith('DOCSTART_'):
                    docid = line[9:]
                    self.par_cnt = 0
                    self.sent_cnt = 0
                else:
                    # Ordinary token.
                    self.chunk_words.append(line)
        print(filepath, " chunker parsing errors: ", self.parsing_errors)
        self.parsing_errors = 0
# Sample for "gold mentions only" mode: candidate entities are attached only
# to the annotated gold-mention spans.
GmonlySample = namedtuple("GmonlySample",
                          ["chunk_id", "chunk_words", 'begin_gm', "end_gm",
                           "ground_truth", "cand_entities", "cand_entities_scores"])
# Sample for "allspans" mode: candidate entities are attached to every
# plausible span; the gold-mention fields are kept for evaluation.
AllspansSample = namedtuple("AllspansSample",
                            ["chunk_id", "chunk_words", "begin_spans", "end_spans",
                             "ground_truth", "cand_entities", "cand_entities_scores",
                             "begin_gm", "end_gm"])
class SamplesGenerator(object):
    def __init__(self, mode="allspans"):
        # "allspans" (consider every candidate span, entity linking) or
        # "gmonly" (only gold-mention spans, entity disambiguation)
        self.mode = mode
        self._generator = Chunker()
        self.fetchFilteredCoreferencedCandEntities = util.FetchFilteredCoreferencedCandEntities(args)
        # aggregate statistics accumulated across every processed dataset
        self.all_gm_misses = 0
        self.all_gt_misses = 0
        self.all_gm = 0  # all the gm encountered in all the datasets
def set_gmonly_mode(self):
self.mode = "gmonly"
def set_allspans_mode(self):
self.mode = "allspans"
def is_gmonly_mode(self):
return True if self.mode == "gmonly" else False
def is_allspans_mode(self):
return True if self.mode == "allspans" else False
def process(self, filepath):
if self.is_allspans_mode():
return self._process_allspans(filepath)
else:
return self._process_gmonly(filepath)
    def _process_allspans(self, filepath):
        """Generator: yield an AllspansSample per chunk of filepath.

        Every candidate span that the candidate-entity fetcher accepts is
        kept; gold-mention info rides along for evaluation. When
        args.calculate_stats is set, recall statistics are printed and
        accumulated on self.all_* counters.
        """
        gt_misses = 0
        gm_misses = 0
        gm_this_file = 0  # how many gold mentions are in this document - dataset. so we can find percentage for misses
        max_mention_width_violations = 0
        for chunk in self._generator.process(filepath):
            # reset coreference state per chunk (el_mode=True: entity linking)
            self.fetchFilteredCoreferencedCandEntities.init_coref(el_mode=True)
            begin_spans = []
            end_spans = []
            cand_entities = []  # list of lists candidate entities
            cand_entities_scores = []
            chunk_id, chunk_words, begin_gm, end_gm, ground_truth = chunk
            gm_this_file += len(begin_gm)
            for left, right in self.all_spans(chunk_words):
                cand_ent, scores = self.fetchFilteredCoreferencedCandEntities.process(left, right, chunk_words)
                if cand_ent is not None:
                    begin_spans.append(left)
                    end_spans.append(right)
                    cand_entities.append(cand_ent)
                    cand_entities_scores.append(scores)
            if args.calculate_stats:
                # check if gold mentions are inside the candidate spans and if yes check if ground truth is in cand ent.
                gm_spans = list(zip(begin_gm, end_gm))  # [(3, 5), (10, 11), (15, 18)]
                all_spans = list(zip(begin_spans, end_spans))
                for i, gm_span in enumerate(gm_spans):
                    if gm_span not in all_spans:
                        gm_misses += 1
                        #print("gm not in spans\t\t\t", colored(' '.join(chunk_words[gm_span[0]:gm_span[1]]), 'red'))
                    elif ground_truth[i] not in cand_entities[all_spans.index(gm_span)]:
                        gt_misses += 1
                        #print("gt not in cand ent", colored(' '.join(chunk_words[gm_span[0]:gm_span[1]]), 'green'))
                        #print("gt: ", ground_truth[i], "cand_ent: ", cand_entities[all_spans.index(gm_span)])
                for b, e in zip(begin_gm, end_gm):
                    if e - b > args.max_mention_width:
                        max_mention_width_violations += 1
            if begin_spans:  # there are candidate spans in the processed text
                yield AllspansSample(chunk_id, chunk_words, begin_spans, end_spans,
                                     ground_truth, cand_entities, cand_entities_scores,
                                     begin_gm, end_gm)
        if args.calculate_stats:
            print("max_mention_width_violations :", max_mention_width_violations)
            print("gt_misses", gt_misses)
            print("gm_misses", gm_misses)
            print("gm_this_file: ", gm_this_file)
            # NOTE(review): divides by gm_this_file — raises ZeroDivisionError
            # for a file with no gold mentions; confirm inputs always have gms.
            print("recall % : ", (1 - (gm_misses+gt_misses)/gm_this_file)*100, " %")
            self.all_gt_misses += gt_misses
            self.all_gm_misses += gm_misses
            self.all_gm += gm_this_file
@staticmethod
def all_spans(chunk_words):
# this function produces all possible text spans that do not include spans separators (fullstops).
# divide the list of words to lists of lists based on spans_separator.
# e.g. if chunk_words is for the whole document divide it to sentences (a list of
# sentences) since no span extend above a fullstop.
separation_indexes = []
spans_separator = set(config.spans_separators)
for idx, word in enumerate(chunk_words):
if word in spans_separator:
separation_indexes.append(idx)
| |
# bdgraph/graph.py
#!/usr/bin/python3
''' bdgraph.py
Author:
<NAME>
Description:
Reads an input markup file containing definitions, dependencies, and graph
options, and writes a corresponding output graphviz dot file
Usage:
python3 bdgraph.py input_file [output_file]
'''
import bdgraph
import copy
import inspect
import sys
class Graph(object):
''' Class
The Graph class encapsulates everything about the input file, internal
representation, and handles parsing options, and writing output files '''
    def __init__(self, contents, logging=False):
        ''' string, bool -> Graph
        construct a Graph object, handles parsing the input file to create
        internal representation and options list '''
        # clean input, convert to list of lines, remove comments
        contents = [line.strip() for line in contents.split('\n')]
        contents = [line for line in contents if line and line[0] != '#']
        self.contents = contents  # list of string
        self.nodes = []  # list of Node
        self.graph_options = []  # list of Graph_Option
        self.option_strings = []  # list of string
        self.logging = logging  # bool
        self.has_cycle = False  # bool
        mode = 'definition'  # default parsing state
        for line in contents:
            # state machine, determine state and then take appropriate action;
            # the literal lines 'options' and 'dependencies' switch sections
            if line == 'options':
                mode = line
                continue
            elif line == 'dependencies':
                mode = line
                continue
            # actions, we know our state so do something with the line
            if mode == 'definition':
                self.log('definition: ' + line)
                try:
                    self.nodes += [bdgraph.Node(line, logging=self.logging)]
                except bdgraph.BdgraphNodeNotFound:
                    raise bdgraph.BdgraphRuntimeError(
                        'error: unrecongized syntax: ' + line)
            elif mode == 'options':
                self.log('options: ' + line)
                # several space-separated options may share one line
                for option in line.split(' '):
                    try:
                        self.graph_options += [bdgraph.GraphOption(option)]
                    except bdgraph.BdgraphSyntaxError:
                        raise bdgraph.BdgraphRuntimeError(
                            'error: unrecongized option: ' + option)
            elif mode == 'dependencies':
                self.log('dependencies: ' + line)
                try:
                    self.update_dependencies(line)
                except bdgraph.BdgraphSyntaxError:
                    raise bdgraph.BdgraphRuntimeError(
                        'error: unrecongized dependency type: ' + line)
                except bdgraph.BdgraphNodeNotFound:
                    raise bdgraph.BdgraphRuntimeError(
                        'error: unrecongized node reference: ' + line)
        # NOTE(review): this assigns self.options, yet the rest of the class
        # reads self.option_strings (initialized empty above). Unless
        # option_strings is populated elsewhere, option checks like
        # `bdgraph.Option.Circular in self.option_strings` never match —
        # confirm whether this should assign self.option_strings instead.
        self.options = [_.label for _ in self.graph_options]
    def __del__(self):
        ''' none -> none
        reset the class-level Node counter so nodes of a subsequently
        constructed Graph are numbered from 1 again '''
        bdgraph.node.Node.node_counter = 1
def show(self):
''' none -> IO
prints a representation of the graph to the console '''
options = ' '.join(self.option_strings)
print('graph options: ' + options)
print()
for node in self.nodes:
node.show()
    def write_dot(self, file_name):
        ''' string -> IO
        @file_name  name of the output graphviz file to write
        writes the graph to a file in graphviz dot format. nodes write
        themselves and handle their own options '''
        with open(file_name, 'w') as fd:
            # header
            fd.write('digraph g{\n'
                     '    rankdir=LR;\n'
                     '    ratio=fill;\n'
                     '    node [style=filled];\n'
                     '    overlap=false;\n')
            # neato produces the radial layout the Circular option asks for
            if bdgraph.Option.Circular in self.option_strings:
                fd.write('    layout=neato;\n')
            # graph contents
            for node in self.nodes:
                node.write_dot(fd, self.graph_options)
            # footer
            fd.write('}\n')
    def write_config(self, file_name):
        ''' string -> IO
        @file_name  name of the output bdgraph to write
        rewrites the input file. this reformats definitions, options, and
        dependencies. it's also run after the Graph.compress_representation()
        function so the dependency description is minimal '''
        with open(file_name, 'w') as fd:
            # header: shebang plus a syntax reminder for hand editing
            fd.write('#!/usr/local/bin/bdgraph\n')
            fd.write('# 1 <- 2,3 => 1 requires 2 and 3 \n')
            fd.write('# 2 -> 3,4 => 2 provides 3 and 4 \n')
            fd.write('\n')
            # definitions
            for node in self.nodes:
                node.write_definition(fd)
            fd.write('\n')
            # options
            fd.write('options\n')
            fd.write('    ' + ' '.join(self.option_strings))
            fd.write('\n\n')
            # dependencies
            fd.write('dependencies\n')
            for node in self.nodes:
                node.write_dependencies(fd)
    def update_dependencies(self, line):
        ''' string -> none | BdgraphSyntaxError, BdgraphNodeNotFound
        @line   input line from file with node dependency information
        update the Nodes referenced in the dependency line provided.
        inputs are in the form:
            1,2,3 -> 4,5,6
            1,2,3 <- 4,5,6
        an unrecognized dependency type raises BdgraphSyntaxError;
        an unrecognized node reference raises BdgraphNodeNotFound
        (via find_node) '''
        left, right = 0, 1
        # determine dependency type by which arrow splits the line
        require = line.split('<-')
        allow = line.split('->')
        # 1,2,3 <- 4,5,6
        if len(require) > 1:
            requiring_nodes = require[left].split(',')
            providing_nodes = require[right].split(',')
        # 1,2,3 -> 4,5,6
        elif len(allow) > 1:
            providing_nodes = allow[left].split(',')
            requiring_nodes = allow[right].split(',')
        # unrecognized dependency type
        else:
            raise bdgraph.BdgraphSyntaxError
        # clean up labels
        providing_nodes = [_.strip() for _ in providing_nodes]
        requiring_nodes = [_.strip() for _ in requiring_nodes]
        # link every requiring node with every providing node (cross product)
        for requiring_label in requiring_nodes:
            requiring_node = self.find_node(requiring_label)
            for providing_label in providing_nodes:
                providing_node = self.find_node(providing_label)
                # update requirements and provisions
                requiring_node.add_require(providing_node)
                providing_node.add_provide(requiring_node)
def find_node(self, label):
''' string -> Node | BdgraphNodeNotFound
@label Node.label of the node to find
search through the graph's nodes for the node with the same label as
the one provided. searches by label, not description '''
result = [node for node in self.nodes if node.label == label]
if not result:
self.log('failed to find: ' + label)
raise bdgraph.BdgraphNodeNotFound
else:
self.log('found: ' + label)
return result.pop()
def find_most(self, provide=False, require=False):
''' ('provide' | 'require') -> Node
search through the nodes for the one that requires the most nodes or
provides to the most nodes, depending on mode. used by
Graph.compress_representation() '''
highest = self.nodes[0]
for node in self.nodes:
if provide and len(node.provides) > len(highest.provides):
highest = node
elif require and len(node.requires) > len(highest.requires):
highest = node
return highest
    def compress_representation(self):
        ''' none -> none
        analyzes relationships between nodes to find an equivalent graph of
        minimum size (# edges)
        we use a copy of the graph to search for the current most
        representative node. this is the node with the highest number of
        provides or requires.
        on the original graph, we remove all the inverses of the relationship
        we just found. on the copy, we remove the relationship we found.
            1 <- 2,3        1 <- 2,3        # found relationships
            2 -> 1          2 -> 1          # relationship inverses
            3 -> 1          3 -> 1          # relationship inverses
            4 -> 5          4 -> 5
        for example, in the graph above, we find 1.requires as the most
        representative node. next, we make a copy of the graph so we can keep
        track of relationships we remove. in the copied graph, we remove both
        found node's relationships and their inverses. in the original graph,
        we only remove the inverses
            1 <- 2,3
            4 -> 5          4 -> 5
        this process continues until the copied graph is empty of relationships
        '''
        # copy the graph so we can remove the most representative nodes as
        # they're found. this way they won't be found on the next iteration
        graph_copy = copy.deepcopy(self)
        while True:
            most_provide = graph_copy.find_most(provide=True)
            most_require = graph_copy.find_most(require=True)
            num_provides = len(most_provide.provides)
            num_requires = len(most_require.requires)
            # there are no more relationships in the copied graph, stop
            if num_provides == num_requires == 0:
                break
            # the most representative relationship is a provision
            elif num_provides > num_requires:
                copy_node = most_provide
                real_node = self.find_node(copy_node.label)
                # inverse of provide is require
                # NOTE(review): this removes items from copy_node.provides
                # while iterating over it, so each pass skips every other
                # element; the enclosing while loop re-selects the node so the
                # algorithm still terminates, just in more iterations —
                # confirm before restructuring.
                for inverse in copy_node.provides:
                    try:
                        inverse.requires.remove(copy_node)
                        copy_node.provides.remove(inverse)
                    except ValueError:
                        pass
                # remove inverses from real graph
                for inverse in real_node.provides:
                    try:
                        inverse.requires.remove(real_node)
                    except ValueError:
                        pass
            # the most representative relationship is a requirement
            else:
                copy_node = most_require
                real_node = self.find_node(copy_node.label)
                # remove inverses and node from copied graph (same
                # iterate-while-mutate caveat as the provide branch above)
                for inverse in copy_node.requires:
                    try:
                        inverse.provides.remove(copy_node)
                        copy_node.requires.remove(inverse)
                    except ValueError:
                        pass
                # remove inverses from real graph
                for inverse in real_node.requires:
                    try:
                        inverse.provides.remove(real_node)
                    except ValueError:
                        pass
def handle_options(self):
''' none -> none
handles non-user specified options, such as color_next and cleanup. '''
if bdgraph.Option.Remove in self.option_strings:
to_remove = []
# find all nodes to be deleted
for node in self.nodes:
try:
if node.node_option.type == bdgraph.Option.Remove:
to_remove.append(node)
except AttributeError:
pass
# remove all marked nodes from the tree and other nodes
for node_to_remove in to_remove:
self.nodes.remove(node_to_remove)
for node in self.nodes:
if node_to_remove in node.requires:
node.requires.remove(node_to_remove)
if node_to_remove in node.provides:
node.provides.remove(node_to_remove)
if bdgraph.Option.Next in self.option_strings:
for node in self.nodes:
# all requiring nodes have the complete flag? this is also true
# when the current node doesn't have any requiring nodes
requirements_satisfied = True
for req_node in node.requires:
if not req_node.node_option:
requirements_satisfied = False
elif req_node.node_option.type != bdgraph.Option.Complete:
requirements_satisfied = False
if (not node.node_option) and requirements_satisfied:
node.node_option = bdgraph.NodeOption('_')
def transitive_reduction(self):
''' none -> none
for all non-immediate children of each node, if that child has a
relationship with the current node, remove it. this removes redundant
transitive relationships
1 -> 2,3 becomes 1 -> 2,3
1 -> 3
cycles are currently not supported and are detected by
sys.getrecursionlimit() exceeding | |
#!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple ElasticSearch-based accessor for tests and development."""
from __future__ import absolute_import
from __future__ import print_function
import collections
import datetime
import json
import logging
import os
import six
import elasticsearch
import elasticsearch_dsl
import time
from biggraphite import accessor as bg_accessor
from biggraphite import glob_utils as bg_glob
from biggraphite.drivers import _utils
from biggraphite.drivers import ttls
log = logging.getLogger(__name__)
# TODO:
# * Support metadata
# * Metrics
# * Directories
# * Add unit tests (with real ES)
# * Support data
# * Support dated indices
# * Handle timeouts, error
# * Implement repair
# * Implement clean
# Single mapping type used for every document (ES 6.x style).
INDEX_DOC_TYPE = "_doc"
# TODO: Make that configurable (in a file), this will be particularly important
# for the number of shards and replicas.
INDEX_SETTINGS = {
    "settings": {
        "index": {
            "number_of_shards": 3,
            "number_of_replicas": 1,
            "refresh_interval": "60s",
            "translog": {
                "sync_interval": "120s",
                "durability": "async",
            },
            "search": {
                "slowlog": {
                    "level": "info",
                    "threshold": {
                        "query": {
                            "debug": "2s",
                            "info": "5s",
                        },
                        "fetch": {
                            "debug": "200ms",
                            "info": "500ms",
                        },
                    }
                }
            }
        },
    },
    "mappings": {
        INDEX_DOC_TYPE: {
            "properties": {
                "depth": {"type": "long"},
                "created_on": {"type": "date"},
                "read_on": {"type": "date"},
                "updated_on": {"type": "date"},
                "name": {
                    "type": "keyword",
                    "ignore_above": 1024,
                },
                "uuid": {
                    "type": "keyword",
                },
                "config": {
                    "type": "object",
                    # TODO: describe existing fields with more details.
                },
            },
            # Additional properties (such as path components) or labels
            # TODO: have a specific dynamic mapping for labels using "match"
            "dynamic_templates": [
                {
                    "strings_as_keywords": {
                        "match": "p*",
                        "match_mapping_type": "string",
                        "mapping": {
                            "type": "keyword",
                            "ignore_above": 256,
                            "ignore_malformed": True,
                        }
                    }
                }
            ]
        },
    },
}
# Index name prefix; a strftime suffix shards indices by date.
DEFAULT_INDEX = "biggraphite_metrics"
DEFAULT_INDEX_SUFFIX = "_%Y-%m-%d"
DEFAULT_HOSTS = ["127.0.0.1"]
DEFAULT_PORT = 9200
DEFAULT_TIMEOUT = 10
# Credentials come from the environment so they stay off command lines.
DEFAULT_USERNAME = os.getenv("BG_ELASTICSEARCH_USERNAME")
DEFAULT_PASSWORD = os.getenv("BG_ELASTICSEARCH_PASSWORD")
MAX_QUERY_SIZE = 10000
# Converters used to build an accessor from string-valued settings.
OPTIONS = {
    "username": str,
    "password": str,
    "index": str,
    "index_suffix": str,
    "hosts": _utils.list_from_str,
    "port": int,
    "timeout": float,
}
def add_argparse_arguments(parser):
    """Add ElasticSearch arguments to an argparse parser.

    Args:
      parser: argparse.ArgumentParser to extend with --elasticsearch_* flags.
    """
    parser.add_argument(
        "--elasticsearch_index", metavar="NAME",
        default=DEFAULT_INDEX,
        help="elasticsearch index.")
    parser.add_argument(
        "--elasticsearch_index_suffix", metavar="NAME",
        default=DEFAULT_INDEX_SUFFIX,
        help="elasticsearch index suffix. Supports strftime format.")
    parser.add_argument(
        "--elasticsearch_username", default=None,
        help="elasticsearch username.")
    parser.add_argument(
        "--elasticsearch_password", default=None,
        help="elasticsearch password.")
    parser.add_argument(
        "--elasticsearch_hosts", metavar="HOST[,HOST,...]",
        default=DEFAULT_HOSTS,
        help="Hosts used for discovery.")
    parser.add_argument(
        "--elasticsearch_port", metavar="PORT", type=int,
        default=DEFAULT_PORT,
        help="The native port to connect to.")
    parser.add_argument(
        "--elasticsearch_timeout", metavar="TIMEOUT", type=int,
        default=DEFAULT_TIMEOUT,
        help="elasticsearch query timeout in seconds.")
def _components_from_name(metric_name):
res = metric_name.split(".")
return list(filter(None, res))
def document_from_metric(metric):
    """Creates an ElasticSearch document from a Metric.

    The document carries the sanitized name, its depth, one p<i> field per
    path component (matched by the "p*" dynamic mapping), the metric uuid,
    timestamps, and the metadata config dict.
    """
    config = metric.metadata.as_string_dict()
    components = _components_from_name(metric.name)
    name = bg_accessor.sanitize_metric_name(metric.name)
    # Take a single timestamp so created_on and updated_on are exactly equal
    # for a freshly created document (previously two now() calls differed).
    now = datetime.datetime.now()
    data = {
        "depth": len(components) - 1,
        "name": name,
    }
    for i, component in enumerate(components):
        data["p%d" % i] = component
    data.update({
        "uuid": metric.id,
        "created_on": now,
        "updated_on": now,
        "read_on": None,
        "config": config,
    })
    return data
class Error(bg_accessor.Error):
    """Base class for all exceptions from this module."""
class InvalidArgumentError(Error, bg_accessor.InvalidArgumentError):
    """Callee did not follow requirements on the arguments."""
def _parse_wildcard_component(component):
    """Given a complex component, this builds a wildcard constraint."""
    pieces = []
    for subcomponent in component:
        if isinstance(subcomponent, bg_glob.AnySequence):
            pieces.append("*")
        elif isinstance(subcomponent, six.string_types):
            pieces.append(subcomponent)
        elif isinstance(subcomponent, bg_glob.AnyChar):
            pieces.append("?")
        else:
            raise Error("Unhandled type '%s'" % subcomponent)
    return "".join(pieces)
def _parse_regexp_component(component):
    """Given a complex component, this builds a regexp constraint."""
    # A bare Globstar matches the rest of the path.
    if isinstance(component, bg_glob.Globstar):
        return ".*"
    pieces = []
    for subcomponent in component:
        if isinstance(subcomponent, bg_glob.Globstar):
            pieces.append(".*")
        elif isinstance(subcomponent, bg_glob.AnySequence):
            pieces.append("[^.]*")
        elif isinstance(subcomponent, six.string_types):
            pieces.append(subcomponent)
        elif isinstance(subcomponent, bg_glob.CharNotIn):
            pieces.append('[^' + ''.join(subcomponent.values) + ']')
        elif isinstance(subcomponent, bg_glob.CharIn):
            pieces.append('[' + ''.join(subcomponent.values) + ']')
        elif isinstance(subcomponent, bg_glob.SequenceIn):
            if subcomponent.negated:
                pieces.append('[^.]*')
            else:
                pieces.append('(' + '|'.join(subcomponent.values) + ')')
        elif isinstance(subcomponent, bg_glob.AnyChar):
            pieces.append('[^.]')
        else:
            raise Error("Unhandled type '%s'" % subcomponent)
    return "".join(pieces)
def parse_complex_component(component):
    """Given a complex component, this builds a constraint."""
    # Wildcards cover the simple sub-component kinds; anything richer
    # needs a full regexp.
    wildcardable = (bg_glob.AnySequence, bg_glob.AnyChar) + tuple(six.string_types)
    if all(isinstance(sub_c, wildcardable) for sub_c in component):
        return 'wildcard', _parse_wildcard_component(component)
    return 'regexp', _parse_regexp_component(component)
def _contains_regexp_wildcard(values):
return any("*" in value for value in values)
def parse_simple_component(component):
    """Given a component with a simple type, this builds a constraint."""
    value = component[0]
    if isinstance(value, bg_glob.AnySequence):
        return None, None  # No constrain
    if isinstance(value, six.string_types):
        return 'term', value
    if isinstance(value, bg_glob.CharNotIn):
        return 'regexp', '[^' + ''.join(value.values) + ']'
    if isinstance(value, bg_glob.CharIn):
        return 'regexp', '[' + ''.join(value.values) + ']'
    if isinstance(value, bg_glob.SequenceIn):
        # A plain terms filter works unless one alternative itself
        # contains a wildcard.
        if _contains_regexp_wildcard(value.values):
            return 'regexp', '(' + '|'.join(value.values) + ')'
        return 'terms', value.values
    if isinstance(value, bg_glob.AnyChar):
        return 'wildcard', '?'
    raise Error("Unhandled type '%s'" % value)
def _get_depth_from_components(components):
return len(components) - 1
def _raise_unsupported():
    """Raise NotImplementedError for data operations this accessor lacks."""
    raise NotImplementedError("Elasticsearch accessor does not support data operations")
class _ElasticSearchAccessor(bg_accessor.Accessor):
"""A ElasticSearch acessor that doubles as a ElasticSearch MetadataCache."""
Row = collections.namedtuple(
"Row", ["time_start_ms", "offset", "shard", "value", "count"]
)
Row0 = collections.namedtuple("Row", ["time_start_ms", "offset", "value"])
def __init__(
self,
hosts=DEFAULT_HOSTS,
port=DEFAULT_PORT,
index=DEFAULT_INDEX,
index_suffix=DEFAULT_INDEX_SUFFIX,
username=DEFAULT_USERNAME,
password=<PASSWORD>,
timeout=DEFAULT_TIMEOUT,
updated_on_ttl_sec=ttls.DEFAULT_UPDATED_ON_TTL_SEC,
read_on_ttl_sec=ttls.DEFAULT_READ_ON_TTL_SEC,
):
"""Create a new ElasticSearchAccessor."""
super(_ElasticSearchAccessor, self).__init__("ElasticSearch")
self._hosts = list(hosts)
self._port = port
self._index_prefix = index
self._index_suffix = index_suffix
self._username = username
self._password = password
self._timeout = timeout
self._known_indices = {}
self.__glob_parser = bg_glob.GraphiteGlobParser()
self.__updated_on_ttl_sec = updated_on_ttl_sec
self.__read_on_ttl_sec = read_on_ttl_sec
self.client = None
log.debug(
"Created Elasticsearch accessor with index prefix: '%s' and index suffix: '%s'" %
(self._index_prefix, self._index_suffix)
)
    def connect(self, *args, **kwargs):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).connect(*args, **kwargs)
        self._connect()
        # Only flag connected once _connect() has returned successfully.
        self.is_connected = True
def _connect(self):
"""Connect to elasticsearch."""
if self.is_connected:
return
if self._username:
http_auth = (self._username, self._password or "")
else:
http_auth = None
kwargs = {
'sniff_on_start': True,
'sniff_on_connection_fail': True,
'retry_on_timeout': True,
'max_retries': 3,
'timeout': self._timeout,
}
if self._port:
kwargs['port'] = self._port
if http_auth:
kwargs['http_auth'] = http_auth
es = elasticsearch.Elasticsearch(
self._hosts,
**kwargs
)
log.info("Connected: %s" % es.info())
self.client = es
    def shutdown(self, *args, **kwargs):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).shutdown(*args, **kwargs)
        self._shutdown()
        # Mark disconnected even though _shutdown() already cleared the client.
        self.is_connected = False
def _shutdown(self):
"""Shutdown Elasticsearch client."""
if self.client:
self.client.transport.close()
self.client = None
    def background(self):
        """Perform periodic background operations."""
        # Nothing to do for the ElasticSearch backend.
        pass
def flush(self):
"""Flush any internal buffers."""
if self.client:
self.client.indices.flush(
index="%s*" % self._index_prefix,
allow_no_indices=True,
ignore_unavailable=True,
wait_if_ongoing=True,
)
self.client.indices.refresh(
index="%s*" % self._index_prefix,
allow_no_indices=True,
ignore_unavailable=True,
)
    def clear(self):
        """Clear all internal data."""
        # Drop the index-existence cache; indices are re-checked lazily.
        self._known_indices = {}
    def get_index(self, metric):
        """Get the index where a metric should be stored."""
        # Here the index could be sharded further by looking at the
        # metric metadata, for example, per owner.
        index_name = self._index_prefix + datetime.datetime.now().strftime(self._index_suffix)
        if index_name not in self._known_indices:
            if not self.client.indices.exists(index=index_name):
                # NOTE(review): ignore=409 suppresses conflicts if a
                # concurrent writer creates the index first — confirm the
                # expected status code for the targeted ES version.
                self.client.indices.create(
                    index=index_name,
                    body=INDEX_SETTINGS,
                    ignore=409
                )
                self.client.indices.flush()
            # Remember the index so we only hit ES once per index name.
            self._known_indices[index_name] = True
        return index_name
    def insert_points_async(self, metric, datapoints, on_done=None):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).insert_points_async(
            metric, datapoints, on_done
        )
        # Data-point storage is not implemented by this metadata-only accessor.
        _raise_unsupported()
    def insert_downsampled_points_async(self, metric, datapoints, on_done=None):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).insert_downsampled_points_async(
            metric, datapoints, on_done
        )
        # Data-point storage is not implemented by this metadata-only accessor.
        _raise_unsupported()
    def drop_all_metrics(self, *args, **kwargs):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).drop_all_metrics(*args, **kwargs)
        # Drop indices. Every dated index sharing the prefix is deleted.
        self.client.indices.delete("%s*" % self._index_prefix)
    def create_metric(self, metric):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).create_metric(metric)
        index_name = self.get_index(metric)
        # ignore=409: a document with this id may already exist; treat
        # re-creation as a no-op rather than an error.
        self.client.create(
            index=index_name,
            doc_type=INDEX_DOC_TYPE,
            id=metric.id,
            body=document_from_metric(metric),
            ignore=409,
        )
    def update_metric(self, name, updated_metadata):
        """See bg_accessor.Accessor."""
        super(_ElasticSearchAccessor, self).update_metric(name, updated_metadata)
        name = bg_accessor.sanitize_metric_name(name)
        metric = self.get_metric(name)
        if metric is None:
            raise InvalidArgumentError("Unknown metric '%s'" % name)
        # Rebuild the metric with the new metadata, preserving created_on and
        # read_on, and bumping updated_on to now.
        updated_metric = self.make_metric(
            name,
            updated_metadata,
            created_on=metric.created_on,
            updated_on=datetime.datetime.now(),
            read_on=metric.read_on
        )
        # NOTE(review): create_metric ignores conflicts (409), so an update
        # landing in the same dated index may silently not overwrite the
        # existing document — verify the intended semantics.
        self.create_metric(updated_metric)
    def delete_metric(self, name):
        """Delete every document matching the metric name exactly."""
        name = bg_accessor.sanitize_metric_name(name)
        query = self._create_search_query() \
            .filter('term', name=name)
        log.debug(json.dumps(query.to_dict()))
        query.delete()
    def delete_directory(self, name):
        """Delete every metric document under the given directory path."""
        components = _components_from_name(name)
        depth = _get_depth_from_components(components)
        query = self._create_search_query()
        # Pin each known path component, then require at least this depth so
        # the whole subtree below the directory is matched.
        for index, component in enumerate(components):
            query = query.filter('term', **{"p%d" % index: component})
        query = query.filter('range', depth={'gte': depth})
        log.debug(json.dumps(query.to_dict()))
        query.delete()
# TODO (t.chataigner) Add unittest.
def _search_metrics_from_components(self, glob, components):
search = self._create_search_query().source('name')
# Handle glob with globstar(s).
globstars = components.count(bg_glob.Globstar())
if globstars:
name_regexp = "\\.".join([_parse_regexp_component(c) for c in components])
return True, search.filter('regexp', **{"name": name_regexp})
# TODO (t.chataigner) Handle fully defined prefix (like a.b.c.*.*.*)
# with a wildcard on name.
# Handle fully defined glob.
if self.__glob_parser.is_fully_defined(components):
return False, search.filter(
'term', **{"name": bg_accessor.sanitize_metric_name(glob)})
# Handle all other use cases.
for i, c | |
# compiler/front_end/symbol_resolver.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Symbol resolver for Emboss IR.
The resolve_symbols function should be used to generate canonical resolutions
for all symbol references in an Emboss IR.
"""
import collections
from compiler.util import error
from compiler.util import ir_pb2
from compiler.util import ir_util
from compiler.util import traverse_ir
# TODO(bolms): Symbol resolution raises an exception at the first error, but
# this is one place where it can make sense to report multiple errors.
# (file name, ir_pb2 source location) pair used when reporting where a
# symbol was found or defined.
FileLocation = collections.namedtuple("FileLocation", ["file", "location"])
def ambiguous_name_error(file_name, location, name, candidate_locations):
  """A name cannot be resolved because there are two or more candidates.

  Returns the main error at the reference site followed by one note per
  candidate definition, sorted for deterministic output.
  """
  result = [error.error(file_name, location,
                        "Ambiguous name '{}'".format(name))]
  # Use a distinct loop variable: the original code shadowed the `location`
  # parameter here, which invites subtle bugs if the parameter is later read.
  for candidate in sorted(candidate_locations):
    result.append(error.note(candidate.file, candidate.location,
                             "Possible resolution"))
  return result
def duplicate_name_error(file_name, location, name, original_location):
  """A name is defined two or more times."""
  duplicate = error.error(file_name, location,
                          "Duplicate name '{}'".format(name))
  original = error.note(original_location.file, original_location.location,
                        "Original definition")
  return [duplicate, original]
def missing_name_error(file_name, location, name):
  """A name could not be resolved to any definition."""
  return [error.error(file_name, location, "No candidate for '{}'".format(name))
         ]
def array_subfield_error(file_name, location, name):
  """A dotted reference tried to descend into an array-typed field."""
  return [error.error(file_name, location,
                      "Cannot access member of array '{}'".format(name))]
def noncomposite_subfield_error(file_name, location, name):
  """A dotted reference tried to descend into a scalar (noncomposite) field."""
  return [error.error(file_name, location,
                      "Cannot access member of noncomposite field '{}'".format(
                          name))]
def _nested_name(canonical_name, name):
  """Creates a new CanonicalName with name appended to the object_path."""
  extended_path = list(canonical_name.object_path) + [name]
  return ir_pb2.CanonicalName(
      module_file=canonical_name.module_file,
      object_path=extended_path)
class _Scope(dict):
  """A _Scope holds data for a symbol.

  A _Scope is a dict with some additional attributes. Lexically nested names
  are kept in the dict, and bookkeeping is kept in the additional attributes.
  For example, each module should have a child _Scope for each type contained in
  the module. `struct` and `bits` types should have nested _Scopes for each
  field; `enum` types should have nested scopes for each enumerated name.

  Attributes:
    canonical_name: The absolute name of this symbol; e.g. ("file.emb",
        "TypeName", "SubTypeName", "field_name")
    source_location: The ir_pb2.SourceLocation where this symbol is defined.
    visibility: LOCAL, PRIVATE, or SEARCHABLE; see below.
    alias: If set, this name is merely a pointer to another name.
  """
  # __slots__ keeps the attribute set closed; the dict payload itself still
  # stores the lexically nested child scopes.
  __slots__ = ("canonical_name", "source_location", "visibility", "alias")

  # A LOCAL name is visible outside of its enclosing scope, but should not be
  # found when searching for a name. That is, this name should be matched in
  # the tail of a qualified reference (the 'bar' in 'foo.bar'), but not when
  # searching for names (the 'foo' in 'foo.bar' should not match outside of
  # 'foo's scope). This applies to public field names.
  LOCAL = object()

  # A PRIVATE name is similar to LOCAL except that it is never visible outside
  # its enclosing scope. This applies to abbreviations of field names: if 'a'
  # is an abbreviation for field 'apple', then 'foo.a' is not a valid reference;
  # instead it should be 'foo.apple'.
  PRIVATE = object()

  # A SEARCHABLE name is visible as long as it is in a scope in the search list.
  # This applies to type names ('Foo'), which may be found from many scopes.
  SEARCHABLE = object()

  def __init__(self, canonical_name, source_location, visibility, alias=None):
    """Initializes an empty scope with its bookkeeping attributes."""
    super(_Scope, self).__init__()
    self.canonical_name = canonical_name
    self.source_location = source_location
    self.visibility = visibility
    self.alias = alias
def _add_name_to_scope(name_ir, scope, canonical_name, visibility, errors):
  """Adds name_ir to scope, reporting a duplicate_name_error on collision."""
  name = name_ir.text
  new_scope = _Scope(canonical_name, name_ir.source_location, visibility)
  existing = scope.get(name)
  if existing is None:
    scope[name] = new_scope
  else:
    # Name already taken: report where the previous definition lives.
    errors.append(duplicate_name_error(
        scope.canonical_name.module_file, name_ir.source_location, name,
        FileLocation(existing.canonical_name.module_file,
                     existing.source_location)))
  return new_scope
def _add_name_to_scope_and_normalize(name_ir, scope, visibility, errors):
  """Computes and sets name_ir's canonical_name, then adds it to scope."""
  nested = _nested_name(scope.canonical_name, name_ir.name.text)
  name_ir.canonical_name.CopyFrom(nested)
  return _add_name_to_scope(name_ir.name, scope, nested, visibility, errors)
def _add_struct_field_to_scope(field, scope, errors):
  """Adds a field's name, abbreviation, and `this` builtin to scope."""
  field_scope = _add_name_to_scope_and_normalize(
      field.name, scope, _Scope.LOCAL, errors)
  if field.HasField("abbreviation"):
    # Abbreviations are PRIVATE: usable inside this scope only.
    _add_name_to_scope(field.abbreviation, scope, field_scope.canonical_name,
                       _Scope.PRIVATE, errors)
  # Inside the field's own scope, the name `this` maps back to the field
  # itself; attributes such as `[requires]` rely on this.
  this_builtin = ir_pb2.Word(
      text="this",
      source_location=ir_pb2.Location(is_synthetic=True),
  )
  _add_name_to_scope(this_builtin, field_scope, field.name.canonical_name,
                     _Scope.PRIVATE, errors)
def _add_parameter_name_to_scope(parameter, scope, errors):
  """Adds a runtime parameter's name to the enclosing scope."""
  _add_name_to_scope_and_normalize(parameter.name, scope, _Scope.LOCAL, errors)
def _add_enum_value_to_scope(value, scope, errors):
  """Adds an enumerated value's name to the enclosing enum's scope."""
  _add_name_to_scope_and_normalize(value.name, scope, _Scope.LOCAL, errors)
def _add_type_name_to_scope(type_definition, scope, errors):
  """Adds a type's name to scope; returns the type's new scope for traversal."""
  type_scope = _add_name_to_scope_and_normalize(
      type_definition.name, scope, _Scope.SEARCHABLE, errors)
  return {"scope": type_scope}
def _set_scope_for_type_definition(type_definition, scope):
  """Returns the already-registered scope for an ir_pb2.TypeDefinition."""
  type_name = type_definition.name.name.text
  return {"scope": scope[type_name]}
def _add_module_to_scope(module, scope):
  """Creates a symbol table for module and installs it in scope."""
  module_name = ir_pb2.CanonicalName(
      module_file=module.source_file_name, object_path=[])
  module_symbol_table = _Scope(module_name, None, _Scope.SEARCHABLE)
  scope[module.source_file_name] = module_symbol_table
  return {"scope": module_symbol_table}
def _set_scope_for_module(module, scope):
  """Returns the previously-installed symbol table for module."""
  return {"scope": scope[module.source_file_name]}
def _add_import_to_scope(foreign_import, table, module, errors):
  """Registers an import's local name as an alias of the imported module."""
  if not foreign_import.local_name.text:
    # The prelude import has no local name; nothing to register.
    return
  _add_alias_to_scope(foreign_import.local_name, table, module.canonical_name,
                      [foreign_import.file_name.text], _Scope.SEARCHABLE,
                      errors)
def _construct_symbol_tables(ir):
  """Constructs per-module symbol tables for each module in ir.

  Returns:
    A (symbol_tables, errors) tuple, where symbol_tables maps source file
    names to module-level _Scopes and errors is a (possibly empty) list of
    error messages.
  """
  symbol_tables = {}
  errors = []
  # First install a root _Scope for every module, keyed by source file name.
  traverse_ir.fast_traverse_ir_top_down(
      ir, [ir_pb2.Module], _add_module_to_scope,
      parameters={"errors": errors, "scope": symbol_tables})
  # Then register every type name, nesting scopes inside their modules.
  traverse_ir.fast_traverse_ir_top_down(
      ir, [ir_pb2.TypeDefinition], _add_type_name_to_scope,
      incidental_actions={ir_pb2.Module: _set_scope_for_module},
      parameters={"errors": errors, "scope": symbol_tables})
  if errors:
    # Ideally, we would find duplicate field names elsewhere in the module, even
    # if there are duplicate type names, but field/enum names in the colliding
    # types also end up colliding, leading to spurious errors.  E.g., if you
    # have two `struct Foo`s, then the field check will also discover a
    # collision for `$size_in_bytes`, since there are two `Foo.$size_in_bytes`.
    return symbol_tables, errors
  # Finally, fill in enum values, struct/bits fields, and runtime parameters
  # within their enclosing type scopes.
  traverse_ir.fast_traverse_ir_top_down(
      ir, [ir_pb2.EnumValue], _add_enum_value_to_scope,
      incidental_actions={
          ir_pb2.Module: _set_scope_for_module,
          ir_pb2.TypeDefinition: _set_scope_for_type_definition,
      },
      parameters={"errors": errors, "scope": symbol_tables})
  traverse_ir.fast_traverse_ir_top_down(
      ir, [ir_pb2.Field], _add_struct_field_to_scope,
      incidental_actions={
          ir_pb2.Module: _set_scope_for_module,
          ir_pb2.TypeDefinition: _set_scope_for_type_definition,
      },
      parameters={"errors": errors, "scope": symbol_tables})
  traverse_ir.fast_traverse_ir_top_down(
      ir, [ir_pb2.RuntimeParameter], _add_parameter_name_to_scope,
      incidental_actions={
          ir_pb2.Module: _set_scope_for_module,
          ir_pb2.TypeDefinition: _set_scope_for_type_definition,
      },
      parameters={"errors": errors, "scope": symbol_tables})
  return symbol_tables, errors
def _add_alias_to_scope(name_ir, table, scope, alias, visibility, errors):
  """Adds name_ir to the _Scope named by `scope` as an alias."""
  name = name_ir.text
  new_scope = _Scope(_nested_name(scope, name), name_ir.source_location,
                     visibility, alias)
  # Walk from the module's table down to the scope identified by `scope`.
  target_table = table[scope.module_file]
  for element in scope.object_path:
    target_table = target_table[element]
  if name in target_table:
    existing = target_table[name]
    errors.append(duplicate_name_error(
        target_table.canonical_name.module_file, name_ir.source_location, name,
        FileLocation(existing.canonical_name.module_file,
                     existing.source_location)))
  else:
    target_table[name] = new_scope
  return new_scope
def _resolve_head_of_field_reference(field_reference, table, current_scope,
                                     visible_scopes, source_file_name, errors):
  """Resolves only the first component of a field reference's path."""
  head = field_reference.path[0]
  return _resolve_reference(head, table, current_scope, visible_scopes,
                            source_file_name, errors)
def _resolve_reference(reference, table, current_scope, visible_scopes,
                       source_file_name, errors):
  """Sets reference.canonical_name to its resolved target, if found."""
  if reference.HasField("canonical_name"):
    # Already resolved by the _resolve_field_reference pass; nothing to do.
    return
  target = _find_target_of_reference(reference, table, current_scope,
                                     visible_scopes, source_file_name, errors)
  if target is None:
    return
  assert not target.alias
  reference.canonical_name.CopyFrom(target.canonical_name)
def _find_target_of_reference(reference, table, current_scope, visible_scopes,
source_file_name, errors):
"""Returns the resolved name of the given reference."""
found_in_table = None
name = reference.source_name[0].text
for scope in visible_scopes:
scoped_table = table[scope.module_file]
for path_element in scope.object_path:
scoped_table = scoped_table[path_element]
if (name in scoped_table and
(scope == current_scope or
scoped_table[name].visibility == _Scope.SEARCHABLE)):
# Prelude is "", so explicitly check for None.
if found_in_table is not None:
# TODO(bolms): Currently, this catches the case where a module tries to
# use a name that is defined (at the same scope) in two different
# modules. It may make sense to raise duplicate_name_error whenever two
# modules define the same name (whether it is used or not), and reserve
# ambiguous_name_error for cases where a name is found in multiple
# scopes.
errors.append(ambiguous_name_error(
source_file_name, reference.source_location, name, [FileLocation(
found_in_table[name].canonical_name.module_file,
found_in_table[name].source_location), FileLocation(
scoped_table[name].canonical_name.module_file, scoped_table[
name].source_location)]))
continue
found_in_table = scoped_table
if reference.is_local_name:
# This is a little hacky. When "is_local_name" is True, the name refers
# to a type that was defined inline. In many cases, the type should be
# found at the same scope as the field; e.g.:
#
# struct Foo:
# 0 [+1] enum bar:
# BAZ = 1
#
# In this case, `Foo.bar` has type `Foo.Bar`. Unfortunately, things
# break down a little bit when there is an inline type in an anonymous
# `bits`:
#
# struct Foo:
| |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from torch.distributions import Normal
from .base_network import BaseNetwork
class BaseMDN(BaseNetwork):
r"""Base class for Mixture Density Networks (we use Gaussian mixture).
This class defines the mixture density networks using isotropic Gaussian densities.
The network receives input tensor and outputs parameters for a mixture of Gaussian distributions.
i.e. mixing coefficients, means and variances.
Specifically, their dimensions are following, given N is batch size, K is the number of densities
and D is the data dimension
- mixing coefficients: ``[N, K, D]``
- mean: ``[N, K, D]``
- variance: ``[N, K, D]``
The subclass should implement at least the following:
- :meth:`make_feature_layers`
- :meth:`make_mdn_heads`
- :meth:`init_params`
- :meth:`feature_forward`
Example::
class MDN(BaseMDN):
def make_feature_layers(self, config):
out = make_fc(input_dim=1, hidden_sizes=[15, 15])
last_dim = 15
return out, last_dim
def make_mdn_heads(self, config, last_dim):
out = {}
num_density = 20
data_dim = 1
out['unnormalized_pi_head'] = nn.Linear(in_features=last_dim, out_features=num_density*data_dim)
out['mu_head'] = nn.Linear(in_features=last_dim, out_features=num_density*data_dim)
out['logvar_head'] = nn.Linear(in_features=last_dim, out_features=num_density*data_dim)
out['num_density'] = num_density
out['data_dim'] = data_dim
return out
def init_params(self, config):
for layer in self.feature_layers:
ortho_init(layer, nonlinearity='tanh', weight_scale=1.0, constant_bias=0.0)
ortho_init(self.unnormalized_pi_head, nonlinearity=None, weight_scale=0.01, constant_bias=0.0)
ortho_init(self.mu_head, nonlinearity=None, weight_scale=0.01, constant_bias=0.0)
ortho_init(self.logvar_head, nonlinearity=None, weight_scale=0.01, constant_bias=0.0)
def feature_forward(self, x):
for layer in self.feature_layers:
x = torch.tanh(layer(x))
return x
"""
    def make_params(self, config):
        """Create feature layers and MDN heads, and sanity-check their types.

        Args:
            config (dict): a dictionary of configurations.
        """
        # Create feature layers
        self.feature_layers, self.last_dim = self.make_feature_layers(config)
        assert isinstance(self.feature_layers, nn.ModuleList)
        assert isinstance(self.last_dim, int)
        # Create MDN heads: unnormalized pi, mean and log-variance
        # also returns number of densities and data dimension
        # NOTE(review): passes self.config here but the `config` argument
        # above — presumably identical; confirm against BaseNetwork.
        out_heads = self.make_mdn_heads(self.config, last_dim=self.last_dim)
        assert isinstance(out_heads, dict) and len(out_heads) == 5
        # unpack
        self.unnormalized_pi_head = out_heads['unnormalized_pi_head']
        self.mu_head = out_heads['mu_head']
        self.logvar_head = out_heads['logvar_head']
        self.num_density = out_heads['num_density']
        self.data_dim = out_heads['data_dim']
        # sanity check
        assert isinstance(self.unnormalized_pi_head, nn.Module)
        assert isinstance(self.mu_head, nn.Module)
        assert isinstance(self.logvar_head, nn.Module)
        assert isinstance(self.num_density, int)
        assert isinstance(self.data_dim, int)
def make_feature_layers(self, config):
r"""Create and return the parameters for all the feature layers.
.. note::
For being able to track the parameters automatically, a ``nn.ModuleList`` should
be returned. Also the dimension of last feature should also be returned.
Args:
config (dict): a dictionary of configurations.
Returns
-------
out : ModuleList
a ModuleList of feature layers.
last_dim : int
the dimension of last feature
"""
raise NotImplementedError
def make_mdn_heads(self, config, last_dim):
r"""Create and returns all parameters/layers for MDN heads.
It includes the following:
* ``unnormalized_pi_head pi``: a Module for mixing coefficient with output shape :math:`K\times D`
* ``mu_head``: a Module for mean of Gaussian with output shape :math:`K\times D`
* ``logvar_head``: a Module for log-variance of Gaussian with output shape :math:`K\times D`
* ``num_density``: an integer :math:`K` number of densities
* ``data_dim``: an integer :math:`D` dimension of data
.. note::
A dictionary of all created modules should be returned with the keys
as their names.
Args:
config (dict): a dictionary of configurations.
last_dim (int): last feature dimension helps to define layers for MDN heads.
Returns
-------
out : dict
a dictionary of required output described above.
"""
raise NotImplementedError
def feature_forward(self, x):
r"""Defines forward pass of feature layers, before MDN heads.
.. note::
It should use the class member ``self.feature_layers`` (a ModuleList).
Args:
x (Tensor): input tensor
Returns
-------
out : Tensor
feature tensor before MDN heads
"""
raise NotImplementedError
def forward(self, x):
# Forward pass through feature layers to produce features before the MDN heads
x = self.feature_forward(x)
# Forward pass through the head of unnormalized pi (mixing coefficient)
unnormalized_pi = self.unnormalized_pi_head(x)
# Convert to tensor with shape [N, K, D]
unnormalized_pi = unnormalized_pi.view(-1, self.num_density, self.data_dim)
# Enforce each of coefficients are non-negative and summed up to 1
# Note that it's LogSoftmax to compute numerically stable loss via log-sum-exp trick
log_pi = F.log_softmax(unnormalized_pi, dim=1)
# Forward pass through mean head
mu = self.mu_head(x)
# Convert to tensor with shape [N, K, D]
mu = mu.view(-1, self.num_density, self.data_dim)
# Forward pass through log-variance head
logvar = self.logvar_head(x)
# Convert to tensor with shape [N, K, D]
logvar = logvar.view(-1, self.num_density, self.data_dim)
# Retrieve std from logvar
# For numerical stability: exp(0.5*logvar)
# TODO: support softplus option, see `GaussianPolicy` class
std = torch.exp(0.5*logvar)
return log_pi, mu, std
def calculate_batched_logprob(self, mu, std, x, _fast_code=True):
r"""Calculate the log-probabilities for each data sampled by each density component.
Here the density is Gaussian.
.. warning::
Currently there are fast and slow implementations temporarily with an option
to select one to use. Once it is entirely sure the fast implementation is correct
then this feature will be removed. A benchmark indicates that the fast implementation
is roughly :math:`14x` faster !
Args:
mu (Tensor): mean of Gaussian mixtures, shape [N, K, D]
std (Tensor): standard deviation of Gaussian mixtures, shape [N, K, D]
x (Tensor): input tensor, shape [N, D]
_fast_code (bool, optional): if ``True``, then using fast implementation.
Returns
-------
log_probs : Tensor
the calculated log-probabilities for each data and each density, shape [N, K, D]
"""
# Set up lower bound of std, since zero std can lead to NaN log-probability
# Used for: torch.clamp(std_i, min=min_std...)
# min_std = 1e-12
def _fast(mu, std, x):
# Create Gaussian distribution
dist = Normal(loc=mu, scale=std)
# Calculate the log-probabilities
log_probs = dist.log_prob(x.unsqueeze(1).expand(-1, self.num_density, -1))
return log_probs
def _slow(mu, std, x):
log_probs = []
# Iterate over all density components
for i in range(self.num_density):
# Retrieve means and stds
mu_i = mu[:, i, :]
std_i = std[:, i, :]
# Thresholding std, if std is 0, it leads to NaN loss.
# std_i = torch.clamp(std_i, min=min_std, max=std_i.max().item())
# Create Gaussian distribution
dist = Normal(loc=mu_i, scale=std_i)
# Calculate the log-probability
logp = dist.log_prob(x)
# Record the log probability for current density
log_probs.append(logp)
# Stack log-probabilities with shape [N, K, D]
log_probs = torch.stack(log_probs, dim=1)
return log_probs
# select code
if _fast_code:
return _fast(mu=mu, std=std, x=x)
else:
return _slow(mu=mu, std=std, x=x)
def MDN_loss(self, log_pi, mu, std, target):
r"""Calculate the MDN loss function.
The loss function (negative log-likelihood) is defined by:
.. math::
L = -\frac{1}{N}\sum_{n=1}^{N}\ln \left( \sum_{k=1}^{K}\prod_{d=1}^{D} \pi_{k}(x_{n, d})
\mathcal{N}\left( \mu_k(x_{n, d}), \sigma_k(x_{n,d}) \right) \right)
For better numerical stability, we could use log-scale:
.. math::
L = -\frac{1}{N}\sum_{n=1}^{N}\ln \left( \sum_{k=1}^{K}\exp \left\{ \sum_{d=1}^{D}
\ln\pi_{k}(x_{n, d}) + \ln\mathcal{N}\left( \mu_k(x_{n, d}), \sigma_k(x_{n,d})
\right) \right\} \right)
.. note::
One should always use the second formula via log-sum-exp trick. The first formula
is numerically unstable resulting in +/- ``Inf`` and ``NaN`` error.
The log-sum-exp trick is defined by
.. math::
\log\sum_{i=1}^{N}\exp(x_i) = a + \log\sum_{i=1}^{N}\exp(x_i - a)
where :math:`a = \max_i(x_i)`
Args:
log_pi (Tensor): log-scale mixing coefficients, shape [N, K, D]
mu (Tensor): mean of Gaussian mixtures, shape [N, K, D]
std (Tensor): standard deviation of Gaussian mixtures, shape [N, K, D]
target (Tensor): target tensor, shape [N, D]
Returns
-------
loss : Tensor
calculated loss
"""
# Enforce the shape of target to be consistent with output dimension
target = target.view(-1, self.data_dim)
# Calculate Gaussian log-probabilities over batch for each mixture and each data dimension
log_gaussian_probs = self.calculate_batched_logprob(mu=mu,
std=std,
x=target,
_fast_code=True)
# Calculate the joint log-probabilities from [N, K, D] to [N, K]
joint_log_probs = torch.sum(log_pi + log_gaussian_probs, dim=-1, keepdim=False)
# Calculate the loss via log-sum-exp trick, from [N, K] to [N]
# It calculates over K (mixing coefficient) dimension, produce tensor with shape [N]
loss = -torch.logsumexp(joint_log_probs, dim=-1, keepdim=False)
# Mean loss over the batch to scalar value
loss = loss.mean(0)
return loss
def sample(self, log_pi, mu, std, tau=1.0, _fast_code=True):
r"""Sample from Gaussian mixture using reparameterization trick.
- Firstly | |
#!/usr/bin/python
import pdb_util as util
from copy import deepcopy
import os, itertools
import collections
class simplepdb:
'''
Parses and writes PDB files, and exposes limited functionality for
manipulating their contents with a particular focus on the kinds of
manipulations required for setting up MD simulations.
Attributes:
mol_data: Dictionary of PDB column names and their values.
ters : Locations of breaks in the molecule, per the input PDB or
resulting from simple operations such as merging molecules. Specified
as a list of residues that appear immediately _before_ a break.
connect: Connect records for the molecule. Format is a dictionary where
the keys are the atoms that were found in the input connect record and
the values are the list of atoms to which they were bonded (per that
record).
natoms: Number of atoms in molecule(s).
'''
    def __init__(self, other):
        '''
        Return a simplepdb object created by parsing an input PDB file or
        copying the contents of another object.

        Can't construct an object without such input because no utilities are
        provided that could be used to construct a reasonable molecule.
        '''
        if isinstance(other, self.__class__):
            # Copy constructor: deep-copy every attribute of the source.
            for k,v in other.__dict__.items():
                setattr(self, k, deepcopy(v))
        else:
            # Otherwise `other` must be a path to an existing .pdb file.
            assert os.path.isfile(other), 'simplepdb constructor requires \
input PDB or object of the same type.\n'
            assert 'pdb' in os.path.splitext(other)[-1], 'Not a PDB file.\n'
            self.mol_data = self.parse_pdb(other)
            self.ters,self.connect = self.get_ters_and_connect(other)
            self.natoms = len(self.mol_data['atomnum'])
            if self.natoms == 0:
                print("WARNING: no atoms in molecule.\n")
def __eq__(self, other):
'''
Check equality of simplepdb objects based on the values of their fields
'''
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
def parse_pdb(self, pdb):
'''
Return a dictionary of PDB column names and their values for ATOM and
HETATM records in the provided PDB file
'''
#TODO: deal with multiple models
parse = util.make_parser(util.pdb_fieldwidths)
f = open(pdb, 'r')
mol_data_list = [parse(line) for line in f if line.startswith('HETATM') or line.startswith('ATOM')]
f.close()
mol_data = {}
for i,field in enumerate(util.pdb_fieldnames):
if i in util.pdb_floatfields:
fieldlist = [float(line[i]) if line[i].strip() else line[i] for
line in mol_data_list]
elif i in util.pdb_intfields:
fieldlist = [int(line[i]) if line[i].strip() else line[i] for
line in mol_data_list]
else:
fieldlist = [line[i].strip() for line in mol_data_list]
mol_data[field] = fieldlist
if not mol_data['element']:
self.set_element(mol_data)
return mol_data
def get_ters_and_connect(self, pdb):
'''
Returns a list of breaks in a PDB file and a dictionary of CONECT records
'''
ters = []
connect = collections.OrderedDict()
last_line = ''
parse = util.make_parser(util.pdb_connectfields)
with open (pdb,'r') as f:
for line in f:
if line.startswith('TER'):
#this includes the insertion code, if applicable, so the
#ters have to be strings rather than ints
ters.append(last_line[22:27].strip())
elif line.startswith('CONECT'):
contents = parse(line)
atom = int(contents[1])
bonds = [int(bond) for bond in contents[2:] if
bond.strip()]
if atom not in connect:
connect[atom] = bonds
else:
connect[atom] = connect[atom] + bonds
elif line.startswith('ATOM') or line.startswith('HETATM'):
last_line = line
ter = last_line[22:27].strip()
if ter and not ter in ters:
ters.append(ter)
return ters,connect
def get_res_info(self, field_dict):
'''
Returns a list of dictionaries containing all relevant info for
residues whose field value matches the passed-in value
'''
info = []
resnums = []
for key,value in field_dict.items():
assert key in list(self.mol_data.keys()), 'Invalid residue identifier\n'
indices = [i for i,e in enumerate(self.mol_data[key]) if e==value]
for index in indices:
resnum = self.mol_data['resnum'][index]
if resnum not in resnums:
resnums.append(resnum)
info.append({})
for key in list(self.mol_data.keys()):
info[-1][key] = [self.mol_data[key][index]]
else:
res_index = resnums.index(resnum)
for key in list(self.mol_data.keys()):
info[res_index][key].append(self.mol_data[key][index])
return info
def get_center(self):
'''
Returns location of center
'''
center = [0,0,0]
center[0] = sum(self.mol_data['x']) / self.natoms
center[1] = sum(self.mol_data['y']) / self.natoms
center[2] = sum(self.mol_data['z']) / self.natoms
return center
def set_origin(self, loc):
'''
Translates molecule to new origin
'''
assert len(loc)==3,"Center is not three dimensional"
self.mol_data['x'] = [x - loc[0] for x in self.mol_data['x']]
self.mol_data['y'] = [y - loc[1] for y in self.mol_data['y']]
self.mol_data['z'] = [z - loc[2] for z in self.mol_data['z']]
    def add_ter(self, ter):
        '''
        Record a chain break after residue ter (a resnum string, possibly
        including an insertion code)
        '''
        self.ters.append(ter)
def add_residue(self, res_info, ignore_resnum=True):
'''
Takes a dict mapping fieldnames to values and adds the residue to the
existing mol_data dict, defaults to ignoring the existing residue
number and just sticking it at the end
'''
assert len(set(res_info['resnum'])) == 1, 'Different residue numbers in putative residue\n'
if ignore_resnum:
res_info['resnum'] = [max(self.mol_data['resnum']) + 1] * len(res_info['resnum'])
else:
assert res_info['resnum'][0] > 0, 'Residue numbers must be positive integers\n'
assert res_info['resnum'][0] not in self.mol_data['resnum'], 'Residue number %d already exists\n' %res_info['resnum'][0]
for key,value in self.mol_data.items():
value += res_info[key]
self.natoms += len(res_info['resnum'])
def group_by_residue(self):
'''
Rearrange atoms in a file so that atoms in the same residue are
contiguous and orders residues monotonically by resnum
'''
unsorted_resmap = {}
for old_idx in range(self.natoms):
resnum = self.mol_data['resnum'][old_idx]
if resnum not in unsorted_resmap:
unsorted_resmap[resnum] = [old_idx]
else:
unsorted_resmap[resnum].append(old_idx)
resmap = collections.OrderedDict(sorted(list(unsorted_resmap.items()), key=lambda t: t[0]))
new_indices = list(itertools.chain.from_iterable(list(resmap.values())))
new_mol_data = {}
for key in self.mol_data:
new_mol_data[key] = [self.mol_data[key][i] for i in new_indices]
self.renumber_atoms()
self.mol_data = new_mol_data
def renumber_atoms(self, start_val=1):
'''
Renumber atoms so they start at start_val
'''
mapping = {}
for i in range(self.natoms):
old_val = self.mol_data['atomnum'][i]
new_val = i + start_val
self.mol_data['atomnum'][i] = new_val
mapping[old_val] = new_val
#TODO: ugly
new_connect = collections.OrderedDict()
for atom,bonds in list(self.connect.items()):
if atom in mapping:
new_connect[mapping[atom]] = bonds
for atom,bonds in list(new_connect.items()):
for bond in bonds:
if bond in mapping:
idx = bonds.index(bond)
new_connect[atom][idx] = mapping[bond]
self.connect = new_connect
def renumber_residues(self, start_val=1):
'''
Renumber residues so they start at start_val in "first seen" order, desirable
when there is a ligand at the end of data with an out-of-order resnum
'''
reslist = []
for i,resnum in enumerate(self.mol_data['resnum']):
name = str(resnum)
if name not in reslist:
newidx = len(reslist)
reslist.append(name)
else:
newidx = reslist.index(name)
newnum = newidx + start_val
self.mol_data['resnum'][i] = newnum
insert_code = self.mol_data['rescode'][i]
if str(resnum) + insert_code in self.ters:
ter_idx = self.ters.index(str(resnum)+insert_code)
self.ters[ter_idx] = str(newnum) + insert_code
def rename_atoms(self):
'''
Generate unique atom names
'''
if self.has_unique_names():
return
for i,name in enumerate(self.mol_data['atomname']):
self.mol_data['atomname'][i] = ''.join([char for char in
self.mol_data['element'][i]])
occurrences = {}
for i,atom in enumerate(self.mol_data['atomname']):
if atom not in occurrences:
occurrences[atom] = [i,1]
else:
occurrences[atom][1] += 1
self.mol_data['atomname'][i] += str(occurrences[atom][1])
self.mol_data['atomname'][i] = \
'{:>{}s}'.format(self.mol_data['atomname'][i],
util.pdb_fieldwidths[3])
def set_element(self, mol_data):
'''
Set atom element based on atom name, but only if element not set.
'''
if not mol_data['element']:
for i,name in enumerate(mol_data['atomname']):
element = ''.join([char for char in name if char.isalpha()])
mol_data['element'][i] = '{:>{}s}'.format(element,
util.pdb_fieldwidths[-2])
def sanitize(self):
'''
Perform atom renumbering, residue renumbering, and regrouping atoms so
residues are contiguous; if a small molecule, also uniquely names atoms
and sets the element field
'''
self.group_by_residue()
self.renumber_atoms()
self.renumber_residues()
if not self.is_protein():
self.set_element(self.mol_data)
self.rename_atoms()
def has_hydrogen(self):
'''
Returns true if hydrogens are present
'''
return 'H' in [elem.strip() for elem in self.mol_data['element']]
def strip_hydrogen(self):
'''
Strip out all the hydrogens
'''
h_indices = [i for i,elem in enumerate(self.mol_data['element']) if elem.strip() ==
'H']
new_mol_data = {}
for key in list(self.mol_data.keys()):
new_mol_data[key] = [self.mol_data[key][i] for i in
range(len(self.mol_data[key])) if i not in h_indices]
self.mol_data = new_mol_data
def is_protein(self, ff=''):
'''
Returns true if standard amino acid residues are present
'''
aa = util.get_available_res(ff).intersection(self.mol_data['resname'])
return len(aa) > 0
def has_unique_names(self):
'''
Returns true if atom names are unique
'''
#TODO: add to tests
atom_ids = []
for i in range(self.natoms):
atom_ids.append(str(self.mol_data['resnum'][i]) +
self.mol_data['atomname'][i])
counter = collections.Counter(atom_ids)
if any(t > 1 for t in list(counter.values())):
return False
return True
def set_recordname(self, newname, resnum=None):
'''
Set record name to ATOM or HETATM for residue number resnum or all
resnums if no number is provided
'''
#TODO: add to tests
assert newname=='ATOM' or newname=='HETATM', 'Record names must be one \
of "ATOM" and "HETATM"'
if not resnum:
self.mol_data['recordname'] = [newname] * self.natoms
else:
self.mol_data['recordname'] = [newname for name in
self.mol_data['recordname'] if self.mol_data['resnum'] == resnum]
def set_resname(self, newname, oldname=''):
'''
Set resname to newname; if oldname is not specified then all resnames
are updated to newname, otherwise just oldname is
'''
#TODO: add to tests
if not | |
0x18, 0x49, 0x8c,
0x61, 0x82, 0x81, 0x89, 0x22, 0x46, 0x32, 0x18,
0xcb, 0x82, 0x8b, 0x12, 0x2c, 0x22, 0x06, 0x38,
0xc6, 0x82, 0x62, 0x22, 0x32, 0x24, 0x18, 0xa9,
0xa2, 0x12, 0x83, 0x21, 0xf1, 0x32, 0xc8, 0x12,
0x2b, 0x52, 0x18, 0x1a, 0x0c, 0x8c, 0x28, 0xb2,
0xc9, 0xb1, 0x18, 0xb4, 0x82, 0x65, 0x82, 0xa2,
0x83, 0x22, 0xfa, 0x73, 0x41, 0x50, 0x41, 0x70,
0x11, 0x08, 0x1d, 0x38, 0x2d, 0x24, 0x59, 0x31,
0x2a, 0x4e, 0x28, 0xa9, 0x32, 0x41, 0x29, 0x12,
0x11, 0x52, 0x41, 0x42, 0x30, 0x62, 0x3e, 0x89,
0xf0, 0x24, 0x48, 0x60, 0x82, 0x84, 0xc9, 0x22,
0x54, 0x24, 0x22, 0x47, 0xa8, 0x8b, 0x34, 0x26,
0x82, 0x12, 0x88, 0xf6, 0x81, 0x42, 0x60, 0x22,
0x18, 0x24, 0x30, 0x4a, 0x2a, 0xd1, 0x82, 0x25,
0xf1, 0x42, 0x88, 0x2a, 0x91, 0xc2, 0x18, 0x1a,
0x04, 0x88, 0x30, 0x92, 0x46, 0x28, 0x98, 0x28,
0x12, 0x8c, 0x84, 0xfa, 0x24, 0x7f, 0x00, 0x40,
0x84, 0x43, 0x04, 0x00, 0x41, 0x12, 0x80, 0x21,
0xc4, 0x18, 0x81, 0x83, 0x01, 0x40, 0x62, 0x41,
0x88, 0x43, 0x81, 0x04, 0x42, 0x63, 0x12, 0xc1,
0x18, 0x28, 0x00, 0x1a, 0x84, 0x22, 0x21, 0x21,
0x02, 0x22, 0xa0, 0x21, 0x22, 0x00, 0x80, 0x03,
0x32, 0x20, 0x08, 0xa9, 0x84, 0x32, 0x49, 0x2c,
0xa1, 0x41, 0x20, 0x8a, 0x02, 0xcf, 0xe3, 0x05,
0x42, 0x42, 0x00, 0x64, 0x18, 0x45, 0x08, 0x11,
0x81, 0xc3, 0x63, 0x82, 0x2a, 0x91, 0x18, 0x81,
0x00, 0x42, 0x80, 0x82, 0x71, 0x48, 0x44, 0x2c,
0x04, 0x84, 0x42, 0x00, 0x42, 0x20, 0x45, 0x02,
0x8b, 0x41, 0x20, 0x24, 0x01, 0x42, 0x18, 0x89,
0x02, 0x28, 0x32, 0x80, 0x92, 0x22, 0x42, 0xc0,
0x12, 0x8b, 0x48, 0x80, 0x02, 0x84, 0x00, 0x12,
0x4e, 0x63, 0x24, 0x22, 0x43, 0xd5, 0x48, 0x12,
0xa8, 0x41, 0x22, 0x10, 0xb4, 0x16, 0x12, 0x14,
0x14, 0x31, 0x22, 0x4a, 0x48, 0x84, 0x68, 0x41,
0x22, 0xc0, 0x14, 0x27, 0x22, 0x88, 0x28, 0xac,
0x71, 0x44, 0x88, 0x82, 0x21, 0x82, 0x41, 0x04,
0x28, 0x22, 0x90, 0x2c, 0x4a, 0x23, 0x45, 0xbc,
0x38, 0x28, 0x02, 0x9a, 0x94, 0x14, 0x56, 0x08,
0x8c, 0x83, 0x94, 0x28, 0x90, 0x58, 0x22, 0x4b,
0x21, 0x20, 0x81, 0x61, 0x22, 0xc4, 0x20, 0xf1,
0xbe, 0x24, 0x80, 0x01, 0xc0, 0x11, 0x00, 0x00,
0x83, 0x04, 0x82, 0x00, 0x20, 0x01, 0x48, 0x44,
0x8c, 0x31, 0x41, 0x00, 0x40, 0x04, 0x44, 0x84,
0x4a, 0x11, 0x12, 0x68, 0x41, 0x20, 0x84, 0x02,
0x24, 0x28, 0x00, 0x44, 0x84, 0x44, 0x2c, 0x02,
0x00, 0x16, 0x88, 0x08, 0x00, 0x20, 0x14, 0x68,
0x62, 0x00, 0x2a, 0xd4, 0x28, 0xc4, 0x84, 0x24,
0x5a, 0x62, 0x41, 0x8c, 0x92, 0x19, 0x6c, 0x24,
0x42, 0x14, 0xf4, 0x14, 0x29, 0x45, 0xb8, 0x88,
0x43, 0x8a, 0xa1, 0x81, 0x49, 0x81, 0xc8, 0x44,
0x4c, 0x82, 0x31, 0x65, 0x29, 0x81, 0x68, 0xc2,
0x46, 0x72, 0x44, 0x68, 0xc2, 0x18, 0x2e, 0x52,
0x18, 0x4c, 0xc4, 0x14, 0x62, 0x46, 0x02, 0x4c,
0x81, 0x87, 0x31, 0x18, 0x83, 0x32, 0x44, 0x16,
0xf8, 0x18, 0x64, 0x43, 0xf2, 0x12, 0x48, 0x12,
0x28, 0x5e, 0x22, 0x82, 0x48, 0x89, 0xc1, 0x32,
0x76, 0x08, 0x89, 0xc2, 0x16, 0x00, 0x48, 0x83,
0xf4, 0x2e, 0x3e, 0x00, 0x10, 0x04, 0x18, 0x12,
0x81, 0x30, 0x42, 0x00, 0x19, 0x02, 0x8c, 0x14,
0x08, 0x18, 0x48, 0x81, 0x12, 0x28, 0x4d, 0x48,
0x50, 0x84, 0x00, 0x83, 0x04, 0x40, 0x08, 0x18,
0x28, 0x22, 0x4a, 0x42, 0x82, 0x12, 0x24, 0x29,
0x02, 0x88, 0x12, 0x22, 0x28, 0x89, 0x41, 0x18,
0x08, 0x42, 0xc0, 0x84, 0x48, 0x18, 0x22, 0x89,
0x52, 0x84, 0x28, 0x24, 0x77, 0x95, 0x10, 0x64,
0x84, 0x20, 0x12, 0x84, 0xc2, 0x48, 0x50, 0x28,
0x28, 0x14, 0x18, 0x84, 0x12, 0x11, 0x12, 0xd0,
0x22, 0x81, 0x04, 0x00, 0x84, 0x56, 0x04, 0x40,
0x82, 0x94, 0x48, 0x26, 0x08, 0x10, 0x28, 0x02,
0x23, 0x02, 0x28, 0x46, 0x22, 0x04, 0x78, 0x21,
0x24, 0x28, 0x00, 0x00, 0x82, 0x24, 0x50, 0x82,
0x84, 0x8c, 0x84, 0x02, 0x00, 0xcf, 0xbc, 0x03,
0x00, 0x60, 0x48, 0x00, 0x28, 0x00, 0x80, 0x04,
0x45, 0x12, 0x21, 0x14, 0x02, 0x44, 0x44, 0x21,
0x10, 0x08, 0x00, 0x00, 0x00, 0x48, 0x12, 0x20,
0x01, 0x00, 0x00, 0x20, 0x02, 0x81, 0x4a, 0x81,
0x02, 0x18, 0x8a, 0x02, 0x23, 0x01, 0x23, 0x36,
0x98, 0x00, 0x80, 0x08, 0x22, 0x42, 0x39, 0x4d,
0x01, 0x00, 0x00, 0x20, 0x01, 0x48, 0x23, 0x44,
0x29, 0x42, 0x44, 0x42, 0x02, 0x00, 0x10, 0x44,
0x14, 0x44, 0x04, 0x90, 0x48, 0x40, 0x88, 0x81,
0x41, 0x32, 0x18, 0x40, 0x18, 0x18, 0x02, 0x00,
0x18, 0x20, 0x03, 0x48, 0x12, 0xc8, 0x20, 0x11,
0xa2, 0x14, 0x80, 0x42, 0xaa, 0x34, 0x60, 0x22,
0x60, 0xa2, 0x92, 0x27, 0xb9, 0xc0, 0x45, 0x44,
0x13, 0x21, 0x14, 0x44, 0x04, 0x89, 0x82, 0x02,
0x80, 0x11, 0x41, 0x08, 0x00, 0x00, 0x00, 0x00,
0xa4, 0x10, 0x62, 0x24, 0x00, 0x00, 0x80, 0x84,
0x81, 0xa2, 0x41, 0x81, 0x16, 0x82, 0x04, 0x28,
0x40, 0x98, 0x18, 0x21, 0x80, 0x21, 0xa8, 0xa1,
0x00, 0x42, 0x10, 0x08, 0x40, 0x08, 0x84, 0x2f,
0xfd, 0x0d, 0x82, 0x81, 0x00, 0x1d, 0x12, 0x22,
0x00, 0x85, 0x81, 0x01, 0x28, 0x80, 0x21, 0x82,
0x01, 0x44, 0x44, 0x16, 0x62, 0x24, 0x43, 0x01,
0x48, 0x44, 0x00, 0x40, 0x0a, 0x42, 0x89, 0x02,
0x21, 0x80, 0x42, 0x02, 0xc0, 0x62, 0x10, 0x28,
0x91, 0x18, 0x80, 0x21, 0x22, 0x81, 0x04, 0x40,
0x28, 0x61, 0x28, 0x00, 0x00, 0xf0, 0xe5, 0x85,
0x20, 0x05, 0x49, 0x02, 0x62, 0x90, 0x29, 0x2b,
0x12, 0x48, 0x84, 0x4d, 0x4a, 0x85, 0x24, 0x94,
0x22, 0x1c, 0x84, 0xc4, 0x42, 0x18, 0x00, 0x81,
0x30, 0x42, 0x44, 0x80, 0x01, 0x18, 0xc0, 0x48,
0x00, 0x84, 0x00, 0x48, 0x80, 0x02, 0x21, 0x20,
0x14, 0x22, 0x64, 0x88, 0x46, 0x02, 0xa2, 0x28,
0xca, 0x02, 0xc6, 0x02, 0x42, 0x84, 0x40, 0x88,
0x08, 0xff, 0x87, 0x02, 0x00, 0x10, 0x04, 0x44,
0x00, 0x00, 0x28, 0x48, 0x80, 0x02, 0x00, 0x00,
0x00, 0x83, 0x14, 0x08, 0x58, 0x10, 0x18, 0x28,
0x24, 0xc2, 0x28, 0x00, 0x8c, 0x22, 0x04, 0x00,
0x00, 0x48, 0x80, 0x24, 0x82, 0x24, 0xa2, 0x48,
0x48, 0x28, 0x16, 0x82, 0x0d, 0x48, 0x18, 0x48,
0x89, 0x04, 0x23, 0xc4, 0x48, 0xae, 0x48, 0x20,
0x11, 0x34, 0x14, 0x22, 0x12, 0x21, 0x1c, 0x31,
0x1a, 0x26, 0xe1, 0x84, 0x82, 0x42, 0x78, 0x28,
0x01, 0x00, 0x20, 0x02, 0x12, 0x41, 0x12, 0x49,
0x14, 0x88, 0x84, 0x14, 0xa8, 0x42, 0x10, 0xc2,
0x28, 0x21, 0xb0, 0x12, 0x24, 0x24, 0x41, 0x02,
0x20, 0x11, 0x02, 0x84, 0x90, 0xd8, 0x82, 0x23,
0x01, 0x88, 0x28, 0x00, 0x58, 0x28, 0x21, 0x80,
0x04, 0x88, 0x3f, 0xa7, 0x04, 0x15, 0x08, 0x19,
0x26, 0x14, 0x01, 0x00, 0x80, 0x01, 0x2d, 0x44,
0x90, 0x28, 0x80, 0x04, 0x00, 0x41, 0x8c, 0x04,
0x25, 0x02, 0x4d, 0x12, 0x80, 0x2c, 0x81, 0x82,
0x01, 0x78, 0x46, 0x01, 0x40, 0x18, 0xa8, 0x41,
0x00, 0x42, 0x00, 0x00, 0x60, 0x44, 0x80, 0x02,
0x92, 0x32, 0x00, 0x60, 0x84, 0x70, 0x42, 0x08,
0x9e, 0x83, 0xa0, 0x41, 0x4f, 0x48, 0x74, 0x21,
0x75, 0x44, 0xf4, 0x45, 0x14, 0xe0, 0x81, 0xf2,
0x2a, 0x38, 0x9f, 0x26, 0xa4, 0x76, 0x87, 0x24,
0x8f, 0x81, 0x42, 0x61, 0x11, 0x46, 0x12, 0x02,
0x65, 0x84, 0x84, 0x11, 0xac, 0x41, 0x46, 0x14,
0x08, 0x12, 0x8d, 0x48, 0x29, 0xc5, 0x48, 0x8f,
0x85, 0x34, 0x28, 0x28, 0x8f, 0x24, 0x91, 0x4a,
0x2a, 0xa4, 0x46, 0x24, 0x23, 0x91, 0x42, 0x58,
0x42, 0x5e, 0x42, 0x2b, 0x15, 0x6e, 0x12, 0xea,
0xac, 0x94, 0xca, 0xac, 0x81, 0x9a, 0xa1, 0x95,
0x2b, 0x8c, 0x4e, 0x98, 0xa7, 0x24, 0x6e, 0x62,
0x46, 0xe8, 0x88, 0x04, 0x24, 0x8b, 0x4c, 0x87,
0x32, 0x14, 0x54, 0x85, 0x98, 0x34, 0x40, 0x82,
0x42, 0x21, 0x41, 0x38, 0x18, 0x28, 0x2b, 0x56,
0xd3, 0xd2, 0x2a, 0x21, 0x22, 0x61, 0x24, 0x3a,
0x01, 0x41, 0x25, 0x92, 0x1c, 0x84, 0x6d, 0x4c,
0x4f, 0x44, 0x14, 0x68, 0x24, 0x87, 0x87, 0x80,
0xc2, 0x32, 0xad, 0x18, 0x48, 0x1a, 0x02, 0x29,
0x13, 0x88, 0x02, 0x16, 0x22, 0x62, 0x82, 0x89,
0xb4, 0x18, 0xb6, 0x18, 0x21, 0xb1, 0x12, 0xe2,
0x24, 0x81, 0xa8, 0x6a, 0x16, 0xe2, 0x2b, 0xe2,
0x81, 0xf4, 0x18, 0x18, 0x29, 0x94, 0x28, 0x2d,
0x48, 0x8f, 0x82, 0x0a, 0x6f, 0xa7, 0x08, 0x5b,
0x14, 0x20, 0xa1, 0x62, 0x1e, 0x21, 0x67, 0x42,
0x9f, 0x22, 0x05, 0x1c, 0x52, 0x82, 0x5f, 0x92,
0x04, 0x46, 0x4e, 0xd8, 0x11, 0xa4, 0x54, 0x40,
0x52, 0x64, 0x16, 0xe4, 0xc4, 0x44, 0x9c, 0x4c,
0x4a, 0x51, 0x22, 0x2b, 0x44, 0x00, 0x8f, 0x24,
0xb3, 0x22, 0xc1, 0x48, 0x3a, 0x44, 0xe8, 0x82,
0x61, 0xa4, 0x12, 0x84, 0x2a, 0x61, 0x24, 0x2b,
0x26, 0x29, 0x94, 0x38, 0x84, 0x84, 0x25, 0xe8,
0x28, 0xa6, 0x12, 0xa2, 0x23, 0x44, 0x82, 0x97,
0x48, 0x18, 0x88, 0x81, 0x62, 0x81, 0x4a, 0xc4,
0x98, 0x3f, 0x97, 0x0b, 0x13, | |
# <gh_stars>0
"""This module implements plugin base."""
import os
import sys
import glob
from collections import namedtuple
import logging
import types
import time
from queue import Empty, Full
from swak.config import get_exe_dir
from swak.exception import UnsupportedPython
from swak.const import PLUGINDIR_PREFIX
from swak.formatter import StdoutFormatter
from swak.util import get_plugin_module_name, stop_iter_when_signalled
from swak.data import OneDataStream
# Seconds to wait when putting data into a bounded queue.
PUT_WAIT_TIME = 1.0

# Plugin type prefixes: (i)nput, (p)arser, (m)odifier, (o)utput.
PREFIX = ['i', 'p', 'm', 'o']

# Metadata for a discovered plugin: file name, plugin name, directory name,
# class name, description and the imported module object.
PluginInfo = namedtuple('PluginInfo', ['fname', 'pname', 'dname', 'cname',
                                       'desc', 'module'])
class Plugin(object):
    """Base class for all swak plugins.

    Implements a simple lifecycle: ``start`` -> ``stop`` -> ``shutdown``.
    Subclasses customize behavior through the ``_start`` / ``_stop`` /
    ``_shutdown`` hooks.
    """

    def __init__(self):
        """Initialize lifecycle flags and data tag."""
        self.started = False
        self.shutdowned = False
        self.tag = None

    def set_tag(self, tag):
        """Remember the data tag for this plugin."""
        self.tag = tag

    def start(self):
        """Start plugin.

        Called when the task starts after processing the setting.
        Resources such as files and threads used by the plugin are
        created here (via the ``_start`` hook).
        """
        assert not self.started
        logging.info("starting plugin {}".format(self))
        self._start()
        self.started = True

    def _start(self):
        """Hook for subclass start logic."""
        pass

    def stop(self):
        """Stop plugin.

        Called when the task is preparing to shutdown. Only do simple
        things that do not fail here, such as setting a thread stop flag.
        """
        logging.info("stopping plugin {}".format(self))
        assert self.started
        self._stop()
        self.started = False

    def _stop(self):
        """Hook for subclass stop logic."""
        pass

    def shutdown(self):
        """Shutdown plugin.

        Called when the agent is completely done. Close or remove any
        files, threads, etc. that were created in ``start``.
        """
        logging.info("shutting down plugin {}".format(self))
        assert not self.started  # Stop first
        assert not self.shutdowned
        self._shutdown()
        self.shutdowned = True

    def _shutdown(self):
        """Hook for subclass shutdown logic."""
        pass
class Input(Plugin):
    """Base class for input plugins."""

    def __init__(self):
        """Init.

        Args:
            tag (str): data tag.
        """
        super(Input, self).__init__()
        self.encoding = None
        self.proxy = False

    def read(self, stop_event):
        """Generate data stream.

        Blocks until all data is exhausted.

        Args:
            stop_event (threading.Event): Stop event

        Yield:
            tuple: (tag, DataStream)
        """
        logging.debug("Input.read")
        stream = self.generate_stream(self.generate_data, stop_event)
        for tag, ds in stream:
            yield tag, ds

    def generate_data(self):
        """Generate data.

        Yields:
            tuple: time, data
        """
        raise NotImplementedError()

    def generate_stream(self, gen_data, stop_event):
        """Generate data stream from a data generator.

        Inefficient default implementation: wraps each single datum in
        its own ``OneDataStream``.

        Args:
            gen_data (function): Data generator function.
            stop_event (threading.Event): Stop event

        Yields:
            tuple: (tag, DataStream)
        """
        logging.debug("Input.generate_stream gen_data {}".format(gen_data))
        for utime, data in gen_data(stop_event):
            # Omit blank data that would have been generated under
            # inappropriate input conditions.
            if len(data) != 0:
                logging.warning("yield inefficient OneDataStream from {}. "
                                "Implement optimized generate_stream!.".
                                format(self))
                yield self.tag, OneDataStream(utime, data)
class ProxyInput(Input):
    """Input proxy class.

    Used in the aggregated thread model: instead of reading a source
    itself, it drains data streams from receive queues filled elsewhere.
    """

    def __init__(self):
        """Init."""
        super(ProxyInput, self).__init__()
        # Maps data tag -> receiving queue.
        self.recv_queues = {}
        self.proxy = True

    def append_recv_queue(self, tag, queue):
        """Append receive queue.

        Args:
            tag (str): data tag.
            queue (Queue): Receiving queue
        """
        # BUGFIX: the previous check was ``queue not in self.recv_queues``,
        # which tests the dict *keys* (tags) and therefore never detected a
        # duplicated queue object. Check the stored queues (values) instead.
        assert queue not in self.recv_queues.values(), "The queue has "\
            "already been appended."
        self.recv_queues[tag] = queue

    def generate_stream(self, gen_data, stop_event):
        """Generate data stream from data generator.

        Note: Yield (None, None) tuple if the queue is empty to give agent a
            chance to flush,

        Args:
            gen_data: Data generator.
            stop_event (threading.Event): Stop event.

        Yields:
            tuple: (tag, DataStream)
        """
        while True:
            # Loop each receive queue
            for tag, queue in self.recv_queues.items():
                while True:
                    try:
                        stop_iter_when_signalled(stop_event)
                        ds = queue.get_nowait()
                    except Empty:
                        # Give a chance to flush.
                        yield None, None
                        # Process next queue
                        break
                    else:
                        logging.debug("yield ds")
                        yield tag, ds
class RecordInput(Input):
    """Base class for input plugins which emit records.

    Usually for a generative input plugin which knows how to generate
    data and **emit** records directly to the data router (no following
    parser is needed).

    Function to be implemented:
        ``generate_record``
    """

    def generate_data(self, stop_event):
        """Yield timestamped records produced by ``generate_record``.

        Args:
            stop_event (threading.Event): Stop event

        Yields:
            tuple: time, data
        """
        logging.debug("RecordInput.generate_data")
        records = self.generate_record()
        for record in records:
            stop_iter_when_signalled(stop_event)
            yield time.time(), record

    def generate_record(self):
        """Generate records.

        May be written in a synchronous or asynchronous manner. To make
        it work asynchronously, return an empty record immediately under
        blocking situations.

        Note: When operating synchronously, flushing with time interval
            does not work.

        Yields:
            dict: A record.
        """
        raise NotImplementedError()
class TextInput(Input):
    """Base class for input plugins which read text and emit lines.

    Usually for a plugin which reads text from a source, separates it by
    lines and **feeds** them to the following parser.

    Function to be implemented:
        ``generate_line``
    """

    def __init__(self):
        """Init."""
        super(TextInput, self).__init__()
        self.parser = None
        self.filter_fn = None
        self.encoding = None

    def set_encoding(self, encoding):
        """Set encoding of input source.

        Args:
            encoding (str): Encoding of input source.
        """
        self.encoding = encoding

    def set_parser(self, parser):
        """Set parser for this TextInput plugin."""
        self.parser = parser

    def generate_data(self, stop_event):
        """Generate data by reading lines from the source.

        Applies explicit encoding, filter and parser when they are set.

        Args:
            stop_event (threading.Event): Stop event

        Yields:
            tuple: time, data
        """
        for raw in self.generate_line():
            stop_iter_when_signalled(stop_event)
            # Decode only when an explicit encoding was configured.
            line = raw.decode(self.encoding) if self.encoding is not None \
                else raw
            # Skip lines rejected by the filter function.
            if self.filter_fn is not None and not self.filter_fn(line):
                continue
            data = line if self.parser is None else self.parser.parse(line)
            yield time.time(), data

    def generate_line(self):
        """Generate lines.

        May be written in a synchronous or asynchronous manner. To make
        it work asynchronously, return an empty record immediately under
        blocking situations.

        Note: When operating synchronously, flushing with time interval
            does not work.

        Yields:
            str: A text line.
        """
        raise NotImplementedError()

    def set_filter_func(self, func):
        """Install the line filter predicate."""
        self.filter_fn = func
class Parser(Plugin):
    """Base class for parser plugins.

    Subclasses must implement ``parse``.
    """

    def parse(self, line):
        """Parse a single text line into a record.

        Args:
            line (str): Line to parse.

        Returns:
            dict: Parsed result.
        """
        raise NotImplementedError()
class Modifier(Plugin):
    """Base class for modifier plugins.

    Subclasses must implement ``modify``.
    """

    def prepare_for_stream(self, tag, ds):
        """Prepare to modify a data stream (no-op by default).

        Args:
            tag (str): data tag
            ds (datatream): data stream
        """
        pass

    def modify(self, tag, utime, record):
        """Modify a single data record.

        Args:
            tag (str): data tag
            utime (float): data time stamp.
            record (dict): data record

        Returns:
            If modified
                float: Modified time
                record: Modified record
            If removed
                None
        """
        raise NotImplementedError()
class Output(Plugin):
"""Base class for output plugin.
Following methods should be implemented:
write_stream, write_chunk
"""
def __init__(self, formatter, abuffer):
    """Init.

    Args:
        formatter (Formatter): Swak formatter for this output.
        abuffer (Buffer): Swak buffer for this output.
    """
    super(Output, self).__init__()
    self.formatter = formatter
    self.buffer = abuffer
    self.proxy = False
    # Back-link the buffer to this output if it is not bound yet.
    if abuffer is not None and abuffer.output is None:
        abuffer.output = self
def _shutdown(self):
    """Shut down the output, flushing the buffer first if required."""
    logging.info("Output._shutdown")
    buf = self.buffer
    if buf is not None and buf.flush_at_shutdown:
        logging.info("need flushing at shutdown for {} buffer {}".
                     format(self, buf))
        self.flush(True)
def flush(self, flush_all=False):
    """Flush the attached buffer, if any.

    Args:
        flush_all (bool): Whether flush all or just one.
    """
    if self.buffer is None:
        return
    logging.debug("Output.flush")
    self.buffer.flushing(flush_all)
def set_buffer(self, buffer):
    """Set output buffer."""
    self.buffer = buffer
def _start(self):
    """Start the attached buffer, if any."""
    if self.buffer is None:
        return
    self.buffer.start()
def _stop(self):
    """Stop the attached buffer, if any."""
    if self.buffer is None:
        return
    self.buffer.stop()
def emit_stream(self, tag, ds, stop_event):
    """Emit data stream to queue or directly to output target.

    Each (time, record) pair is formatted; the result is appended to the
    buffer when one is attached, otherwise written out immediately.

    Args:
        tag (str): Data tag.
        ds (datatream): Data stream.
        stop_event (threading.Event): Stop event.
            NOTE(review): currently unused in this method — kept for
            interface symmetry; confirm whether it should be honored.

    Returns:
        int: Adding size of the stream.
    """
    # BUGFIX: the debug message previously read "Output.handle_stream",
    # which does not match this method's name and misleads log readers.
    logging.debug("Output.emit_stream")
    adding_size = 0
    for utime, record in ds:
        dtime = self.formatter.timestamp_to_datetime(utime)
        formatted = self.formatter.format(tag, dtime, record)
        if self.buffer is not None:
            adding_size += self.buffer.append(formatted)
        else:
            self.write(formatted)
    return adding_size
def write(self, bulk):
    """Write a bulk unless it is empty.

    NOTE: A bulk can have the following types:
    - str: When there is no buffer
    - bytearray: When there is a buffer of binary format
    - list: When there is a buffer of string format

    The output must support various bulk types depending on the presence
    and supported formats of the buffer.

    Args:
        bulk: Bulk to write (str, bytearray or list).
    """
    if len(bulk) > 0:
        logging.debug("Output.write")
        self._write(bulk)
def _write(self, bulk):
    """Write a bulk to the output (to be implemented by subclasses).

    NOTE: A bulk can have the following types:
    - str: When there is no buffer
    - bytearray: When there is a buffer of binary format
    - list: When there is a buffer of string format

    An output plugin must support various bulk types depending on the
    presence and supported formats of the buffer.

    Args:
        bulk: Bulk to write (str, bytearray or list).
    """
    raise NotImplementedError()
def may_chunking(self):
"""Chunking if needed."""
if | |
= 'gmrerr',
format = 'E',
array = numpy.sqrt(2)*0.1*numpy.ones(100))
clean = pyfits.Column(name = 'Clean', format = 'K', array = numpy.ones(100))
backgr = pyfits.Column(name = 'SEx_BackGr', format = 'E', array = numpy.ones(100))
maxval = pyfits.Column(name = 'SEx_MaxVal', format = 'E', array = numpy.ones(100))
flag = pyfits.Column(name = 'SEx_Flag', format = 'K', array = numpy.zeros(100))
cat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs([mags, magerrs, clean,
backgr, maxval, flag,
sdss, sdsserr, sdsscolor,
sdsscolorerr])))
allfits = standardCalibration('TestCluster', filterName, cat, photometry_db = None, plotdir = None)
self.assertEquals(len(allfits), 1)
fitresult = allfits[0]
self.assertTrue(fitresult.fixedcolor)
self.assertTrue(numpy.abs(fitresult.zp + targetZP) < 0.25)
###
def testStdDatabase(self):
    # Standard calibration with a database: expect exactly one photometry
    # entry and one calibration entry, cross-linked and keyed by cluster
    # and filter.
    clustername = 'TestCluster'
    filtername = 'SUBARU-10_2-1-W-J-V'
    # Synthetic catalog: 100 stars drawn from the Pickles stellar library,
    # with instrumental magnitudes offset by a known zeropoint.
    pickles = ldac.openObjectFile('Pickles.cat', 'PICKLES')
    pickles_sdss = ldac.openObjectFile('Pickles.cat', 'SDSS')
    sample = numpy.random.randint(0, len(pickles), 100)
    targetZP = 27.15
    # NOTE(review): seqnr is constructed but never added to the catalog below
    seqnr = pyfits.Column(name = 'SeqNr', format = 'K', array = numpy.arange(100))
    mags = pyfits.Column(name = 'SEx_MAG_APER1-%s' % filtername,
                         format = 'E',
                         array = pickles[filtername][sample] + targetZP)
    magerrs = pyfits.Column(name = 'SEx_MAGERR_APER1-%s' % filtername,
                            format = 'E',
                            array = 0.05 * numpy.ones(100))
    sdss = pyfits.Column(name = 'gmag', format = 'E', array = pickles_sdss['gp'][sample])
    sdsserr = pyfits.Column(name = 'gerr', format = 'E', array = 0.1 * numpy.ones(100))
    sdsscolor = pyfits.Column(name = 'gmr', format = 'E', array = pickles_sdss['gp'][sample] - pickles_sdss['rp'][sample])
    # Color error combines the two 0.1 mag errors in quadrature.
    sdsscolorerr = pyfits.Column(name = 'gmrerr',
                                 format = 'E',
                                 array = numpy.sqrt(2)*0.1*numpy.ones(100))
    clean = pyfits.Column(name = 'Clean', format = 'K', array = numpy.ones(100))
    backgr = pyfits.Column(name = 'SEx_BackGr', format = 'E', array = numpy.ones(100))
    maxval = pyfits.Column(name = 'SEx_MaxVal', format = 'E', array = numpy.ones(100))
    flag = pyfits.Column(name = 'SEx_Flag', format = 'K', array = numpy.zeros(100))
    cat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs([mags, magerrs, clean,
                                                                      backgr, maxval, flag,
                                                                      sdss, sdsserr, sdsscolor,
                                                                      sdsscolorerr])))
    allfits = standardCalibration(clustername, filtername, cat, photometry_db = self.db, plotdir = None)
    # One photometry entry recorded with exactly these fields...
    self.assertEquals(len(self.db.photoentries), 1)
    photocalib = self.db.photoentries[0]
    self.assertEquals(sorted('cluster fitresults filter'.split()), sorted(photocalib.fields.keys()))
    self.assertEquals(photocalib.cluster, clustername)
    self.assertEquals(photocalib.filter, filtername)
    self.assertEquals(photocalib.fitresults, allfits[0])
    # ...and one calibration entry referencing that photometry entry.
    self.assertEquals(len(self.db.calibrations), 1)
    calib = self.db.calibrations[0]
    self.assertEquals(sorted('cluster calibration filter'.split()), sorted(calib.fields.keys()))
    self.assertEquals(calib.cluster, clustername)
    self.assertEquals(calib.calibration, photocalib)
    self.assertEquals(calib.filter, filtername)
####
def testAltMagDatabase(self):
    # Same as testStdDatabase, but using ISO magnitudes (fluxtype='ISO')
    # instead of the default APER1 columns; database entries must be
    # identical in structure.
    clustername = 'TestCluster'
    filtername = 'SUBARU-10_2-1-W-J-V'
    pickles = ldac.openObjectFile('Pickles.cat', 'PICKLES')
    pickles_sdss = ldac.openObjectFile('Pickles.cat', 'SDSS')
    sample = numpy.random.randint(0, len(pickles), 100)
    targetZP = 27.15
    # NOTE(review): seqnr is constructed but never added to the catalog below
    seqnr = pyfits.Column(name = 'SeqNr', format = 'K', array = numpy.arange(100))
    mags = pyfits.Column(name = 'SEx_MAG_ISO-%s' % filtername,
                         format = 'E',
                         array = pickles[filtername][sample] + targetZP)
    magerrs = pyfits.Column(name = 'SEx_MAGERR_ISO-%s' % filtername,
                            format = 'E',
                            array = 0.05 * numpy.ones(100))
    sdss = pyfits.Column(name = 'gmag', format = 'E', array = pickles_sdss['gp'][sample])
    sdsserr = pyfits.Column(name = 'gerr', format = 'E', array = 0.1 * numpy.ones(100))
    sdsscolor = pyfits.Column(name = 'gmr', format = 'E', array = pickles_sdss['gp'][sample] - pickles_sdss['rp'][sample])
    sdsscolorerr = pyfits.Column(name = 'gmrerr',
                                 format = 'E',
                                 array = numpy.sqrt(2)*0.1*numpy.ones(100))
    clean = pyfits.Column(name = 'Clean', format = 'K', array = numpy.ones(100))
    backgr = pyfits.Column(name = 'SEx_BackGr', format = 'E', array = numpy.ones(100))
    maxval = pyfits.Column(name = 'SEx_MaxVal', format = 'E', array = numpy.ones(100))
    flag = pyfits.Column(name = 'SEx_Flag', format = 'K', array = numpy.zeros(100))
    cat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs([mags, magerrs, clean,
                                                                      backgr, maxval, flag,
                                                                      sdss, sdsserr, sdsscolor,
                                                                      sdsscolorerr])))
    allfits = standardCalibration(clustername, filtername, cat, fluxtype = 'ISO', photometry_db = self.db, plotdir = None)
    # One photometry entry recorded...
    self.assertEquals(len(self.db.photoentries), 1)
    photocalib = self.db.photoentries[0]
    self.assertEquals(sorted('cluster fitresults filter'.split()), sorted(photocalib.fields.keys()))
    self.assertEquals(photocalib.cluster, clustername)
    self.assertEquals(photocalib.filter, filtername)
    self.assertEquals(photocalib.fitresults, allfits[0])
    # ...and one calibration entry referencing it.
    self.assertEquals(len(self.db.calibrations), 1)
    calib = self.db.calibrations[0]
    self.assertEquals(sorted('cluster calibration filter'.split()), sorted(calib.fields.keys()))
    self.assertEquals(calib.cluster, clustername)
    self.assertEquals(calib.calibration, photocalib)
    self.assertEquals(calib.filter, filtername)
#####
def testOtherSpecificationsDatabase(self):
    # Extra key/value pairs passed via ``specification`` must be stored as
    # additional fields on both the photometry and calibration entries.
    clustername = 'TestCluster'
    filtername = 'SUBARU-10_2-1-W-J-V'
    pickles = ldac.openObjectFile('Pickles.cat', 'PICKLES')
    pickles_sdss = ldac.openObjectFile('Pickles.cat', 'SDSS')
    sample = numpy.random.randint(0, len(pickles), 100)
    targetZP = 27.15
    # NOTE(review): seqnr is constructed but never added to the catalog below
    seqnr = pyfits.Column(name = 'SeqNr', format = 'K', array = numpy.arange(100))
    mags = pyfits.Column(name = 'SEx_MAG_APER1-%s' % filtername,
                         format = 'E',
                         array = pickles[filtername][sample] + targetZP)
    magerrs = pyfits.Column(name = 'SEx_MAGERR_APER1-%s' % filtername,
                            format = 'E',
                            array = 0.05 * numpy.ones(100))
    sdss = pyfits.Column(name = 'gmag', format = 'E', array = pickles_sdss['gp'][sample])
    sdsserr = pyfits.Column(name = 'gerr', format = 'E', array = 0.1 * numpy.ones(100))
    sdsscolor = pyfits.Column(name = 'gmr', format = 'E', array = pickles_sdss['gp'][sample] - pickles_sdss['rp'][sample])
    sdsscolorerr = pyfits.Column(name = 'gmrerr',
                                 format = 'E',
                                 array = numpy.sqrt(2)*0.1*numpy.ones(100))
    clean = pyfits.Column(name = 'Clean', format = 'K', array = numpy.ones(100))
    backgr = pyfits.Column(name = 'SEx_BackGr', format = 'E', array = numpy.ones(100))
    maxval = pyfits.Column(name = 'SEx_MaxVal', format = 'E', array = numpy.ones(100))
    flag = pyfits.Column(name = 'SEx_Flag', format = 'K', array = numpy.zeros(100))
    cat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs([mags, magerrs, clean,
                                                                      backgr, maxval, flag,
                                                                      sdss, sdsserr, sdsscolor,
                                                                      sdsscolorerr])))
    allfits = standardCalibration(clustername, filtername, cat, photometry_db = self.db, plotdir = None,
                                  specification = {'myspec' :'custom'})
    # Photometry entry carries the extra 'myspec' field...
    self.assertEquals(len(self.db.photoentries), 1)
    photocalib = self.db.photoentries[0]
    self.assertEquals(sorted('cluster fitresults filter myspec'.split()), sorted(photocalib.fields.keys()))
    self.assertEquals(photocalib.cluster, clustername)
    self.assertEquals(photocalib.filter, filtername)
    self.assertEquals(photocalib.fitresults, allfits[0])
    self.assertEquals(photocalib.myspec, 'custom')
    # ...and so does the calibration entry.
    self.assertEquals(len(self.db.calibrations), 1)
    calib = self.db.calibrations[0]
    self.assertEquals(sorted('cluster calibration filter myspec'.split()), sorted(calib.fields.keys()))
    self.assertEquals(calib.cluster, clustername)
    self.assertEquals(calib.calibration, photocalib)
    self.assertEquals(calib.filter, filtername)
    self.assertEquals(calib.myspec, 'custom')
#####
def testThreeSec(self):
    # Three-second calibration path: uses MAG_AUTO columns, assembles the
    # filter name from instrument + band, and stores the database entries
    # under "<filter>_3sec".
    clustername = 'TestCluster'
    filtername = 'SUBARU-10_2-1-W-J-V'
    pickles = ldac.openObjectFile('Pickles.cat', 'PICKLES')
    pickles_sdss = ldac.openObjectFile('Pickles.cat', 'SDSS')
    sample = numpy.random.randint(0, len(pickles), 100)
    targetZP = 27.15
    # NOTE(review): seqnr is constructed but never added to the catalog below
    seqnr = pyfits.Column(name = 'SeqNr', format = 'K', array = numpy.arange(100))
    mags = pyfits.Column(name = 'SEx_MAG_AUTO',
                         format = 'E',
                         array = pickles[filtername][sample] + targetZP)
    magerrs = pyfits.Column(name = 'SEx_MAGERR_AUTO',
                            format = 'E',
                            array = 0.05 * numpy.ones(100))
    sdss = pyfits.Column(name = 'gmag', format = 'E', array = pickles_sdss['gp'][sample])
    sdsserr = pyfits.Column(name = 'gerr', format = 'E', array = 0.1 * numpy.ones(100))
    sdsscolor = pyfits.Column(name = 'gmr', format = 'E', array = pickles_sdss['gp'][sample] - pickles_sdss['rp'][sample])
    sdsscolorerr = pyfits.Column(name = 'gmrerr',
                                 format = 'E',
                                 array = numpy.sqrt(2)*0.1*numpy.ones(100))
    clean = pyfits.Column(name = 'Clean', format = 'K', array = numpy.ones(100))
    backgr = pyfits.Column(name = 'SEx_BackGr', format = 'E', array = numpy.ones(100))
    maxval = pyfits.Column(name = 'SEx_MaxVal', format = 'E', array = numpy.ones(100))
    flag = pyfits.Column(name = 'SEx_Flag', format = 'K', array = numpy.zeros(100))
    cat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs([mags, magerrs, clean,
                                                                      backgr, maxval, flag,
                                                                      sdss, sdsserr, sdsscolor,
                                                                      sdsscolorerr])))
    # NOTE(review): stray debug print left in the test
    print self.db
    fit = threeSecondCalibration(clustername, 'W-J-V', 'SUBARU-10_2-1', cat, photometry_db = self.db, plotdir = None)
    # Photometry entry stored under the "_3sec" filter suffix...
    self.assertEquals(len(self.db.photoentries), 1)
    photocalib = self.db.photoentries[0]
    self.assertEquals(sorted('cluster fitresults filter'.split()), sorted(photocalib.fields.keys()))
    self.assertEquals(photocalib.cluster, clustername)
    self.assertEquals(photocalib.filter, '%s_3sec' % filtername)
    self.assertEquals(photocalib.fitresults, fit)
    # ...and the calibration entry likewise.
    self.assertEquals(len(self.db.calibrations), 1)
    calib = self.db.calibrations[0]
    self.assertEquals(sorted('cluster calibration filter'.split()), sorted(calib.fields.keys()))
    self.assertEquals(calib.cluster, clustername)
    self.assertEquals(calib.calibration, photocalib)
    self.assertEquals(calib.filter, '%s_3sec' % filtername)
#####
def testThreeSecSpecification(self):
    # Three-second calibration with an extra ``specification`` entry: the
    # custom 'myspec2' field must be stored on both database entries.
    #
    # BUGFIX: this method was named ``testThreeSec``, duplicating the
    # preceding test method; the later definition silently shadowed the
    # earlier one, so only one of the two ever ran under unittest
    # discovery. Renamed to restore both tests. A stray ``print self.db``
    # debug statement was also removed.
    clustername = 'TestCluster'
    filtername = 'SUBARU-10_2-1-W-J-V'
    pickles = ldac.openObjectFile('Pickles.cat', 'PICKLES')
    pickles_sdss = ldac.openObjectFile('Pickles.cat', 'SDSS')
    sample = numpy.random.randint(0, len(pickles), 100)
    targetZP = 27.15
    seqnr = pyfits.Column(name = 'SeqNr', format = 'K', array = numpy.arange(100))
    mags = pyfits.Column(name = 'SEx_MAG_AUTO',
                         format = 'E',
                         array = pickles[filtername][sample] + targetZP)
    magerrs = pyfits.Column(name = 'SEx_MAGERR_AUTO',
                            format = 'E',
                            array = 0.05 * numpy.ones(100))
    sdss = pyfits.Column(name = 'gmag', format = 'E', array = pickles_sdss['gp'][sample])
    sdsserr = pyfits.Column(name = 'gerr', format = 'E', array = 0.1 * numpy.ones(100))
    sdsscolor = pyfits.Column(name = 'gmr', format = 'E', array = pickles_sdss['gp'][sample] - pickles_sdss['rp'][sample])
    sdsscolorerr = pyfits.Column(name = 'gmrerr',
                                 format = 'E',
                                 array = numpy.sqrt(2)*0.1*numpy.ones(100))
    clean = pyfits.Column(name = 'Clean', format = 'K', array = numpy.ones(100))
    backgr = pyfits.Column(name = 'SEx_BackGr', format = 'E', array = numpy.ones(100))
    maxval = pyfits.Column(name = 'SEx_MaxVal', format = 'E', array = numpy.ones(100))
    flag = pyfits.Column(name = 'SEx_Flag', format = 'K', array = numpy.zeros(100))
    cat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs([mags, magerrs, clean,
                                                                      backgr, maxval, flag,
                                                                      sdss, sdsserr, sdsscolor,
                                                                      sdsscolorerr])))
    fit = threeSecondCalibration(clustername, 'W-J-V', 'SUBARU-10_2-1', cat, photometry_db = self.db, plotdir = None,
                                 specification = {'myspec2' : 'custom'})
    # Photometry entry carries the custom field and the _3sec suffix...
    self.assertEquals(len(self.db.photoentries), 1)
    photocalib = self.db.photoentries[0]
    self.assertEquals(sorted('cluster fitresults filter myspec2'.split()), sorted(photocalib.fields.keys()))
    self.assertEquals(photocalib.cluster, clustername)
    self.assertEquals(photocalib.filter, '%s_3sec' % filtername)
    self.assertEquals(photocalib.fitresults, fit)
    self.assertEquals(photocalib.myspec2, 'custom')
    # ...and the calibration entry likewise.
    self.assertEquals(len(self.db.calibrations), 1)
    calib = self.db.calibrations[0]
    self.assertEquals(sorted('cluster calibration filter myspec2'.split()), sorted(calib.fields.keys()))
    self.assertEquals(calib.cluster, clustername)
    self.assertEquals(calib.calibration, photocalib)
    self.assertEquals(calib.filter, '%s_3sec' % filtername)
    self.assertEquals(calib.myspec2, 'custom')
##############
class TestSpecialCalib(unittest.TestCase):
def setUp(self):
self.db = TestingDatabase()
self.maindir = '/tmp'
self.cluster = 'testcluster'
self.stdfilter = 'K'
self.filter = 'SPECIAL-0-1-%s' | |
<reponame>nursix/rlpptm
# -*- coding: utf-8 -*-
"""
Infection test result reporting for RLPPTM template
@license: MIT
"""
import base64
import datetime
import hashlib
import json
import requests
import secrets
import sys
import uuid
from gluon import current, Field, IS_EMPTY_OR, IS_IN_SET, SQLFORM, URL, \
BUTTON, DIV, FORM, H5, INPUT, TABLE, TD, TR
from core import IS_ONE_OF, S3CustomController, S3Method, \
s3_date, s3_mark_required, s3_qrcode_represent, \
JSONERRORS
from .dcc import DCC
from .vouchers import RLPCardLayout
# Display names for the reporting target (German Corona-Warn-App);
# interpolated into user-facing messages, e.g. T("... %(system)s") % CWA.
CWA = {"system": "RKI / Corona-Warn-App",
       "app": "Corona-Warn-App",
       }
# =============================================================================
class TestResultRegistration(S3Method):
""" REST Method to Register Test Results """
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
    """
    Page-render entry point for REST interface.

    @param r: the CRUDRequest instance
    @param attr: controller attributes
    """
    # Dispatch table for the supported custom methods
    handlers = {"register": self.register,
                "certify": self.certify,
                "cwaretry": self.cwaretry,
                }
    handler = handlers.get(r.method)
    if handler is not None:
        output = handler(r, **attr)
    else:
        r.error(405, current.ERROR.BAD_METHOD)
        output = {}
    return output
# -------------------------------------------------------------------------
def register(self, r, **attr):
    """
    Register a test result

    @param r: the CRUDRequest instance
    @param attr: controller attributes

    @returns: dict for the view (title/intro/form), or None after a
              redirect has been set via self.next
    """
    # Only interactive GET (render form) and POST (process form) allowed
    if r.http not in ("GET", "POST"):
        r.error(405, current.ERROR.BAD_METHOD)
    if not r.interactive:
        r.error(415, current.ERROR.BAD_FORMAT)

    T = current.T
    db = current.db
    s3db = current.s3db

    auth = current.auth
    request = current.request
    response = current.response
    s3 = response.s3

    settings = current.deployment_settings

    # Page title and intro text
    title = T("Register Test Result")

    # Get intro text from CMS
    # (post "TestResultRegistrationIntro" linked to disease/case_diagnostics)
    ctable = s3db.cms_post
    ltable = s3db.cms_post_module
    join = ltable.on((ltable.post_id == ctable.id) & \
                     (ltable.module == "disease") & \
                     (ltable.resource == "case_diagnostics") & \
                     (ltable.deleted == False))
    query = (ctable.name == "TestResultRegistrationIntro") & \
            (ctable.deleted == False)
    row = db(query).select(ctable.body,
                           join = join,
                           cache = s3db.cache,
                           limitby = (0, 1),
                           ).first()
    intro = row.body if row else None

    # Instantiate Consent Tracker
    consent = s3db.auth_Consent(processing_types=["CWA_ANONYMOUS", "CWA_PERSONAL"])

    table = s3db.disease_case_diagnostics

    # Configure disease_id
    # (offset tracks how many extra fields precede the CWA section)
    field = table.disease_id
    if field.writable:
        default_disease = None
        offset = 1
    else:
        default_disease = field.default
        field.readable = False
        offset = 0

    # Probe date is mandatory
    field = table.probe_date
    requires = field.requires
    if isinstance(requires, IS_EMPTY_OR):
        # Strip the empty-allowed wrapper to make the field required
        field.requires = requires.other

    # Configure demographic_id
    if settings.get_disease_testing_report_by_demographic():
        field = table.demographic_id
        field.readable = field.writable = True
        requires = field.requires
        if isinstance(requires, IS_EMPTY_OR):
            field.requires = requires.other
        offset += 1

    # Configure device_id
    # (only approved+available RAT devices, optionally disease-filtered)
    field = table.device_id
    field.readable = field.writable = True

    dtable = s3db.disease_testing_device
    query = (dtable.device_class == "RAT") & \
            (dtable.approved == True) & \
            (dtable.available == True)
    if default_disease:
        query = (dtable.disease_id == default_disease) & query
    field.requires = IS_ONE_OF(db(query), "disease_testing_device.id",
                               field.represent,
                               )

    cwa_options = (("NO", T("Do not report")),
                   ("ANONYMOUS", T("Issue anonymous contact tracing code")),
                   ("PERSONAL", T("Issue personal test certificate")),
                   )
    formfields = [# -- Test Result --
                  table.site_id,
                  table.disease_id,
                  table.probe_date,
                  table.demographic_id,
                  table.device_id,
                  table.result,

                  # -- Report to CWA --
                  Field("report_to_cwa", "string",
                        requires = IS_IN_SET(cwa_options, sort=False, zero=""),
                        default = "NO",
                        label = T("Report test result to %(system)s") % CWA,
                        ),
                  Field("last_name",
                        # NOTE(review): "<NAME>" looks like a redaction
                        # artifact; presumably should read "Last Name"
                        # (cf. "First Name" below) — confirm against the
                        # upstream source
                        label = T("<NAME>"),
                        ),
                  Field("first_name",
                        label = T("First Name"),
                        ),
                  s3_date("date_of_birth",
                          label = T("Date of Birth"),
                          month_selector = True,
                          ),
                  Field("dcc_option", "boolean",
                        default = False,
                        label = T("Provide Digital %(title)s Certificate") % {"title": "COVID-19 Test"},
                        ),
                  Field("consent",
                        label = "",
                        widget = consent.widget,
                        ),
                  ]

    # Required fields
    required_fields = []

    # Subheadings
    subheadings = ((0, T("Test Result")),
                   (4 + offset, CWA["system"]),
                   )

    # Generate labels (and mark required fields in the process)
    labels, has_required = s3_mark_required(formfields,
                                            mark_required = required_fields,
                                            )
    s3.has_required = has_required

    # Form buttons
    REGISTER = T("Submit")
    buttons = [INPUT(_type = "submit",
                     _value = REGISTER,
                     ),
               ]

    # Construct the form
    response.form_label_separator = ""
    form = SQLFORM.factory(table_name = "test_result",
                           record = None,
                           hidden = {"_next": request.vars._next},
                           labels = labels,
                           separator = "",
                           showid = False,
                           submit_button = REGISTER,
                           delete_label = auth.messages.delete_label,
                           formstyle = settings.get_ui_formstyle(),
                           buttons = buttons,
                           *formfields)

    # Identify form for CSS & JS Validation
    form.add_class("result-register")

    # Add Subheadings
    if subheadings:
        for pos, heading in subheadings[::-1]:
            form[0].insert(pos, DIV(heading, _class="subheading"))

    # Inject scripts
    script = "/%s/static/themes/RLP/js/testresult.js" % r.application
    if script not in s3.scripts:
        s3.scripts.append(script)
    s3.jquery_ready.append("S3EnableNavigateAwayConfirm()")

    if form.accepts(request.vars,
                    current.session,
                    formname = "register",
                    onvalidation = self.validate,
                    ):
        formvars = form.vars

        # Create disease_case_diagnostics record
        # (copy only the fields actually present in the form submission)
        testresult = {"result": formvars.get("result"),
                      }
        if "site_id" in formvars:
            testresult["site_id"] = formvars["site_id"]
        if "disease_id" in formvars:
            testresult["disease_id"] = formvars["disease_id"]
        if "probe_date" in formvars:
            testresult["probe_date"] = formvars["probe_date"]
        if "device_id" in formvars:
            testresult["device_id"] = formvars["device_id"]
        if "demographic_id" in formvars:
            testresult["demographic_id"] = formvars["demographic_id"]
        record_id = table.insert(**testresult)
        if not record_id:
            raise RuntimeError("Could not create testresult record")
        testresult["id"] = record_id

        # Set record owner
        auth = current.auth
        auth.s3_set_record_owner(table, record_id)
        auth.s3_make_session_owner(table, record_id)

        # Onaccept
        s3db.onaccept(table, testresult, method="create")
        response.confirmation = T("Test Result registered")

        report_to_cwa = formvars.get("report_to_cwa")
        if report_to_cwa == "NO":
            # Do not report to CWA, just forward to read view
            self.next = r.url(id=record_id, method="read")
        else:
            # Report to CWA and show test certificate
            dcc_option = False
            if report_to_cwa == "ANONYMOUS":
                processing_type = "CWA_ANONYMOUS"
                cwa_report = CWAReport(record_id)
            elif report_to_cwa == "PERSONAL":
                dcc_option = formvars.get("dcc_option")
                processing_type = "CWA_PERSONAL"
                cwa_report = CWAReport(record_id,
                                       anonymous = False,
                                       first_name = formvars.get("first_name"),
                                       last_name = formvars.get("last_name"),
                                       dob = formvars.get("date_of_birth"),
                                       dcc = dcc_option,
                                       )
            else:
                processing_type = cwa_report = None

            if cwa_report:
                # Register consent
                if processing_type:
                    cwa_report.register_consent(processing_type,
                                                formvars.get("consent"),
                                                )
                # Send to CWA
                success = cwa_report.send()
                if success:
                    response.information = T("Result reported to %(system)s") % CWA
                    retry = False
                else:
                    # Reporting failed: offer a retry via the form
                    response.error = T("Report to %(system)s failed") % CWA
                    retry = True

                # Store DCC data
                if dcc_option:
                    cwa_data = cwa_report.data
                    try:
                        hcert = DCC.from_result(cwa_data.get("hash"),
                                                record_id,
                                                cwa_data.get("fn"),
                                                cwa_data.get("ln"),
                                                cwa_data.get("dob"),
                                                )
                    except ValueError as e:
                        hcert = None
                        response.warning = str(e)
                    if hcert:
                        hcert.save()
                    else:
                        # Remove DCC flag if hcert could not be generated
                        cwa_report.dcc = False

                S3CustomController._view("RLPPTM", "certificate.html")

                # Title
                field = table.disease_id
                if cwa_report.disease_id and field.represent:
                    disease = field.represent(cwa_report.disease_id)
                    title = "%s %s" % (disease, T("Test Result"))
                else:
                    title = T("Test Result")

                return {"title": title,
                        "intro": None, # TODO
                        "form": cwa_report.formatted(retry=retry),
                        }
            else:
                response.information = T("Result not reported to %(system)s") % CWA
                self.next = r.url(id=record_id, method="read")

        return None

    elif form.errors:
        current.response.error = T("There are errors in the form, please check your input")

    # Custom View
    S3CustomController._view("RLPPTM", "testresult.html")

    return {"title": title,
            "intro": intro,
            "form": form,
            }
# -------------------------------------------------------------------------
@staticmethod
def validate(form):
    """
    Validate the test result registration form
        - personal details are required for reporting to CWA by name
        - make sure the required consent option is checked
        - make sure the selected device matches the selected disease
    """

    T = current.T
    formvars = form.vars

    # Parse the consent widget input
    consent_tracker = current.s3db.auth_Consent
    consented = consent_tracker.parse(formvars.get("consent"))

    def consent_missing(option):
        # True unless the given processing type has been consented to
        entry = consented.get(option)
        return not entry or not entry[1]

    # Verify that we have the data and consent required
    cwa_mode = formvars.get("report_to_cwa")
    if cwa_mode == "PERSONAL":
        # Personal data required
        for name in ("first_name", "last_name", "date_of_birth"):
            if not formvars.get(name):
                form.errors[name] = T("Enter a value")
        # CWA_PERSONAL consent required
        if consent_missing("CWA_PERSONAL"):
            form.errors.consent = T("Consent required")
    elif cwa_mode == "ANONYMOUS":
        # CWA_ANONYMOUS consent required
        if consent_missing("CWA_ANONYMOUS"):
            form.errors.consent = T("Consent required")

    # Verify that the selected testing device matches the selected
    # disease (only if disease is selectable - otherwise, the device
    # list is pre-filtered anyway):
    if "disease_id" in formvars:
        device_id = formvars.get("device_id")
        if device_id:
            table = current.s3db.disease_testing_device
            query = (table.id == device_id) & \
                    (table.disease_id == formvars["disease_id"]) & \
                    (table.deleted == False)
            match = current.db(query).select(table.id,
                                             limitby = (0, 1),
                                             ).first()
            if not match:
                form.errors.device_id = T("Device not applicable for selected disease")
# -------------------------------------------------------------------------
@staticmethod
def certify(r, **attr):
"""
Generate a test certificate (PDF) for download
@param r: the CRUDRequest instance
@param attr: controller attributes
"""
if not r.record:
r.error(400, current.ERROR.BAD_REQUEST)
if r.http != "POST":
r.error(405, current.ERROR.BAD_METHOD)
if r.representation != "pdf":
r.error(415, current.ERROR.BAD_FORMAT)
post_vars = r.post_vars
# Extract and check formkey from post data
formkey = post_vars.get("_formkey")
keyname = "_formkey[testresult/%s]" % r.id
if not formkey or formkey not in current.session.get(keyname, []):
r.error(403, current.ERROR.NOT_PERMITTED)
# Extract cwadata
cwadata = post_vars.get("cwadata")
if not cwadata:
r.error(400, current.ERROR.BAD_REQUEST)
try:
cwadata = json.loads(cwadata)
except JSONERRORS:
r.error(400, current.ERROR.BAD_REQUEST)
# Generate the CWAReport (implicitly validates the hash)
anonymous = "fn" not in cwadata
try:
cwareport = CWAReport(r.id,
anonymous = anonymous,
first_name = cwadata.get("fn"),
last_name = cwadata.get("ln"),
dob = cwadata.get("dob"),
| |
+\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8 +\
public_key_blob_length * 2]
resultlist.append(("public_key_blob_length",
str(public_key_blob_length)))
resultlist.append(("public_key_blob",
self.get_ascii(\
public_key_blob)))
if boolean != 0:
public_key_to_be_used_for_authentication_length =\
int(self.myresult[20 + user_name_length * 2 +\
8 + service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2:20 +\
user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8],\
16)
public_key_to_be_used_for_authentication =\
self.myresult[20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 + method_name_length\
* 2 + 8 + 8 + public_key_algorithm_name_length\
* 2 + 8:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8 +\
public_key_blob_length * 2]
signature_length = \
int(self.myresult[20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8 +\
public_key_to_be_used_for_authentication_length\
* 2:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8 +\
public_key_to_be_used_for_authentication_length\
* 2 + 8], 16)
signature = self.myresult[20 + user_name_length *\
2 + 8 + service_name_length * 2 + 8 + \
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8 +\
public_key_to_be_used_for_authentication_length\
* 2 + 8:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8 +\
public_key_to_be_used_for_authentication_length\
* 2 + 8 + signature_length * 2]
resultlist.append((\
"public_key_to_be_used_for_authentication_length",
str(public_key_to_be_used_for_authentication_length)))
resultlist.append((\
"public_key_to_be_used_for_authentication",
self.get_ascii(\
public_key_to_be_used_for_authentication)))
resultlist.append(("signature_length",
str(signature_length)))
resultlist.append(("signature",
self.get_ascii(signature)))
if method_name.startswith("password"):
boolean = int(self.myresult[20 + user_name_length\
* 2 + 8 + service_name_length * 2 + 8 +\
method_name_length * 2:20 + user_name_length *\
2 + 8 + service_name_length * 2 + 8 +\
method_name_length * 2 + 8], 16)
resultlist.append(("boolean", boolean))
if boolean == 0:
plaintext_password_length = int(self.myresult[\
20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 + method_name_length\
* 2 + 8:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8], 16)
plaintext_password = self.myresult[20 +\
user_name_length * 2 + 8 + service_name_length\
* 2 + 8 + method_name_length * 2 + 8 + 8:20 +\
user_name_length * 2 + 8 + service_name_length\
* 2 + 8 + method_name_length * 2 + 8 + 8 +\
plaintext_password_length * 2]
resultlist.append(("plaintext_password_length",
str(plaintext_password_length)))
resultlist.append(("plaintext_password",
self.get_ascii(plaintext_password)))
if boolean != 0:
plaintext_old_password_length =\
int(self.myresult[20 + user_name_length * 2 +\
8 + service_name_length * 2 + 8 +\
method_name_length * 2 + 8:20 +\
user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8], 16)
plaintext_old_password = self.myresult[\
20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8:20 +\
user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
plaintext_old_password_length * 2]
plaintext_new_password_length = \
int(self.myresult[20 + user_name_length * 2 +\
8 + service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
plaintext_old_password_length * 2:20\
+ user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
plaintext_old_password_length * 2 + 8], 16)
plaintext_new_password = self.myresult[\
20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 + method_name_length\
* 2 + 8 + 8 + plaintext_old_password_length\
* 2 + 8:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
plaintext_old_password_length * 2 +\
plaintext_new_password_length * 2]
resultlist.append(("plaintext_old_password_length",
str(plaintext_old_password_length)))
resultlist.append(("plaintext_old_password",
plaintext_old_password))
resultlist.append(("plaintext_new_password_length",
str(plaintext_new_password_length)))
resultlist.append(("plaintext_new_password",
self.get_ascii(plaintext_new_password)))
if method_name.startswith("hostbased"):
public_key_algorithm_for_host_key_length =\
int(self.myresult[12:20], 16)
public_key_algorithm_for_host_key =\
self.myresult[20:20 +\
public_key_algorithm_for_host_key_length * 2]
public_host_key_and_cert_for_client_host_len =\
int(self.myresult[20 +\
public_key_algorithm_for_host_key_length * 2:20 +\
public_key_algorithm_for_host_key_length *\
2 + 8], 16)
public_host_key_and_certificates_for_client_host =\
self.myresult[20 +\
public_key_algorithm_for_host_key_length * 2 +\
8:20 + public_key_algorithm_for_host_key_length *\
2 + 8 +\
public_host_key_and_cert_for_client_host_len * 2]
client_host_name_length = int(self.myresult[20 +\
public_key_algorithm_for_host_key_length * 2 + 8 +\
public_host_key_and_cert_for_client_host_len\
* 2:20 + public_key_algorithm_for_host_key_length\
* 2 + 8 + public_host_key_and_cert_for_client_host_len\
* 2 + 8], 16)
client_host_name = self.myresult[20 +\
public_key_algorithm_for_host_key_length * 2 + 8 +\
public_host_key_and_cert_for_client_host_len\
* 2 + 8:20 + public_key_algorithm_for_host_key_length\
* 2 + 8 + public_host_key_and_cert_for_client_host_len\
* 2 + 8 + client_host_name_length * 2]
user_name_on_the_client_host_length = int(\
self.myresult[20 +\
public_key_algorithm_for_host_key_length * 2 + 8 +\
public_host_key_and_cert_for_client_host_len\
* 2 + 8 + client_host_name_length * 2:20 +\
public_key_algorithm_for_host_key_length * 2 + 8 +\
public_host_key_and_cert_for_client_host_len\
* 2 + 8 + client_host_name_length * 2 + 8], 16)
user_name_on_the_client_host = self.myresult[20\
+ public_key_algorithm_for_host_key_length * 2 + 8 +\
public_host_key_and_cert_for_client_host_len * 2 +\
8 + client_host_name_length * 2 + 8:20 +\
public_key_algorithm_for_host_key_length * 2 + 8 +\
public_host_key_and_cert_for_client_host_len * 2 + 8 +\
client_host_name_length * 2 + 8 +\
user_name_on_the_client_host_length * 2]
signature_length = int(self.myresult[20 +\
public_key_algorithm_for_host_key_length * 2 + 8 +\
public_host_key_and_cert_for_client_host_len\
* 2 + 8 + client_host_name_length * 2 + 8 +\
user_name_on_the_client_host_length * 2:20 +\
public_key_algorithm_for_host_key_length * 2 + 8 +\
public_host_key_and_cert_for_client_host_len * 2 + 8 +\
client_host_name_length * 2 + 8 +\
user_name_on_the_client_host_length * 2 + 8], 16)
signature = self.myresult[20 +\
public_key_algorithm_for_host_key_length * 2 + 8 +\
public_host_key_and_cert_for_client_host_len * 2 +\
8 + client_host_name_length * 2 + 8 +\
user_name_on_the_client_host_length * 2 + 8:20 +\
public_key_algorithm_for_host_key_length * 2 + 8 +\
public_host_key_and_cert_for_client_host_len *\
2 + 8 + client_host_name_length * 2 + 8 +\
user_name_on_the_client_host_length * 2 + 8 +\
signature_length * 2]
resultlist.append(("public_key_algorithm_for\
_host_key_length",
str(public_key_algorithm_for_host_key_length)))
resultlist.append(("public_key_algorithm_for_host_key",
self.get_ascii(public_key_algorithm_for_host_key)))
resultlist.append(\
("public_host_key_and_certificates_for_client_host_length",
str(\
public_host_key_and_cert_for_client_host_len)))
resultlist.append(\
("public_host_key_and_certificates_for_client_host",
self.get_ascii(\
public_host_key_and_certificates_for_client_host)))
resultlist.append(("client_host_name_length",
str(client_host_name_length)))
resultlist.append(("client_host_name",
self.get_ascii(client_host_name)))
resultlist.append(\
("user_name_on_the_client_host_length",\
str(user_name_on_the_client_host_length)))
resultlist.append(("user_name_on_the_client_host",
self.get_ascii(user_name_on_the_client_host)))
resultlist.append(("signature_length",
str(signature_length)))
resultlist.append(("signature",
self.get_ascii(signature)))
else:
method_specific_fields_length = int(self.myresult[\
20 + user_name_length * 2 + 8 + service_name_length * 2 + 8 +\
method_name_length * 2:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 + method_name_length * 2 + 8], 16)
method_specific_fields = self.myresult[\
20 + user_name_length * 2 + 8 + service_name_length * 2 + 8 +\
method_name_length * 2 + 8:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 + method_name_length * 2 + 8 +\
method_specific_fields_length * 2]
resultlist.append(("method_specific_fields_length",
str(method_specific_fields_length)))
resultlist.append(("method_specific_fields",
self.get_ascii(method_specific_fields)))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_USERAUTH_FAILURE"):
try:
authentications_that_can_continue_length =\
int(self.myresult[12:20], 16)
authentications_that_can_continue =\
self.myresult[20:20 + authentications_that_can_continue_length * 2]
partial_success_boolean = int(self.myresult[20 +\
authentications_that_can_continue_length * 2:20 +\
authentications_that_can_continue_length * 2 + 8], 16)
resultlist.append(\
("authentications_that_can_continue_length",
str(authentications_that_can_continue_length)))
resultlist.append(("authentications_that_can_continue",
authentications_that_can_continue))
resultlist.append(("partial_success_boolean",
partial_success_boolean))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_USERAUTH_BANNER"):
try:
message_length = int(self.myresult[12:20], 16)
message = self.myresult[20:20 + message_length * 2]
language_tag_length = int(self.myresult[20 +\
message_length * 2:20 + message_length * 2 + 8], 16)
language_tag = self.myresult[20 + message_length * 2\
+ 8:20 + message_length * 2 + 8 + language_tag_length * 2]
resultlist.append(("message_length", str(message_length)))
resultlist.append(("message", self.get_ascii(message)))
resultlist.append(("language_tag_length",
str(language_tag_length)))
resultlist.append(("language_tag",
self.get_ascii(language_tag)))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_KEXINIT"):
try:
cookie = base64.standard_b64encode(self.myresult[12:44])
kex_algorithms_length = int(self.myresult[44:52], 16)
kex_algorithms = self.get_ascii(self.myresult[52:52 +\
kex_algorithms_length * 2])
server_host_key_algorithms_length = int(self.myresult[52 +\
kex_algorithms_length * 2:52 + kex_algorithms_length\
* 2 + 8], 16)
server_host_key_algorithms = self.get_ascii(self.myresult[\
52 + kex_algorithms_length * 2 + 8:52 +\
kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2])
encryption_algorithms_client_to_server_length = int(\
self.myresult[52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2:52 +\
kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8], 16)
encryption_algorithms_client_to_server = self.get_ascii(\
self.myresult[52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8:52 +\
kex_algorithms_length | |
per image. You can adjust this
number if expecting more objects. You can use -1 to return all detections.
pos_iou_thresh : float, default is 1.0
IOU threshold for true anchors that match real objects.
'pos_iou_thresh < 1' is not implemented.
ignore_iou_thresh : float
Anchors that has IOU in `range(ignore_iou_thresh, pos_iou_thresh)` don't get
penalized of objectness score.
norm_layer : object
Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
norm_kwargs : dict
Additional `norm_layer` arguments, for example `num_devices=4`
for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
"""
    def __init__(self, stages, channels, anchors, strides, classes, alloc_size=(128, 128),
                 nms_thresh=0.45, nms_topk=400, post_nms=100, pos_iou_thresh=1.0,
                 ignore_iou_thresh=0.7, norm_layer=BatchNorm, norm_kwargs=None, agnostic=False, t_out=True, t=1, conv=2, corr_d=0,
                 **kwargs):
        """Build the temporal YOLOv3 network (see the class docstring for parameters)."""
        super(YOLOV3Temporal, self).__init__(**kwargs)
        self._classes = classes
        # NMS / post-processing parameters (see set_nms())
        self.nms_thresh = nms_thresh
        self.nms_topk = nms_topk
        self.post_nms = post_nms
        # Temporal configuration: t = number of frames per sample,
        # t_out = whether all timesteps flow through the YOLO heads
        self.t_out = t_out
        self.t = t
        # Frame gaps derived from the window size t (used by callers when
        # sampling frames around the centre frame) — TODO confirm with caller
        self.first_gap = 2*int(math.floor(self.t/2)/2)
        self.second_gap = 2*int(math.ceil(self.t/2)/2)
        self.conv = conv
        self.corr_d = corr_d
        self._pos_iou_thresh = pos_iou_thresh
        self._ignore_iou_thresh = ignore_iou_thresh
        if pos_iou_thresh >= 1:
            self._target_generator = YOLOV3TargetMerger(len(classes), ignore_iou_thresh)
        else:
            # pos_iou_thresh < 1 would need a different target assignment scheme
            raise NotImplementedError(
                "pos_iou_thresh({}) < 1.0 is not implemented!".format(pos_iou_thresh))
        self._loss = YOLOV3Loss()
        with self.name_scope():
            self.stages = nn.HybridSequential()
            self.transitions = nn.HybridSequential()
            self.yolo_blocks = nn.HybridSequential()
            self.yolo_outputs = nn.HybridSequential()
            if t_out and corr_d:
                # Correlation branch across timesteps plus three downscaling convs
                self.corr = Corr(corr_d, t=5, kernal_size=3, stride=1, keep='none', comp_mid=True)
                self.convs1 = nn.HybridSequential()
                self.convs1.add(_conv2d(channel=128, kernel=3, stride=1, padding=1,
                                        norm_layer=norm_layer, norm_kwargs=norm_kwargs))
                self.convs2 = nn.HybridSequential()
                self.convs2.add(_conv2d(channel=128, kernel=3, stride=2, padding=1,
                                        norm_layer=norm_layer, norm_kwargs=norm_kwargs))
                self.convs3 = nn.HybridSequential()
                self.convs3.add(_conv2d(channel=128, kernel=3, stride=2, padding=1,
                                        norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            if not t_out:
                # (2+1)D convolutions used to merge the temporal dimension
                self.convs1 = nn.HybridSequential()
                self.convs2 = nn.HybridSequential()
                # for _ in range(1):
                self.convs1.add(_conv21d(channel=512, t=3, d=3, m=256, padding=[1, 0], stride=[(1, 2, 2), 1],
                                         norm_layer=norm_layer, norm_kwargs=norm_kwargs))
                self.convs2.add(_conv21d(channel=1024, t=3, d=3, m=512, padding=[1, 0], stride=[(1, 2, 2), 1],
                                         norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            # note that anchors and strides should be used in reverse order
            for i, stage, channel, anchor, stride in zip(range(len(stages)), stages, channels, anchors[::-1], strides[::-1]):
                self.stages.add(stage)
                block = YOLODetectionBlockV3(channel, conv_type=str(conv), norm_layer=norm_layer, norm_kwargs=norm_kwargs)
                self.yolo_blocks.add(block)
                output = YOLOOutputV3(i, len(classes), anchor, stride, alloc_size=alloc_size, agnostic=agnostic)
                self.yolo_outputs.add(output)
                if i > 0:
                    # 1x1 transitions fuse upsampled deep features with shallower routes
                    self.transitions.add(_conv2d(channel, 1, 0, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs))
@property
def num_class(self):
"""Number of (non-background) categories.
Returns
-------
int
Number of (non-background) categories.
"""
return self._num_class
@property
def classes(self):
"""Return names of (non-background) categories.
Returns
-------
iterable of str
Names of (non-background) categories.
"""
return self._classes
    def hybrid_forward(self, F, x, *args):
        """YOLOV3 network hybrid forward.
        Parameters
        ----------
        F : mxnet.nd or mxnet.sym
            `F` is mxnet.sym if hybridized or mxnet.nd if not.
        x : mxnet.nd.NDArray
            Input data.
        *args : optional, mxnet.nd.NDArray
            During training, extra inputs are required:
            (gt_boxes, obj_t, centers_t, scales_t, weights_t, clas_t)
            These are generated by YOLOV3PrefetchTargetGenerator in dataloader transform function.
        Returns
        -------
        (tuple of) mxnet.nd.NDArray
            During inference, return detections in shape (B, N, 6)
            with format (cid, score, xmin, ymin, xmax, ymax)
            During training, return losses only: (obj_loss, center_loss, scale_loss, cls_loss).
        """
        all_box_centers = []
        all_box_scales = []
        all_objectness = []
        all_class_pred = []
        all_anchors = []
        all_offsets = []
        all_feat_maps = []
        all_detections = []
        routes = []
        if self.t == 1:
            # Plain single-frame YOLOv3: run the backbone stages sequentially,
            # keeping each stage output as a route for the detection heads
            for stage in self.stages:
                x = stage(x)
                routes.append(x)
        else:
            assert self.t == 5, 'Currently only support t=5 but will increase to more later'
            if self.t_out:
                if self.corr_d:
                    # Correlation variant: correlate features across timesteps,
                    # then fuse them with the middle frame's backbone features
                    x = TimeDistributed(self.stages[0])(x)
                    # get middle feature for further Darknet processing
                    mid = F.squeeze(x.slice_axis(axis=1, begin=int(self.t / 2), end=int(self.t / 2)+1), axis=1)
                    x = self.corr(x) # perform correlations across all timesteps
                    x = TimeDistributed(self.convs1)(x) # do first conv
                    mid_rep = F.repeat(F.expand_dims(mid, axis=1), axis=1, repeats=self.t) # repeat the mid feats t times
                    routes.append(F.concat(mid_rep, x, dim=2)) # concat and pass to YOLO (a,d)
                    mid = self.stages[1](mid) # pass middle frame through more darknet
                    mid_rep = F.repeat(F.expand_dims(mid, axis=1), axis=1, repeats=self.t) # repeat the mid feats t times
                    x = TimeDistributed(self.convs2)(x) # downscale x with another conv
                    routes.append(F.concat(mid_rep, x, dim=2)) # concat and pass to YOLO (b,e)
                    mid = self.stages[2](mid) # pass middle frame through last bit of darknet
                    mid_rep = F.repeat(F.expand_dims(mid, axis=1), axis=1, repeats=self.t) # repeat the mid feats t times
                    x = TimeDistributed(self.convs3)(x) # downscale x with another conv
                    x = F.concat(mid_rep, x, dim=2)
                    routes.append(x) # concat and pass to YOLO (c,f)
                else:
                    # Apply each backbone stage to every timestep independently
                    x = TimeDistributed(self.stages[0])(x)
                    routes.append(x)
                    x = TimeDistributed(self.stages[1])(x)
                    # x = TimeDistributed(self.stages[1])(x.slice_axis(axis=1, begin=1, end=4)) # old code when did heir
                    routes.append(x)
                    x = TimeDistributed(self.stages[2])(x)
                    # x = TimeDistributed(self.stages[2])(x.slice_axis(axis=1, begin=1, end=2)) # old code when did heir
                    routes.append(x)
            else:
                # Temporal input but single-frame output: merge the time axis
                # with (2+1)D convs while narrowing the temporal window
                x = TimeDistributed(self.stages[0])(x)
                routes.append(x.slice_axis(axis=1, begin=2, end=3).squeeze(axis=1))
                cx = F.swapaxes(self.convs1(F.swapaxes(x, 1, 2)), 1, 2)
                x = TimeDistributed(self.stages[1])(x.slice_axis(axis=1, begin=1, end=4))
                x = x + cx
                routes.append(x.slice_axis(axis=1, begin=1, end=2).squeeze(axis=1))
                cx = F.swapaxes(self.convs2(F.swapaxes(x, 1, 2)), 1, 2)
                x = TimeDistributed(self.stages[2])(x.slice_axis(axis=1, begin=1, end=2))
                x = x + cx
                x = x.squeeze(axis=1)
                routes.append(x)
        # the YOLO output layers are used in reverse order, i.e., from very deep layers to shallow
        for i, block, output in zip(range(len(routes)), self.yolo_blocks, self.yolo_outputs):
            if self.t > 1 and self.conv == 2 and self.t_out:
                x, tip = TimeDistributed(block)(x)
            else:
                x, tip = block(x)
            if self.t > 1 and self.t_out:
                # Temporal heads: outputs carry an extra leading time axis
                if autograd.is_training():
                    dets, box_centers, box_scales, objness, class_pred, anchors, offsets = TimeDistributed(output, style='for')(tip)
                    all_box_centers.append(box_centers.reshape((0, 0, -3, -1)))
                    all_box_scales.append(box_scales.reshape((0, 0, -3, -1)))
                    all_objectness.append(objness.reshape((0, 0, -3, -1)))
                    all_class_pred.append(class_pred.reshape((0, 0, -3, -1)))
                    all_anchors.append(anchors)
                    all_offsets.append(offsets)
                    # here we use fake featmap to reduce memory consumption, only shape[2, 3] is used
                    fake_featmap = F.zeros_like(tip.slice_axis(
                        axis=0, begin=0, end=1).slice_axis(axis=2, begin=0, end=1))
                    all_feat_maps.append(fake_featmap)
                else:
                    dets = TimeDistributed(output)(tip)
            else:
                if autograd.is_training():
                    dets, box_centers, box_scales, objness, class_pred, anchors, offsets = output(tip)
                    all_box_centers.append(box_centers.reshape((0, -3, -1)))
                    all_box_scales.append(box_scales.reshape((0, -3, -1)))
                    all_objectness.append(objness.reshape((0, -3, -1)))
                    all_class_pred.append(class_pred.reshape((0, -3, -1)))
                    all_anchors.append(anchors)
                    all_offsets.append(offsets)
                    # here we use fake featmap to reduce memory consumption, only shape[2, 3] is used
                    fake_featmap = F.zeros_like(tip.slice_axis(
                        axis=0, begin=0, end=1).slice_axis(axis=1, begin=0, end=1))
                    all_feat_maps.append(fake_featmap)
                else:
                    dets = output(tip)
            all_detections.append(dets)
            if i >= len(routes) - 1:
                break
            # add transition layers
            if self.t > 1 and self.t_out:
                x = TimeDistributed(self.transitions[i])(x)
            else:
                x = self.transitions[i](x)
            # upsample feature map reverse to shallow layers
            upsample = _upsample(x, stride=2)
            route_now = routes[::-1][i + 1]
            x = F.concat(F.slice_like(upsample, route_now * 0, axes=(-2, -1)), route_now, dim=-3) # concat to darknet
        if autograd.is_training():
            # during training, the network behaves differently since we don't need detection results
            if autograd.is_recording():
                box_preds = F.concat(*all_detections, dim=-2)
                if self.t == 1 or not self.t_out: # the original if no temporal
                    all_targets = self._target_generator(box_preds, *args)
                    all_preds = [F.concat(*p, dim=1) for p in [
                        all_objectness, all_box_centers, all_box_scales, all_class_pred]]
                    return self._loss(*(all_preds + all_targets))
                # Temporal case: compute the loss per timestep, then average
                losses = [[], [], [], []]
                for t in range(self.t):
                    all_preds = [F.slice_axis(F.concat(*p, dim=-2), axis=1, begin=t, end=t + 1).squeeze(axis=1)
                                 for p in [all_objectness, all_box_centers, all_box_scales, all_class_pred]]
                    box_preds_t = F.slice_axis(box_preds, axis=1, begin=t, end=t+1).squeeze(axis=1)
                    argst = [F.slice_axis(a, axis=1, begin=t, end=t+1).squeeze(axis=1) for a in args]
                    all_targets = self._target_generator(box_preds_t, *argst)
                    ls = self._loss(*(all_preds + all_targets))
                    for i, l in enumerate(ls):
                        losses[i].append(l)
                return [F.mean(F.concat(*l, dim=0)) for l in losses]
            if self.t > 1 and self.t_out:
                # Not recording: reduce anchors/offsets/featmaps to one timestep
                all_anchors = [F.slice_axis(a, axis=1, begin=1, end=2).squeeze(axis=1) for a in all_anchors]
                all_offsets = [F.slice_axis(a, axis=1, begin=1, end=2).squeeze(axis=1) for a in all_offsets]
                all_feat_maps = [F.slice_axis(a, axis=1, begin=1, end=2).squeeze(axis=1) for a in all_feat_maps]
            # orig 2d:
            return (F.concat(*all_detections, dim=-2), all_anchors, all_offsets, all_feat_maps,
                    F.concat(*all_box_centers, dim=-2), F.concat(*all_box_scales, dim=-2),
                    F.concat(*all_objectness, dim=-2), F.concat(*all_class_pred, dim=-2))
        # concat all detection results from different stages
        result = F.concat(*all_detections, dim=-2)
        if self.nms_thresh > 0 and self.nms_thresh < 1: # todo check this works for the extra dim
            result = F.contrib.box_nms(
                result, overlap_thresh=self.nms_thresh, valid_thresh=0.01,
                topk=self.nms_topk, id_index=0, score_index=1, coord_start=2, force_suppress=False)
            if self.post_nms > 0:
                result = result.slice_axis(axis=-2, begin=0, end=self.post_nms)
        ids = result.slice_axis(axis=-1, begin=0, end=1)
        scores = result.slice_axis(axis=-1, begin=1, end=2)
        bboxes = result.slice_axis(axis=-1, begin=2, end=None)
        return ids, scores, bboxes
def set_nms(self, nms_thresh=0.45, nms_topk=400, post_nms=100):
"""Set non-maximum suppression parameters.
Parameters
----------
nms_thresh : float, default is 0.45.
Non-maximum suppression threshold. You can specify < 0 or > 1 to disable NMS.
nms_topk : int, default is 400
Apply NMS to top k detection results, use -1 to disable so that every Detection
result is used in NMS.
post_nms : int, default is 100
Only return top `post_nms` detection results, the rest is discarded. The number is
based on COCO dataset which has maximum 100 objects per image. You can adjust this
number if expecting more objects. You can use -1 to return all detections.
| |
# Repository: Regnareb/StreamManager
import os
import sys
import copy
import ctypes
import socket
import logging
import threading
import functools
import webbrowser
logger = logging.getLogger(__name__)
import keyboard
from PySide2 import QtCore, QtWidgets, QtGui, QtWebEngineWidgets
# TODO
# Be able to import a text file in the description/title as variables (to have counters and currentsong for example)
# Add the %CATEGORY% variable and the other monitored files to the context menu
# Allow adding a command bot with custom commands (!game !currentsong)
# Add About and Help menu entries
# Automatically switch scenes in OBS depending of the game played
# Add an XML/EDL file and add each marker created for import into premiere/resolve/FCP
# Change color tray icon to green if update channel with new process or red + toast message if error
# Add trayicons for dropped frames and stream/record states
# Do a notification if the user has not used a streaming process for X minutes if any service is online (to prevent streaming unnoticed)
# Build a customisable stream deck that automatically switches its keys depending on the program in use https://interactjs.io/
# Being able to put it in portrait without changing icons layout
# Add Multi Actions with pause timers
# Create an independant server that scan the foreground process and send it to the receiver, this way multi computer streaming is possible
# websocket plugin ( https://github.com/Elektordi/obs-websocket-py ) Show Scene selector, MIC and DEFAULT volume, RECORD and STREAMING status and STATS
import common.manager
import common.remote
import common.tools
import common.systray
class QLoggerHandler(common.tools.HtmlStreamHandler):
    """Logging handler that relays each formatted record through a Qt signal."""

    def __init__(self, signal):
        super().__init__()
        self.signal = signal

    def emit(self, record):
        # Forward the formatted message via the old-style Qt signal
        self.signal.emit(QtCore.SIGNAL("logMsg(QString)"), self.format(record))
class LogPanel(QtWidgets.QDockWidget):
    """Dock widget displaying application logs with a selectable log level."""

    changed_loglevel = QtCore.Signal(str)

    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.setWindowTitle('Logs')
        self.setObjectName('docklogs')
        self.levels = ['Debug', 'Info', 'Warning', 'Error', 'Critical']
        ui = self.interface = {}
        ui['main'] = QtWidgets.QWidget()
        ui['layoutv'] = QtWidgets.QVBoxLayout()
        ui['layouth'] = QtWidgets.QHBoxLayout()
        ui['label'] = QtWidgets.QLabel('Logs Level:')
        ui['levels'] = QtWidgets.QComboBox()
        ui['levels'].insertItems(0, self.levels)
        # Connect only after the items are inserted so that populating the
        # combobox does not emit a spurious level change
        ui['levels'].currentIndexChanged.connect(self.changed_loglevel.emit)
        ui['textedit'] = QtWidgets.QTextBrowser()
        ui['textedit'].setOpenLinks(False)
        ui['clear'] = QtWidgets.QPushButton('Clear')
        ui['clear'].clicked.connect(ui['textedit'].clear)
        # Top row: [stretch] label, level selector, [stretch] clear button
        for widget in (None, ui['label'], ui['levels'], None, ui['clear']):
            if widget is None:
                ui['layouth'].addStretch()
            else:
                ui['layouth'].addWidget(widget)
        ui['layoutv'].addLayout(ui['layouth'])
        ui['layoutv'].addWidget(ui['textedit'])
        ui['main'].setLayout(ui['layoutv'])
        self.setWidget(ui['main'])
        # Use old syntax signals as you can't have multiple inheritance with QObject
        self.emitter = QtCore.QObject()
        self.connect(self.emitter, QtCore.SIGNAL("logMsg(QString)"), ui['textedit'].append)
        self.handler = QLoggerHandler(self.emitter)
        formatter = logging.Formatter('<span title="line %(lineno)d">%(levelname)s %(name)s.%(funcName)s() - %(message)s</span>')
        self.handler.setFormatter(formatter)
        logging.getLogger().addHandler(self.handler)
class DialogAddProcess(QtWidgets.QDialog):
    """Modal dialog asking for a game/process name, with auto-completion."""

    def __init__(self, database, parent=None):
        super().__init__(parent)
        self.setWindowTitle('Add Game')
        # Line edit completing against the known process database keys
        self.completer = QtWidgets.QCompleter(list(database.keys()))
        self.completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self.linedit = QtWidgets.QLineEdit()
        self.linedit.setMinimumWidth(200)
        self.linedit.setCompleter(self.completer)
        self.buttons = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
        self.buttons.accepted.connect(self.accept)
        self.buttons.rejected.connect(self.cancel)
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(self.linedit)
        layout.addWidget(self.buttons)
        self.layout = layout
        self.setLayout(layout)

    def cancel(self):
        """Clear the entered text and dismiss the dialog."""
        self.linedit.setText('')
        self.close()

    def closeEvent(self, event):
        # Closing via the window manager behaves like pressing Cancel
        self.cancel()
        super().closeEvent(event)
@common.tools.decorate_all_methods(common.tools.catch_exception(logger=logger))
class StreamManager_UI(common.systray.Window):
    def __init__(self):
        """Build the main window: panels, background threads, tray icon, shortcuts."""
        super().__init__()
        self.setWindowTitle('Stream Manager')
        self.setIcon(QtGui.QIcon('icon.png'))
        self.load_stylesheet()
        self.setCentralWidget(None)
        # Dockable logs panel
        self.log_panel = LogPanel()
        self.log_panel.changed_loglevel.connect(self.set_loglevel)
        # Background thread that monitors processes and updates stream services
        self.manager = ManagerStreamThread()
        self.manager.create_services()
        self.manager.createdservices.connect(self.updated)
        self.manager.validate.connect(self.update_invalidcategory)
        self.manager.updated.connect(self.updated)
        # Embedded web remote-control server
        self.webremote = WebRemote(self.manager.config['base']['autostart'])
        self.webremote.startedcheck.connect(self.start_check)
        self.webremote.stoppedcheck.connect(self.stop_check)
        self.webremote.start()
        self.preferences = Preferences(self.manager, self)
        self.preferences.updated.connect(self.preferences_updated)
        self.preferences.finished.connect(self.set_shortcuts)
        # Build the remaining UI panels and populate them
        self.create_gamelayout()
        self.create_statuslayout()
        self.populate_appdata()
        self.load_generalsettings()
        self.create_menu()
        # Dock layout: nested/tabbed docks stacked on the right-hand side
        self.setTabPosition(QtCore.Qt.AllDockWidgetAreas, QtWidgets.QTabWidget.North)
        self.setDockOptions(QtWidgets.QMainWindow.AllowNestedDocks | QtWidgets.QMainWindow.AllowTabbedDocks)
        self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.log_panel)
        self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.panel_status['dock'])
        self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.gameslayout['dock'])
        self.panel_status['dock'].raise_()
        self.setAcceptDrops(True)
        self.set_shortcuts(init=True)
        # Restore last session geometry/state (or run first-launch setup)
        self.read_qsettings()
        # Optionally start minimised to the system tray
        if self.manager.config['base']['starttray']:
            self.hide()
        else:
            self.show()
def set_dockable(self, state=None):
if state==None:
state = self.dockable.isChecked()
for i in [self.log_panel, self.gameslayout['dock'], self.panel_status['dock']]:
dummy = None if state else QtWidgets.QWidget()
i.setTitleBarWidget(dummy)
self.dockable.setChecked(state)
def read_qsettings(self):
self.settings = QtCore.QSettings('regnareb', 'Stream Manager')
if self.settings.value('initialised_once'):
self.restoreGeometry(self.settings.value('geometry'))
self.restoreState(self.settings.value('windowState'))
self.log_panel.interface['levels'].setCurrentIndex(self.log_panel.interface['levels'].findText(self.settings.value('logslevel')))
self.set_loglevel(self.settings.value('logslevel'))
logger.info('Loaded settings from last session.')
self.set_dockable(bool(self.settings.value('dockable')))
else:
self.first_launch()
    def first_launch(self):
        """One-time setup: tab the docks together, open preferences, mark the app initialised."""
        logger.info('First launch.')
        self.set_loglevel('Warning')
        # Stack the three panels as tabs so the default layout stays compact
        self.tabifyDockWidget(self.panel_status['dock'], self.gameslayout['dock'])
        self.tabifyDockWidget(self.gameslayout['dock'], self.log_panel)
        self.log_panel.hide()
        self.preferences.open()
        # Jump straight to the second preferences tab and hide the tab bar
        # (presumably a guided account-setup step — confirm intent)
        self.preferences.tabs.setCurrentIndex(1)
        self.preferences.tabs.tabBar().hide()
        self.set_dockable(False)
        self.settings.setValue('initialised_once', 1)
    def closeEvent(self, event):
        """Qt close handler: hide to the system tray instead of quitting when the tray icon is shown."""
        if self.trayIcon.isVisible():
            if not self.settings.value('showed_quitmessage'):
                # One-time notice so users know the app keeps running in the tray
                QtWidgets.QMessageBox.information(self, "Minimise to System Tray", "The program will keep running in the system tray. To terminate the program, choose <b>Quit</b> in the context menu of the system tray icon.")
                self.settings.setValue("showed_quitmessage", True)
            # Blank the embedded remote page while hidden; restore() reloads it
            self.panel_status['webpage'].load(QtCore.QUrl(""))
            super().closeEvent(event)
        else:
            self.quit()
def restore(self):
if self.isHidden():
self.panel_status['webpage'].load(QtCore.QUrl("http://localhost:{}/".format(self.webremote.port)))
super().restore()
    def quit(self):
        """Stop the manager and web remote, persist UI settings, then quit the app.

        If the configuration cannot be saved, a dialog exposes its contents so the
        user can salvage them; choosing Cancel aborts the quit (note the manager
        and web remote have already been stopped at that point).
        """
        self.manager.quit()
        self.webremote.quit()
        self.webremote.terminate()
        self.settings.setValue("geometry", self.saveGeometry())
        self.settings.setValue("windowState", self.saveState())
        # Persisted as True or '' so a plain bool() restores it on next launch
        self.settings.setValue("dockable", self.dockable.isChecked() or '')
        self.settings.setValue("logslevel", self.log_panel.interface['levels'].currentText())
        if not self.manager.save_config():
            msgBox = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Critical, "Can't Save Preferences", "Couldn't save the preferences, you can copy its content in the \"Show Detail\" to try and salvage them, or send it to the developer for debug purposes.")
            msgBox.setDetailedText(str(self.manager.config))
            msgBox.setStandardButtons(QtWidgets.QMessageBox.Close | QtWidgets.QMessageBox.Cancel)
            msgBox.setDefaultButton(QtWidgets.QMessageBox.Close)
            ret = msgBox.exec_()
            if ret==QtWidgets.QMessageBox.Cancel:
                return
        super().quit()
def preferences_updated(self):
self.set_shortcuts()
self.manager.process = ''
def load_stylesheet(self):
path = os.path.join(os.path.dirname(__file__), '..', 'data', 'theme', 'qtstylesheet.css')
with open(path) as f:
stylesheet = f.read()
self.setStyleSheet(stylesheet)
def dropEvent(self, event):
for url in event.mimeData().urls():
self.manager.load_credentials(url.toLocalFile())
def dragEnterEvent(self, event):
event.acceptProposedAction()
def start_check(self):
self.manager.start()
def stop_check(self):
self.manager.quit()
def updated(self, infos=None):
self.reload()
def reload(self):
self.panel_status['webpage'].reload()
def set_loglevel(self, level=''):
block_signals(self.log_panel.interface.values(), True)
if level not in self.log_panel.levels:
level = self.log_panel.interface['levels'].currentText()
self.manager.set_loglevel(level)
self.log_panel.interface['levels'].setCurrentIndex(self.log_panel.interface['levels'].findText(level))
block_signals(self.log_panel.interface.values(), False)
    def mouseDoubleClickEvent(self, *args):
        """Toggle a frameless always-on-top mode on double-click.

        Changing window flags re-creates the native window, so position and
        geometry are captured first and restored after show().
        """
        pos = self.pos()
        geo = self.geometry()
        if self.menuBar().isVisible():
            self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.FramelessWindowHint)
        else:
            self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowStaysOnTopHint & ~QtCore.Qt.FramelessWindowHint)
        self.show()  # setWindowFlags() hides the window; show it again
        self.move(pos)
        self.setGeometry(geo)
        # Menu-bar visibility doubles as the state flag for this mode
        self.menuBar().setVisible(not self.menuBar().isVisible())
    def create_menu(self):
        """Build the File/View/Help menus on the menu bar."""
        def clipboard():
            # Copy the web remote URL so it can be pasted into a browser
            url = "http://localhost:{}/".format(self.webremote.port)
            cb = QtWidgets.QApplication.clipboard()
            cb.setText(url, mode=cb.Clipboard)
        actionfile = self.menuBar().addMenu('File')
        preferences = QtWidgets.QAction('&Preferences', self, triggered=self.preferences.open)
        preferences.setMenuRole(QtWidgets.QAction.PreferencesRole)
        actionfile.addAction(preferences)
        actionfile.addAction(QtWidgets.QAction('&Copy Remote URL', self, triggered=clipboard))
        actionfile.addSeparator()
        actionfile.addAction(QtWidgets.QAction('&Import Preferences', self, triggered=self.import_settings))
        actionfile.addAction(QtWidgets.QAction('&Export Preferences', self, triggered=self.export_settings))
        actionfile.addAction(QtWidgets.QAction('&Import Game Database', self, triggered=self.import_database))
        actionfile.addAction(QtWidgets.QAction('&Export Game Database', self, triggered=self.export_database))
        actionfile.addSeparator()
        actionfile.addAction(QtWidgets.QAction('&Quit', self, triggered=self.quit))
        actionview = self.menuBar().addMenu('View')
        # Kept on self so set_dockable()/quit() can read the checked state later
        self.dockable = QtWidgets.QAction('Dockable', self, triggered=self.set_dockable)
        self.dockable.setCheckable(True)
        actionview.addSeparator()
        actionview.addAction(self.panel_status['dock'].toggleViewAction())
        actionview.addAction(self.gameslayout['dock'].toggleViewAction())
        actionview.addAction(self.log_panel.toggleViewAction())
        actionview.addSeparator()
        actionview.addAction(self.dockable)
        actionhelp = self.menuBar().addMenu('Help')
        actionhelp.addAction(QtWidgets.QAction('&Homepage', self, triggered=functools.partial(webbrowser.open, 'https://github.com/Regnareb/StreamManager')))
def create_gamelayout(self):
self.gameslayout = {}
self.gameslayout['llayout'] = QtWidgets.QVBoxLayout()
self.gameslayout['table'] = QtWidgets.QTableWidget()
self.gameslayout['table'].setObjectName('table_games')
self.gameslayout['table'].currentCellChanged.connect(self.load_appsettings)
self.gameslayout['table'].itemChanged.connect(self.rename_process)
self.gameslayout['table'].setEditTriggers(QtWidgets.QTableWidget.DoubleClicked)
self.gameslayout['table'].setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.gameslayout['table'].setColumnCount(1)
self.gameslayout['table'].setWordWrap(False)
self.gameslayout['table'].verticalHeader().setVisible(False)
self.gameslayout['table'].setMinimumWidth(200)
header = self.gameslayout['table'].horizontalHeader()
header.setMinimumHeight(40)
header.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
header.sectionClicked.connect(self.load_generalsettings)
self.gameslayout['table'].setHorizontalHeaderLabels(['GENERAL'])
self.gameslayout['add_process'] = QtWidgets.QPushButton('+')
self.gameslayout['add_process'].setFixedSize(30, 27)
self.gameslayout['add_process'].clicked.connect(self.add_process)
self.gameslayout['remove_process'] = QtWidgets.QPushButton('-')
self.gameslayout['remove_process'].setFixedSize(30, 27)
self.gameslayout['remove_process'].clicked.connect(self.remove_process)
self.gameslayout['addremove_layout'] = QtWidgets.QHBoxLayout()
self.gameslayout['addremove_layout'].addWidget(self.gameslayout['add_process'])
self.gameslayout['addremove_layout'].addWidget(self.gameslayout['remove_process'])
self.gameslayout['addremove_layout'].addStretch()
self.gameslayout['llayout'].addWidget(self.gameslayout['table'])
self.gameslayout['llayout'].addLayout(self.gameslayout['addremove_layout'])
self.gameslayout['rlayout'] = QtWidgets.QFormLayout()
self.gameslayout['rlayout'].setRowWrapPolicy(QtWidgets.QFormLayout.WrapAllRows)
self.gameslayout['stacked'] = QtWidgets.QStackedWidget()
self.gameslayout['stacked'].setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed))
self.gameslayout['stacked_processpath'] = LineEdit({True: QtWidgets.QApplication.style().standardIcon(QtWidgets.QStyle.SP_DirIcon)})
self.gameslayout['stacked_processpath'].changeButtonState(True)
self.gameslayout['stacked_processpath'].editingFinished.connect(self.save_appdata)
self.gameslayout['stacked_processpath'].buttonClicked.connect(self.get_processpath)
self.gameslayout['stacked_processpath'].setToolTip('Process Name/Path')
self.gameslayout['stacked_processlayout'] = QtWidgets.QFormLayout()
self.gameslayout['stacked_processlayout'].setRowWrapPolicy(QtWidgets.QFormLayout.WrapAllRows)
self.gameslayout['stacked_processlayout'].addRow('Executable name:', self.gameslayout['stacked_processpath'])
self.gameslayout['stacked_process'] = QtWidgets.QWidget()
self.gameslayout['stacked_processlayout'].setContentsMargins(0, 0, 0, 0)
self.gameslayout['stacked_process'].setLayout(self.gameslayout['stacked_processlayout'])
self.gameslayout['stacked_label'] = QtWidgets.QLabel()
self.gameslayout['stacked_label'].setText('Applied by default for all games if there is no data\nLocks will force this setting no matter what for all games')
self.gameslayout['stacked_label'].setAlignment(QtCore.Qt.AlignCenter)
self.gameslayout['stacked'].addWidget(self.gameslayout['stacked_process'])
self.gameslayout['stacked'].addWidget(self.gameslayout['stacked_label'])
self.gameslayout['rlayout'].addRow(self.gameslayout['stacked'])
self.gameslayout['stacked'].setCurrentWidget(self.gameslayout['stacked_label'])
elements = ['title', 'tags', 'command', 'description']
folder = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data', 'theme', 'images'))
icons = {False: QtGui.QIcon(folder + "/unlock.png"), True: QtGui.QIcon(folder + "/lock.png")}
self.gameslayout['category_layout'] = QtWidgets.QHBoxLayout()
self.gameslayout['category_layout'].setSpacing(0)
self.gameslayout['category_conflicts'] = QtWidgets.QPushButton('...')
self.gameslayout['category_conflicts'].setStyleSheet('border: 1px solid rgba(0, 0, 0, 50); padding:4px')
self.gameslayout['category_conflicts'].setFixedWidth(self.gameslayout['category_conflicts'].sizeHint().height())
self.gameslayout['category_conflicts'].clicked.connect(self.show_assignations)
self.gameslayout['category'] = LineEdit(icons)
self.gameslayout['category'].setToolTip('Category')
self.gameslayout['category'].editingFinished.connect(functools.partial(self.save_appdata, validate=True))
self.completer = QtWidgets.QCompleter(list(self.manager.database.keys()))
self.completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.gameslayout['category'].setCompleter(self.completer)
self.gameslayout['category_layout'].addWidget(self.gameslayout['category_conflicts'])
self.gameslayout['category_layout'].addWidget(self.gameslayout['category'])
self.gameslayout['rlayout'].addRow('Category:', self.gameslayout['category_layout'])
for key in elements:
self.gameslayout[key] = LineEdit(icons)
self.gameslayout[key].setMinimumHeight(30)
self.gameslayout[key].editingFinished.connect(self.save_appdata)
s = self.gameslayout[key].sizePolicy()
s.setRetainSizeWhenHidden(True)
self.gameslayout[key].setSizePolicy(s)
self.gameslayout[key].setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed))
self.gameslayout['rlayout'].addRow(key.title() + ':', self.gameslayout[key])
self.gameslayout[key].setToolTip(key.title())
self.gameslayout['rlayout'].labelForField(self.gameslayout['description']).setText('Game Description <span style="color:grey;">(!game)</span>:')
self.gameslayout['rlayout'].labelForField(self.gameslayout['command']).setText('Command to execute:')
self.gameslayout['container_llayout'] = QtWidgets.QWidget()
self.gameslayout['container_llayout'].setLayout(self.gameslayout['llayout'])
self.gameslayout['container_rlayout'] = QtWidgets.QWidget()
self.gameslayout['container_rlayout'].setLayout(self.gameslayout['rlayout'])
self.gameslayout['dock'] = QtWidgets.QDockWidget('Games')
self.gameslayout['dock'].setObjectName('dockgames')
self.gameslayout['dock_layout'] = QtWidgets.QHBoxLayout()
self.gameslayout['main'] = QtWidgets.QSplitter()
self.gameslayout['main'].addWidget(self.gameslayout['container_llayout'])
self.gameslayout['main'].addWidget(self.gameslayout['container_rlayout'])
self.gameslayout['main'].setStretchFactor(0, 0)
self.gameslayout['main'].setStretchFactor(1, 1)
self.gameslayout['main'].setCollapsible(0, 0)
self.gameslayout['main'].setCollapsible(1, 0)
self.gameslayout['main'].addWidget(self.gameslayout['container_rlayout'])
self.gameslayout['dock'].setWidget(self.gameslayout['main'])
def create_filedialog(self, action='open'):
if action == 'open':
path, _filters = QtWidgets.QFileDialog.getOpenFileName()
elif action == 'save':
path, _filters = QtWidgets.QFileDialog.getSaveFileName()
return path
def get_processpath(self, *args):
path = self.create_filedialog()
if path:
self.gameslayout['stacked_processpath'].setText(path)
    def add_process(self):
        """Prompt for a new process name and add a row for it in the games table."""
        self.nodal = DialogAddProcess(self.manager.database)
        self.nodal.exec_()
        name = self.nodal.linedit.text()
        if name:
            row = self.create_gamerow(name)
            index = self.gameslayout['table'].indexFromItem(row)
            self.gameslayout['table'].setCurrentIndex(index)
            # rename_process() returns None when the user cancels a duplicate-name
            # conflict; drop the freshly inserted row in that case
            if not self.rename_process():
                self.gameslayout['table'].removeRow(index.row())
            self.load_appsettings()
    def rename_process(self, *args):
        """Validate and apply a rename of the current table row's process.

        Returns:
            True when the rename was applied; None when it was rejected
            (empty name, or the user declined replacing an existing entry).
        """
        current = self.gameslayout['table'].currentItem()
        new = current.text()
        # _process holds the row's previous (committed) name
        old = current._process
        if not new:
            current.setText(old)
            return None
        if self.manager.config['appdata'].get(new, ''):
            msgBox = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Warning, "That Process Already Exists", 'The process "{}" already exists, are you sure you want to do that?\nIt will replace the old settings with the current ones.'.format(new))
            msgBox.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel)
            logger.warning('The same process is already registered: {}'.format(new))
            ret = msgBox.exec_()
            if ret == QtWidgets.QMessageBox.Ok:
                # Delete the old data and replace with current
                item = [i for i in self.gameslayout['table'].findItems(new, QtCore.Qt.MatchExactly) if i is not current][0]
                index = self.gameslayout['table'].indexFromItem(item)
                self.gameslayout['table'].removeRow(index.row())
                currentindex = self.gameslayout['table'].indexFromItem(current)
                self.gameslayout['table'].setCurrentIndex(currentindex)
            else:
                # Return to the previous name
                current.setText(old)
                return None
        self.manager.rename_process(old, new)
        current._process = new
        self.gameslayout['table'].sortByColumn(0, QtCore.Qt.AscendingOrder)
        return True
def remove_process(self):
current = self.gameslayout['table'].currentItem()
if current:
self.manager.remove_process(current.text())
self.gameslayout['table'].removeRow(self.gameslayout['table'].currentRow())
def import_settings(self):
path = self.create_filedialog(action='open')
if path:
self.manager.load_config(path, backup=False)
def export_settings(self):
path = self.create_filedialog(action='save')
if path:
self.manager.save_config(path)
def import_database(self):
path = self.create_filedialog(action='open')
if path:
self.manager.import_database(path)
def export_database(self):
path = self.create_filedialog(action='save')
if path:
self.manager.export_database(path)
    def save_appdata(self, validate=False):
        """Persist the settings form into the config.

        Writes to the selected game's entry when a table row is selected,
        otherwise (GENERAL header) into the 'base' defaults, including the
        lock-button states as 'forced_*' keys.

        Args:
            validate: When True, re-validate category assignations first.
        """
        current = self.gameslayout['table'].currentItem()
        cat = self.gameslayout['category'].text()
        title = self.gameslayout['title'].text()
        description = self.gameslayout['description'].text()
        tags = self.gameslayout['tags'].text().split(',')
        command = self.gameslayout['command'].text()
        tags = [i.strip() for i in tags if i]
        data = {'category': cat, 'title': title, 'tags': tags, 'description': description, 'command': command}
        if validate:
            self.manager.config['assignations'] = self.manager.validate_assignations(self.manager.config['assignations'], cat)
        if current and current.text():
            self.manager.config['appdata'][current.text()].update(data)
            # Executable path is stored per-platform
            self.manager.config['appdata'][current.text()]['path'][sys.platform] = self.gameslayout['stacked_processpath'].text()
            self.update_gamerow(current)
        elif not current:
            # GENERAL settings: also record each field's lock state
            for key in data.copy():
                data['forced_' + key] = self.gameslayout[key].button.state
            self.manager.config['base'].update(data)
        self.manager.process = '' # Reset current process to be able to apply new settings
        logger.debug(data)
def show_assignations(self):
category = self.gameslayout['category'].text()
self.preferences.open()
self.preferences.tabs.setCurrentIndex(2)
self.preferences.tabs.tabBar().hide()
if | |
= vault_id
file_path = None
try:
success, message, file_info = phantom_rules.vault_info(vault_id=vault_id)
if not file_info:
return RetVal3(action_result.set_status(phantom.APP_ERROR, EWSONPREM_ERR_VAULT_INFO), None, None)
file_path = list(file_info)[0].get('path')
except Exception:
return RetVal3(action_result.set_status(phantom.APP_ERROR, EWSONPREM_ERR_VAULT_INFO), None, None)
if not file_path:
return RetVal3(action_result.set_status(phantom.APP_ERROR, "Could not get file path for vault item"), None, None)
try:
with open(file_path, 'r') as f:
email_data = f.read()
except Exception:
return RetVal3(action_result.set_status(phantom.APP_ERROR, "Error occurred while reading vault file"), None, None)
return RetVal3(phantom.APP_SUCCESS, email_data, email_id)
def _get_mail_header_dict(self, email_data, action_result):
try:
mail = email.message_from_string(email_data)
except Exception:
return RetVal2(action_result.set_status(phantom.APP_ERROR,
"Unable to create email object from data. Does not seem to be valid email"), None)
headers = mail.__dict__.get('_headers')
if not headers:
return RetVal2(action_result.set_status(phantom.APP_ERROR,
"Could not extract header info from email object data. Does not seem to be valid email"), None)
ret_val = {}
for header in headers:
ret_val[header[0]] = header[1]
return RetVal2(phantom.APP_SUCCESS, ret_val)
    def _decode_uni_string(self, input_str, def_name):
        """Decode RFC 2047 encoded-words (=?charset?enc?...?=) found in input_str.

        Returns the fully decoded string when every encoded word decodes, the
        original string when nothing is encoded (or decoding is partial), or
        def_name when decode_header() fails outright.
        """
        # try to find all the decoded strings, we could have multiple decoded strings
        # or a single decoded string between two normal strings separated by \r\n
        # YEAH...it could get that messy
        encoded_strings = re.findall(r'=\?.*\?=', input_str, re.I)
        # return input_str as is, no need to do any conversion
        if not encoded_strings:
            return input_str
        # get the decoded strings
        try:
            decoded_strings = [decode_header(x)[0] for x in encoded_strings]
            decoded_strings = [{'value': x[0], 'encoding': x[1]} for x in decoded_strings]
        except Exception as e:
            error_code, error_msg = self._get_error_message_from_exception(e)
            err = EWSONPREM_EXCEPTION_ERR_MESSAGE.format(error_code, error_msg)
            self.debug_print("Decoding: {0}. {1}".format(encoded_strings, err))
            return def_name
        # convert to dict for safe access, if it's an empty list, the dict will be empty
        decoded_strings = dict(enumerate(decoded_strings))
        new_str = ''
        new_str_create_count = 0
        for i, encoded_string in enumerate(encoded_strings):
            decoded_string = decoded_strings.get(i)
            if not decoded_string:
                # nothing to replace with
                continue
            value = decoded_string.get('value')
            encoding = decoded_string.get('encoding')
            if not encoding or not value:
                # nothing to replace with
                continue
            try:
                # Some non-ascii characters were causing decoding issue with
                # the UnicodeDammit and working correctly with the decode function.
                # keeping previous logic in the except block incase of failure.
                value = value.decode(encoding)
                new_str += value
                new_str_create_count += 1
            except Exception:
                try:
                    if encoding != 'utf-8':
                        value = str(value, encoding)
                except Exception:
                    pass
                try:
                    # commenting the existing approach due to a new approach being deployed below
                    # substitute the encoded string with the decoded one
                    # input_str = input_str.replace(encoded_string, value)
                    # make new string insted of replacing in the input string because issue find in PAPP-9531
                    if value:
                        new_str += UnicodeDammit(value).unicode_markup
                        new_str_create_count += 1
                except Exception:
                    pass
        # replace input string with new string because issue find in PAPP-9531
        # (only when every encoded word was decoded, to avoid dropping plain text)
        if new_str and new_str_create_count == len(encoded_strings):
            self.debug_print("Creating a new string entirely from the encoded_strings and assigning into input_str")
            input_str = new_str
        return input_str
    def _get_email_headers_from_mail(self, mail, charset=None, email_headers=None):
        """Extract headers from an email.message.Message into a CaseInsensitiveDict.

        All 'Received' headers are collected into a list under 'Received', and a
        'decodedSubject' key is added when the subject contains encoded-words.
        Returns {} when no headers are available.
        """
        if mail:
            email_headers = list(mail.items())  # gives the message's (name, value) pairs
            if charset is None:
                charset = mail.get_content_charset()
        if not charset:
            charset = 'utf-8'
        if not email_headers:
            return {}
        # Convert the header tuple into a dictionary
        headers = CaseInsensitiveDict()
        try:
            [headers.update({x[0]: self._get_string(x[1], charset)}) for x in email_headers]
        except Exception as e:
            error_code, error_msg = self._get_error_message_from_exception(e)
            err = "Error occurred while converting the header tuple into a dictionary"
            self.debug_print("{}. {}. {}".format(err, error_code, error_msg))
        # Handle received seperately: multiple Received headers must all be kept
        try:
            received_headers = list()
            received_headers = [self._get_string(x[1], charset) for x in email_headers if x[0].lower() == 'received']
        except Exception as e:
            error_code, error_msg = self._get_error_message_from_exception(e)
            err = "Error occurred while handling the received header tuple separately"
            self.debug_print("{}. {}. {}".format(err, error_code, error_msg))
        if received_headers:
            headers['Received'] = received_headers
        # handle the subject string, if required add a new key
        subject = headers.get('Subject')
        if subject:
            if isinstance(subject, str):
                headers['decodedSubject'] = self._decode_uni_string(subject, subject)
        return headers
    def _handle_email_with_container_id(self, action_result, container_id, ingest_email, target_container_id=None):
        """Fetch an email's raw data from an existing container, surface its headers, optionally ingest it.

        Args:
            action_result: ActionResult to populate with data and status.
            container_id: Container holding the original email.
            ingest_email: When truthy, also run the email through ProcessEmail.
            target_container_id: Optional container to ingest into.

        Returns:
            A phantom status recorded on action_result.
        """
        ret_val, email_data, email_id = self._get_email_data_from_container(container_id, action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        action_result.update_summary({"email_id": email_id})
        ret_val, header_dict = self._get_mail_header_dict(email_data, action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        action_result.add_data(header_dict)
        if not ingest_email:
            return action_result.set_status(phantom.APP_SUCCESS, "Successfully fetched email headers")
        config = {
            "extract_attachments": True,
            "extract_domains": True,
            "extract_hashes": True,
            "extract_ips": True,
            "extract_urls": True }
        process_email = ProcessEmail()
        ret_val, message = process_email.process_email(self, email_data, email_id, config, None, target_container_id)
        if phantom.is_fail(ret_val):
            return action_result.set_status(phantom.APP_ERROR, message)
        # get the container id that of the email that was ingested
        container_id = self._get_container_id(email_id)
        action_result.update_summary({"container_id": container_id})
        return action_result.set_status(phantom.APP_SUCCESS)
def _handle_email_with_vault_id(self, action_result, vault_id, ingest_email, target_container_id=None):
ret_val, email_data, email_id = self._get_email_data_from_vault(vault_id, action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
try:
mail = email.message_from_string(email_data)
headers = self._get_email_headers_from_mail(mail)
except Exception as e:
error_code, error_msg = self._get_error_message_from_exception(e)
error_text = EWSONPREM_EXCEPTION_ERR_MESSAGE.format(error_code, error_msg)
return action_result.set_status(phantom.APP_ERROR, "Unable to get email header string from message. {0}".format(error_text)), None
if not headers:
return action_result.set_status(phantom.APP_ERROR, "Unable to fetch the headers information from the provided file"), None
action_result.add_data(dict(headers))
if not ingest_email:
return action_result.set_status(phantom.APP_SUCCESS, "Successfully fetched email headers")
config = {
"extract_attachments": True,
"extract_domains": True,
"extract_hashes": True,
"extract_ips": True,
"extract_urls": True }
process_email = ProcessEmail()
ret_val, message = process_email.process_email(self, email_data, email_id, config, None, target_container_id)
if phantom.is_fail(ret_val):
return action_result.set_status(phantom.APP_ERROR, message)
# get the container id that of the email that was ingested
container_id = self._get_container_id(email_id)
action_result.update_summary({"container_id": container_id})
return action_result.set_status(phantom.APP_SUCCESS)
    def _get_email(self, param):
        """Action handler for 'get email': fetch by EWS id, container id, or vault id.

        Exactly one of id/container_id/vault_id must be supplied; the container
        and vault variants are delegated to dedicated helpers. Optionally ingests
        the email afterwards.
        """
        action_result = self.add_action_result(ActionResult(dict(param)))
        # Connectivity
        self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host)
        email_id = self._handle_py_ver_compat_for_input_unicode(param.get(EWSONPREM_JSON_ID, ""))
        container_id = param.get(EWS_JSON_CONTAINER_ID)
        vault_id = param.get(EWS_JSON_VAULT_ID)
        self._target_user = param.get(EWSONPREM_JSON_EMAIL)
        use_current_container = param.get('use_current_container')
        target_container_id = None
        if container_id is not None:
            ret_val, container_id = self._validate_integer(action_result, container_id, "container_id")
            if phantom.is_fail(ret_val):
                return action_result.get_status()
        if use_current_container:
            target_container_id = self.get_container_id()
        if not email_id and not container_id and not vault_id:
            return action_result.set_status(phantom.APP_ERROR, "Please specify id, container_id or vault_id to get the email")
        ingest_email = param.get(EWSONPREM_JSON_INGEST_EMAIL, False)
        # Delegate the container / vault variants
        if container_id is not None:
            return self._handle_email_with_container_id(action_result, container_id, ingest_email, target_container_id)
        if vault_id is not None:
            return self._handle_email_with_vault_id(action_result, vault_id, ingest_email, target_container_id)
        else:
            # Plain EWS id: fetch the item over SOAP
            data = ews_soap.xml_get_emails_data([email_id], self._version)
            ret_val, resp_json = self._make_rest_call(action_result, data, self._check_getitem_response)
            # Process errors
            if phantom.is_fail(ret_val):
                message = "Error while getting email data for id {0}. Error: {1}".format(self._handle_py_ver_compat_for_input_str(email_id),
                        action_result.get_message())
                self.debug_print(message)
                self.send_progress(message)
                return phantom.APP_ERROR
            self._cleanse_key_names(resp_json)
            """
            ret_val, rfc822_format = self._get_rfc822_format(resp_json, action_result)
            if (phantom.is_fail(ret_val)):
                return phantom.APP_ERROR
            if (not rfc822_format):
                return action_result.set_status(phantom.APP_ERROR, 'Result does not contain rfc822 data')
            """
            resp_items = resp_json.get('m_Items')
            if not resp_items or not isinstance(resp_items, dict):
                message = {}
            else:
                message = next(iter(resp_items.values()))
            # Remove mime content because it can be very large
            if 't_MimeContent' in message:
                message.pop('t_MimeContent')
            action_result.add_data(message)
            # Normalize a single recipient into a list for consistent output
            recipients_mailbox = message.get('t_ToRecipients', {}).get('t_Mailbox')
            if recipients_mailbox and (not isinstance(recipients_mailbox, list)):
                message['t_ToRecipients']['t_Mailbox'] = [recipients_mailbox]
            summary = {'subject': message.get('t_Subject'),
                    'create_time': message.get('t_DateTimeCreated'),
                    'sent_time': message.get('t_DateTimeSent')}
            action_result.update_summary(summary)
            if not ingest_email:
                return action_result.set_status(phantom.APP_SUCCESS)
            try:
                self._process_email_id(email_id, target_container_id)
            except Exception as e:
                error_code, error_msg = self._get_error_message_from_exception(e)
                error_text = EWSONPREM_EXCEPTION_ERR_MESSAGE.format(error_code, error_msg)
                self.debug_print("Error occurred in _process_email_id with Message ID: {0}. {1}".format(email_id, error_text))
                action_result.update_summary({"container_id": None})
                return action_result.set_status(phantom.APP_ERROR, "Error processing email. {0}".format(error_text))
            if target_container_id is None:
                # get the container id that of the email that was ingested
                container_id = self._get_container_id(email_id)
                action_result.update_summary({"container_id": container_id})
            else:
                action_result.update_summary({"container_id": target_container_id})
            return action_result.set_status(phantom.APP_SUCCESS)
def _update_email(self, param):
action_result = self.add_action_result(ActionResult(dict(param)))
# Connectivity
self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host)
email_id = self._handle_py_ver_compat_for_input_unicode(param[EWSONPREM_JSON_ID])
self._target_user = param.get(EWSONPREM_JSON_EMAIL)
category = self._handle_py_ver_compat_for_input_unicode(param.get('category'))
subject = self._handle_py_ver_compat_for_input_unicode(param.get('subject'))
if subject is None and category is None:
return action_result.set_status(phantom.APP_ERROR, "Please specify one of the email properties to update")
# do a get on the message to get the change id
data = ews_soap.xml_get_emails_data([email_id], self._version)
ret_val, resp_json = self._make_rest_call(action_result, data, self._check_getitem_response)
# Process errors
if phantom.is_fail(ret_val):
message = "Error while getting email data for id {0}. Error: {1}".format(self._handle_py_ver_compat_for_input_str(email_id),
action_result.get_message())
self.debug_print(message)
self.send_progress(message)
return phantom.APP_ERROR
try:
change_key = next(iter(resp_json['m:Items'].values()))['t:ItemId']['@ChangeKey']
except Exception:
return action_result.set_status(phantom.APP_ERROR, "Unable to get the change key of the email to update")
if category is not None:
category = [x.strip() for x in category.split(',')]
category = list(filter(None, category))
data = ews_soap.get_update_email(email_id, change_key, category, subject)
ret_val, resp_json = self._make_rest_call(action_result, data, self._check_update_response)
# Process errors
if phantom.is_fail(ret_val):
return action_result.get_status()
if not resp_json:
return action_result.set_status(phantom.APP_ERROR, 'Result does not contain RootFolder key')
data = ews_soap.xml_get_emails_data([email_id], self._version)
ret_val, resp_json = self._make_rest_call(action_result, data, self._check_getitem_response)
# Process errors
if phantom.is_fail(ret_val):
return action_result.get_status()
self._cleanse_key_names(resp_json)
resp_items = resp_json.get('m_Items')
if not resp_items or not isinstance(resp_items, dict):
message = {}
else:
message = next(iter(resp_items.values()))
categories = message.get('t_Categories', {}).get('t_String')
if categories:
if not isinstance(categories, list):
categories = [categories]
message['t_Categories'] = categories
action_result.add_data(message)
recipients_mailbox = message.get('t_ToRecipients', {}).get('t_Mailbox')
if recipients_mailbox and (not isinstance(recipients_mailbox, list)):
| |
position embeddings
self.relative_pos_embeddings = nn.Linear(config.hidden_size, self.num_buckets * self.num_attn_heads)
# for onnx runtime
self.onnx_trace = False
def _reshape(self, tensor, first_dim, batch_size):
return tensor.reshape(first_dim, batch_size * self.num_attn_heads, self.head_dim).transpose(0, 1)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(
self,
hidden_states,
layer_state=None,
attention_mask=None,
extended_predict_attention_mask=None,
main_relative_position_buckets=None,
predict_relative_position_buckets=None,
position_ids=None,
):
sequence_length, batch_size, hidden_size = hidden_states.size()
assert list(hidden_states.size()) == [
sequence_length,
batch_size,
hidden_size,
], f"`hidden_states` should be of shape {sequence_length, batch_size, hidden_size}, but is of shape {hidden_states.shape}"
# key and value of previous time steps are cached
saved_state = layer_state.get("self", None)
# project
query_states = self.query_proj(hidden_states)
key_states = self.key_proj(hidden_states)
value_states = self.value_proj(hidden_states)
# normalize
query_states = query_states / (self.head_dim ** 0.5)
# reshape
query_states = self._reshape(query_states, sequence_length, batch_size)
key_states = self._reshape(key_states, -1, batch_size)
value_states = self._reshape(value_states, -1, batch_size)
# chunk into main stream and predict stream
hidden_states_list = hidden_states.chunk(1 + self.ngram, dim=0)
query_states_list = query_states.chunk(1 + self.ngram, dim=1)
key_states_list = key_states.chunk(1 + self.ngram, dim=1)
value_states_list = value_states.chunk(1 + self.ngram, dim=1)
main_hidden_states, hidden_states_predict_list = hidden_states_list[0], hidden_states_list[1:]
main_query_states, predict_query_states_list = query_states_list[0], query_states_list[1:]
main_key_states, predict_key_states_list = key_states_list[0], key_states_list[1:]
main_value_states, predict_value_states_list = value_states_list[0], value_states_list[1:]
# saved states are stored with shape (batch_size, num_attn_heads, seq_len, head_dim)
if saved_state is not None:
prev_main_key_states = saved_state["prev_key_states"].view(
batch_size * self.num_attn_heads, -1, self.head_dim
)
main_key_states = torch.cat((prev_main_key_states, main_key_states), dim=1)
prev_main_value_states = saved_state["prev_value_states"].view(
batch_size * self.num_attn_heads, -1, self.head_dim
)
main_value_states = torch.cat((prev_main_value_states, main_value_states), dim=1)
# Update cache
layer_state["self"] = {
"prev_key_states": main_key_states.view(batch_size, self.num_attn_heads, -1, self.head_dim),
"prev_value_states": main_value_states.view(batch_size, self.num_attn_heads, -1, self.head_dim),
}
# get seq_length of main stream only
main_sequence_length = sequence_length // (1 + self.ngram)
# MAIN-STREAM
# main attn weights
main_attn_weights = torch.bmm(main_query_states, main_key_states.transpose(1, 2))
# retrieve relative position embeddings for each layer -> see paper for more details
main_relative_pos_embeddings = self.get_main_relative_pos_embeddings(
main_hidden_states, main_attn_weights, position_ids, main_relative_position_buckets
)
main_attn_weights = main_attn_weights + main_relative_pos_embeddings
if attention_mask is not None:
main_attn_weights = main_attn_weights + attention_mask
main_attn_probs = softmax(
main_attn_weights,
dim=-1,
onnx_trace=self.onnx_trace,
).type_as(main_attn_weights)
main_attn_probs = F.dropout(main_attn_probs, p=self.attention_dropout, training=self.training)
# project to attn_output
main_attn_output = torch.bmm(main_attn_probs, main_value_states)
main_attn_output = (
main_attn_output.transpose(0, 1).contiguous().view(1, main_sequence_length, batch_size, hidden_size)
)
main_attn_output = self.out_proj(main_attn_output)
# PREDICT-STREAM
# [ngram, B*head, T, c]
predict_query_states = torch.cat(predict_query_states_list, 0).view(
self.ngram, -1, main_sequence_length, self.head_dim
)
# [ngram, B*head, 2*T, c]
predict_key_states = torch.cat(
[torch.cat([main_key_states, key], 1).unsqueeze(0) for key in predict_key_states_list], 0
)
# [ngram, T, B, C]
predict_hidden_states = torch.cat(hidden_states_predict_list, 0).view(
self.ngram, main_sequence_length, batch_size, hidden_size
)
# [ngram, B*head, 2*T, c]
predict_value_states = torch.cat(
[torch.cat([main_value_states, v_p], 1).unsqueeze(0) for v_p in predict_value_states_list], 0
)
# [ngram, B*head, T, 2*T]
predict_attn_weights = torch.einsum("nbtc,nbsc->nbts", (predict_query_states, predict_key_states))
# [ngram, B*head, T, S]
# retrieve relative position embeddings for each layer -> see paper for more details
predict_relative_pos_embeddings = self.get_predict_relative_pos_embeddings(
predict_hidden_states, predict_attn_weights, position_ids, predict_relative_position_buckets
)
# [ngram, B*head, T, 2*T]
predict_attn_weights = predict_attn_weights + predict_relative_pos_embeddings
if extended_predict_attention_mask is not None:
predict_attn_weights = predict_attn_weights + extended_predict_attention_mask
predict_attn_probs = softmax(
predict_attn_weights,
dim=-1,
onnx_trace=self.onnx_trace,
).type_as(predict_attn_weights)
predict_attn_probs = F.dropout(predict_attn_probs, p=self.attention_dropout, training=self.training)
# project to attention output
# [ngram, B*head, T, c]
predict_attn_output = torch.einsum("nbts,nbsc->nbtc", (predict_attn_probs, predict_value_states))
# [ngram, T, B, C]
predict_attn_output = (
predict_attn_output.transpose(1, 2)
.contiguous()
.view(self.ngram, main_sequence_length, batch_size, hidden_size)
)
predict_attn_output = self.out_proj(predict_attn_output)
# concat to single attn output
# [1+ngram*T, B, C]
attn_output = torch.cat([main_attn_output, predict_attn_output], 0).view(-1, batch_size, hidden_size)
# reshape into better form for `config.output_attentions`
main_attn_probs = main_attn_probs.view(batch_size, self.num_attn_heads, main_sequence_length, -1)
predict_attn_probs = predict_attn_probs.view(
self.ngram, batch_size, self.num_attn_heads, main_sequence_length, -1
).transpose(0, 1)
attn_output = F.dropout(attn_output, p=self.dropout, training=self.training)
return attn_output, main_attn_probs, predict_attn_probs
    def get_main_relative_pos_embeddings(
        self, hidden_states, attn_weights, position_ids, main_relative_position_buckets
    ):
        """Compute the additive relative-position bias for the main attention stream.

        When ``main_relative_position_buckets`` is None, the buckets are derived
        from ``position_ids`` via ``compute_relative_buckets``. Returns a tensor
        shaped ``attn_weights.shape[:2] + (S,)`` that is added to the attention
        scores by the caller.
        """
        # input hidden_states [T,B,C], input attn_weights [T*head,T,S], input position_ids [B,T] or [1,1]
        if main_relative_position_buckets is None:
            # NOTE(review): hidden_states is documented above as [T,B,C], so these
            # names look swapped (dim 0 is T, dim 1 is B) — confirm against callers.
            batch_size, sequence_length = hidden_states.shape[:2]
            relative_positions = (
                torch.arange(1, attn_weights.shape[-1] + 1)
                .unsqueeze(0)
                .unsqueeze(0)
                .repeat(batch_size, sequence_length, 1)
                .to(position_ids.device)
            )
            # signed distance of every key position from each query position
            relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(
                batch_size, sequence_length, 1
            )  # [B, T, s]
            main_relative_position_buckets = compute_relative_buckets(
                self.num_buckets, self.relative_max_distance, relative_positions, False
            )
        hidden_states = hidden_states.transpose(0, 1)  # [B,T,C]
        rel_pos_embeddings = self.relative_pos_embeddings(hidden_states)  # [B,T,Buckets*head]
        rel_pos_embeddings = rel_pos_embeddings.view(
            rel_pos_embeddings.shape[:2] + (self.num_buckets, self.num_attn_heads)
        ).permute(
            0, 3, 1, 2
        )  # [B,head,T,Buckets] — permute(0,3,1,2) moves the head axis forward
        rel_pos_embeddings = rel_pos_embeddings.reshape(attn_weights.shape[:2] + (-1,))  # [B*head,T,Buckets]
        main_relative_position_buckets = (
            main_relative_position_buckets.repeat(1, self.num_attn_heads, 1)
            .view(-1, main_relative_position_buckets.shape[-1])
            .long()
        )  # [B*head*T, T]
        rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1))  # [B*head*T,Buckets]
        # select, for each (batch, head, query) row, the embedding of every key's bucket
        main_relative_pos_embeddings = torch.gather(
            rel_pos_embeddings, dim=1, index=main_relative_position_buckets
        ).view(attn_weights.shape[:2] + (-1,))
        return main_relative_pos_embeddings
    def get_predict_relative_pos_embeddings(
        self, hidden_states, attn_weights, position_ids, predict_relative_position_buckets
    ):
        """Compute the additive relative-position bias for the predict streams.

        When ``predict_relative_position_buckets`` is None, the buckets are derived
        from ``position_ids`` (which must then point at the last key position — see
        the assert below). Returns a tensor of shape [ngram, B*head, T, S].
        """
        # input hidden_states [ngram, T,B,C], input attn_weights [ngram, B*head,T,S], input position_ids [B,T] or [1,1], input predict_relative_position_buckets [B,T, 2*T] or None
        sequence_length, batch_size = hidden_states.shape[1:3]
        if predict_relative_position_buckets is None:
            key_sequence_length = attn_weights.shape[-1]
            assert (
                position_ids[0][0] == key_sequence_length - 1
            ), "`position_ids` are incorrect. They should be of the format 1 2 3 4 5 ... (key_sequence_length - 1)"
            relative_positions = (
                torch.arange(0, key_sequence_length)
                .unsqueeze(0)
                .unsqueeze(0)
                .repeat(batch_size, sequence_length, 1)
                .to(position_ids.device)
            )
            # signed distance of every key position from the current position
            relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1)
            predict_relative_position_buckets = compute_relative_buckets(
                self.num_buckets, self.relative_max_distance, relative_positions, False
            )
        hidden_states = hidden_states.transpose(1, 2)  # [ngram, B, T, C]
        rel_pos_embeddings = self.relative_pos_embeddings(hidden_states).view(
            hidden_states.shape[:-1] + (self.num_buckets, self.num_attn_heads)
        )  # [ngram, B, T, bucket, head]
        rel_pos_embeddings = rel_pos_embeddings.permute(0, 1, 4, 2, 3).reshape(
            self.ngram * batch_size * self.num_attn_heads, sequence_length, -1
        )  # [ngram*B*head, T, bucket]
        predict_relative_position_buckets = predict_relative_position_buckets.unsqueeze(0).repeat(
            self.ngram, 1, self.num_attn_heads, 1
        )  # [ngram, B, head*T, S]
        rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1))
        predict_relative_position_buckets = predict_relative_position_buckets.view(
            -1, predict_relative_position_buckets.size(-1)
        ).long()  # [ngram*B*head*T, S]
        # per-row bucket lookup, then restore the [ngram, B*head, T, S] layout
        predict_relative_pos_embeddings = torch.gather(
            rel_pos_embeddings, dim=1, index=predict_relative_position_buckets
        ).view(
            self.ngram, batch_size * self.num_attn_heads, sequence_length, -1
        )  # [ngram, B*head, T, S]
        return predict_relative_pos_embeddings
class ProphetNetEncoderLayer(nn.Module):
    """
    Encoder block for Prophetnet: self-attention followed by a feed-forward
    network, each wrapped in a residual connection plus layer norm.
    """

    def __init__(self, config: ProphetNetConfig):
        super().__init__()
        # 1st residual block: self-attention + layer norm
        self.self_attn = ProphetNetSelfAttention(config, config.num_encoder_attention_heads)
        self.self_attn_layer_norm = ProphetNetLayerNorm(config.hidden_size)
        # 2nd residual block: feed-forward + layer norm
        self.feed_forward = ProhpetNetFeedForward(config, config.encoder_ffn_dim)
        self.feed_forward_layer_norm = ProphetNetLayerNorm(config.hidden_size)

    def forward(self, hidden_states, attention_mask):
        # 1st residual block
        attn_out, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
        )
        hidden_states = self.self_attn_layer_norm(attn_out + hidden_states)
        # 2nd residual block
        ff_out = self.feed_forward(hidden_states)
        hidden_states = self.feed_forward_layer_norm(ff_out + hidden_states)
        return hidden_states, attn_weights
class ProphetNetDecoderLayer(nn.Module):
    """
    Decoder block for Prophetnet: ngram self-attention, optional encoder-decoder
    cross-attention, then a feed-forward network — each as a residual block with
    layer norm.
    """

    def __init__(self, config: ProphetNetConfig):
        super().__init__()
        # 1st residual block: ngram self-attention
        self.self_attn = ProphetNetNgramProphetNetSelfAttention(config)
        self.self_attn_layer_norm = ProphetNetLayerNorm(config.hidden_size)
        # 2nd residual block: cross-attention, only when configured
        if config.add_cross_attention:
            self.cross_attn = ProphetNetSelfAttention(config, config.num_decoder_attention_heads)
            self.cross_attn_layer_norm = ProphetNetLayerNorm(config.hidden_size)
        # 3rd residual block: feed-forward
        self.feed_forward = ProhpetNetFeedForward(config, config.decoder_ffn_dim)
        self.feed_forward_layer_norm = ProphetNetLayerNorm(config.hidden_size)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        encoder_attn_mask=None,
        layer_state=None,
        attention_mask=None,
        extended_predict_attention_mask=None,
        main_relative_position_buckets=None,
        predict_relative_position_buckets=None,
        position_ids=None,
    ):
        if layer_state is None:
            layer_state = {}
        # 1st residual block
        ngram_attn_out, self_attn_weights, self_attn_weights_ngram = self.self_attn(
            hidden_states=hidden_states,
            layer_state=layer_state,
            attention_mask=attention_mask,
            extended_predict_attention_mask=extended_predict_attention_mask,
            main_relative_position_buckets=main_relative_position_buckets,
            predict_relative_position_buckets=predict_relative_position_buckets,
            position_ids=position_ids,
        )
        hidden_states = self.self_attn_layer_norm(hidden_states + ngram_attn_out)
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            # 2nd residual block
            cross_attn_out, cross_attn_weights = self.cross_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attn_mask,
                layer_state=layer_state,  # mutates layer state
            )
            hidden_states = self.cross_attn_layer_norm(cross_attn_out + hidden_states)
        # 3rd residual block
        ff_out = self.feed_forward(hidden_states)
        hidden_states = self.feed_forward_layer_norm(ff_out + hidden_states)
        return (
            hidden_states,
            self_attn_weights,
            self_attn_weights_ngram,
            cross_attn_weights,
            layer_state,
        )  # just self_attn weights for now, following t5, layer_state = cache for decoding
@add_start_docstrings(
"The standalone encoder part of the ProphetNetModel.",
PROPHETNET_START_DOCSTRING,
)
class ProphetNetEncoder(ProphetNetPreTrainedModel):
r"""
word_embeddings (:obj:`torch.nn.Embeddings` of shape :obj:`(config.vocab_size, config.hidden_size)`, `optional`):
The word embedding parameters. This can be used to initialize :class:`~transformers.ProphetNetEncoder` with
pre-defined word embeddings instead of randomely initialized word embeddings.
"""
def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = None):
super().__init__(config)
self.word_embeddings = (
word_embeddings
if word_embeddings is not None
else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
)
self.position_embeddings = ProhpetNetPositionalEmbeddings(config)
self.embeddings_layer_norm = ProphetNetLayerNorm(config.hidden_size)
self.layers = nn.ModuleList([ProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)])
self.init_weights()
def get_input_embeddings(self):
return self.word_embeddings
def set_input_embeddings(self, value):
self.word_embeddings = value
@add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example::
>>> from transformers import ProphetNetTokenizer, ProphetNetEncoder
>>> import torch
>>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
>>> model = ProphetNetEncoder.from_pretrained('patrickvonplaten/prophetnet-large-uncased-standalone', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is None and inputs_embeds is None:
raise ValueError("Either input_ids or inputs_embeds has to be passed.")
elif input_ids is not None and inputs_embeds is not None:
raise ValueError("Make sure to only pass input_ids or inputs_embeds.")
elif | |
If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
    #07:Check:vManage:Validate all services are up
    # Runs criticalCheckseven(); on failure records analysis + action in
    # `critical_checks` and logs details at ERROR level. All outcomes are
    # mirrored to both the log file and the report file.
    print(' #07:Checking:vManage:Validate all services are up')
    log_file_logger.info('#07:Check:vManage:Validate all services are up')
    writeFile(report_file, '#07:Check:vManage:Validate all services are up\n\n')
    try:
        nms_data, nms_failed, check_result, check_analysis, check_action = criticalCheckseven()
        if check_result == 'Failed':
            critical_checks['#07:Check:vManage:Validate all services are up'] = [check_analysis, check_action]
            log_file_logger.error('#07: Check result: {}'.format(check_result))
            log_file_logger.error('#07: Check Analysis: {}'.format(check_analysis))
            log_file_logger.error('#07: List of services that are enabled but not running:\n{}'.format(nms_failed))
            log_file_logger.error('#07: Status of all services :\n{}\n'.format(nms_data))
            # NOTE(review): 'ERROR -{}' is missing a space after the dash, unlike
            # the other checks — presumably an unintentional formatting slip.
            writeFile(report_file, 'Result: ERROR -{}\n'.format(check_analysis))
            writeFile(report_file, 'Action: {}\n\n'.format(check_action))
        else:
            log_file_logger.info('#07: Check result: {}'.format(check_result))
            log_file_logger.info('#07: Check Analysis: {}'.format(check_analysis))
            log_file_logger.info('#07: Status of all the services: \n{}\n'.format(nms_data))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
    except Exception as e:
        # Check failures must not abort the audit run: report and continue.
        print('\033[1;31m ERROR: Error performing #07:Check:vManage:Validate all services are up. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
        log_file_logger.exception('{}\n'.format(e))
    #08:Check:vManage:Elasticsearch Indices version
    # Flags Elasticsearch indices whose version is older than expected for the
    # running vManage release (criticalCheckeight takes the version tuple).
    print(' #08:Checking:vManage:Elasticsearch Indices version ')
    log_file_logger.info('#08:Check:vManage:Elasticsearch Indices version ')
    writeFile(report_file, '#08:Check:vManage:Elasticsearch Indices version \n\n')
    try:
        version_list, check_result, check_analysis, check_action = criticalCheckeight(version_tuple)
        if check_result == 'Failed':
            critical_checks['#08:Check:vManage:Elasticsearch Indices version'] = [ check_analysis, check_action]
            log_file_logger.error('#08: Check result: {}'.format(check_result))
            log_file_logger.error('#08: Check Analysis: {}'.format(check_analysis))
            log_file_logger.error('#08: List of indices with older versions :\n{}\n'.format(version_list))
            writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
            writeFile(report_file, 'Action: {}\n\n'.format(check_action))
        else:
            log_file_logger.info('#08: Check result: {}'.format(check_result))
            log_file_logger.info('#08: Check Analysis: {}\n'.format(check_analysis))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
    except Exception as e:
        # Check failures must not abort the audit run: report and continue.
        print('\033[1;31m ERROR: Error performing #08:Check:vManage:Elasticsearch Indices version. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
        log_file_logger.exception('{}\n'.format(e))
    #09:Check:vManage:Evaluate incoming DPI data size
    # Pulls the Elasticsearch index-size estimate from the vManage REST API and
    # compares daily DPI/Approute ingest against the deployment's capacity.
    print(' #09:Checking:vManage:Evaluate incoming DPI data size')
    log_file_logger.info('#09:Check:vManage:Evaluate incoming DPI data size')
    writeFile(report_file, '#09:Check:vManage:Evaluate incoming DPI data size\n\n')
    try:
        es_indices_est = json.loads(getRequest(version_tuple,vmanage_lo_ip, jsessionid,'management/elasticsearch/index/size/estimate', args.vmanage_port, tokenid))
        # NOTE(review): 'ondeday' in these names looks like a typo for 'oneday';
        # kept as-is since it is only a local name.
        appr_estimate_ondeday, dpi_estimate_ondeday, check_result, check_analysis,check_action = criticalChecknine(es_indices_est, server_type, cluster_size, cpu_count, total_devices, dpi_status)
        if check_result == 'Failed':
            critical_checks['#09:Check:vManage:Evaluate incoming DPI data size'] = [ check_analysis, check_action]
            log_file_logger.error('#09: Check result: {}'.format(check_result))
            log_file_logger.error('#09: Check Analysis: {}'.format(check_analysis))
            log_file_logger.error('#09: Daily incoming DPI data : {}'.format(dpi_estimate_ondeday))
            log_file_logger.error('#09: Daily incoming Approute data : {}\n'.format(appr_estimate_ondeday))
            writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
            writeFile(report_file, 'Action: {}\n\n'.format(check_action))
        else:
            log_file_logger.info('#09: Check result: {}'.format(check_result))
            log_file_logger.info('#09: Check Analysis: {}\n'.format(check_analysis))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
    except Exception as e:
        # Check failures must not abort the audit run: report and continue.
        print('\033[1;31m ERROR: Error performing #09:Check:vManage:Evaluate incoming DPI data size. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
        log_file_logger.exception('{}\n'.format(e))
    #10:Check:vManage:NTP status across network
    # Verifies NTP associations on all controllers; clock skew between nodes
    # breaks certificate validation and statistics collection.
    print(' #10:Checking:vManage:NTP status across network')
    log_file_logger.info('#10:Check:vManage:NTP status across network')
    writeFile(report_file, '#10:Check:vManage:NTP status across network\n\n')
    try:
        ntp_nonworking, check_result, check_analysis, check_action = criticalCheckten(version_tuple, controllers_info)
        if check_result == 'Failed':
            critical_checks['#10:Check:vManage:NTP status across network'] = [ check_analysis, check_action]
            log_file_logger.error('#10: Check result: {}'.format(check_result))
            log_file_logger.error('#10: Check Analysis: {}'.format(check_analysis))
            log_file_logger.error('#10: Devices with invalid ntp associations: \n{}\n'.format(ntp_nonworking))
            writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
            writeFile(report_file, 'Action: {}\n\n'.format(check_action))
        else:
            log_file_logger.info('#10: Check result: {}'.format(check_result))
            log_file_logger.info('#10: Check Analysis: {}\n'.format(check_analysis))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
    except Exception as e:
        # Check failures must not abort the audit run: report and continue.
        print('\033[1;31m ERROR: Error performing #10:Check:vManage:NTP status across network. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
        log_file_logger.exception('{}\n'.format(e))
    #11:Check:Controllers:Validate vSmart/vBond CPU count for scale
    # Queries each vBond/vSmart for its synced CPU count, appends it to the
    # per-device info list (mutating vbond_info/vsmart_info in place), then
    # validates the counts against the scale implied by total_devices.
    print(' #11:Checking:Controllers:Validate vSmart/vBond CPU count for scale')
    log_file_logger.info('#11:Check:Controllers:Validate vSmart/vBond CPU count for scale')
    writeFile(report_file, '#11:Check:Controllers:Validate vSmart/vBond CPU count for scale\n\n')
    try:
        for vbond in vbond_info:
            # vbond_info[vbond][1] holds the device id used by the API query
            output = json.loads(getRequest( version_tuple,vmanage_lo_ip, jsessionid,'device/system/synced/status?deviceId={}'.format(vbond_info[vbond][1]),args.vmanage_port, tokenid))
            total_cpu_count = int(output['data'][0]['total_cpu_count'])
            vbond_info[vbond].append(total_cpu_count)
        for vsmart in vsmart_info:
            output = json.loads(getRequest( version_tuple,vmanage_lo_ip,jsessionid, 'device/system/synced/status?deviceId={}'.format(vsmart_info[vsmart][1]),args.vmanage_port,tokenid))
            total_cpu_count = int(output['data'][0]['total_cpu_count'])
            vsmart_info[vsmart].append(total_cpu_count)
        failed_vbonds,failed_vsmarts,check_result,check_analysis, check_action = criticalCheckeleven(total_devices, vbond_info, vsmart_info)
        if check_result == 'Failed':
            critical_checks['#11:Check:Controllers:Validate vSmart/vBond CPU count for scale'] = [ check_analysis, check_action]
            log_file_logger.error('#11: Check result: {}'.format(check_result))
            log_file_logger.error('#11: Check Analysis: {}'.format(check_analysis))
            log_file_logger.error('#11: vBonds with insufficient CPU count: \n{}'.format(failed_vbonds))
            log_file_logger.error('#11: vSmarts with insufficient CPU count: \n{}'.format(failed_vsmarts))
            log_file_logger.error('#11: All vBonds info with total_cpu_count: \n{}'.format(vbond_info))
            log_file_logger.error('#11: All vSmarts info with total_cpu_count: \n{}\n'.format(vsmart_info))
            writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
            writeFile(report_file, 'Action: {}\n\n'.format(check_action))
        else:
            log_file_logger.info('#11: Check result: {}'.format(check_result))
            log_file_logger.info('#11: Check Analysis: {}'.format(check_analysis))
            log_file_logger.info('#11: All vBonds info with total_cpu_count: \n{}'.format(vbond_info))
            log_file_logger.info('#11: All vSmarts info with total_cpu_count: \n{}\n'.format(vsmart_info))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
    except Exception as e:
        # Check failures must not abort the audit run: report and continue.
        print('\033[1;31m ERROR: Error performing #11:Check:Controllers:Validate vSmart/vBond CPU count for scale. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
        log_file_logger.exception('{}\n'.format(e))
    #Warning Checks
    # Warning-level checks are advisory: failures go into `warning_checks`
    # rather than `critical_checks`.
    print('\n**** Performing Warning checks\n')
    warning_checks = {}
    log_file_logger.info('*** Performing Warning Checks')

    #12:Check:vManage:CPU Speed
    # Validates the vManage host's CPU clock speed (`cpu_speed`, gathered earlier).
    print(' #12:Checking:vManage:CPU Speed')
    log_file_logger.info('#12:Check:vManage:CPU Speed')
    writeFile(report_file, '#12:Check:vManage:CPU Speed\n\n')
    try:
        check_result,check_analysis,check_action = warningCheckone(cpu_speed)
        if check_result == 'Failed':
            warning_checks['#12:Check:vManage:CPU Speed'] = [ check_analysis, check_action]
            log_file_logger.error('#12: Check result: {}'.format(check_result))
            log_file_logger.error('#12: Check Analysis: {}'.format(check_analysis))
            log_file_logger.error('#12: CPU clock speed: {}\n'.format(cpu_speed))
            writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
            writeFile(report_file, 'Action: {}\n\n'.format(check_action))
        else:
            log_file_logger.info('#12: Check result: {}'.format(check_result))
            log_file_logger.info('#12: Check Analysis: {}\n'.format(check_analysis))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
    except Exception as e:
        # Check failures must not abort the audit run: report and continue.
        print('\033[1;31m ERROR: Error performing #12:Check:vManage:CPU Speed. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
        log_file_logger.exception('{}\n'.format(e))
    #13:Check:vManage:Network Card type
    # Flags NICs using the e1000 driver (recommended is vmxnet3/equivalent).
    print(' #13:Checking:vManage:Network Card type')
    log_file_logger.info('#13:Check:vManage:Network Card type')
    writeFile(report_file, '#13:Check:vManage:Network Card type\n\n')
    try:
        # NOTE(review): warningChecktwo's return order differs from every other
        # check (check_result comes last here) — confirm it matches the helper.
        eth_drivers, check_action, check_analysis, check_result = warningChecktwo()
        if check_result == 'Failed':
            warning_checks['#13:Check:vManage:Network Card type'] = [ check_analysis, check_action]
            log_file_logger.error('#13: Check result: {}'.format(check_result))
            log_file_logger.error('#13: Check Analysis: {}'.format(check_analysis))
            log_file_logger.error('#13: Ethercardswith e1000 card types: {}\n'.format(eth_drivers))
            writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
            writeFile(report_file, 'Action: {}\n\n'.format(check_action))
        else:
            log_file_logger.info('#13: Check result: {}'.format(check_result))
            log_file_logger.info('#13: Check Analysis: {}\n'.format(check_analysis))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
    except Exception as e:
        # Check failures must not abort the audit run: report and continue.
        print('\033[1;31m ERROR: Error performing #13:Check:vManage:Network Card type. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
        log_file_logger.exception('{}\n'.format(e))
    #14:Check:vManage:Backup status
    # Checks when the last configuration backup was taken.
    print(' #14:Checking:vManage:Backup status')
    log_file_logger.info('#14:Check:vManage:Backup status')
    writeFile(report_file, '#14:Check:vManage:Backup status\n\n')
    try:
        date_time_obj, check_result, check_analysis, check_action = warningCheckthree()
        if check_result == 'Failed':
            warning_checks['#14:Check:vManage:Backup status'] = [ check_analysis, check_action]
            log_file_logger.error('#14: Check result: {}'.format(check_result))
            log_file_logger.error('#14: Check Analysis: {}'.format(check_analysis))
            log_file_logger.error('#14: Last Backup was performed on:{}\n'.format(date_time_obj))
            writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
            writeFile(report_file, 'Action: {}\n\n'.format(check_action))
        else:
            log_file_logger.info('#14: Check result: {}'.format(check_result))
            log_file_logger.info('#14: Check Analysis: {}\n'.format(check_analysis))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
    except Exception as e:
        # Check failures must not abort the audit run: report and continue.
        print('\033[1;31m ERROR: Error performing #14:Check:vManage:Backup status. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
        log_file_logger.exception('{}\n'.format(e))
#15:Check:vManage:Evaluate Neo4j performance
print(' #15:Checking:vManage:Evaluate Neo4j performance')
log_file_logger.info('#15:Check:vManage:Evaluate Neo4j performance')
writeFile(report_file, '#15:Check:vManage:Evaluate Neo4j performance\n\n')
try:
check_result, check_analysis, check_action = warningCheckfour()
if check_result == 'Failed':
warning_checks['#15:Check:vManage:Backup status'] = [ check_analysis, check_action]
log_file_logger.error('#15: Check result: {}'.format(check_result))
log_file_logger.error('#15: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#15: Check result: {}'.format(check_result))
log_file_logger.info('#15: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #15:Check:vManage:Evaluate Neo4j performance. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
    #16:Check:vManage:Confirm there are no pending tasks
    # Lists vManage task statuses and warns if any tasks are still running.
    print(' #16:Checking:vManage:Confirm there are no pending tasks')
    log_file_logger.info('#16:Check:vManage:Confirm there are no pending tasks')
    writeFile(report_file, '#16:Check:vManage:Confirm there are no pending tasks\n\n')
    try:
        tasks = json.loads(getRequest(version_tuple,vmanage_lo_ip,jsessionid,'device/action/status/tasks', args.vmanage_port, tokenid))
        tasks_running, check_result, check_analysis, check_action = warningCheckfive(tasks)
        if check_result == 'Failed':
            warning_checks['#16:Check:vManage:Confirm there are no pending tasks'] = [ check_analysis, check_action]
            log_file_logger.error('#16: Check result: {}'.format(check_result))
            log_file_logger.error('#16: Check Analysis: {}'.format(check_analysis))
            log_file_logger.error('#16: Tasks still running: {}\n'.format(tasks_running))
            writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
            writeFile(report_file, 'Action: {}\n\n'.format(check_action))
        else:
            log_file_logger.info('#16: Check result: {}'.format(check_result))
            log_file_logger.info('#16: Check Analysis: {}\n'.format(check_analysis))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
    except Exception as e:
        # Check failures must not abort the audit run: report and continue.
        print('\033[1;31m ERROR: Error performing #16:Check:vManage:Confirm there are no pending tasks. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
        log_file_logger.exception('{}\n'.format(e))
    #17:Check:vManage:Validate there are no empty password users
    # Warns when user accounts exist with empty passwords (security exposure).
    print(' #17:Checking:vManage:Validate there are no empty password users')
    log_file_logger.info('#17:Check:vManage:Validate there are no empty password users')
    writeFile(report_file, '#17:Check:vManage:Validate there are no empty password users\n\n')
    try:
        users_emptypass, check_result, check_analysis, check_action = warningChecksix(version_tuple)
        if check_result == 'Failed':
            warning_checks['#17:Check:vManage:Validate there are no empty password users'] = [ check_analysis, check_action]
            log_file_logger.error('#17: Check result: {}'.format(check_result))
            log_file_logger.error('#17: Check Analysis: {}'.format(check_analysis))
            log_file_logger.error('#17: Users with empty passwords: {}\n'.format(users_emptypass))
            writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
            writeFile(report_file, 'Action: {}\n\n'.format(check_action))
        else:
            log_file_logger.info('#17: Check result: {}'.format(check_result))
            log_file_logger.info('#17: Check Analysis: {}\n'.format(check_analysis))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
    except Exception as e:
        # Check failures must not abort the audit run: report and continue.
        print('\033[1;31m ERROR: Error performing #17:Check:vManage:Validate there are no empty password users. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
        log_file_logger.exception('{}\n'.format(e))
#18:Check:Controllers:Controller versions
print(' #18:Checking:Controllers:Controller versions')
log_file_logger.info('#18:Check:Controllers:Controller versions')
writeFile(report_file, '#18:Check:Controllers:Controller versions\n\n')
try:
check_result, check_analysis, check_action = warningCheckseven(controllers_info)
if check_result == 'Failed':
warning_checks['#18:Check:Controllers:Controller versions'] = [ check_analysis, check_action]
log_file_logger.error('#18: Check result: {}'.format(check_result))
log_file_logger.error('#18: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#18: Check result: {}'.format(check_result))
log_file_logger.info('#18: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #18:Check:Controllers:Controller versions. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#19:Check:Controllers:Confirm Certificate Expiration Dates
print(' #19:Checking:Controllers:Confirm Certificate Expiration Dates')
log_file_logger.info('#19:Check:Controllers:Confirm Certificate Expiration Dates')
writeFile(report_file, '#19:Check:Controllers:Confirm Certificate Expiration Dates\n\n')
try:
controllers_exp, controllers_notexp, check_result, check_analysis, check_action = warningCheckeight(controllers_info)
if check_result == 'Failed':
warning_checks['#19:Check:Controllers:Confirm Certificate Expiration Dates'] = [ check_analysis, check_action]
log_file_logger.error('#19: Check result: {}'.format(check_result))
log_file_logger.error('#19: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#19: Controllers with certificates close to expiration: \n{}\n'.format(controllers_exp))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#19: Check result: {}'.format(check_result))
log_file_logger.info('#19: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #19:Check:Controllers:Confirm Certificate Expiration Dates. \n Please check | |
<filename>jamdict/kanjidic2.py
# -*- coding: utf-8 -*-
'''
Python library for manipulating <NAME>'s KanjiDic2
Latest version can be found at https://github.com/neocl/jamdict
This package uses the [EDICT][1] and [KANJIDIC][2] dictionary files.
These files are the property of the [Electronic Dictionary Research and Development Group][3], and are used in conformance with the Group's [licence][4].
[1]: http://www.csse.monash.edu.au/~jwb/edict.html
[2]: http://www.edrdg.org/kanjidic/kanjd2index.html
[3]: http://www.edrdg.org/
[4]: http://www.edrdg.org/edrdg/licence.html
References:
JMDict website:
http://www.csse.monash.edu.au/~jwb/edict.html
http://www.edrdg.org/kanjidic/kanjd2index.html
Python documentation:
https://docs.python.org/
PEP 257 - Python Docstring Conventions:
https://www.python.org/dev/peps/pep-0257/
@author: <NAME> <<EMAIL>>
@license: MIT
'''
# Copyright (c) 2016, <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
########################################################################
import os
import logging
from lxml import etree
from chirptext import chio
# ------------------------------------------------------------------------------
# Configuration
# ------------------------------------------------------------------------------
def getLogger():
    """Return the logger associated with this module's name."""
    logger = logging.getLogger(__name__)
    return logger
# ------------------------------------------------------------------------------
# Models
# ------------------------------------------------------------------------------
class KanjiDic2(object):
    """A KanjiDic2 file: header metadata plus the list of parsed characters.

    Header DTD:
        <!ELEMENT header (file_version,database_version,date_of_creation)>
        file_version     -- version of the kanjidic2 structure
        database_version -- file version, in the format YYYY-NN (NN starts
                            at 01 for the first release of a calendar year)
        date_of_creation -- creation date in international format (YYYY-MM-DD)
    """

    def __init__(self, file_version, database_version, date_of_creation):
        # Characters are appended by the parser after construction.
        self.characters = []
        self.file_version = file_version
        self.database_version = database_version
        self.date_of_creation = date_of_creation

    def __len__(self):
        """Number of characters currently held."""
        return len(self.characters)

    def __getitem__(self, idx):
        """Index into the character list."""
        return self.characters[idx]
class Character(object):
    """A single kanji record.

    <!ELEMENT character (literal,codepoint, radical, misc, dic_number?,
                         query_code?, reading_meaning?)*>
    """

    def __init__(self):
        self.ID = None
        self.literal = ''           # the character itself, UTF8 coded
        self.codepoints = []        # CodePoint objects (<codepoint>/<cp_value>)
        self.radicals = []          # Radical objects (<radical>/<rad_value>)
        self.stroke_count = None    # first <misc>/stroke_count
        self.stroke_miscounts = []  # remaining <misc>/stroke_count entries
        self.grade = None           # <misc>/<grade>
        self.variants = []          # Variant objects (<misc>/<variant>)
        self.freq = None            # <misc>/<freq>
        self.rad_names = []         # strings from <misc>/<rad_name>
        self.jlpt = None            # <misc>/<jlpt>
        self.dic_refs = []          # DicRef objects
        self.query_codes = []       # QueryCode objects
        self.rm_groups = []         # reading_meaning groups
        self.nanoris = []           # nanori readings (strings)

    def __repr__(self):
        # Only meanings without an explicit language (i.e. English) are shown.
        glosses = ','.join(m.value
                           for rm in self.rm_groups
                           for m in rm.meanings
                           if m.m_lang == '')
        return "{}:{}:{}".format(self.literal, self.stroke_count, glosses)

    def __str__(self):
        return self.literal

    def to_json(self):
        """Serialize this character to a plain dict (JSON-compatible)."""
        return {'literal': self.literal,
                'codepoints': [c.to_json() for c in self.codepoints],
                'radicals': [r.to_json() for r in self.radicals],
                'stroke_count': self.stroke_count,
                'grade': self.grade or '',
                'stroke_miscounts': self.stroke_miscounts,
                'variants': [v.to_json() for v in self.variants],
                'freq': self.freq or 0,
                'rad_names': self.rad_names,
                'jlpt': self.jlpt or '',
                'dic_refs': [d.to_json() for d in self.dic_refs],
                'q_codes': [q.to_json() for q in self.query_codes],
                'rm': [g.to_json() for g in self.rm_groups],
                'nanoris': list(self.nanoris)}
class CodePoint(object):
def __init__(self, cp_type='', value=''):
"""<!ELEMENT cp_value (#PCDATA)>
<!--
The cp_value contains the codepoint of the character in a particular
standard. The standard will be identified in the cp_type attribute.
-->
"""
self.cid = None
self.cp_type = cp_type
self.value = value
def __repr__(self):
if self.r_type:
return "({t}) {v}".format(t=self.cp_type, v=self.value)
else:
return self.value
def __str__(self):
return self.value
def to_json(self):
return {'type': self.cp_type, 'value': self.value}
class Radical(object):
    """The radical number of a kanji, in the range 1 to 214.

    <!ELEMENT radical (rad_value+)>
    <!ELEMENT rad_value (#PCDATA)>
    The particular classification type is stated in the rad_type attribute.
    """

    def __init__(self, rad_type='', value=''):
        self.cid = None
        self.rad_type = rad_type
        self.value = value

    def __repr__(self):
        if not self.rad_type:
            return self.value
        return "({t}) {v}".format(t=self.rad_type, v=self.value)

    def __str__(self):
        return self.value

    def to_json(self):
        return {'type': self.rad_type, 'value': self.value}
class Variant(object):
    """A cross-reference to a variant kanji, or an alternative index code.

    <!ELEMENT variant (#PCDATA)>
    <!ATTLIST variant var_type CDATA #REQUIRED>
    The var_type attribute gives the kind of code:
        jis208 / jis212 / jis213 -- kuten coding (mostly relating to
            "shinjitai/kyuujitai" alternative character glyphs)
        deroo    -- De Roo number (numeric)
        njecd    -- Halpern NJECD index number (numeric)
        s_h      -- The Kanji Dictionary (Spahn & Hadamitzky) descriptor
        nelson_c -- "Classic" Nelson (numeric)
        oneill   -- Japanese Names (O'Neill) (numeric)
        ucs      -- Unicode codepoint (hex)
    """

    def __init__(self, var_type='', value=''):
        self.cid = None
        self.var_type = var_type
        self.value = value

    def __repr__(self):
        if not self.var_type:
            return self.value
        return "({t}) {v}".format(t=self.var_type, v=self.value)

    def __str__(self):
        return self.value

    def to_json(self):
        return {'type': self.var_type, 'value': self.value}
class DicRef(object):
    """An index number of the kanji in a particular reference work.

    <!ELEMENT dic_ref (#PCDATA)>
    <!ATTLIST dic_ref dr_type CDATA #REQUIRED>
    The dr_type attribute names the dictionary or reference book, e.g.:
    nelson_c, nelson_n, halpern_njecd, halpern_kkld, heisig, gakken,
    oneill_names, oneill_kk, moro, henshall, sh_kk, sakade, jf_cards,
    henshall3, tutt_cards, crowley, kanji_in_context, busy_people,
    kodansha_compact, maniette.
    For dr_type 'moro' (Morohashi's "Daikanwajiten") two extra attributes
    may locate the entry: m_vol (volume) and m_page (page within it).
    """

    def __init__(self, dr_type='', value='', m_vol='', m_page=''):
        self.cid = None          # owning character's row id (set when stored)
        self.dr_type = dr_type   # reference work identifier
        self.value = value       # index number within that work
        self.m_vol = m_vol       # Morohashi volume (dr_type 'moro' only)
        self.m_page = m_page     # Morohashi page (dr_type 'moro' only)

    def __repr__(self):
        if not self.dr_type:
            return self.value
        return "({t}) {v}".format(t=self.dr_type, v=self.value)

    def __str__(self):
        return self.value

    def to_json(self):
        return {'type': self.dr_type,
                'value': self.value,
                'm_vol': self.m_vol,
                'm_page': self.m_page}
class QueryCode(object):
def __init__(self, qc_type='', value='', skip_misclass=""):
"""<!ELEMENT query_code (q_code+)>
<!--
These codes contain information relating to the glyph, and can be used
for finding a required kanji. The type of code is defined by the
qc_type attribute.
-->
<!ELEMENT q_code (#PCDATA)>
<!--
The q_code contains the actual query-code value, according to the
qc_type attribute.
-->
<!ATTLIST q_code qc_type CDATA #REQUIRED>
<!--
The qc_type attribute defines the type of query code. The current values
are:
skip - Halpern's SKIP (System of Kanji Indexing by Patterns)
code. The format is n-nn-nn. See the KANJIDIC | |
'connectivity_func1':
g = geosclassic.MPDSOMPImageConnectivityFunction1
elif stat_type == 'connectivity_func2':
g = geosclassic.MPDSOMPImageConnectivityFunction2
elif stat_type == 'covariance_not_centered':
g = geosclassic.MPDSOMPImageCovarianceNotCentred
elif stat_type == 'transiogram':
g = geosclassic.MPDSOMPImageTransiogram
elif stat_type == 'variogram':
g = geosclassic.MPDSOMPImageVariogram
else:
print("ERROR: 'stat_type' not valid")
return None
err = g(input_image_c, output_image_c, var_index,
hx_min, hx_max, hx_step,
hy_min, hy_max, hy_step,
hz_min, hz_max, hz_step,
show_progress, nth)
# --- Retrieve output image "in python"
if err:
err_message = geosclassic.mpds_get_error_message(-err)
err_message = err_message.replace('\n', '')
print(err_message)
output_image = None
else:
output_image = img_C2py(output_image_c)
# Free memory on C side: input_image_c
geosclassic.MPDSFreeImage(input_image_c)
#geosclassic.MPDSFree (input_image_c)
geosclassic.free_MPDS_IMAGE(input_image_c)
# Free memory on C side: output_image_c
geosclassic.MPDSFreeImage(output_image_c)
#geosclassic.MPDSFree (output_image_c)
geosclassic.free_MPDS_IMAGE(output_image_c)
return output_image
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def imgConnectivityGammaValue(
        input_image,
        var_index=0,
        geobody_image_in_input=False,
        complementary_set=False,
        connect_type='connect_face'):
    """
    Computes the Gamma value for one variable v of the input image:
        Gamma = 1/m^2 * sum_{i=1,...,N} n(i)^2,
    where
        N is the number of connected components (geobodies)
            of the set {v>0}
        n(i) is the size (number of cells) in the i-th connected component
        m is the size (number of cells) of the set {v>0},
    i.e. the indicator variable I(x) = 1 iff v(x) > 0, is considered.

    The Gamma value is a global indicator of the connectivity for the binary
    image of variable I. See:
        Adv Water Resour 51:168-196 (2013), Connectivity metrics for
        subsurface flow and transport.
        https://doi.org/10.1016/j.advwatres.2011.12.001

    The definition of adjacent cells, required to compute the connected
    components, depends on the keyword argument connect_type:
        - 'connect_face' (default): common face
        - 'connect_face_edge': common face or edge
        - 'connect_face_edge_corner': common face, edge or corner

    :param input_image: (Img class) input image
    :param var_index: (int) index of the considered variable in input image
                          (default: 0)
    :param geobody_image_in_input:
                      (bool)
                          - True: the input image is already the geobody image
                              (variable 'var_index' is the geobody label); the
                              keyword arguments 'complementary_set' and
                              'connect_type' are then ignored
                          - False: the geobody image for the indicator variable
                              {v>0} is computed first (default)
    :param complementary_set:
                      (bool) use the complementary indicator variable
                          (IC = 1-I) if True, the indicator variable I if
                          False (default)
    :param connect_type: (string) adjacency definition (see above)

    :return: (float) Gamma value, or None on error or if the considered set
             is empty (Gamma is undefined in that case)
    """
    # --- Check and prepare
    if var_index < 0 or var_index >= input_image.nv:
        print("ERROR: 'var_index' not valid")
        return None

    if not geobody_image_in_input and connect_type not in ('connect_face', 'connect_face_edge', 'connect_face_edge_corner'):
        print("ERROR: unknown 'connect_type'")
        return None

    # Get (or compute) the geobody image: cell value = geobody label in
    # {1, ..., N}, 0 for cells outside the considered set
    if geobody_image_in_input:
        im_geobody = input_image
        iv = var_index
    else:
        im_geobody = imgGeobodyImage(input_image,
                                     var_index,
                                     bound_inf=0.0,
                                     bound_sup=None,
                                     bound_inf_excluded=True,
                                     bound_sup_excluded=True,
                                     complementary_set=complementary_set,
                                     connect_type=connect_type)
        iv = 0
    if im_geobody is None:
        return None

    # Compute Gamma value in a single pass: np.bincount yields the size of
    # every geobody at once (the previous per-label loop scanned the whole
    # image once per geobody, i.e. O(N * ncells)).
    # Assumes geobody labels are (float-coded) non-negative integers with no
    # missing values, as produced by imgGeobodyImage.
    labels = np.asarray(im_geobody.val[iv]).astype(int).ravel()
    m = np.count_nonzero(labels)
    if m == 0:
        # Empty set {v>0}: Gamma is undefined (avoid division by zero)
        print("ERROR: the considered set is empty, Gamma value not defined")
        return None
    geobody_sizes = np.bincount(labels)[1:].astype(float)  # sizes of geobodies 1..N
    return float(np.sum(geobody_sizes**2)) / float(m)**2
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def imgConnectivityGammaCurves(
        input_image,
        threshold_min=None,
        threshold_max=None,
        nthreshold=50,
        connect_type='connect_face',
        show_progress=False,
        nthreads=-1):
    """
    Computes Gamma curves for an input image containing one variable v
    (continuous).
    For a threshold t:
        - we consider the indicator variable I(t) defined as
            I(t)(x) = 1 iif v(x) <= t
        - we compute
            gamma(t) = 1/m^2 * sum_{i=1,...,N} n(i)^2,
          where
            N is the number of connected components (geobodies)
                of the set {I(t)=1}
            n(i) is the size (number of cells) in the i-th connected component
            m is the size (number of cells) of the set {I(t)=1}
        - we compute also gammaC(t), the gamma value for the complementary set
            {IC(t)=1} where IC(t)(x) = 1 - I(t)(x)
    This is repeated for different threshold values t, which gives the curves
    gamma(t) and gammaC(t).
    The Gamma value gamma(t) (resp. gammaC(t)) is a global indicator of the
    connectivity for the binary variable I(t) (resp. IC(t)).
    See reference:
        Connectivity metrics for subsurface flow
        and transport. Adv Water Resour 51:168-196.
        https://doi.org/10.1016/j.advwatres.2011.12.001
    The definition of adjacent cells, required to compute the connected components,
    depends on the keyword argument connect_type:
        - connect_type = connect_face (default):
            two grid cells are adjacent if they have a common face
        - connect_type = connect_face_edge:
            two grid cells are adjacent if they have a common face
            or a common edge
        - connect_type = connect_face_edge_corner:
            two grid cells are adjacent if they have a common face
            or a common edge or a common corner
    :param input_image:   (Img class) input image, should have only one variable
    :param threshold_min: (float) minimal value of the threshold,
                              default (None): min of the input variable values
                              minus 1.e-10
    :param threshold_max: (float) maximal value of the threshold,
                              default (None): max of the input variable values
                              plus 1.e-10
    :param nthreshold:    (int) number of thresholds considered (default: 50),
                              the threshold values will be:
                              numpy.linspace(threshold_min, threshold_max, nthreshold)
    :param connect_type: (string) indicates which definition of adjacent cells is used
                             (see above), available mode:
                             'connect_face' (default),
                             'connect_face_edge',
                             'connect_face_edge_corner'
    :param show_progress: (bool) indicates if progress is displayed (True) or
                              not (False), default: False
    :param nthreads:      (int) number of thread(s) to use for program (C),
                              (nthreads = -n <= 0: for maximal number of threads except n,
                              but at least 1)
    :return out_array:  (numpy 2d-array of floats) array of shape (nthreshold, 3),
                            with the threshold values in the column of index 0, and
                            the corresponding gamma and gammaC values in the column
                            of index 1 and column of index 2, i.e.:
                                out_array[:,0]: numpy.linspace(threshold_min, threshold_max, nthreshold)
                                out_array[i,1]: gamma(out_array[i,0])
                                out_array[i,2]: gammaC(out_array[i,0])
    """
    # --- Check and prepare
    if input_image.nv != 1:
        print("ERROR: input image must have one variable only")
        return None
    # Default thresholds: slightly beyond the data range so that the extreme
    # cells are included at the first/last threshold
    if threshold_min is None:
        threshold_min = np.nanmin(input_image.val) - 1.e-10
    if threshold_max is None:
        threshold_max = np.nanmax(input_image.val) + 1.e-10
    if threshold_min > threshold_max:
        print("ERROR: 'threshold_min' is greater than 'threshold_max'")
        return None
    # NOTE(review): nthreshold == 0 is not rejected here; it falls through to
    # the step computation below (step becomes negative) and is caught by the
    # "threshold step too small" check -- confirm this is intended.
    if nthreshold < 0:
        print("ERROR: 'nthreshold' is negative")
        return None
    elif nthreshold == 1:
        # A single threshold: any positive step value works
        threshold_step = 1.0
    else:
        threshold_step = (threshold_max - threshold_min) / (nthreshold - 1)
    if threshold_step < geosclassic.MPDS_EPSILON:
        print("ERROR: threshold step too small")
        return None
    if connect_type not in ('connect_face', 'connect_face_edge', 'connect_face_edge_corner'):
        print("ERROR: unknown 'connect_type'")
        return None
    # Set input image "in C"
    input_image_c = img_py2C(input_image)
    # Allocate output variable in C
    # (C-side buffers; released with MPDSFree before returning)
    threshold_c = geosclassic.new_real_array(nthreshold)
    gamma_c = geosclassic.new_real_array(nthreshold)
    gammaC_c = geosclassic.new_real_array(nthreshold)
    # --- Set number of threads
    # nthreads <= 0 means "all cores except |nthreads|", but at least one
    if nthreads <= 0:
        nth = max(os.cpu_count() + nthreads, 1)
    else:
        nth = nthreads
    # --- Compute Gamma curves (launch C code)
    if connect_type == 'connect_face':
        g = geosclassic.MPDSOMPImageConnectivity6GlobalIndicatorCurve
    elif connect_type == 'connect_face_edge':
        g = geosclassic.MPDSOMPImageConnectivity18GlobalIndicatorCurve
    elif connect_type == 'connect_face_edge_corner':
        g = geosclassic.MPDSOMPImageConnectivity26GlobalIndicatorCurve
    else:
        # Defensive: unreachable, connect_type was validated above
        print("ERROR: 'connect_type' not valid")
        return None
    err = g(input_image_c, nthreshold, threshold_min, threshold_step,
            threshold_c, gamma_c, gammaC_c,
            show_progress, nth)
    # --- Retrieve output "in python"
    if err:
        # C routine failed: translate the (negated) error code into a message
        err_message = geosclassic.mpds_get_error_message(-err)
        err_message = err_message.replace('\n', '')
        print(err_message)
        out_array = None
    else:
        # Copy each C vector into a numpy array
        threshold = np.zeros(nthreshold)
        geosclassic.mpds_get_array_from_real_vector(threshold_c, 0, threshold)
        gamma = np.zeros(nthreshold)
        geosclassic.mpds_get_array_from_real_vector(gamma_c, 0, gamma)
        gammaC = np.zeros(nthreshold)
        geosclassic.mpds_get_array_from_real_vector(gammaC_c, 0, gammaC)
        # Assemble as columns: [:,0]=threshold, [:,1]=gamma, [:,2]=gammaC
        out_array = np.array((threshold, gamma, gammaC)).reshape(3, -1).T
    # Free memory on C side: input_image_c
    geosclassic.MPDSFreeImage(input_image_c)
    #geosclassic.MPDSFree (input_image_c)
    geosclassic.free_MPDS_IMAGE(input_image_c)
    # Free memory on C side: threshold_c, gamma_c, gammaC_c
    geosclassic.MPDSFree(threshold_c)
    geosclassic.MPDSFree(gamma_c)
    geosclassic.MPDSFree(gammaC_c)
    return out_array
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def imgConnectivityEulerNumber(
input_image,
var_index=0,
geobody_image_in_input=False,
complementary_set=False,
nthreads=-1):
"""
Computes the Euler number defined related to one variable v of the input image,
defined for the 3D image grid as
E = number of connected components (geobodies)
+ number of "holes"
- number of "handles"
for the set {v>0}, i.e. the indicator variable I(x) = 1 iff v(x)>0, is
considered.
The Euler number E can be computed by the formula:
E = sum_{i=1,...,N} (e0(i) - e1(i) + e2(i) - e3(i)),
where
- N the number of connected component (geobodies) in the set {I=1}
- for a geobody i:
e0(i) : the number of vertices (dim 0) in the i-th | |
self.kidsWithPerson(a)
if a.house.town == b.house.town:
peopleToMove = [b]
peopleToMove += [x for x in b.children if x.dead == False and x.house == b.house and
x.justMarried == None and x.independentStatus == False]
newOcc = len(peopleToMove)
oldOcc = len(a.house.occupants)
pReloc = self.relocationProb(newOcc, oldOcc, a.house.initialOccupants)
r = random.random()
if ( r > pReloc ):
targetHouse = a.house
#peopleToMove = [b]
#peopleToMove += self.kidsWithPartner(a)
peopleToMove = [b]
peopleToMove += [x for x in b.children if x.dead == False and x.house == b.house and
x.justMarried == None and x.independentStatus == False]
if targetHouse == b.house:
print('Target house equal to departure house in 1')
for i in peopleToMove:
if i in targetHouse.occupants:
print('Error in Join Spouses 1')
print(peopleToMove.index(i))
for i in peopleToMove:
repetitions = peopleToMove.count(i)
if repetitions > 1:
print('Person ' + str(i.id) + ' is counted ' + str(repetitions) + ' times in Join Spouses 1')
self.totalRelocations += 1
self.marriageRelocations += 1
b.independentStatus = True
b.elderlyWithFamily = False
b.yearIndependent = self.year
self.movePeopleIntoChosenHouse(targetHouse, b.house, peopleToMove, 'joiningSpouses (2)')
continue
else:
destination = a.house.town
peopleNotToMove = [x for x in list(a.house.occupants) if (x.justMarried != None or x.independentStatus == True) and x != a]
children = []
for i in peopleNotToMove:
children += [x for x in i.children if x.dead == False and x.house == i.house and
x.justMarried == None and x.independentStatus == False]
peopleNotToMove += children
peopleToMove = [x for x in list(a.house.occupants) if x not in peopleNotToMove]
peopleToMove.append(b)
peopleToMove += [x for x in b.children if x.dead == False and x.house == b.house and
x.justMarried == None and x.independentStatus == False]
for i in peopleToMove:
repetitions = peopleToMove.count(i)
if repetitions > 1:
print('Person ' + str(i.id) + ' is counted ' + str(repetitions) + ' times in Join Spouses (2)')
self.totalRelocations += 1
self.marriageRelocations += 1
b.independentStatus = True
b.elderlyWithFamily = False
b.yearIndependent = self.year
self.findNewHouse(peopleToMove, destination, 'joiningSpouses (3)')
continue
else:
if person.independentStatus == True:
aTownCare = self.computeTownSocialAttraction(person, 'only agent independent')
bTownCare = self.computeTownSocialAttraction(person.partner, 'only agent independent')
else:
bTownCare = self.computeTownSocialAttraction(person, 'only partner independent')
aTownCare = self.computeTownSocialAttraction(person.partner, 'only partner independent')
if a.income*aTownCare > b.income*bTownCare:
peopleToMove = [b]
peopleToMove += [x for x in b.children if x.dead == False and x.house == b.house and
x.justMarried == None and x.independentStatus == False]
newOcc = len(peopleToMove)
oldOcc = len(a.house.occupants)
pReloc = self.relocationProb(newOcc, oldOcc, a.house.initialOccupants)
r = random.random()
if ( r > pReloc ):
targetHouse = a.house
#peopleToMove = [b]
#peopleToMove += self.kidsWithPartner(a)
peopleToMove = [b]
peopleToMove += [x for x in b.children if x.dead == False and x.house == b.house and
x.justMarried == None and x.independentStatus == False]
if b.status == 'employed':
self.leaveJob(b)
if targetHouse == b.house:
print('Target house equal to departure house in 2')
for i in peopleToMove:
if i in targetHouse.occupants:
print('Error in Join Spouses 2')
print(peopleToMove.index(i))
for i in peopleToMove:
repetitions = peopleToMove.count(i)
if repetitions > 1:
print('Person ' + str(i.id) + ' is counted ' + str(repetitions) + ' times in Join Spouses 2')
self.totalRelocations += 1
self.marriageRelocations += 1
b.independentStatus = True
b.elderlyWithFamily = False
b.yearIndependent = self.year
self.movePeopleIntoChosenHouse(targetHouse, b.house, peopleToMove, 'joiningSpouses (4)')
continue
else:
destination = a.house.town
# peopleToMove = list(a.house.occupants)
peopleNotToMove = [x for x in list(a.house.occupants) if (x.justMarried != None or x.independentStatus == True) and x != a]
children = []
for i in peopleNotToMove:
children += [x for x in i.children if x.dead == False and x.house == i.house and
x.justMarried == None and x.independentStatus == False]
peopleNotToMove += children
peopleToMove = [x for x in list(a.house.occupants) if x not in peopleNotToMove]
peopleToMove.append(b)
peopleToMove += [x for x in b.children if x.dead == False and x.house == b.house and
x.justMarried == None and x.independentStatus == False]
# peopleToMove = [a, b]
if b.status == 'employed':
self.leaveJob(b)
# peopleToMove += self.bringTheKids(a)
for i in peopleToMove:
repetitions = peopleToMove.count(i)
if repetitions > 1:
print('Person ' + str(i.id) + ' is counted ' + str(repetitions) + ' times in Join Spouses (3)')
self.totalRelocations += 1
self.marriageRelocations += 1
b.independentStatus = True
b.elderlyWithFamily = False
b.yearIndependent = self.year
self.findNewHouse(peopleToMove, destination, 'joiningSpouses (5)')
continue
else:
destination = b.house.town
peopleToMove = [b]
peopleToMove += [x for x in b.children if x.dead == False and x.house == b.house and
x.justMarried == None and x.independentStatus == False]
peopleNotToMove = [x for x in list(a.house.occupants) if (x.justMarried != None or x.independentStatus == True) and x != a]
children = []
for i in peopleNotToMove:
children += [x for x in i.children if x.dead == False and x.house == i.house and
x.justMarried == None and x.independentStatus == False]
peopleNotToMove += children
peopleToMove += [x for x in list(a.house.occupants) if x not in peopleNotToMove]
#peopleToMove += self.kidsWithPartner(a)
# peopleToMove = [b, a]
if a.status == 'employed':
self.leaveJob(a)
# peopleToMove += self.bringTheKids(a)
for i in peopleToMove:
repetitions = peopleToMove.count(i)
if repetitions > 1:
print('Person ' + str(i.id) + ' is counted ' + str(repetitions) + ' times in Join Spouses (4)')
self.totalRelocations += 1
self.marriageRelocations += 1
b.independentStatus = True
b.elderlyWithFamily = False
b.yearIndependent = self.year
self.findNewHouse(peopleToMove, destination, 'joiningSpouses (6)')
continue
# 3rd case: both living alone
elif person.independentStatus + person.partner.independentStatus == 2:
if check == 1:
print('Error: couple already joined')
else:
check = 1
#childrenWithPartner = self.kidsWithPartner(person)
#childrenWithPerson = self.kidsWithPerson(person)
newOcc1 = len(person.partner.house.occupants)
oldOcc1 = len(person.house.occupants)
totOcc1 = float(newOcc1 + oldOcc1)
ratio1 = totOcc1/float(person.house.initialOccupants)
newOcc2 = len(person.house.occupants)
oldOcc2 = len(person.partner.house.occupants)
totOcc2 = float(newOcc2 + oldOcc2)
ratio2 = totOcc2/float(person.partner.house.initialOccupants)
# If in the same town: move into bigger house
if ( person.house.town == person.partner.house.town ):
if ( ratio1 < ratio2 ):
a = person
b = person.partner
newOcc = newOcc1
else:
b = person
a = person.partner
newOcc = newOcc2
oldOcc = len(a.house.occupants)
pReloc = self.relocationProb(newOcc, oldOcc, a.house.initialOccupants)
r = random.random()
if ( r > pReloc ):
targetHouse = a.house
# peopleToMove = [b]
# peopleToMove += self.kidsWithPartner(a)
peopleNotToMove = [x for x in list(b.house.occupants) if (x.justMarried != None or x.independentStatus == True) and x != b]
children = []
for i in peopleNotToMove:
children += [x for x in i.children if x.dead == False and x.house == i.house and
x.justMarried == None and x.independentStatus == False]
peopleNotToMove += children
peopleToMove = [x for x in list(b.house.occupants) if x not in peopleNotToMove]
for i in peopleToMove:
if i in targetHouse.occupants:
print('Error in Join Spouses 3')
print(peopleToMove.index(i))
for i in peopleToMove:
repetitions = peopleToMove.count(i)
if repetitions > 1:
print('Person ' + str(i.id) + ' is counted ' + str(repetitions) + ' times in Join Spouses 3')
self.totalRelocations += 1
self.marriageRelocations += 1
self.movePeopleIntoChosenHouse(targetHouse, b.house, peopleToMove, 'joiningSpouses (7)')
continue
else:
# peopleToMove = [a, b]
# peopleToMove += self.bringTheKids(a)
peopleNotToMove = [x for x in list(a.house.occupants) if (x.justMarried != None or x.independentStatus == True) and x != a]
children = []
for i in peopleNotToMove:
children += [x for x in i.children if x.dead == False and x.house == i.house and
x.justMarried == None and x.independentStatus == False]
peopleNotToMove += children
peopleToMove = [x for x in list(a.house.occupants) if x not in peopleNotToMove]
peopleNotToMove = [x for x in list(b.house.occupants) if (x.justMarried != None or x.independentStatus == True) and x != b]
children = []
for i in peopleNotToMove:
children += [x for x in i.children if x.dead == False and x.house == i.house and
x.justMarried == None and x.independentStatus == False]
peopleNotToMove += children
peopleToMove.extend([x for x in list(b.house.occupants) if x not in peopleNotToMove])
for i in peopleToMove:
repetitions = peopleToMove.count(i)
if repetitions > 1:
print('Person ' + str(i.id) + ' is counted ' + str(repetitions) | |
import math
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from collections.abc import Iterable
from ..initializer import initialize_from_cfg
from ...extensions import DeformableConvInOne
from ..normalize import build_norm_layer, parse_deprecated_bn_style
from ...utils.checkpoint import fully_checkpoint_sequential
__all__ = [
'SENet',
'senet154',
'se_resnet50',
'se_resnet101',
'se_resnet152',
'se_resnext50_32x4d',
'se_resnext101_32x4d',
'se_resnext101_64x4d']
class AdaptiveAvgPool2d(nn.Module):
    """Adaptive average pooling that always computes in float32.

    The input is cast to float32 before pooling and the result is cast back
    to the input's original dtype (useful when activations are e.g. fp16,
    so the reduction itself stays numerically stable).
    """

    def __init__(self, output_size):
        super(AdaptiveAvgPool2d, self).__init__()
        self.output_size = output_size

    def extra_repr(self):
        return 'output_size={}'.format(self.output_size)

    def forward(self, x):
        orig_dtype = x.dtype
        pooled = F.adaptive_avg_pool2d(x.float(), self.output_size)
        return pooled.to(orig_dtype)
def Sigmoid_Activate(input):
    """SiLU/Swish activation: ``input * sigmoid(input)``.

    Uses the ``Tensor.sigmoid`` method instead of ``F.sigmoid``, which is
    deprecated in modern PyTorch (identical values, no deprecation warning).
    """
    return input * input.sigmoid()
class SEModule(nn.Module):
    """Squeeze-and-Excitation gate: channel-wise reweighting of the input.

    Squeeze: global average pool to 1x1; excite: a two-layer 1x1-conv
    bottleneck (``channels -> channels // reduction -> channels``) ending in
    a sigmoid, whose output multiplicatively gates the input feature map.
    """

    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        gate = self.avg_pool(x)
        gate = self.relu(self.fc1(gate))
        gate = self.sigmoid(self.fc2(gate))
        return x * gate
class Bottleneck(nn.Module):
    """Base class for SE bottleneck blocks: implements the shared ``forward()``.

    Subclasses must define ``conv1``..``conv3``, the matching
    ``norm1_name``/``norm2_name``/``norm3_name`` attributes (registered via
    ``add_module``), ``se_module`` and ``downsample``.  Activations use
    ``Sigmoid_Activate`` (x * sigmoid(x)) in place of the more common ReLU.
    """

    @property
    def norm1(self):
        # Normalization layers are stored under dynamic names so that
        # different norm types can coexist; resolve by name.
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        return getattr(self, self.norm3_name)

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.norm1(out)
        out = Sigmoid_Activate(out)
        out = self.conv2(out)
        out = self.norm2(out)
        out = Sigmoid_Activate(out)
        out = self.conv3(out)
        out = self.norm3(out)
        if self.downsample is not None:
            # Projection shortcut so the residual add has matching shape.
            residual = self.downsample(x)
        out = self.se_module(out) + residual
        out = Sigmoid_Activate(out)
        return out
class SEBottleneck(Bottleneck):
    """Bottleneck for SENet154 (wider 1x1/3x3 stages than the ResNet variant).

    Args:
        inplanes: input channel count.
        planes: base width; the block outputs ``planes * expansion`` channels.
        groups: group count for the 3x3 convolution.
        reduction: SE-module reduction ratio.
        stride: stride of the 3x3 convolution.
        downsample: optional projection module for the residual branch.
        deformable: use ``DeformableConvInOne`` for the 3x3 convolution.
        normalize: normalization-layer config (defaults to ``solo_bn``).
    """
    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1,
                 downsample=None,
                 deformable=False,
                 normalize=None):
        super(SEBottleneck, self).__init__()
        # Avoid a shared mutable default dict for `normalize`.
        if normalize is None:
            normalize = {'type': 'solo_bn'}
        self.norm1_name, norm1 = build_norm_layer(planes * 2, normalize, 1)
        self.norm2_name, norm2 = build_norm_layer(planes * 4, normalize, 2)
        self.norm3_name, norm3 = build_norm_layer(planes * self.expansion, normalize, 3)
        key_conv = nn.Conv2d
        if deformable:
            key_conv = DeformableConvInOne
        self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = key_conv(planes * 2, planes * 4, kernel_size=3, stride=stride,
                              padding=1, groups=groups, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(planes * 4, reduction=reduction)
        self.downsample = downsample
        self.stride = stride
class SEResNetBottleneck(Bottleneck):
    """
    ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe
    implementation and uses `stride=stride` in `conv1` and not in `conv2`
    (the latter is used in the torchvision implementation of ResNet).

    Args:
        inplanes: input channel count.
        planes: base width; the block outputs ``planes * expansion`` channels.
        groups: group count for the 3x3 convolution.
        reduction: SE-module reduction ratio.
        stride: stride of the 1x1 ``conv1`` (Caffe-style placement).
        downsample: optional projection module for the residual branch.
        deformable: use ``DeformableConvInOne`` for the 3x3 convolution.
        normalize: normalization-layer config (defaults to ``solo_bn``).
    """
    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1,
                 downsample=None,
                 deformable=False,
                 normalize=None):
        super(SEResNetBottleneck, self).__init__()
        # Avoid a shared mutable default dict for `normalize`.
        if normalize is None:
            normalize = {'type': 'solo_bn'}
        self.norm1_name, norm1 = build_norm_layer(planes, normalize, 1)
        self.norm2_name, norm2 = build_norm_layer(planes, normalize, 2)
        self.norm3_name, norm3 = build_norm_layer(planes * self.expansion, normalize, 3)
        key_conv = nn.Conv2d
        if deformable:
            key_conv = DeformableConvInOne
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = key_conv(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(planes * 4, reduction=reduction)
        self.downsample = downsample
        self.stride = stride
class SEResNeXtBottleneck(Bottleneck):
    """ResNeXt bottleneck type C with a Squeeze-and-Excitation module.

    Args:
        inplanes: input channel count.
        planes: base width; the block outputs ``planes * expansion`` channels.
        groups: cardinality (group count) for the 3x3 convolution.
        reduction: SE-module reduction ratio.
        stride: stride of the 3x3 convolution.
        downsample: optional projection module for the residual branch.
        base_width: per-group base width (ResNeXt ``d`` in ``groups x d``).
        deformable: use ``DeformableConvInOne`` for the 3x3 convolution.
        normalize: normalization-layer config (defaults to ``solo_bn``).
    """
    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1,
                 downsample=None, base_width=4, deformable=False,
                 normalize=None):
        super(SEResNeXtBottleneck, self).__init__()
        # Avoid a shared mutable default dict for `normalize`.
        if normalize is None:
            normalize = {'type': 'solo_bn'}
        key_conv = nn.Conv2d
        if deformable:
            key_conv = DeformableConvInOne
        # Grouped inner width as in the ResNeXt paper.
        width = math.floor(planes * (base_width / 64)) * groups
        self.norm1_name, norm1 = build_norm_layer(width, normalize, 1)
        self.norm2_name, norm2 = build_norm_layer(width, normalize, 2)
        self.norm3_name, norm3 = build_norm_layer(planes * self.expansion, normalize, 3)
        self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = key_conv(width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(planes * 4, reduction=reduction)
        self.downsample = downsample
        self.stride = stride
class SENet(nn.Module):
"""
"""
    def __init__(self, block, layers, groups, reduction,
                 out_layers, out_strides,
                 frozen_layers=None,
                 deformable=None,
                 initializer=None,
                 inplanes=128,
                 bn=None,
                 normalize={'type': 'freeze_bn'},
                 checkpoint=False,
                 input_3x3=True, downsample_kernel_size=3,
                 downsample_padding=1):
        """
        Arguments:
            - block (``nn.Module``): ``Bottleneck`` class::

                - For SENet154: SEBottleneck
                - For SE-ResNet models: SEResNetBottleneck
                - For SE-ResNeXt models: SEResNeXtBottleneck
            - layers (:obj:`list` of :obj:`int`): Number of residual blocks for 4 layers of the
              network (layer1...layer4).
            - groups (:obj:`int`): Number of groups for the 3x3 convolution in each
              bottleneck block::

                - For SENet154: 64
                - For SE-ResNet models: 1
                - For SE-ResNeXt models: 32
            - reduction (:obj:`int`): Reduction ratio for Squeeze-and-Excitation modules::

                - For all models: 16
            - out_layers (:obj:`list` of :obj:`int`): Indices (0..4) of the stages whose
              features are exposed as outputs.
            - out_strides (:obj:`list` of :obj:`int`): Strides reported for the selected
              output layers (returned by ``get_outstrides``).
            - frozen_layers (:obj:`list` of :obj:`int` or None): Indices (0..4) of stages
              to freeze (eval mode, ``requires_grad=False``).
            - deformable: forwarded to ``_make_layer`` for layer1..layer4.
            - initializer (:obj:`dict` or None): Weight-init config passed to
              ``initialize_from_cfg``.
            - inplanes (:obj:`int`): Number of input channels for layer1::

                - For SENet154: 128
                - For SE-ResNet models: 64
                - For SE-ResNeXt models: 64
            - bn (:obj:`dict`): Deprecated (see normalize). Config of BatchNorm (see Configuration#Normalization).
            - normalize (:obj:`dict`): Config of Normalization Layer (see Configuration#Normalization).
            - checkpoint (:obj:`bool`/:obj:`int` or iterable): Gradient-checkpointing
              configuration, normalized per stage by ``get_segments``.
            - input_3x3 (:obj:`bool`): If :obj:`True`, use three 3x3
              convolutions instead of a single 7x7 convolution in layer0::

                - For SENet154: True
                - For SE-ResNet models: False
                - For SE-ResNeXt models: False
            - downsample_kernel_size (:obj:`int`): Kernel size
              for downsampling convolutions in layer2, layer3 and layer4::

                - For SENet154: 3
                - For SE-ResNet models: 1
                - For SE-ResNeXt models: 1
            - downsample_padding (:obj:`int`): Padding for downsampling
              convolutions in layer2, layer3 and layer4::

                - For SENet154: 1
                - For SE-ResNet models: 0
                - For SE-ResNeXt models: 0
        """
        super(SENet, self).__init__()
        if bn is not None:
            # Backwards compatibility: translate the deprecated `bn` config.
            normalize = parse_deprecated_bn_style(bn)
        # Per-stage gradient-checkpointing segment counts (one per layer0..layer4).
        self.segments = self.get_segments(checkpoint)
        self.inplanes = inplanes
        if input_3x3:
            # SENet154-style stem: three stacked 3x3 convolutions.
            layer0_modules = [
                ('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1, bias=False)),
                build_norm_layer(64, normalize, 1),
                ('relu1', nn.ReLU(inplace=True)),
                ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)),
                build_norm_layer(64, normalize, 2),
                ('relu2', nn.ReLU(inplace=True)),
                ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)),
                build_norm_layer(inplanes, normalize, 3),
                ('relu3', nn.ReLU(inplace=True)),
            ]
        else:
            # Classic ResNet stem: single 7x7 convolution.
            layer0_modules = [
                ('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=2, padding=3, bias=False)),
                build_norm_layer(inplanes, normalize, 1),
                ('relu1', nn.ReLU(inplace=True)),
            ]
        # To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`.
        layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)))
        self.layer0 = nn.Sequential(OrderedDict(layer0_modules))
        self.layer1 = self._make_layer(
            block,
            planes=64,
            blocks=layers[0],
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=1,
            downsample_padding=0,
            deformable=deformable,
            normalize=normalize
        )
        self.layer2 = self._make_layer(
            block,
            planes=128,
            blocks=layers[1],
            stride=2,
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=downsample_kernel_size,
            downsample_padding=downsample_padding,
            deformable=deformable,
            normalize=normalize
        )
        self.layer3 = self._make_layer(
            block,
            planes=256,
            blocks=layers[2],
            stride=2,
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=downsample_kernel_size,
            downsample_padding=downsample_padding,
            deformable=deformable,
            normalize=normalize
        )
        self.layer4 = self._make_layer(
            block,
            planes=512,
            blocks=layers[3],
            stride=2,
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=downsample_kernel_size,
            downsample_padding=downsample_padding,
            deformable=deformable,
            normalize=normalize
        )
        if frozen_layers is not None and len(frozen_layers) > 0:
            assert min(frozen_layers) >= 0, frozen_layers
            assert max(frozen_layers) <= 4, frozen_layers
        assert min(out_layers) >= 0, out_layers
        assert max(out_layers) <= 4, out_layers
        self.out_layers = out_layers
        self.out_strides = out_strides
        # NOTE(review): frozen_layers may still be None here; freeze_layer()
        # (called below) must tolerate that.
        self.frozen_layers = frozen_layers
        # Output channel counts for stages layer0..layer4 (expansion 4 blocks).
        midplanes = [64, 256, 512, 1024, 2048]
        self.out_planes = [midplanes[i] for i in self.out_layers]
        if initializer is not None:
            initialize_from_cfg(self, initializer)
        # It's IMPORTANT when you want to freeze part of your backbone.
        # ALWAYS remember freeze layers in __init__ to avoid passing freezed params
        # to optimizer
        self.freeze_layer()
def get_segments(self, checkpoint):
if isinstance(checkpoint, Iterable):
segments = [int(x) for x in checkpoint]
else:
segments = [int(checkpoint)] * 5
return segments
    def _make_layer(self, block, planes, blocks, groups, reduction, stride=1,
                    downsample_kernel_size=1, downsample_padding=0,
                    deformable=False,
                    normalize={'type': 'solo_bn'}):
        """Build one residual stage of ``blocks`` SE bottleneck blocks.

        The first block carries the stride and (when shapes change) a
        projection ``downsample`` branch; subsequent blocks use stride 1.
        Side effect: updates ``self.inplanes`` to the stage's output width.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut so the residual add has matching shape.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=downsample_kernel_size, stride=stride,
                          padding=downsample_padding, bias=False),
                build_norm_layer(planes * block.expansion, normalize)[1]
            )
        layers = []
        layers.append(block(self.inplanes, planes,
                            groups, reduction, stride, downsample,
                            normalize=normalize))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups,
                                reduction, normalize=normalize))
            # NOTE(review): when `deformable`, every non-first block is built
            # and then immediately rebuilt as a deformable variant (and the
            # stage's first block never becomes deformable) -- confirm this
            # double construction / placement is intended.
            if deformable:
                layers[-1] = block(self.inplanes, planes, groups,
                                   reduction, deformable=deformable, normalize=normalize)
        return nn.Sequential(*layers)
def get_outplanes(self):
"""
"""
return self.out_planes
def get_outstrides(self):
return self.out_strides
def train(self, mode=True):
"""
Sets the module in training mode.
This has any effect only on modules such as Dropout or BatchNorm.
Returns:
Module: self
"""
self.training = mode
for module in self.children():
module.train(mode)
self.freeze_layer()
return self
def freeze_layer(self):
layers = [self.layer0, self.layer1, self.layer2, self.layer3, self.layer4]
for layer_idx in self.frozen_layers:
layer = layers[layer_idx]
layer.eval()
for param in layer.parameters():
param.requires_grad = False
    def checkpoint_fwd(self, layer, input, segments=2):
        """checkpoint forward: run ``layer`` under gradient checkpointing,
        split into ``segments`` chunks to trade recompute for activation memory.

        NOTE(review): when ``input`` does not require grad this mutates it in
        place (``detach_``), discarding its autograd history -- callers should
        not rely on the original graph of ``input`` afterwards.
        """
        # Make sure that the input to checkpoint have requires_grad=True, so that
        # the autograd can take care of the checkpointed part of model
        if not input.requires_grad:
            input.detach_()
            input.requires_grad = True
        return fully_checkpoint_sequential(layer, segments, input)
def forward(self, input):
"""
Arguments:
- input (:obj:`dict`): output of
:class:`~pod.datasets.base_dataset.BaseDataset`
Returns:
- out (:obj:`dict`):
Output example::
{
'features': [], # list of tenosr
'strides': | |
<reponame>cmadjar/datalad-crawler
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Nodes to interact with annex -- initiate a new dataset or operate with existing one
via Annexificator class, which could be used to add files, checkout branches, etc
"""
import os
import re
from os import listdir
from os.path import join as opj, exists, isabs, lexists, curdir, realpath
from os.path import split as ops
from os.path import isdir
from os.path import relpath
from os import unlink
from humanize import naturalsize
from six import iteritems
from six import string_types
from distutils.version import LooseVersion
from datalad.version import __version__
from datalad.api import add_archive_content
from datalad.api import clean
from datalad.utils import rmtree, updated
from datalad.utils import lmtime
from datalad.utils import find_files
from datalad.utils import auto_repr
from datalad.utils import try_multiple
from datalad.utils import assure_list
from datalad.downloaders.providers import Providers
from datalad.api import create
from datalad.support.gitrepo import GitRepo, _normalize_path
from datalad.support.annexrepo import AnnexRepo
from datalad.support.stats import ActivityStats
from datalad.support.versions import get_versions
from datalad.support.exceptions import AnnexBatchCommandError
from datalad.support.network import get_url_straight_filename
from datalad.support.network import get_url_disposition_filename
from datalad import cfg
from datalad.cmd import get_runner
from datalad_crawler.pipeline import initiate_pipeline_config
from datalad_crawler.dbs.files import PhysicalFileStatusesDB
from datalad_crawler.dbs.files import JsonFileStatusesDB
from datalad_crawler.dbs.versions import SingleVersionDB
from datalad.customremotes.base import init_datalad_remote
from datalad.dochelpers import exc_str
from logging import getLogger
# Module-level logger for the crawler's annex operations.
lgr = getLogger('datalad.crawl.annex')
# Shared command runner; `_call`/`_run` are shorthands bound once at import time.
_runner = get_runner()
_call = _runner.call
_run = _runner.run
# TODO: make use of datalad_stats
@auto_repr
class initiate_dataset(object):
    """Action to initiate a dataset following one of the known templates
    """

    def __init__(self, template, dataset_name=None,
                 path=None, branch=None, backend=None,
                 template_func=None, template_kwargs=None,
                 add_to_super='auto',
                 data_fields=None, add_fields=None, existing=None):
        """
        Parameters
        ----------
        template : str
          Which template (probably matching the superdataset name) to use.
          TODO: refer to specs of template that it might understand some
          arguments encoded, such as #func=custom_pipeline
        template_func : str, optional
          Explicitly specify the function name within template module
        template_kwargs: dict, optional
          Keyword arguments to pass into the `template_func`.
        dataset_name : str, optional
          Name of the dataset. If None, reacts on 'dataset_name' in data
        path : str, optional
          Path were to initiate the dataset. If not specified, would use
          default path for all new datasets (DATALAD_CRAWL_COLLECTIONSPATH)
        branch : str, optional
          Which branch to initialize
        backend : str, optional
          Supported by git-annex backend. By default (if None specified),
          it is MD5E backend to improve compatibility with filesystems
          having a relatively small limit for a maximum path size
        add_to_super : bool or 'auto', optional
          Add to super-dataset
        data_fields : list or tuple of str, optional
          Additional fields from data to store into configuration for
          the dataset crawling options -- would be passed into the corresponding
          crawler template.  Defaults to no extra fields.
        add_fields : dict, optional
          Dictionary of additional fields to store in the crawler configuration
          to be passed into the template.  Defaults to no extra fields.
        existing : ('skip', 'raise', 'adjust', 'replace', 'crawl'), optional
          Behavior if encountering existing dataset
        """
        # TODO: add_fields might not be flexible enough for storing more elaborate
        # configurations for e.g. "basic" template
        self.template = template
        self.template_func = template_func
        self.template_kwargs = template_kwargs
        self.dataset_name = dataset_name
        # Normalize None into fresh containers instead of using shared
        # mutable default arguments (the previous `[]`/`{}` defaults were
        # shared across all instances).
        self.data_fields = [] if data_fields is None else data_fields
        self.add_fields = {} if add_fields is None else add_fields
        self.existing = existing
        self.path = path
        self.branch = branch
        # TODO: backend -> backends (https://github.com/datalad/datalad/issues/358)
        self.backend = backend
        self.add_to_super = add_to_super

    def _initiate_dataset(self, path, name):
        """Create the dataset at `path` and register it with any super-dataset."""
        # Lazy logger formatting (args are only interpolated if emitted).
        lgr.info("Initiating dataset %s", name)
        if self.branch is not None:
            raise NotImplementedError("Disabled for now")
            # because all the 'create' magic is stuffed into the constructor ATM
            # we need first to initiate a git repository
            git_repo = GitRepo(path, create=True)
            # since we are initiating, that branch shouldn't exist yet, thus --orphan
            git_repo.checkout(self.branch, options=["--orphan"])
            # TODO: RF whenevever create becomes a dedicated factory/method
            # and/or branch becomes an option for the "creator"
        backend = self.backend or cfg.obtain('datalad.crawl.default_backend', default='MD5E')
        direct = cfg.obtain('datalad.crawl.init_direct', default=False)
        if direct:
            raise NotImplementedError("Disabled for now to init direct mode ones")
        ds = create(
            path=path,
            force=False,
            # no_annex=False,  # TODO: add as an arg
            # Passing save arg based on backend was that we need to save only if
            # custom backend was specified, but now with dataset id -- should always save
            # save=not bool(backend),
            # annex_version=None,
            annex_backend=backend,
            #git_opts=None,
            #annex_opts=None,
            #annex_init_opts=None
        )
        if self.add_to_super:
            # place hack from 'add-to-super' times here
            # MIH: tests indicate that this wants to discover any dataset above
            # not just true superdatasets
            sds = ds.get_superdataset(registered_only=False)
            if sds is not None:
                lgr.debug("Adding %s as a subdataset to %s", ds, sds)
                sds.add(ds.path, save=False)
                # this leaves the subdataset staged in the parent
            elif str(self.add_to_super) != 'auto':
                raise ValueError(
                    "Was instructed to add to super dataset but no super dataset "
                    "was found for %s" % ds
                )
        return ds

    def _save_crawl_config(self, dataset_path, data):
        """Persist the crawler pipeline configuration inside the dataset."""
        kwargs = self.template_kwargs or {}
        # update with those from data
        kwargs.update({f: data[f] for f in self.data_fields})
        # additional options given as a dictionary
        kwargs.update(self.add_fields)
        return initiate_pipeline_config(
            template=self.template,
            template_func=self.template_func,
            template_kwargs=kwargs,
            path=dataset_path,
            commit=True
        )

    def __call__(self, data=None):
        """Initiate (or reuse, per `existing`) the dataset; yields updated data."""
        # Per-call empty dict instead of a shared mutable default argument.
        data = {} if data is None else data
        # figure out directory where create such a dataset
        dataset_name = self.dataset_name or data.get('dataset_name', None)
        dataset_path = opj(os.curdir, dataset_name) \
            if self.path is None \
            else self.path
        data_updated = updated(data, {'dataset_path': dataset_path,
                                      'dataset_name': dataset_name})
        lgr.debug("Request to initialize a dataset %s at %s", dataset_name, dataset_path)
        init = True
        if exists(dataset_path):
            # TODO: config crawl.subdataset.existing = skip|raise|replace|crawl|adjust
            # TODO: config crawl.subdataset.crawl_new = false|true
            existing = self.existing or 'skip'
            if existing == 'skip':
                lgr.info("Skipping dataset %s since already exists", dataset_name)
                yield data_updated
                return
            elif existing == 'raise':
                raise RuntimeError("%s already exists" % dataset_path)
            elif existing == 'replace':
                _call(rmtree, dataset_path)
            elif existing == 'adjust':
                # E.g. just regenerate configs/meta
                init = False
            else:  # TODO: 'crawl' ;)
                raise ValueError(self.existing)
        if init:
            _call(self._initiate_dataset, dataset_path, dataset_name)
        _call(self._save_crawl_config, dataset_path, data)
        yield data_updated
class Annexificator(object):
"""A helper which would encapsulate the operation of adding new content to git/annex repo
If 'filename' field was not found in the data, filename from the URL
gets taken.
'path' field of data (if present) is used to define path within the subdirectory.
Should be relative. If absolute found -- ValueError is raised
"""
def __init__(self, path=None,
no_annex=False,
mode='full', options=None,
special_remotes=[],
allow_dirty=False, yield_non_updated=False,
auto_finalize=True,
statusdb=None,
skip_problematic=False,
largefiles=None,
**kwargs):
"""
Note that always_commit=False for the used AnnexRepo to minimize number
of unnecessary commits
Parameters
----------
mode : str of {'full', 'fast', 'relaxed'}
What mode of download to use for the content. In "full" content gets downloaded
and checksummed (according to the backend), 'fast' and 'relaxed' are just original
annex modes where no actual download is performed and the files' keys are their URLs
no_annex : bool
Assume/create a simple Git repository, without git-annex
special_remotes : list, optional
List of custom special remotes to initialize and enable by default
yield_non_updated : bool, optional
Either to yield original data (with filepath) if load was not updated in annex
auto_finalize : bool, optional
In some cases, if e.g. adding a file in place of an existing directory or placing
a file under a directory for which there is a file atm, we would 'finalize' before
carrying out the operation
statusdb : {'json', 'fileattr'}, optional
DB of file statuses which will be used to figure out if remote load has changed.
If None, no statusdb will be used so Annexificator will process every given URL
as if it leads to new content. 'json' -- JsonFileStatusesDB will
be used which will store information about each provided file/url into a JSON file.
'fileattr' -- PhysicalFileStatusesDB will be used to decide based on information in
annex and file(s) mtime on the disk.
Note that statusdb "lives" within the branch, so switch_branch would drop existing DB (which
should get committed within the branch) and would create a new one if DB is requested
again.
skip_problematic: bool, optional
If True, it would not raise an exception if e.g. url is 404 or forbidden -- then just
nothing is yielded, and effectively that entry is skipped
largefiles: str, optional
A setting to pass | |
<reponame>pulumi/pulumi-alicloud<filename>sdk/python/pulumi_alicloud/config/outputs.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'AssumeRole',
'Endpoints',
]
@pulumi.output_type
class AssumeRole(dict):
    """Generated output type for the provider's ``assume_role`` configuration.

    NOTE: this file is produced by the Pulumi Terraform Bridge (tfgen);
    regeneration will overwrite manual edits, so only documentation is added
    here and the structure is left exactly as generated.
    """

    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property names and warn
        # when the raw dict key is accessed directly.
        suggest = None
        if key == "roleArn":
            suggest = "role_arn"
        elif key == "sessionExpiration":
            suggest = "session_expiration"
        elif key == "sessionName":
            suggest = "session_name"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AssumeRole. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AssumeRole.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AssumeRole.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 role_arn: str,
                 policy: Optional[str] = None,
                 session_expiration: Optional[int] = None,
                 session_name: Optional[str] = None):
        # Only explicitly provided optional values are stored.
        pulumi.set(__self__, "role_arn", role_arn)
        if policy is not None:
            pulumi.set(__self__, "policy", policy)
        if session_expiration is not None:
            pulumi.set(__self__, "session_expiration", session_expiration)
        if session_name is not None:
            pulumi.set(__self__, "session_name", session_name)

    @property
    @pulumi.getter(name="roleArn")
    def role_arn(self) -> str:
        return pulumi.get(self, "role_arn")

    @property
    @pulumi.getter
    def policy(self) -> Optional[str]:
        return pulumi.get(self, "policy")

    @property
    @pulumi.getter(name="sessionExpiration")
    def session_expiration(self) -> Optional[int]:
        return pulumi.get(self, "session_expiration")

    @property
    @pulumi.getter(name="sessionName")
    def session_name(self) -> Optional[str]:
        return pulumi.get(self, "session_name")
@pulumi.output_type
class Endpoints(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "brainIndustrial":
suggest = "brain_industrial"
elif key == "dmsEnterprise":
suggest = "dms_enterprise"
elif key == "hcsSgw":
suggest = "hcs_sgw"
elif key == "rKvstore":
suggest = "r_kvstore"
elif key == "wafOpenapi":
suggest = "waf_openapi"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in Endpoints. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
Endpoints.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
Endpoints.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
actiontrail: Optional[str] = None,
adb: Optional[str] = None,
alb: Optional[str] = None,
alidfs: Optional[str] = None,
alidns: Optional[str] = None,
alikafka: Optional[str] = None,
apigateway: Optional[str] = None,
arms: Optional[str] = None,
bastionhost: Optional[str] = None,
brain_industrial: Optional[str] = None,
bssopenapi: Optional[str] = None,
cas: Optional[str] = None,
cassandra: Optional[str] = None,
cbn: Optional[str] = None,
cddc: Optional[str] = None,
cdn: Optional[str] = None,
cds: Optional[str] = None,
clickhouse: Optional[str] = None,
cloudauth: Optional[str] = None,
cloudphone: Optional[str] = None,
cloudsso: Optional[str] = None,
cms: Optional[str] = None,
config: Optional[str] = None,
cr: Optional[str] = None,
cs: Optional[str] = None,
datahub: Optional[str] = None,
dataworkspublic: Optional[str] = None,
dbfs: Optional[str] = None,
dcdn: Optional[str] = None,
ddosbgp: Optional[str] = None,
ddoscoo: Optional[str] = None,
dds: Optional[str] = None,
devopsrdc: Optional[str] = None,
dg: Optional[str] = None,
dm: Optional[str] = None,
dms_enterprise: Optional[str] = None,
dns: Optional[str] = None,
drds: Optional[str] = None,
dts: Optional[str] = None,
eais: Optional[str] = None,
eci: Optional[str] = None,
ecs: Optional[str] = None,
ehpc: Optional[str] = None,
eipanycast: Optional[str] = None,
elasticsearch: Optional[str] = None,
emr: Optional[str] = None,
ens: Optional[str] = None,
ess: Optional[str] = None,
eventbridge: Optional[str] = None,
fc: Optional[str] = None,
fnf: Optional[str] = None,
ga: Optional[str] = None,
gds: Optional[str] = None,
gpdb: Optional[str] = None,
gwsecd: Optional[str] = None,
hbr: Optional[str] = None,
hcs_sgw: Optional[str] = None,
hitsdb: Optional[str] = None,
imm: Optional[str] = None,
imp: Optional[str] = None,
ims: Optional[str] = None,
iot: Optional[str] = None,
kms: Optional[str] = None,
kvstore: Optional[str] = None,
location: Optional[str] = None,
log: Optional[str] = None,
market: Optional[str] = None,
maxcompute: Optional[str] = None,
mhub: Optional[str] = None,
mns: Optional[str] = None,
mscopensubscription: Optional[str] = None,
mse: Optional[str] = None,
nas: Optional[str] = None,
ons: Optional[str] = None,
onsproxy: Optional[str] = None,
oos: Optional[str] = None,
opensearch: Optional[str] = None,
oss: Optional[str] = None,
ots: Optional[str] = None,
polardb: Optional[str] = None,
privatelink: Optional[str] = None,
pvtz: Optional[str] = None,
quickbi: Optional[str] = None,
quotas: Optional[str] = None,
r_kvstore: Optional[str] = None,
ram: Optional[str] = None,
rds: Optional[str] = None,
redisa: Optional[str] = None,
resourcemanager: Optional[str] = None,
resourcesharing: Optional[str] = None,
ros: Optional[str] = None,
sas: Optional[str] = None,
scdn: Optional[str] = None,
sddp: Optional[str] = None,
serverless: Optional[str] = None,
servicemesh: Optional[str] = None,
sgw: Optional[str] = None,
slb: Optional[str] = None,
sts: Optional[str] = None,
swas: Optional[str] = None,
vod: Optional[str] = None,
vpc: Optional[str] = None,
vs: Optional[str] = None,
waf: Optional[str] = None,
waf_openapi: Optional[str] = None):
if actiontrail is not None:
pulumi.set(__self__, "actiontrail", actiontrail)
if adb is not None:
pulumi.set(__self__, "adb", adb)
if alb is not None:
pulumi.set(__self__, "alb", alb)
if alidfs is not None:
pulumi.set(__self__, "alidfs", alidfs)
if alidns is not None:
pulumi.set(__self__, "alidns", alidns)
if alikafka is not None:
pulumi.set(__self__, "alikafka", alikafka)
if apigateway is not None:
pulumi.set(__self__, "apigateway", apigateway)
if arms is not None:
pulumi.set(__self__, "arms", arms)
if bastionhost is not None:
pulumi.set(__self__, "bastionhost", bastionhost)
if brain_industrial is not None:
pulumi.set(__self__, "brain_industrial", brain_industrial)
if bssopenapi is not None:
pulumi.set(__self__, "bssopenapi", bssopenapi)
if cas is not None:
pulumi.set(__self__, "cas", cas)
if cassandra is not None:
pulumi.set(__self__, "cassandra", cassandra)
if cbn is not None:
pulumi.set(__self__, "cbn", cbn)
if cddc is not None:
pulumi.set(__self__, "cddc", cddc)
if cdn is not None:
pulumi.set(__self__, "cdn", cdn)
if cds is not None:
pulumi.set(__self__, "cds", cds)
if clickhouse is not None:
pulumi.set(__self__, "clickhouse", clickhouse)
if cloudauth is not None:
pulumi.set(__self__, "cloudauth", cloudauth)
if cloudphone is not None:
pulumi.set(__self__, "cloudphone", cloudphone)
if cloudsso is not None:
pulumi.set(__self__, "cloudsso", cloudsso)
if cms is not None:
pulumi.set(__self__, "cms", cms)
if config is not None:
pulumi.set(__self__, "config", config)
if cr is not None:
pulumi.set(__self__, "cr", cr)
if cs is not None:
pulumi.set(__self__, "cs", cs)
if datahub is not None:
pulumi.set(__self__, "datahub", datahub)
if dataworkspublic is not None:
pulumi.set(__self__, "dataworkspublic", dataworkspublic)
if dbfs is not None:
pulumi.set(__self__, "dbfs", dbfs)
if dcdn is not None:
pulumi.set(__self__, "dcdn", dcdn)
if ddosbgp is not None:
pulumi.set(__self__, "ddosbgp", ddosbgp)
if ddoscoo is not None:
pulumi.set(__self__, "ddoscoo", ddoscoo)
if dds is not None:
pulumi.set(__self__, "dds", dds)
if devopsrdc is not None:
pulumi.set(__self__, "devopsrdc", devopsrdc)
if dg is not None:
pulumi.set(__self__, "dg", dg)
if dm is not None:
pulumi.set(__self__, "dm", dm)
if dms_enterprise is not None:
pulumi.set(__self__, "dms_enterprise", dms_enterprise)
if dns is not None:
pulumi.set(__self__, "dns", dns)
if drds is not None:
pulumi.set(__self__, "drds", drds)
if dts is not None:
pulumi.set(__self__, "dts", dts)
if eais is not None:
pulumi.set(__self__, "eais", eais)
if eci is not None:
pulumi.set(__self__, "eci", eci)
if ecs is not None:
pulumi.set(__self__, "ecs", ecs)
if ehpc is not None:
pulumi.set(__self__, "ehpc", ehpc)
if eipanycast is not None:
pulumi.set(__self__, "eipanycast", eipanycast)
if elasticsearch is not None:
pulumi.set(__self__, "elasticsearch", elasticsearch)
if emr is not None:
pulumi.set(__self__, "emr", emr)
if ens is not None:
pulumi.set(__self__, "ens", ens)
if ess is not None:
pulumi.set(__self__, "ess", ess)
if eventbridge is not None:
pulumi.set(__self__, "eventbridge", eventbridge)
if fc is not None:
pulumi.set(__self__, "fc", fc)
if fnf is not None:
pulumi.set(__self__, "fnf", fnf)
if ga is not None:
pulumi.set(__self__, "ga", ga)
if gds is not None:
pulumi.set(__self__, "gds", gds)
if gpdb is not None:
pulumi.set(__self__, "gpdb", gpdb)
if gwsecd is not None:
pulumi.set(__self__, "gwsecd", gwsecd)
if hbr is not None:
pulumi.set(__self__, "hbr", hbr)
if hcs_sgw is not None:
pulumi.set(__self__, "hcs_sgw", hcs_sgw)
if hitsdb is not None:
pulumi.set(__self__, "hitsdb", hitsdb)
if imm is not None:
pulumi.set(__self__, "imm", imm)
if imp is not None:
pulumi.set(__self__, "imp", imp)
if ims is not None:
pulumi.set(__self__, "ims", ims)
if iot is not None:
pulumi.set(__self__, "iot", iot)
if kms is not None:
pulumi.set(__self__, "kms", kms)
if kvstore is not None:
pulumi.set(__self__, "kvstore", kvstore)
if location is not None:
pulumi.set(__self__, "location", location)
if log is not None:
pulumi.set(__self__, "log", log)
if market is not None:
pulumi.set(__self__, "market", market)
if maxcompute is not None:
pulumi.set(__self__, "maxcompute", maxcompute)
if mhub is not None:
pulumi.set(__self__, "mhub", mhub)
if mns is not None:
pulumi.set(__self__, "mns", mns)
if mscopensubscription is not None:
pulumi.set(__self__, "mscopensubscription", mscopensubscription)
if mse is not None:
pulumi.set(__self__, "mse", mse)
if | |
import sys
import os
import os.path as osp
import time
import shutil
import pandas as pd
from collections import deque
import torch
import codecs
import imgviz
import argparse
from pathlib import Path
import functools
from qtpy import QtCore
from qtpy.QtCore import Qt
from qtpy import QtWidgets
from qtpy import QtGui
from labelme import PY2
from labelme import QT5
import PIL
from PIL import ImageQt
import requests
import subprocess
from labelme.app import MainWindow
from labelme.utils import newIcon
from labelme.utils import newAction
from labelme.widgets import BrightnessContrastDialog
from labelme.widgets import LabelListWidgetItem
from labelme.label_file import LabelFileError
from labelme.label_file import LabelFile
from labelme import utils
from labelme.widgets import ToolBar
from labelme.config import get_config
from annolid.annotation import labelme2coco
from annolid.data import videos
from annolid.gui.widgets import ExtractFrameDialog
from annolid.gui.widgets import ConvertCOODialog
from annolid.gui.widgets import TrainModelDialog
from annolid.gui.widgets import Glitter2Dialog
from annolid.gui.widgets import QualityControlDialog
from annolid.gui.widgets import TrackDialog
from qtpy.QtWebEngineWidgets import QWebEngineView
from annolid.postprocessing.glitter import tracks2nix
from annolid.postprocessing.quality_control import TracksResults
from annolid.gui.widgets import ProgressingWindow
import webbrowser
import atexit
from annolid.gui.widgets.video_slider import VideoSlider
from annolid.gui.widgets.step_size_widget import StepSizeWidget
from annolid.postprocessing.quality_control import pred_dict_to_labelme
from annolid.annotation.keypoints import save_labels
__appname__ = 'Annolid'
__version__ = "1.1.1"
LABEL_COLORMAP = imgviz.label_colormap(value=200)
class FlexibleWorker(QtCore.QObject):
    """Wrap an arbitrary callable so it can live on a QThread and be
    invoked through the Qt signal/slot machinery.
    """

    # Trigger signal; presumably connected to run() by the owner -- confirm.
    start = QtCore.Signal()

    def __init__(self, function, *args, **kwargs):
        super(FlexibleWorker, self).__init__()
        # Remember the callable and its arguments for the deferred call.
        self.function = function
        self.args = args
        self.kwargs = kwargs

    def run(self):
        """Invoke the stored callable with the stored arguments."""
        func = self.function
        func(*self.args, **self.kwargs)
class LoadFrameThread(QtCore.QObject):
    """Worker object that loads video frames off the GUI thread.

    Frame numbers are queued via request(); load() -- driven both by the
    ``process`` signal and a 20 ms timer -- serves only the newest request
    and emits the decoded frame as a QImage through ``res_frame``.
    """

    res_frame = QtCore.Signal(QtGui.QImage)
    process = QtCore.Signal()
    # Class-level defaults; frame_queue is rebound per instance in __init__
    # so instances never share one mutable list.
    frame_queue = []
    request_waiting_time = 1
    reload_times = None
    previous_process_time = 0
    # Set by the owner before frames are requested -- not created here.
    video_loader = None

    def __init__(self, *args, **kwargs):
        super(LoadFrameThread, self).__init__(*args, **kwargs)
        self.working_lock = QtCore.QMutex()
        self.frame_queue = []
        # Rolling window of the last 5 load durations; throttles request().
        self.current_load_times = deque(maxlen=5)
        self.process.connect(self.load)
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.load)
        self.timer.start(20)

    def load(self):
        """Load the most recently requested frame and emit it as a QImage."""
        self.previous_process_time = time.time()
        if not self.frame_queue:
            return
        self.working_lock.lock()
        try:
            # Re-check under the lock; another slot may have drained the queue.
            # BUG FIX: the original returned here *without* unlocking the
            # mutex, deadlocking every subsequent load() call.
            if not self.frame_queue:
                return
            # Only the newest request matters; drop stale ones.
            frame_number = self.frame_queue[-1]
            self.frame_queue = []
            try:
                t_start = time.time()
                frame = self.video_loader.load_frame(frame_number)
                self.current_load_times.append(time.time() - t_start)
                # Adapt the request throttle to the average recent load time.
                self.request_waiting_time = (
                    sum(self.current_load_times) / len(self.current_load_times)
                )
            except Exception:
                # Best effort: a failed decode simply produces no frame.
                frame = None
        finally:
            self.working_lock.unlock()
        if frame is not None:
            img_pil = PIL.Image.fromarray(frame)
            imageData = utils.img_pil_to_data(img_pil)
            image = QtGui.QImage.fromData(imageData)
            self.res_frame.emit(image)

    def request(self, frame_number):
        """Queue a frame number; emit ``process`` if the throttle allows."""
        self.frame_queue.append(frame_number)
        t_last = time.time() - self.previous_process_time
        if t_last > self.request_waiting_time:
            self.previous_process_time = time.time()
            self.process.emit()
def start_tensorboard(log_dir=None,
                      tensorboard_url='http://localhost:6006'):
    """Start a TensorBoard server unless one is already reachable.

    Args:
        log_dir: directory of event files; defaults to ``<pkg>/runs/logs``.
        tensorboard_url: URL probed to detect an already-running server.

    Returns:
        The spawned ``subprocess.Popen`` handle, or None when a server was
        already answering at *tensorboard_url* (nothing to kill later).
    """
    process = None
    if log_dir is None:
        here = Path(__file__).parent
        log_dir = here.parent.resolve() / "runs" / "logs"
    try:
        # Probe the URL; a successful response means a server is running.
        requests.get(tensorboard_url)
    except requests.exceptions.ConnectionError:
        process = subprocess.Popen(
            ['tensorboard', f'--logdir={str(log_dir)}'])
        # Give TensorBoard time to bind its port before the caller loads it.
        time.sleep(8)
    return process
class VisualizationWindow(QtWidgets.QDialog):
    """Dialog that embeds a TensorBoard session in a web view."""

    # Default TensorBoard endpoint shown in the embedded browser.
    tensorboard_url = 'http://localhost:6006'

    def __init__(self):
        super(VisualizationWindow, self).__init__()
        self.setWindowTitle("Visualization Tensorboard")
        # Launch a TensorBoard server unless one is already running.
        self.process = start_tensorboard(
            tensorboard_url=self.tensorboard_url)
        self.browser = QWebEngineView()
        # BUG FIX: the original referenced the undefined attribute
        # ``self.tensorboar_url`` (typo), raising AttributeError on open.
        self.browser.setUrl(QtCore.QUrl(self.tensorboard_url))
        vbox = QtWidgets.QVBoxLayout()
        vbox.addWidget(self.browser)
        self.setLayout(vbox)
        self.show()

    def closeEvent(self, event):
        """Kill the TensorBoard child process (if we spawned one) on close."""
        if self.process is not None:
            time.sleep(3)
            self.process.kill()
        event.accept()
class AnnolidWindow(MainWindow):
"""Annolid Main Window based on Labelme.
"""
    def __init__(self,
                 config=None
                 ):
        """Build the Annolid main window on top of labelme's MainWindow.

        Args:
            config: optional labelme-style configuration passed through to
                the MainWindow base class.
        """
        self.config = config
        super(AnnolidWindow, self).__init__(config=self.config)
        # Show all side docks by default.
        self.flag_dock.setVisible(True)
        self.label_dock.setVisible(True)
        self.shape_dock.setVisible(True)
        self.file_dock.setVisible(True)
        # Package directory; used to resolve bundled icon paths below.
        self.here = Path(__file__).resolve().parent
        # newAction pre-bound to this window.
        action = functools.partial(newAction, self)
        # Per-session state (reset again in closeFile()).
        self._df = None
        self.label_stats = {}
        self.shape_hash_ids = {}
        self.changed_json_stats = {}
        self._pred_res_folder_suffix = '_tracking_results_labelme'
        self.frame_number = 0
        self.video_loader = None
        self.video_file = None
        self.annotation_dir = None
        self.step_size = 1
        self.stepSizeWidget = StepSizeWidget()
        # --- toolbar / menu actions ------------------------------------
        open_video = action(
            self.tr("&Open Video"),
            self.openVideo,
            None,
            "Open Video",
            self.tr("Open video")
        )
        open_video.setIcon(QtGui.QIcon(
            str(
                self.here / "icons/open_video.png"
            )
        ))
        step_size = QtWidgets.QWidgetAction(self)
        step_size.setIcon(QtGui.QIcon(
            str(
                self.here / "icons/fast_forward.png"
            )
        ))
        step_size.setDefaultWidget(self.stepSizeWidget)
        self.stepSizeWidget.setWhatsThis(
            self.tr(
                "Step for the next or prev image. e.g. 30"
            )
        )
        # Enabled only once a video is loaded.
        self.stepSizeWidget.setEnabled(False)
        coco = action(
            self.tr("&COCO format"),
            self.coco,
            'Ctrl+C+O',
            "coco",
            self.tr("Convert to COCO format"),
        )
        coco.setIcon(QtGui.QIcon(str(
            self.here / "icons/coco.png")))
        save_labeles = action(
            self.tr("&Save labels"),
            self.save_labels,
            'Ctrl+Shift+L',
            'Save Labels',
            self.tr("Save labels to txt file")
        )
        save_labeles.setIcon(QtGui.QIcon(
            str(self.here/"icons/label_list.png")
        ))
        frames = action(
            self.tr("&Extract frames"),
            self.frames,
            'Ctrl+Shift+E',
            "Extract frames",
            self.tr("Extract frames frome a video"),
        )
        models = action(
            self.tr("&Train models"),
            self.models,
            "Ctrl+Shift+T",
            "Train models",
            self.tr("Train neural networks")
        )
        models.setIcon(QtGui.QIcon(str(
            self.here / "icons/models.png")))
        frames.setIcon(QtGui.QIcon(str(
            self.here / "icons/extract_frames.png")))
        tracks = action(
            self.tr("&Track Animals"),
            self.tracks,
            "Ctrl+Shift+O",
            "Track Animals",
            self.tr("Track animals and Objects")
        )
        tracks.setIcon(QtGui.QIcon(str(
            self.here / 'icons/track.png'
        )))
        glitter2 = action(
            self.tr("&Glitter2"),
            self.glitter2,
            "Ctrl+Shift+G",
            self.tr("Convert to Glitter2 nix format")
        )
        glitter2.setIcon(QtGui.QIcon(str(
            self.here / 'icons/glitter2_logo.png'
        )))
        # NOTE(review): this action reuses the "Ctrl+Shift+G" shortcut
        # already assigned to glitter2 above -- likely unintended; confirm.
        quality_control = action(
            self.tr("&Quality Control"),
            self.quality_control,
            "Ctrl+Shift+G",
            self.tr("Convert to tracking results to labelme format")
        )
        quality_control.setIcon(QtGui.QIcon(str(
            self.here / 'icons/quality_control.png'
        )))
        visualization = action(
            self.tr("&Visualization"),
            self.visualization,
            'Ctrl+Shift+V',
            "Visualization",
            self.tr("Visualization results"),
        )
        colab = action(
            self.tr("&Open in Colab"),
            self.train_on_colab,
            icon="Colab",
            tip=self.tr("Open in Colab"),
        )
        colab.setIcon(QtGui.QIcon(str(
            self.here / "icons/colab.png")))
        visualization.setIcon(QtGui.QIcon(str(
            self.here / "icons/visualization.png")))
        # --- menus ------------------------------------------------------
        self.menus = utils.struct(
            recentFiles=QtWidgets.QMenu(self.tr("Open &Recent")),
            frames=self.menu(self.tr("&Extract Frames")),
            open_video=self.menu(self.tr("&Open Video")),
            coco=self.menu(self.tr("&COCO")),
            models=self.menu(self.tr("&Train models")),
            visualization=self.menu(self.tr("&Visualization")),
            tracks=self.menu(self.tr("&Track Animals")),
            glitter2=self.menu(self.tr("&Glitter2")),
            save_labels=self.menu(self.tr("&Save Labels")),
            quality_control=self.menu(self.tr("&Quality Control")),
            colab=self.menu(self.tr("&Open in Colab")),
        )
        # Splice Annolid actions into the toolbar inherited from labelme.
        _action_tools = list(self.actions.tool)
        _action_tools.insert(0, frames)
        _action_tools.insert(1, open_video)
        _action_tools.insert(2, step_size)
        _action_tools.append(coco)
        _action_tools.append(models)
        _action_tools.append(visualization)
        _action_tools.append(tracks)
        _action_tools.append(glitter2)
        _action_tools.append(save_labeles)
        _action_tools.append(quality_control)
        _action_tools.append(colab)
        self.actions.tool = tuple(_action_tools)
        self.tools.clear()
        utils.addActions(self.tools, self.actions.tool)
        utils.addActions(self.menus.frames, (frames,))
        utils.addActions(self.menus.open_video, (open_video,))
        utils.addActions(self.menus.coco, (coco,))
        utils.addActions(self.menus.models, (models,))
        utils.addActions(self.menus.visualization, (visualization,))
        utils.addActions(self.menus.tracks, (tracks,))
        utils.addActions(self.menus.glitter2, (glitter2,))
        utils.addActions(self.menus.save_labels, (save_labeles,))
        utils.addActions(self.menus.quality_control, (quality_control,))
        utils.addActions(self.menus.colab, (colab,))
        self.statusBar().showMessage(self.tr("%s started.") % __appname__)
        self.statusBar().show()
        self.setWindowTitle(__appname__)
        self.settings = QtCore.QSettings("Annolid", 'Annolid')
        # Video playback / background-work state.
        self.video_results_folder = None
        self.seekbar = None
        self.frame_worker = QtCore.QThread()
        self.frame_loader = LoadFrameThread()
        self.seg_pred_thread = QtCore.QThread()
        self.seg_train_thread = QtCore.QThread()
        self.destroyed.connect(self.clean_up)
        self.stepSizeWidget.valueChanged.connect(self.update_step_size)
        # Also run cleanup on interpreter exit, not only on widget destroy.
        atexit.register(self.clean_up)
def update_step_size(self, value):
self.step_size = value
    def closeFile(self, _value=False):
        """Close the current file/video and reset all per-session state."""
        if not self.mayContinue():
            return
        self.resetState()
        self.setClean()
        self.toggleActions(False)
        self.canvas.setEnabled(False)
        self.actions.saveAs.setEnabled(False)
        self.uniqLabelList.clear()
        # clear the file list
        self.fileListWidget.clear()
        if self.video_loader is not None:
            # Tear down video playback state and remove the seek bar.
            self.video_loader = None
            self.num_frames = None
            self.video_file = None
            self.annotation_dir = None
            self.statusBar().removeWidget(self.seekbar)
            self.seekbar = None
        # Reset bookkeeping to the same defaults as __init__.
        self._df = None
        self.label_stats = {}
        self.shape_hash_ids = {}
        self.changed_json_stats = {}
        self._pred_res_folder_suffix = '_tracking_results_labelme'
        self.frame_number = 0
        self.step_size = 1
        self.video_results_folder = None
def toolbar(self, title, actions=None):
toolbar = ToolBar(title)
toolbar.setObjectName("%sToolBar" % title)
# toolbar.setOrientation(Qt.Vertical)
toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
if actions:
utils.addActions(toolbar, actions)
self.addToolBar(Qt.TopToolBarArea, toolbar)
return toolbar
def scanAllImages(self, folderPath):
extensions = [
".%s" % fmt.data().decode().lower()
for fmt in QtGui.QImageReader.supportedImageFormats()
]
extensions.append('.json')
self.only_json_files = True
images = []
for root, dirs, files in os.walk(folderPath):
for file in files:
if file.lower().endswith(tuple(extensions)):
relativePath = osp.join(root, file)
if self.only_json_files and not file.lower().endswith('.json'):
self.only_json_files = False
images.append(relativePath)
images.sort(key=lambda x: x.lower())
return images
def _addItem(self, filename, label_file):
item = QtWidgets.QListWidgetItem(filename)
item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
if QtCore.QFile.exists(label_file) and LabelFile.is_label_file(
label_file
):
item.setCheckState(Qt.Checked)
else:
item.setCheckState(Qt.Unchecked)
if not self.fileListWidget.findItems(filename, Qt.MatchExactly):
self.fileListWidget.addItem(item)
def _getLabelFile(self, filename):
label_file = osp.splitext(filename)[0] + ".json"
if self.output_dir:
label_file_without_path = osp.basename(label_file)
label_file = osp.join(self.output_dir, label_file_without_path)
return label_file
    def importDirImages(self, dirpath, pattern=None, load=True):
        """Populate the file list from *dirpath* and open the first item.

        Args:
            dirpath: folder scanned recursively for images/JSONs.
            pattern: if given, keep only filenames containing this substring.
            load: forwarded to openNextImg; load the first item immediately.
        """
        self.actions.openNextImg.setEnabled(True)
        self.actions.openPrevImg.setEnabled(True)
        if not self.mayContinue() or not dirpath:
            return
        self.lastOpenDir = dirpath
        self.annotation_dir = dirpath
        self.filename = None
        self.fileListWidget.clear()
        for filename in self.scanAllImages(dirpath):
            if pattern and pattern not in filename:
                continue
            label_file = self._getLabelFile(filename)
            # List .json entries only when the folder is JSON-only
            # (scanAllImages sets self.only_json_files as a side effect).
            if not filename.endswith('.json') or self.only_json_files:
                self._addItem(filename, label_file)
        self.openNextImg(load=load)
def _get_rgb_by_label(self, label):
if self._config["shape_color"] == "auto":
item = self.uniqLabelList.findItemsByLabel(label)[0]
label_id = self.uniqLabelList.indexFromItem(item).row() + 1
label_id += self._config["shift_auto_shape_color"]
return LABEL_COLORMAP[label_id % len(LABEL_COLORMAP)]
elif (
self._config["shape_color"] == "manual"
and self._config["label_colors"]
and label in self._config["label_colors"]
):
return self._config["label_colors"][label]
elif self._config["default_shape_color"]:
return self._config["default_shape_color"]
def _update_shape_color(self, shape):
if not self.uniqLabelList.findItemsByLabel(shape.label):
item = self.uniqLabelList.createItemFromLabel(shape.label)
self.uniqLabelList.addItem(item)
rgb = self._get_rgb_by_label(shape.label)
self.uniqLabelList.setItemLabel(item, shape.label, rgb)
r, g, b = self._get_rgb_by_label(shape.label)
shape.line_color = QtGui.QColor(r, g, b)
shape.vertex_fill_color = QtGui.QColor(r, g, b)
shape.hvertex_fill_color = QtGui.QColor(255, 255, 255)
shape.fill_color = QtGui.QColor(r, g, b, 128)
shape.select_line_color = QtGui.QColor(255, 255, 255)
shape.select_fill_color = QtGui.QColor(r, g, b, 155)
return r, g, b
    def addLabel(self, shape):
        """Add *shape* to the label lists and track per-label instance counts."""
        if shape.group_id is None:
            text = shape.label
        else:
            text = "{} ({})".format(shape.label, shape.group_id)
        # Hash the x-sorted points so re-adding the same polygon is not
        # double-counted in the instance statistics.
        shape_points_hash = hash(
            str(sorted(shape.points, key=lambda point: point.x())))
        self.shape_hash_ids[shape_points_hash] = self.shape_hash_ids.get(
            shape_points_hash, 0) + 1
        if self.shape_hash_ids[shape_points_hash] <= 1:
            self.label_stats[text] = self.label_stats.get(text, 0) + 1
        label_list_item = LabelListWidgetItem(text, shape)
        self.labelList.addItem(label_list_item)
        items = self.uniqLabelList.findItemsByLabel(shape.label)
        if not items:
            item = self.uniqLabelList.createItemFromLabel(
                shape.label
            )
            self.uniqLabelList.addItem(item)
            rgb = self._get_rgb_by_label(shape.label)
            # Show the running instance count next to the unique label.
            self.uniqLabelList.setItemLabel(
                item, f"{shape.label} [{self.label_stats.get(text,0)} instance]", rgb)
        else:
            for item in items:
                rgb = self._get_rgb_by_label(shape.label)
                self.uniqLabelList.setItemLabel(
                    item, f"{shape.label} [{self.label_stats.get(text,0)} instances]", rgb)
        self.labelDialog.addLabelHistory(shape.label)
        for action in self.actions.onShapesPresent:
            action.setEnabled(True)
        # Color the shape and render a matching colored bullet in the list.
        r, g, b = self._update_shape_color(shape)
        label_list_item.setText(
            '{} <font color="#{:02x}{:02x}{:02x}">●</font>'.format(
                text, r, g, b
            )
        )
    def editLabel(self, item=None):
        """Open the label dialog for *item* (or the current item) and apply edits.

        Raises:
            TypeError: if *item* is given but is not a LabelListWidgetItem.
        """
        if item and not isinstance(item, LabelListWidgetItem):
            raise TypeError("item must be LabelListWidgetItem type")
        if not self.canvas.editing():
            return
        if not item:
            item = self.currentItem()
        if item is None:
            return
        shape = item.shape()
        if shape is None:
            return
        text, flags, group_id = self.labelDialog.popUp(
            text=shape.label, flags=shape.flags, group_id=shape.group_id,
        )
        if text is None:
            # Dialog was cancelled; leave the shape untouched.
            return
        if not self.validateLabel(text):
            self.errorMessage(
                self.tr("Invalid label"),
                self.tr("Invalid label '{}' with validation type '{}'").format(
                    text, self._config["validate_label"]
                ),
            )
            return
        shape.label = text
        shape.flags = flags
        shape.group_id = group_id
        # Re-color and re-render the list entry with the (possibly new) label.
        r, g, b = self._update_shape_color(shape)
        if shape.group_id is None:
            item.setText(
                '{} <font color="#{:02x}{:02x}{:02x}">●</font>'.format(
                    shape.label, r, g, b))
        else:
            item.setText("{} ({})".format(shape.label, shape.group_id))
        self.setDirty()
        if not self.uniqLabelList.findItemsByLabel(shape.label):
            # NOTE(review): unlike addLabel(), this builds a raw
            # QListWidgetItem instead of createItemFromLabel -- confirm the
            # unique-label list accepts both item styles.
            item = QtWidgets.QListWidgetItem()
            item.setData(Qt.UserRole, shape.label)
            self.uniqLabelList.addItem(item)
def _saveImageFile(self, filename):
image_filename = filename.replace('.json', '.png')
imgage_jpg_file = filename.replace('.json', '.jpg')
# save png if there is no png or jpg image in the folder
if (not Path(image_filename).exists()
and not Path(imgage_jpg_file).exists()):
img = utils.img_data_to_arr(self.imageData)
imgviz.io.imsave(image_filename, | |
# import ptvsd
# ptvsd.enable_attach(address = ('172.16.71.13', 3000))
# ptvsd.wait_for_attach()
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as opt
import collections
import logging
import argparse
from modules import *
import tqdm
from sklearn import metrics
from corpus.read_utills import *
from corpus.imn_dataset import ASPECT_Dataset, DOC_Dataset
from tools import config_setting
from torch.nn.utils.rnn import pad_sequence
class Train(object):
    def __init__(self,config, nb_class, overall_maxlen, embedding_matrix, use_opinion, domain, vocab_path):
        """Wire up the IMN model, its dataloaders and the optimizer.

        Args:
            config: run configuration (batch_size, use_doc, ...).
            nb_class: number of output classes.
            overall_maxlen: maximum sentence length.
            embedding_matrix: tensor mapping token ids to word embeddings.
            use_opinion: whether gold opinion labels are fed to the model.
            domain: dataset domain name.
            vocab_path: path to the vocabulary file.
        """
        # Store the run configuration.
        self.config = config
        # Word-embedding matrix (indexed with token-id tensors later).
        self.embedding_matrix = embedding_matrix
        # Maximum sentence length.
        self.overall_maxlen = overall_maxlen
        # Build the model.
        self.imn_model = IMN(config, nb_class, use_opinion, overall_maxlen)
        # Use a GPU when available.
        cuda_condition = torch.cuda.is_available()
        self.device = torch.device("cuda:0" if cuda_condition else "cpu")
        # Move the model to the selected device.
        self.imn_model.to(self.device)
        # Aspect-level train/test datasets and loaders.
        train_aspect_dataset = ASPECT_Dataset(config, domain, vocab_path, 'train', overall_maxlen)
        self.train_aspect_dataloader = DataLoader(train_aspect_dataset,
                                                  batch_size=config.batch_size,
                                                  num_workers=0,
                                                  drop_last=True)
        test_aspect_dataset = ASPECT_Dataset(config, domain, vocab_path, 'test', overall_maxlen)
        self.test_aspect_dataloader = DataLoader(test_aspect_dataset,
                                                 batch_size=config.batch_size,
                                                 num_workers=0,
                                                 drop_last=True)
        if config.use_doc:
            # Optional document-level auxiliary tasks (sentiment + domain).
            doc_senti_dataset = DOC_Dataset(config, vocab_path, 'doc_senti')
            self.doc_senti_dataloader = DataLoader(doc_senti_dataset,
                                                   batch_size=config.batch_size,
                                                   num_workers=0,
                                                   drop_last=True)
            test_doc_senti_dataset = DOC_Dataset(config, vocab_path, 'doc_senti', train=0)
            self.test_doc_senti_dataloader = DataLoader(test_doc_senti_dataset,
                                                        batch_size=config.batch_size,
                                                        num_workers=0,
                                                        drop_last=True)
            doc_domain_dataset = DOC_Dataset(config, vocab_path, 'doc_domain')
            self.doc_domain_dataloader = DataLoader(doc_domain_dataset,
                                                    batch_size=config.batch_size,
                                                    num_workers=0)
            test_doc_domain_dataset = DOC_Dataset(config, vocab_path, 'doc_domain', train=0)
            self.test_doc_domain_dataloader = DataLoader(test_doc_domain_dataset,
                                                         batch_size=config.batch_size,
                                                         num_workers=0)
        # Parameters to optimize.
        self.optim_params = list(self.imn_model.parameters())
        # Build the optimizer.
        self.optimizer = self.get_optimizer(config)
def get_optimizer(self,config):
# if config.algorithm == 'sgd':
return opt.SGD(self.optim_params, lr=0.01, momentum=0.0, weight_decay=0.0, nesterov=False)
def get_prob(self,epoch_count):
prob = 5/(5+np.exp(epoch_count/5))
return prob
def pre_train(self, epoch):
self.imn_model.train()
self.iteration(epoch, [self.doc_senti_dataloader, self.doc_domain_dataloader], 'pretrain', train=True)
def pre_test(self, epoch):
self.imn_model.eval()
with torch.no_grad():
self.iteration(epoch, [self.test_doc_senti_dataloader, self.test_doc_domain_dataloader], 'pretrain', train=False)
def train(self, epoch):
self.imn_model.train()
self.iteration(epoch, [self.train_aspect_dataloader, self.doc_senti_dataloader, self.doc_domain_dataloader], 'train', train=True)
def test(self, epoch):
self.imn_model.eval()
with torch.no_grad():
return self.iteration(epoch, [self.test_aspect_dataloader], 'train', train=False)
def iteration(self, epoch, data_loader, phrase, train=True):
'''
:param phrase: 控制模型的训练阶段(pretrain/train)
:param train: 控制模型是训练还是运行阶段 (True/False)
'''
if phrase == 'pretrain':
senti_dl = data_loader[0]
domain_dl = data_loader[1]
str_code = 'train' if train else 'test'
data_iter = tqdm.tqdm(enumerate(zip(senti_dl, domain_dl)),
desc="EP_%s:%d" % (str_code, epoch),
total=len(data_loader),
bar_format="{l_bar}{r_bar}")
total_senti_loss = 0
total_domain_loss = 0
total_loss = 0
all_senti_pre, all_senti_lab = [], []
all_domain_pre, all_domain_lab = [], []
bs = 0 # 计数batc_size
for i, d in data_iter:
# 取得数据并发送给计算设备
senti_data = d[0]
domain_data = d[1]
batch_senti_x = senti_data['x'].to(self.device)
batch_senti_y = senti_data['y'].to(self.device)
batch_domain_x = domain_data['x'].to(self.device)
batch_domain_y = domain_data['y'].to(self.device)
# 转换为对应的词向量
bt_senti_x_emb = self.embedding_matrix[batch_senti_x].cuda()
bt_domain_x_emb = self.embedding_matrix[batch_domain_x].cuda()
# 前向传播得到结果
doc_prob_1, doc_prob_2 = self.imn_model.forward([bt_senti_x_emb, bt_domain_x_emb], 'doc_model')
if train:
# 计算损失
loss1 = self.compute_loss(doc_prob_1, batch_senti_y, 'NLLLoss')
loss2 = self.compute_loss(doc_prob_2, batch_domain_y, 'BCELoss')
loss = loss1 + loss2
# 反向传播
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# 为计算当前epoch的平均loss
total_senti_loss += loss1.item()
total_domain_loss += loss2.item()
total_loss += loss.item()
else:
# 提取出预测的结果和标记,并存在all_predictions, all_label里
doc_prob_1_ = doc_prob_1.argmax(dim=-1).tolist()
senti_y = batch_senti_y.cpu().argmax(dim=-1).tolist()
all_senti_pre.extend(doc_prob_1_)
all_senti_lab.extend(senti_y)
doc_prob_2_ = doc_prob_2.squeeze().tolist()
doc_prob_2_ = [1 if i >= 0.5 else 0 for i in doc_prob_2_]
domain_y = batch_domain_y.cpu().squeeze().tolist()
all_domain_pre.extend(doc_prob_2_)
all_domain_lab.extend(domain_y)
bs += 1
# 计算auc
senti_auc = metrics.recall_score(all_senti_lab, all_senti_lab, average='micro')
domain_auc = metrics.accuracy_score(all_domain_lab, all_domain_pre)
# 打印输出
if train:
print("Pretrain doc-level model: Epoch: %d, senti_loss: %f, domain_loss: %f, loss: %f"%(epoch, total_senti_loss/(bs), total_domain_loss/(bs), total_loss/(bs)))
else:
print("Pretest doc-level model: Epoch: %d, senti_auc: %f, domain_auc: %f"%(epoch, senti_auc, domain_auc))
elif phrase == 'train':
if train:
# 训练阶段
aspect_dl = data_loader[0]
senti_dl = data_loader[1]
senti_dl_iter = iter(senti_dl)
domain_dl = data_loader[2]
domain_dl_iter = iter(domain_dl)
str_code = 'train'
# data_iter = tqdm.tqdm(enumerate(aspect_dl),
# desc="EP_%s:%d \n" % (str_code, epoch),
# total=len(aspect_dl),
# bar_format="{l_bar}{r_bar}")
data_iter = tqdm.tqdm(enumerate(aspect_dl))
gold_prob = self.get_prob(epoch)
rnd = np.random.uniform()
if rnd < gold_prob:
gold_prob = np.ones((self.config.batch_size, self.overall_maxlen))
else:
gold_prob = np.zeros((self.config.batch_size, self.overall_maxlen))
total_loss, total_aspect_loss, total_senti_loss = 0, 0, 0
# 记录有多少个batch
bs = 0
for i, data in data_iter: # 一个batch
batch_x = data['x']
batch_y_ae = data['y_aspect'].to(self.device)
batch_y_as = data['y_sentiment'].to(self.device)
batch_y_op = data['y_opinion'].to(self.device)
batch_mask = data['y_mask'].to(self.device)
# 转换为词向量
bt_x_emb = self.embedding_matrix[batch_x].cuda()
# 前向传播
aspect_probs, sentiment_probs = self.imn_model.forward([bt_x_emb, batch_y_op, gold_prob], 'aspect_model')
# aspect_probs = aspect_probs.view([self.config.batch_size*self.overall_maxlen, self.nb_class])
# sentiment_probs = sentiment_probs.view([self.config.batch_size*self.overall_maxlen, self.nb_class])
# 计算损失
aspect_probs = aspect_probs.permute(0,2,1)
sentiment_probs = sentiment_probs.permute(0,2,1)
aspect_loss = self.compute_loss(aspect_probs, batch_y_ae,'NLLLoss')
senti_loss = self.compute_loss(sentiment_probs, batch_y_as, 'NLLLoss')
loss = aspect_loss + senti_loss
# 清空梯度,反向传播
# 在更新aspect model时需要注意固定住DS和DD任务相关的层
if self.config.use_doc == 1 and self.config.interactions > 0:
# fix the document-specific parameters when updating aspect model
for name, param in self.imn_model.named_parameters():
if 'DS' in name or 'DD' in name:
param.requires_grad = False
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# 累计loss
total_loss += loss.item()
total_aspect_loss += aspect_loss.item()
total_senti_loss += senti_loss.item()
# 复原以便在交替训练时可以更新DS/DD任务参数
if self.config.use_doc == 1 and self.config.interactions > 0:
# allow the document-specific parameters when updating doc model
for name, param in self.imn_model.named_parameters():
if 'DS' in name or 'DD' in name:
param.requires_grad = True
# 插入doc训练
if i%self.config.mr == 0 and self.config.use_doc:
senti_data = next(senti_dl_iter)
domain_data = next(domain_dl_iter)
x_1, y_1 = senti_data['x'], senti_data['y']
x_2, y_2 = domain_data['x'], domain_data['y']
x_1_emb = self.embedding_matrix[x_1].cuda()
y_1 = y_1.to(self.device)
x_2_emb = self.embedding_matrix[x_2].cuda()
y_2 = y_2.to(self.device)
doc_prob_1, doc_prob_2 = self.imn_model.forward([x_1_emb, x_2_emb], 'doc_model')
loss1 = self.compute_loss(doc_prob_1, y_1, 'NLLLoss')
loss2 = self.compute_loss(doc_prob_2, y_2, 'BCELoss')
loss = loss1 + loss2
# 反向传播
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
bs += 1
# 一个epoch结束
av_loss = total_loss/bs
av_as_loss = total_aspect_loss/bs
av_sen_loss = total_senti_loss/bs
print('aspect-level model train: Epoch %d, loss: %f, as_loss: %f, sen_loss: %f' % (epoch, av_loss, av_as_loss, av_sen_loss))
else:
# 测试阶段
aspect_dl = data_loader[0]
all_aspect_lab, all_aspect_pre, all_senti_lab, all_senti_pre, all_mask = [], [], [], [], []
total_loss, total_aspect_loss, total_senti_loss = 0, 0, 0
bs = 0
# 遍历所有数据
for i, data in enumerate(aspect_dl):
batch_x = data['x']
batch_y_op = data['y_opinion'].to(self.device)
batch_mask = data['y_mask'].to(self.device)
batch_y_ae = data['y_aspect'].to(self.device)
batch_y_as = data['y_sentiment'].to(self.device)
bt_x_emb = self.embedding_matrix[batch_x].cuda()
# 测试时直接使用预测的opinion信息
batch_gold_prob = np.zeros((batch_x.size()[0], self.overall_maxlen))
# 测试数据
aspect_probs, sentiment_probs = self.imn_model.forward([bt_x_emb, batch_y_op, batch_gold_prob],phrase='aspect_model')
# 对测试集仍然采用损失函数测试
aspect_probs = aspect_probs.permute(0,2,1)
sentiment_probs = sentiment_probs.permute(0,2,1)
aspect_loss = self.compute_loss(aspect_probs, batch_y_ae,'NLLLoss')
senti_loss = self.compute_loss(sentiment_probs, batch_y_as, 'NLLLoss')
loss = aspect_loss + senti_loss
# 累计loss
total_loss += loss.item()
total_aspect_loss += aspect_loss.item()
total_senti_loss += senti_loss.item()
# 计算auc
all_aspect_lab.extend(batch_y_ae)
all_senti_lab.extend(batch_y_as)
all_aspect_pre.extend(aspect_probs)
all_senti_pre.extend(sentiment_probs)
all_mask.extend(batch_mask)
bs += 1
all_aspect_lab = [_.tolist() for _ in all_aspect_lab]
all_senti_lab = [_.tolist() for _ in all_senti_lab]
all_aspect_pre = [_.tolist() for _ in all_aspect_pre]
all_senti_pre = [_.tolist() for _ in all_senti_pre]
all_mask = [_.tolist() for _ in all_mask]
# 计算得分还有问题!!
# f_aspect, f_opinion, acc_s, f_senti, f_absa \
# = self.get_metric(all_aspect_lab, all_aspect_pre, all_senti_lab, all_senti_pre, all_mask, self.config.train_op)
# print('Train aspect-level model: Epoch %d, f_aspect: %f, f_opinion: %f, f_senti: %f, f_absa: %f' % (epoch, f_aspect, f_opinion, f_senti, f_absa))
# 一个epoch结束
av_loss = total_loss/bs
av_as_loss = total_aspect_loss/bs
av_sen_loss = total_senti_loss/bs
print('aspect-level model test: Epoch %d, loss: %f, as_loss: %f, sen_loss: %f' % (epoch, av_loss, av_as_loss, av_sen_loss))
# return f_absa
return av_loss
def convert_to_list(self, y_aspect, y_sentiment, mask):
y_aspect_list = [] # 所有sentence的label构成的列表
y_sentiment_list = []
# 取出每个句子和该句子对应的mask
for seq_aspect, seq_sentiment, seq_mask in zip(y_aspect, y_sentiment, mask):
l_a = [] # 一个sentence的每个字对应的aspect_label
l_s = [] # 一个sentence的每个字对应的sentiment_label
# 取出每个字和该字对应的mask(该字是否是padding的)
for label_dist_a, label_dist_s, m in zip(seq_aspect, seq_sentiment, seq_mask):
if m == 0: # 是pandding,就不算
break
else:
# 对一个字的 aspect_label one-hot表示取argmax可以得到该字的label,例如1,2,3
l_a.append(np.argmax(label_dist_a))
### all entries are zeros means that it is a background word or word with conflict sentiment
### which are not counted for training AS
### also when evaluating, we do not count conflict examples
# 对一个字的 sentiment_label one-hot表示,如果全为0,说明是一个背景词或者中性词,不考虑
if not np.any(label_dist_s):
l_s.append(0)
else:
l_s.append(np.argmax(label_dist_s)+1)
y_aspect_list.append(l_a)
y_sentiment_list.append(l_s)
return y_aspect_list, y_sentiment_list
def score(self, true_aspect, predict_aspect, true_sentiment, predict_sentiment, train_op):
if train_op:
begin = 3
inside = 4
else:
begin = 1
inside = 2
# predicted sentiment distribution for aspect terms that are correctly extracted
pred_count = {'pos':0, 'neg':0, 'neu':0}
# gold sentiment distribution for aspect terms that are correctly extracted
rel_count = {'pos':0, 'neg':0, 'neu':0}
# sentiment distribution for terms that get both span and sentiment predicted correctly
correct_count = {'pos':0, 'neg':0, 'neu':0}
# sentiment distribution in original data
total_count = {'pos':0, 'neg':0, 'neu':0}
polarity_map = {1: 'pos', 2: 'neg', 3: 'neu'}
# count of predicted conflict aspect term
predicted_conf = 0
correct, predicted, relevant = 0, 0, 0
for i in range(len(true_aspect)):
true_seq = true_aspect[i]
predict = predict_aspect[i]
# 遍历句子的每一个字
for num in range(len(true_seq)):
if true_seq[num] == begin:
relevant += 1
if not train_op:
if true_sentiment[i][num]!=0:
total_count[polarity_map[true_sentiment[i][num]]]+=1
if predict[num] == begin:# 预测的aspect的起始位置是正确的
match = True
# 判断预测与真实aspect后续是否完全匹配正确
for j in range(num+1, len(true_seq)):
if true_seq[j] == inside and predict[j] == inside:
| |
# pognlp/vader_lexicon.py
"""The VADER default lexicon as a string.

VADER doesn't seem to import its default lexicon in a way that PyInstaller
can package. This is a cheap hack so we can pass it to our custom lexicon
loader.
"""
VADER_LEXICON = """$: -1.5 0.80623 [-1, -1, -1, -1, -3, -1, -3, -1, -2, -1]
%) -0.4 1.0198 [-1, 0, -1, 0, 0, -2, -1, 2, -1, 0]
%-) -1.5 1.43178 [-2, 0, -2, -2, -1, 2, -2, -3, -2, -3]
&-: -0.4 1.42829 [-3, -1, 0, 0, -1, -1, -1, 2, -1, 2]
&: -0.7 0.64031 [0, -1, -1, -1, 1, -1, -1, -1, -1, -1]
( '}{' ) 1.6 0.66332 [1, 2, 2, 1, 1, 2, 2, 1, 3, 1]
(% -0.9 0.9434 [0, 0, 1, -1, -1, -1, -2, -2, -1, -2]
('-: 2.2 1.16619 [4, 1, 4, 3, 1, 2, 3, 1, 2, 1]
(': 2.3 0.9 [1, 3, 3, 2, 2, 4, 2, 3, 1, 2]
((-: 2.1 0.53852 [2, 2, 2, 1, 2, 3, 2, 2, 3, 2]
(* 1.1 1.13578 [2, 1, 1, -1, 1, 2, 2, -1, 2, 2]
(-% -0.7 1.26886 [-1, 2, 0, -1, -1, -2, 0, 0, -3, -1]
(-* 1.3 1.26886 [4, 1, 2, 0, 2, -1, 1, 2, 1, 1]
(-: 1.6 0.8 [2, 2, 1, 3, 1, 1, 1, 3, 1, 1]
(-:0 2.8 0.87178 [3, 2, 3, 4, 3, 2, 3, 1, 4, 3]
(-:< -0.4 2.15407 [-3, 3, -1, -1, 2, -1, -2, 3, -3, -1]
(-:o 1.5 0.67082 [3, 1, 1, 2, 2, 2, 1, 1, 1, 1]
(-:O 1.5 0.67082 [3, 1, 1, 2, 2, 2, 1, 1, 1, 1]
(-:{ -0.1 1.57797 [-2, -3, 1, -2, 1, 1, 0, 0, 2, 1]
(-:|>* 1.9 0.83066 [3, 2, 2, 1, 0, 2, 3, 2, 2, 2]
(-; 1.3 1.18743 [3, 2, 3, 0, 1, -1, 1, 2, 1, 1]
(-;| 2.1 1.13578 [3, 2, 2, 4, 1, 1, 1, 4, 2, 1]
(8 2.6 1.0198 [4, 2, 1, 3, 3, 3, 3, 1, 2, 4]
(: 2.2 1.16619 [3, 1, 1, 2, 1, 2, 4, 3, 4, 1]
(:0 2.4 1.11355 [0, 2, 3, 4, 3, 2, 3, 3, 1, 3]
(:< -0.2 2.03961 [-2, -3, 1, 1, 2, -1, 2, 1, -4, 1]
(:o 2.5 0.92195 [3, 3, 1, 3, 3, 1, 2, 2, 4, 3]
(:O 2.5 0.92195 [3, 3, 1, 3, 3, 1, 2, 2, 4, 3]
(; 1.1 1.22066 [3, 1, 1, -1, 1, 2, 2, -1, 1, 2]
(;< 0.3 1.00499 [1, 2, -1, -1, 0, 0, 1, -1, 1, 1]
(= 2.2 1.16619 [3, 1, 2, 2, 1, 1, 4, 3, 4, 1]
(?: 2.1 0.83066 [2, 2, 1, 3, 2, 2, 4, 1, 2, 2]
(^: 1.5 0.67082 [1, 2, 2, 1, 3, 2, 1, 1, 1, 1]
(^; 1.5 0.5 [1, 2, 2, 1, 2, 1, 2, 1, 1, 2]
(^;0 2.0 0.7746 [2, 2, 1, 2, 1, 4, 2, 2, 2, 2]
(^;o 1.9 0.83066 [2, 2, 1, 2, 1, 4, 2, 1, 2, 2]
(o: 1.6 0.8 [2, 1, 3, 1, 1, 1, 2, 3, 1, 1]
)': -2.0 0.44721 [-2, -2, -2, -2, -1, -3, -2, -2, -2, -2]
)-': -2.1 0.53852 [-2, -2, -3, -2, -1, -2, -3, -2, -2, -2]
)-: -2.1 0.9434 [-3, -2, -4, -1, -3, -2, -2, -2, -1, -1]
)-:< -2.2 0.4 [-2, -2, -2, -2, -2, -2, -3, -3, -2, -2]
)-:{ -2.1 0.9434 [-1, -3, -2, -1, -2, -2, -3, -4, -1, -2]
): -1.8 0.87178 [-1, -3, -1, -2, -1, -3, -1, -3, -1, -2]
):< -1.9 0.53852 [-1, -3, -2, -2, -2, -1, -2, -2, -2, -2]
):{ -2.3 0.78102 [-1, -2, -3, -3, -2, -2, -4, -2, -2, -2]
);< -2.6 0.8 [-2, -2, -2, -3, -2, -3, -2, -2, -4, -4]
*) 0.6 1.42829 [1, -1, 1, -3, 1, 1, 2, 1, 1, 2]
*-) 0.3 1.61555 [1, -3, -2, 2, 1, 1, -1, 2, 1, 1]
*-: 2.1 1.51327 [2, 2, 4, 4, 2, 1, -1, 4, 1, 2]
*-; 2.4 1.62481 [2, 3, 4, 4, 2, 1, -1, 4, 1, 4]
*: 1.9 1.04403 [2, 1, 1, 3, 1, 2, 4, 3, 1, 1]
*<|:-) 1.6 1.28062 [0, 1, 3, 1, 1, 2, 3, 0, 4, 1]
*\0/* 2.3 1.00499 [2, 0, 3, 1, 3, 3, 2, 3, 3, 3]
*^: 1.6 1.42829 [2, 2, 1, 3, 2, 2, 3, 3, -1, -1]
,-: 1.2 0.4 [1, 1, 2, 1, 1, 1, 1, 1, 2, 1]
---'-;-{@ 2.3 1.18743 [0, 1, 3, 4, 2, 3, 2, 2, 2, 4]
--<--<@ 2.2 1.249 [0, 1, 2, 4, 2, 1, 3, 2, 3, 4]
.-: -1.2 0.4 [-1, -1, -1, -1, -1, -1, -2, -1, -2, -1]
..###-: -1.7 0.78102 [-2, -3, -3, -2, -1, -1, -1, -1, -1, -2]
..###: -1.9 1.04403 [-4, -1, -3, -1, -2, -2, -1, -3, -1, -1]
/-: -1.3 0.64031 [-1, -1, -1, -1, -1, -1, -1, -2, -3, -1]
/: -1.3 0.45826 [-2, -1, -1, -1, -2, -1, -1, -2, -1, -1]
/:< -1.4 0.4899 [-1, -2, -2, -1, -1, -1, -1, -1, -2, -2]
/= -0.9 0.53852 [-1, -1, -1, 0, -1, -2, -1, -1, -1, 0]
/^: -1.0 0.7746 [-2, -1, -2, 1, -1, -1, -1, -1, -1, -1]
/o: -1.4 0.66332 [0, -2, -1, -1, -2, -2, -1, -2, -1, -2]
0-8 0.1 1.44568 [2, -1, -2, 0, 2, 0, 2, 0, -2, 0]
0-| -1.2 0.4 [-2, -1, -1, -1, -1, -1, -1, -1, -2, -1]
0:) 1.9 1.04403 [2, 2, 2, 1, 0, 2, 4, 1, 3, 2]
0:-) 1.4 0.91652 [2, 1, 0, 1, 2, 3, 2, 1, 2, 0]
0:-3 1.5 0.92195 [2, 1, 0, 2, 2, 3, 2, 1, 2, 0]
0:03 1.9 1.22066 [2, 3, 2, 0, 0, 1, 4, 2, 3, 2]
0;^) 1.6 0.91652 [0, 1, 3, 1, 2, 1, 2, 1, 2, 3]
0_o -0.3 0.78102 [0, -2, 0, 1, 0, 0, -1, 0, -1, 0]
10q 2.1 1.22066 [1, 3, 1, 2, 1, 4, 3, 4, 1, 1]
1337 2.1 1.13578 [3, 1, 4, 0, 2, 3, 1, 2, 2, 3]
143 3.2 0.74833 [4, 4, 2, 3, 2, 3, 4, 3, 4, 3]
1432 2.6 0.8 [4, 3, 3, 2, 2, 4, 2, 2, 2, 2]
14aa41 2.4 0.91652 [3, 2, 2, 4, 2, 2, 1, 2, 4, 2]
182 -2.9 1.3 [-4, 0, -3, -3, -1, -3, -4, -4, -4, -3]
187 -3.1 1.22066 [-4, 0, -4, -3, -2, -4, -3, -3, -4, -4]
2g2b4g 2.8 0.6 [4, 2, 3, 2, 3, 3, 3, 3, 2, 3]
2g2bt -0.1 1.57797 [-1, 2, -1, 1, 0, 2, 0, -3, -2, 1]
2qt 2.1 0.83066 [3, 3, 3, 3, 2, 1, 2, 1, 2, 1]
3:( -2.2 0.87178 [-4, -3, -2, -3, -2, -1, -1, -2, -2, -2]
3:) 0.5 1.28452 [-2, 1, -2, 1, 1, 1, 1, 2, 1, 1]
3:-( -2.3 0.78102 [-2, -3, -2, -2, -2, -2, -4, -1, -3, -2]
3:-) -1.4 1.35647 [-1, -2, 1, 1, -2, -2, -3, -1, -3, -2]
4col -2.2 1.16619 [-2, -3, -1, -3, -4, -1, -2, -1, -4, -1]
4q -3.1 1.51327 [-3, -3, -4, -2, -4, -4, -4, 1, -4, -4]
5fs 1.5 1.11803 [1, 2, 1, 1, 2, 3, 2, 3, -1, 1]
8) 1.9 0.7 [2, 2, 2, 1, 1, 2, 2, 3, 3, 1]
8-d 1.7 0.64031 [1, 2, 0, 2, 2, 2, 2, 2, 2, 2]
8-o -0.3 0.78102 [1, -1, 0, 0, 0, -1, 0, -2, 0, 0]
86 -1.6 1.0198 [-1, -1, -1, -1, -1, -4, -1, -2, -1, -3]
8d 2.9 0.53852 [3, 3, 4, 2, 3, 3, 3, 3, 2, 3]
:###.. -2.4 0.91652 [-3, -2, -4, -3, -1, -2, -2, -3, -1, -3]
:$ -0.2 1.83303 [-2, -1, 0, 0, -1, 1, 4, -3, 1, -1]
:& -0.6 1.0198 [-2, -1, 0, 0, -1, -1, 1, -2, 1, -1]
:'( -2.2 0.74833 [-2, -1, -2, -2, -2, -2, -4, -3, -2, -2]
:') 2.3 0.78102 [3, 1, 3, 2, 2, 2, 2, 4, 2, 2]
:'-( -2.4 0.66332 [-2, -1, -2, -3, -2, -3, -3, -3, -2, -3]
:'-) 2.7 0.64031 [2, 1, 3, 3, 3, 3, 3, 3, 3, 3]
:( -1.9 1.13578 [-2, -3, -2, 0, -1, -1, -2, -3, -1, -4]
:) 2.0 1.18322 [2, 2, 1, 1, 1, 1, 4, 3, 4, 1]
:* 2.5 1.0247 [3, 2, 1, 1, 2, 3, 4, 3, 4, 2]
:-###.. -2.5 0.92195 [-3, -2, -3, -2, -4, -3, -1, -3, -1, -3]
:-& -0.5 0.92195 [-1, -1, 0, -1, -1, -1, -1, 0, 2, -1]
:-( -1.5 0.5 [-2, -1, -1, -1, -2, -2, -2, -1, -2, -1]
:-) 1.3 0.45826 [1, 1, 1, 1, 2, 1, 2, 1, 2, 1]
:-)) 2.8 1.07703 [3, 4, 4, 1, 2, 2, 4, 2, 4, 2]
:-* 1.7 0.64031 [1, 2, 1, 1, 1, 3, 2, 2, 2, 2]
:-, 1.1 0.53852 [1, 1, 1, 0, 1, 1, 1, 1, 2, 2]
:-. -0.9 0.53852 [-1, -1, 0, -1, 0, -1, -1, -1, -2, -1]
:-/ -1.2 0.6 [0, -1, -1, -1, -1, -2, -2, -1, -1, -2]
:-< -1.5 0.5 [-2, -1, -1, -2, -1, -2, -2, -1, -2, -1]
:-d 2.3 0.45826 [2, 2, 3, 3, 2, 3, 2, 2, 2, 2]
:-D 2.3 0.45826 [2, 2, 3, 3, 2, 3, 2, 2, 2, 2]
:-o 0.1 1.3 [2, -1, -2, 0, 1, 1, 2, 0, -1, -1]
:-p 1.2 0.4 [1, 2, 1, 1, 1, 1, 2, 1, 1, 1]
:-[ -1.6 0.4899 [-1, -2, -1, -2, -2, -1, -2, -1, -2, -2]
:-\ -0.9 0.3 [-1, -1, -1, -1, -1, -1, -1, 0, -1, -1]
:-c -1.3 0.45826 [-1, -1, -1, -2, -2, -1, -2, -1, -1, -1]
:-P 1.5 0.5 [1, 1, 1, 1, 1, 2, 2, 2, 2, 2]
:-| -0.7 0.64031 [-1, -1, 0, 0, 0, -1, -1, -2, 0, -1]
:-|| -2.5 0.67082 [-2, -2, -2, -3, -2, -3, -3, -2, -2, -4]
:-Þ 0.9 1.04403 [1, -1, 1, 2, 1, -1, 1, 2, 2, 1]
:/ -1.4 0.66332 [-1, -1, -1, -1, -1, -1, -3, -2, -2, -1]
:3 2.3 1.26886 [4, 1, 1, 1, 2, 2, 4, 3, 4, 1]
:< -2.1 0.7 [-3, -1, -2, -2, -2, -2, -3, -3, -2, -1]
:> 2.1 1.22066 [3, 1, 1, 1, 1, 2, 4, 3, 4, 1]
:?) 1.3 0.64031 [3, 1, 1, 1, 1, 2, 1, 1, 1, 1]
:?c -1.6 0.4899 [-1, -2, -1, -1, -2, -2, -1, -2, -2, -2]
:@ -2.5 0.80623 [-1, -3, -3, -2, -1, -3, -3, -3, -3, -3]
:d 2.3 1.1 [4, 2, 2, 1, 2, 1, 4, 3, 3, 1]
:D 2.3 1.1 [4, 2, 2, 1, 2, 1, 4, 3, 3, 1]
:l -1.7 0.9 [-1, -3, -1, -1, -1, -3, -2, -3, -1, -1]
:o -0.4 1.35647 [2, -1, -2, 0, 1, 0, -3, 0, | |
import codecs
import tempfile
from django.test import SimpleTestCase
from StringIO import StringIO
from corehq.util.spreadsheets.excel import WorkbookJSONReader
from couchexport.export import export_raw
from couchexport.models import Format
from corehq.apps.app_manager.const import APP_V2
from corehq.apps.app_manager.models import Application, Module
from corehq.apps.app_manager.tests.util import TestXmlMixin
from corehq.apps.app_manager.translations import \
process_bulk_app_translation_upload, expected_bulk_app_sheet_rows, \
expected_bulk_app_sheet_headers
class BulkAppTranslationTestBase(SimpleTestCase, TestXmlMixin):
    """Shared harness for bulk app translation upload tests.

    Subclasses set ``file_path`` (consumed by ``TestXmlMixin``) to a fixture
    directory containing ``app.json`` plus any workbooks/XML they need.
    """

    def setUp(self):
        """Instantiate an app from file_path + app.json"""
        super(BulkAppTranslationTestBase, self).setUp()
        self.app = Application.wrap(self.get_json("app"))

    def _assert_upload_messages(self, messages, expected_messages):
        # Each upload message is a (level, text) pair; compare only the text.
        self.assertListEqual([m[1] for m in messages], expected_messages)

    def upload_raw_excel_translations(self, excel_headers, excel_data, expected_messages=None):
        """
        Prepares bulk app translation excel file and uploads it

        Structure of the xlsx file can be specified as following

        excel_headers:
         (("employee", ("id", "name", "gender")),
          ("building", ("id", "name", "address")))

        excel_data:
         (("employee", (("1", "cory", "m"),
                        ("2", "christian", "m"),
                        ("3", "amelia", "f"))),
          ("building", (("1", "dimagi", "585 mass ave."),
                        ("2", "old dimagi", "529 main st."))))
        """
        if not expected_messages:
            expected_messages = ["App Translations Updated!"]

        # Named to avoid shadowing the Python 2 `file` builtin.
        workbook_buffer = StringIO()
        export_raw(excel_headers, excel_data, workbook_buffer, format=Format.XLS_2007)
        with tempfile.TemporaryFile(suffix='.xlsx') as f:
            f.write(workbook_buffer.getvalue())
            # Rewind so the processor reads the workbook from the start
            # rather than relying on the reader seeking for itself.
            f.seek(0)
            messages = process_bulk_app_translation_upload(self.app, f)
        self._assert_upload_messages(messages, expected_messages)

    def do_upload(self, name, expected_messages=None):
        """
        Upload the bulk app translation file at file_path + upload.xlsx

        Note: Use upload_raw_excel_translations() instead. It allows easy modifications
        and diffs of xlsx data.

        ToDo: Refactor tests using do_upload to use upload_raw_excel_translations(), use
          WorkbookJSONReader.work_book_headers_as_tuples(), and
          WorkbookJSONReader.work_book_data_as_tuples(), for making tuples from excel files
        """
        if not expected_messages:
            expected_messages = ["App Translations Updated!"]

        # codecs.open always opens in binary mode, which is what the
        # xlsx (zip) reader needs.
        with codecs.open(self.get_path(name, "xlsx")) as f:
            messages = process_bulk_app_translation_upload(self.app, f)
        self._assert_upload_messages(messages, expected_messages)

    def assert_question_label(self, text, module_id, form_id, language, question_path):
        """
        Assert that the given text equals the label of the given question.

        :param text: expected label text
        :param module_id: module index
        :param form_id: form index
        :param language: language code, e.g. "en"
        :param question_path: path to question (including "/data/")
        """
        form = self.app.get_module(module_id).get_form(form_id)
        # Collect labels for every (question, language) pair, then look up
        # the one we were asked about.
        labels = {}
        for lang in self.app.langs:
            for question in form.get_questions(
                    [lang], include_triggers=True, include_groups=True):
                labels[(question['value'], lang)] = question['label']
        self.assertEqual(
            labels[(question_path, language)],
            text
        )

    def assert_case_property_label(self, text, field, module_id, short_or_long, language):
        """Assert the case-detail column header for ``field`` in ``language``.

        ``short_or_long`` selects the case list ("short") or case detail
        ("long") configuration of the module.
        """
        module = self.app.get_module(module_id)
        cols = module.case_details[short_or_long].columns
        col = next(col for col in cols if col.field == field)
        self.assertEqual(text, col.header.get(language, None))
class BulkAppTranslationBasicTest(BulkAppTranslationTestBase):
    """Happy-path upload scenarios against the "basic" app fixture:
    a single module/form app with two languages (en, fra)."""

    # Fixture directory: data/bulk_app_translation/basic
    file_path = "data", "bulk_app_translation", "basic"

    # Well-formed workbook headers: one "Modules_and_forms" summary sheet,
    # one sheet per module, one sheet per form.
    upload_headers = (
        ("Modules_and_forms", (
            "Type", "sheet_name", "default_en", "default_fra", "label_for_cases_en", "label_for_cases_fra", 'icon_filepath_en', 'icon_filepath_fra', 'audio_filepath_en', 'audio_filepath_fra', "unique_id"
        )),
        ("module1", (
            "case_property", "list_or_detail", "default_en", "default_fra"
        )),
        ("module1_form1", (
            "label", "default_en", "default_fra", "audio_en", "audio_fra", "image_en", "image_fra", "video_en", "video_fra",
        ))
    )

    # Same headers but the form sheet misspells "default_fra" as
    # "default-fra"; exercises the bad-column warning path.
    upload_headers_bad_column = (  # bad column is default-fra
        ("Modules_and_forms", (
            "Type", "sheet_name", "default_en", "default_fra",
            "label_for_cases_en", "label_for_cases_fra", "icon_filepath_en", "icon_filepath_fra",
            "audio_filepath_en", "audio_filepath_fra", "unique_id"
        )),
        ("module1", (
            "case_property", "list_or_detail", "default_en", "default_fra"
        )),
        ("module1_form1", (
            "label", "default_en", "default-fra", "audio_en", "audio_fra",
            "image_en", "image_fra", "video_en", "video_fra",
        ))
    )

    # Workbook rows that modify names, case properties and question labels;
    # includes special characters, <output> refs and markdown edge cases.
    upload_data = (
        ("Modules_and_forms", (
            ("Module", "module1", "My & awesome module", "", "Cases", "Cases", "", "", "", "", "8f4f7085a93506cba4295eab9beae8723c0cee2a"),
            ("Form", "module1_form1", "My more & awesome form", "", "", "", "", "", "", "", "93ea2a40df57d8f33b472f5b2b023882281722d4")
        )),
        ("module1", (
            ("name", "list", "Name", "Nom"),
            ("name", "detail", "", "Nom"),
            ("other-prop (ID Mapping Text)", "detail", "Other Prop", ""),
            ("foo (ID Mapping Value)", "detail", "bar", "french bar"),
            ("baz (ID Mapping Value)", "detail", "quz", ""),
        )),
        ("module1_form1", (
            ("question1-label", "in english", "it's in french", "", "", "", "", "", ""),
            ("question2-label", "one < two", "un < deux", "", "", "", "", "", ""),
            ("question2-item1-label", "item1", "item1", "", "", "", "", "", ""),
            ("question2-item2-label", "item2", "item2", "", "", "", "", "", ""),
            ("question3-label", "question3", "question3's label", "", "", "", "", "", ""),
            ("question3/question4-label", 'question6: <output value="/data/question6"/>', 'question6: <output value="/data/question6"/>', "", "", "", "", "", ""),
            ("question3/question5-label", "English Label", "English Label", "", "", "", "", "", ""),
            ("question7-label", 'question1: <output value="/data/question1"/> < 5', "question7", "", "", "", "", "", ""),
            ('add_markdown-label', 'add_markdown: ~~new \u0939\u093f markdown~~', 'add_markdown: ~~new \u0939\u093f markdown~~', '', '', '', '', '', ''),
            ('remove_markdown-label', 'remove_markdown', 'remove_markdown', '', '', '', '', '', ''),
            ('update_markdown-label', '## smaller_markdown', '## smaller_markdown', '', '', '', '', '', ''),
            ('vetoed_markdown-label', '*i just happen to like stars a lot*', '*i just happen to like stars a lot*', '', '', '', '', '', ''),
        ))
    )

    # A workbook that exactly matches the app's current state -- uploading
    # it must leave the app unchanged.
    upload_no_change_headers = (
        ('Modules_and_forms', ('Type', 'sheet_name', 'default_en', 'default_fra', 'label_for_cases_en', 'label_for_cases_fra', 'icon_filepath_en', 'icon_filepath_fra', 'audio_filepath_en', 'audio_filepath_fra', 'unique_id')),
        ('module1', ('case_property', 'list_or_detail', 'default_en', 'default_fra')),
        ('module1_form1', ('label', 'default_en', 'default_fra', 'audio_en', 'audio_fra', 'image_en', 'image_fra', 'video_en', 'video_fra'))
    )
    upload_no_change_data = (
        ('Modules_and_forms',
         (('Module', 'module1', 'My & awesome module', '', 'Cases', 'Cases', '', '', '', '', '8f4f7085a93506cba4295eab9beae8723c0cee2a'),
          ('Form', 'module1_form1', 'My more & awesome form', '', '', '', '', '', '', '', '93ea2a40df57d8f33b472f5b2b023882281722d4'))),
        ('module1',
         (('name', 'list', 'Name', ''),
          ('name', 'detail', 'Name', ''),
          ('other-prop (ID Mapping Text)', 'detail', 'Other Prop', 'Autre Prop'),
          ('foo (ID Mapping Value)', 'detail', 'bar', ''),
          ('baz (ID Mapping Value)', 'detail', 'quz', ''))),
        ('module1_form1',
         (('question1-label', 'question1', 'question1', '', '', '', '', '', ''),
          ('question2-label', 'question2', 'question2', '', '', '', '', '', ''),
          ('question2-item1-label', 'item1', 'item1', '', '', '', '', '', ''),
          ('question2-item2-label', 'item2', 'item2', '', '', '', '', '', ''),
          ('question3-label', 'question3', 'question3', '', '', '', '', '', ''),
          ('question3/question4-label', 'question4', 'question4', '', '', '', '', '', ''),
          ('question3/question5-label', 'question5', 'question5', '', '', '', '', '', ''),
          ('question7-label', 'question7', 'question7', '', '', '', '', '', ''),
          ('add_markdown-label', 'add_markdown', 'add_markdown', '', '', '', '', '', ''),
          ('remove_markdown-label', 'remove_markdown: ~~remove this~~', 'remove_markdown: ~~remove this~~', '', '', '', '', '', ''),
          ('update_markdown-label', '# update_markdown', '# update_markdown', '', '', '', '', '', ''),
          ('vetoed_markdown-label', '*i just happen to like stars*', '*i just happen to like stars*', '', '', '', '', '', ''),
          ))
    )

    def test_set_up(self):
        # Sanity-check the fixture before any upload has run.
        self._shared_test_initial_set_up()

    def test_no_change_upload(self):
        # Uploading an unmodified download must be a no-op.
        self.upload_raw_excel_translations(self.upload_no_change_headers, self.upload_no_change_data)
        self._shared_test_initial_set_up()

    def _shared_test_initial_set_up(self):
        # Baseline assertions shared by test_set_up and test_no_change_upload.
        self.assert_question_label("question1", 0, 0, "en", "/data/question1")
        self.assert_case_property_label("Autre Prop", "other-prop", 0, "long", "fra")

    def test_change_upload(self):
        """Upload modified translations and verify every kind of change."""
        self.upload_raw_excel_translations(self.upload_headers, self.upload_data)

        self.assert_question_label("in english", 0, 0, "en", "/data/question1")
        self.assert_question_label("it's in french", 0, 0, "fra", "/data/question1")

        # Test that translations can be deleted.
        self.assert_question_label("English Label", 0, 0, "fra", "/data/question3/question5")
        self.assert_case_property_label(None, "other-prop", 0, "long", "fra")
        self.assert_case_property_label(None, "name", 0, "long", "en")

        # ID-mapping enum value and case-list header updates.
        module = self.app.get_module(0)
        self.assertEqual(
            module.case_details.long.columns[1].enum[0].value['fra'],
            'french bar'
        )
        self.assertEqual(
            module.case_details.short.columns[0].header['fra'],
            'Nom'
        )

        # Test special characters and output refs
        self.assert_question_label("one < two", 0, 0, "en", "/data/question2")
        self.assert_question_label("un < deux", 0, 0, "fra", "/data/question2")
        self.assert_question_label("question3's label", 0, 0, "fra", "/data/question3")
        self.assert_question_label("question6: ____", 0, 0, "en", "/data/question3/question4")
        self.assert_question_label("question1: ____ < 5", 0, 0, "en", "/data/question7")

        # Test markdown
        self.assert_question_label("add_markdown: ~~new \u0939\u093f markdown~~", 0, 0, "en", "/data/add_markdown")
        self.assert_question_label("remove_markdown", 0, 0, "en", "/data/remove_markdown")
        self.assert_question_label("## smaller_markdown", 0, 0, "en", "/data/update_markdown")
        self.assert_question_label("*i just happen to like stars a lot*", 0, 0, "en", "/data/vetoed_markdown")

        # Rendered xform should match the expected fixture end-to-end.
        form = self.app.get_module(0).get_form(0)
        self.assertXmlEqual(self.get_xml("change_upload_form"), form.render_xform())

    def test_missing_itext(self):
        # An app whose form has no itext block should still accept an
        # upload without raising.
        self.app = Application.wrap(self.get_json("app_no_itext"))
        self.assert_question_label('question1', 0, 0, "en", "/data/question1")
        try:
            self.upload_raw_excel_translations(self.upload_no_change_headers, self.upload_no_change_data)
        except Exception as e:
            self.fail(e)

    def test_bad_column_name(self):
        # A misspelled column is processed with warnings: one for the
        # missing expected column, one for the unrecognized one.
        self.upload_raw_excel_translations(
            self.upload_headers_bad_column,
            self.upload_data,
            expected_messages=[
                u'Sheet "module1_form1" has less columns than expected. Sheet '
                'will be processed but the following translations will be '
                'unchanged: default_fra',
                u'Sheet "module1_form1" has unrecognized columns. Sheet will '
                'be processed but ignoring the following columns: default-fra',
                u'App Translations Updated!'
            ]
        )
class MismatchedItextReferenceTest(BulkAppTranslationTestBase):
    """Cover the upload path where a question's itext reference in the
    xform body does not match the question's own id/path.

    The workbook used here is an unchanged download, so labels must
    round-trip untouched.
    """
    file_path = "data", "bulk_app_translation", "mismatched_ref"

    def test_unchanged_upload(self):
        # Download -> upload round trip should leave the label intact.
        self.do_upload("upload")
        self.assert_question_label("question2", 0, 0, "en", "/data/foo/question2")
class BulkAppTranslationFormTest(BulkAppTranslationTestBase):
    """Exercise form-level modifications applied via bulk upload."""

    file_path = "data", "bulk_app_translation", "form_modifications"

    def test_removing_form_translations(self):
        # Apply the "modifications" workbook, then check the rendered
        # xform against the expected fixture.
        self.do_upload("modifications")
        updated_form = self.app.get_module(0).get_form(0)
        self.assertXmlEqual(self.get_xml("expected_form"), updated_form.render_xform())
class BulkAppTranslationDownloadTest(SimpleTestCase, TestXmlMixin):
file_path = ('data', 'bulk_app_translation', 'download')
maxDiff = None
excel_headers = (
('Modules_and_forms', ('Type', 'sheet_name', 'default_en', 'label_for_cases_en', 'icon_filepath_en', 'audio_filepath_en', 'unique_id')),
('module1', ('case_property', 'list_or_detail', 'default_en')),
('module1_form1', ('label', 'default_en', 'audio_en', 'image_en', 'video_en'))
)
excel_data = (
('Modules_and_forms',
(('Module', 'module1', 'Stethoscope', 'Cases', 'jr://file/commcare/image/module0.png', '', '58ce5c9cf6eda401526973773ef216e7980bc6cc'),
('Form',
'module1_form1',
'Stethoscope Form',
'',
'jr://file/commcare/image/module0_form0.png',
'',
'c480ace490edc870ae952765e8dfacec33c69fec'))),
('module1', (('name', 'list', 'Name'), ('name', 'detail', 'Name'))),
('module1_form1',
(('What_does_this_look_like-label', 'What does this look like?', '', 'jr://file/commcare/image/data/What_does_this_look_like.png', ''),
('no_media-label', 'No media', '', '', ''),
('has_refs-label', 'Here is a ref <output value="/data/no_media"/> with some trailing text and "bad" < xml.', '', '', '')))
)
@classmethod
def setUpClass(cls):
cls.app = Application.wrap(cls.get_json("app"))
# Todo, refactor this into BulkAppTranslationTestBase.upload_raw_excel_translations
file = StringIO()
export_raw(cls.excel_headers, cls.excel_data, file, format=Format.XLS_2007)
with tempfile.TemporaryFile(suffix='.xlsx') as f:
f.write(file.getvalue())
wb_reader = WorkbookJSONReader(f)
cls.expected_workbook = [{'name': ws.title, 'rows': list(ws)}
for ws in wb_reader.worksheets]
def test_download(self):
actual_headers = expected_bulk_app_sheet_headers(self.app)
actual_rows = expected_bulk_app_sheet_rows(self.app)
actual_workbook = [
{'name': title,
'rows': [dict(zip(headers, row)) for row in actual_rows[title]]}
for title, | |
at normal incidence averaged over the visible spectrum range
| of solar radiation.
| Units: dimensionless
| value < 1.0
Args:
value (float): value for IDD Field `Front Side Slat Beam-Diffuse Visible Transmittance`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `front_side_slat_beamdiffuse_visible_transmittance` or None if not set
"""
return self["Front Side Slat Beam-Diffuse Visible Transmittance"]
@front_side_slat_beamdiffuse_visible_transmittance.setter
def front_side_slat_beamdiffuse_visible_transmittance(self, value=None):
    """Assign IDD field `Front Side Slat Beam-Diffuse Visible Transmittance`.

    Args:
        value (float): new field value; downstream validation may raise
            ValueError for out-of-range input.
    """
    self["Front Side Slat Beam-Diffuse Visible Transmittance"] = value
@property
def back_side_slat_beamdiffuse_visible_transmittance(self):
    """IDD field `Back Side Slat Beam-Diffuse Visible Transmittance`.

    Back-side beam-diffuse visible transmittance of the slat at normal
    incidence, averaged over the visible spectrum (dimensionless, < 1.0).

    Returns:
        float: the stored value, or None when the field is unset.
    """
    return self["Back Side Slat Beam-Diffuse Visible Transmittance"]

@back_side_slat_beamdiffuse_visible_transmittance.setter
def back_side_slat_beamdiffuse_visible_transmittance(self, value=None):
    """Assign IDD field `Back Side Slat Beam-Diffuse Visible Transmittance`.

    Args:
        value (float): new field value; downstream validation may raise
            ValueError for out-of-range input.
    """
    self["Back Side Slat Beam-Diffuse Visible Transmittance"] = value
@property
def front_side_slat_beamdiffuse_visible_reflectance(self):
    """IDD field `Front Side Slat Beam-Diffuse Visible Reflectance`.

    Front-side beam-diffuse visible reflectance of the slat at normal
    incidence, averaged over the visible spectrum (dimensionless, < 1.0).

    Returns:
        float: the stored value, or None when the field is unset.
    """
    return self["Front Side Slat Beam-Diffuse Visible Reflectance"]

@front_side_slat_beamdiffuse_visible_reflectance.setter
def front_side_slat_beamdiffuse_visible_reflectance(self, value=None):
    """Assign IDD field `Front Side Slat Beam-Diffuse Visible Reflectance`.

    Args:
        value (float): new field value; downstream validation may raise
            ValueError for out-of-range input.
    """
    self["Front Side Slat Beam-Diffuse Visible Reflectance"] = value
@property
def back_side_slat_beamdiffuse_visible_reflectance(self):
    """IDD field `Back Side Slat Beam-Diffuse Visible Reflectance`.

    Back-side beam-diffuse visible reflectance of the slat at normal
    incidence, averaged over the visible spectrum (dimensionless, < 1.0).

    Returns:
        float: the stored value, or None when the field is unset.
    """
    return self["Back Side Slat Beam-Diffuse Visible Reflectance"]

@back_side_slat_beamdiffuse_visible_reflectance.setter
def back_side_slat_beamdiffuse_visible_reflectance(self, value=None):
    """Assign IDD field `Back Side Slat Beam-Diffuse Visible Reflectance`.

    Args:
        value (float): new field value; downstream validation may raise
            ValueError for out-of-range input.
    """
    self["Back Side Slat Beam-Diffuse Visible Reflectance"] = value
@property
def slat_diffusediffuse_solar_transmittance(self):
    """IDD field `Slat Diffuse-Diffuse Solar Transmittance`.

    Diffuse-diffuse solar transmittance of the slat, averaged over the
    entire solar spectrum (dimensionless, < 1.0).

    Returns:
        float: the stored value, or None when the field is unset.
    """
    return self["Slat Diffuse-Diffuse Solar Transmittance"]

@slat_diffusediffuse_solar_transmittance.setter
def slat_diffusediffuse_solar_transmittance(self, value=None):
    """Assign IDD field `Slat Diffuse-Diffuse Solar Transmittance`.

    Args:
        value (float): new field value; downstream validation may raise
            ValueError for out-of-range input.
    """
    self["Slat Diffuse-Diffuse Solar Transmittance"] = value
@property
def front_side_slat_diffusediffuse_solar_reflectance(self):
    """IDD field `Front Side Slat Diffuse-Diffuse Solar Reflectance`.

    Front-side diffuse-diffuse solar reflectance of the slat, averaged
    over the entire solar spectrum (dimensionless, < 1.0).

    Returns:
        float: the stored value, or None when the field is unset.
    """
    return self["Front Side Slat Diffuse-Diffuse Solar Reflectance"]

@front_side_slat_diffusediffuse_solar_reflectance.setter
def front_side_slat_diffusediffuse_solar_reflectance(self, value=None):
    """Assign IDD field `Front Side Slat Diffuse-Diffuse Solar Reflectance`.

    Args:
        value (float): new field value; downstream validation may raise
            ValueError for out-of-range input.
    """
    self["Front Side Slat Diffuse-Diffuse Solar Reflectance"] = value
@property
def back_side_slat_diffusediffuse_solar_reflectance(self):
    """IDD field `Back Side Slat Diffuse-Diffuse Solar Reflectance`.

    Back-side diffuse-diffuse solar reflectance of the slat, averaged
    over the entire solar spectrum (dimensionless, < 1.0).

    Returns:
        float: the stored value, or None when the field is unset.
    """
    return self["Back Side Slat Diffuse-Diffuse Solar Reflectance"]

@back_side_slat_diffusediffuse_solar_reflectance.setter
def back_side_slat_diffusediffuse_solar_reflectance(self, value=None):
    """Assign IDD field `Back Side Slat Diffuse-Diffuse Solar Reflectance`.

    Args:
        value (float): new field value; downstream validation may raise
            ValueError for out-of-range input.
    """
    self["Back Side Slat Diffuse-Diffuse Solar Reflectance"] = value
@property
def slat_diffusediffuse_visible_transmittance(self):
    """IDD field `Slat Diffuse-Diffuse Visible Transmittance`.

    Diffuse-diffuse visible transmittance of the slat, averaged over the
    visible spectrum range of solar radiation (value < 1.0).

    Returns:
        float: the stored value, or None when the field is unset.
    """
    return self["Slat Diffuse-Diffuse Visible Transmittance"]

@slat_diffusediffuse_visible_transmittance.setter
def slat_diffusediffuse_visible_transmittance(self, value=None):
    """Assign IDD field `Slat Diffuse-Diffuse Visible Transmittance`.

    Args:
        value (float): new field value; downstream validation may raise
            ValueError for out-of-range input.
    """
    self["Slat Diffuse-Diffuse Visible Transmittance"] = value
@property
def front_side_slat_diffusediffuse_visible_reflectance(self):
    """IDD field `Front Side Slat Diffuse-Diffuse Visible Reflectance`.

    Front-side diffuse-diffuse visible reflectance of the slat, averaged
    over the visible spectrum range (dimensionless, < 1.0).

    Returns:
        float: the stored value, or None when the field is unset.
    """
    return self["Front Side Slat Diffuse-Diffuse Visible Reflectance"]

@front_side_slat_diffusediffuse_visible_reflectance.setter
def front_side_slat_diffusediffuse_visible_reflectance(self, value=None):
    """Assign IDD field `Front Side Slat Diffuse-Diffuse Visible Reflectance`.

    Args:
        value (float): new field value; downstream validation may raise
            ValueError for out-of-range input.
    """
    self["Front Side Slat Diffuse-Diffuse Visible Reflectance"] = value
@property
def back_side_slat_diffusediffuse_visible_reflectance(self):
    """IDD field `Back Side Slat Diffuse-Diffuse Visible Reflectance`.

    Back-side diffuse-diffuse visible reflectance of the slat, averaged
    over the visible spectrum range (dimensionless, < 1.0).

    Returns:
        float: the stored value, or None when the field is unset.
    """
    return self["Back Side Slat Diffuse-Diffuse Visible Reflectance"]

@back_side_slat_diffusediffuse_visible_reflectance.setter
def back_side_slat_diffusediffuse_visible_reflectance(self, value=None):
    """Assign IDD field `Back Side Slat Diffuse-Diffuse Visible Reflectance`.

    Args:
        value (float): new field value; downstream validation may raise
            ValueError for out-of-range input.
    """
    self["Back Side Slat Diffuse-Diffuse Visible Reflectance"] = value
@property
def slat_infrared_transmittance(self):
    """IDD field `Slat Infrared Transmittance`.

    Long-wave hemispherical transmittance of the slat material; assumed
    identical for both sides of the slat (value < 1.0).

    Returns:
        float: the stored value, or None when the field is unset.
    """
    return self["Slat Infrared Transmittance"]

@slat_infrared_transmittance.setter
def slat_infrared_transmittance(self, value=None):
    """Assign IDD field `Slat Infrared Transmittance`.

    Args:
        value (float): new field value; downstream validation may raise
            ValueError for out-of-range input.
    """
    self["Slat Infrared Transmittance"] = value
@property
def front_side_slat_infrared_emissivity(self):
    """IDD field `Front Side Slat Infrared Emissivity`.

    Front-side long-wave hemispherical emissivity of the slat material
    (dimensionless, < 1.0; IDD default 0.9).

    Returns:
        float: the stored value, or None when the field is unset.
    """
    return self["Front Side Slat Infrared Emissivity"]

@front_side_slat_infrared_emissivity.setter
def front_side_slat_infrared_emissivity(self, value=0.9):
    """Assign IDD field `Front Side Slat Infrared Emissivity`.

    Args:
        value (float): new field value, default 0.9; downstream
            validation may raise ValueError for out-of-range input.
    """
    self["Front Side Slat Infrared Emissivity"] = value
@property
def back_side_slat_infrared_emissivity(self):
    """IDD field `Back Side Slat Infrared Emissivity`.

    Back-side long-wave hemispherical emissivity of the slat material
    (dimensionless, < 1.0; IDD default 0.9).

    Returns:
        float: the stored value, or None when the field is unset.
    """
    return self["Back Side Slat Infrared Emissivity"]

@back_side_slat_infrared_emissivity.setter
def back_side_slat_infrared_emissivity(self, value=0.9):
    """Assign IDD field `Back Side Slat Infrared Emissivity`.

    Args:
        value (float): new field value, default 0.9; downstream
            validation may raise ValueError for out-of-range input.
    """
    self["Back Side Slat Infrared Emissivity"] = value
@property
def slat_angle_control(self):
"""field `Slat Angle Control`
| Used only if slat angle control is desired to either maximize solar
| gain (MaximizeSolar), maximize visibility while eliminating beam solar
| radiation (BlockBeamSolar), | |
import numpy as np
import copy
import warnings
from bnpy.util.StateSeqUtil import calcContigBlocksFromZ
from bnpy.data.XData import XData
def proposeNewResp_randBlocks(Z_n, propResp,
                              origK=0,
                              PRNG=np.random.RandomState,
                              Kfresh=3,
                              minBlockSize=1,
                              maxBlockSize=10,
                              **kwargs):
    ''' Create new value of resp matrix with randomly-placed new blocks.

    We create Kfresh new blocks in total, each owned by a brand-new state.
    Each block zeroes out the original origK states over its extent, so it
    can potentially wipe out some (or all) of previous blocks.

    Args
    ----
    Z_n : 1D array, size T
        current hard assignments (only its length is used here)
    propResp : 2D array, size T x Kmax
        responsibility matrix, edited in place
    origK : int
        number of pre-existing states (columns 0..origK-1)
    PRNG : instantiated numpy RandomState (callers must pass one;
        the class-object default is not directly usable)
    Kfresh : int
        number of new blocks / new states to create
    minBlockSize : int
        inclusive lower bound on each new block's length
    maxBlockSize : int
        inclusive upper bound on each new block's length

    Returns
    -------
    propResp : 2D array of size N x Kmax
    propK : int
        total number of states used in propResp array
    '''
    T = Z_n.size
    # Degenerate case: cannot place even the smallest allowed block.
    if minBlockSize >= T:
        return propResp, origK
    # A block can never be longer than the sequence itself.
    maxBlockSize = np.minimum(maxBlockSize, T)
    for kfresh in range(Kfresh):
        # +1 because np's randint excludes the high endpoint; this makes
        # maxBlockSize actually reachable (matching the inclusive start-index
        # draw below) and avoids ValueError when minBlockSize == maxBlockSize.
        blockSize = PRNG.randint(minBlockSize, maxBlockSize + 1)
        start = PRNG.randint(0, T - blockSize + 1)
        stop = start + blockSize
        # Existing states lose all responsibility over this block;
        # the new state origK + kfresh claims it outright.
        propResp[start:stop, :origK] = 0
        propResp[start:stop, origK + kfresh] = 1
    return propResp, origK + Kfresh
def proposeNewResp_bisectExistingBlocks(Z_n, propResp,
                                        Data_n=None,
                                        tempModel=None,
                                        origK=0,
                                        PRNG=np.random.RandomState,
                                        Kfresh=3,
                                        PastAttemptLog=dict(),
                                        **kwargs):
    ''' Create new value of resp matrix with randomly-placed new blocks.

    We create Kfresh new blocks in total.
    Each one can potentially wipe out some (or all) of previous blocks.

    NOTE(review): PastAttemptLog has a mutable default, so calls that omit
    the argument share one dict across invocations. Since this function
    explicitly reads previous failed attempts out of the log, the shared
    default looks deliberate (cross-call memory) -- confirm before changing.
    NOTE(review): when the 'byState' strategy is active, PastAttemptLog must
    already contain 'uIDs' (1D array) and 'maxUID'; otherwise a KeyError is
    raised here.

    Returns
    -------
    propResp : 2D array of size N x Kmax
    propK : int
        total number of states used in propResp array
    '''
    # Iterate over current contig blocks
    blockSizes, blockStarts, blockStates = \
        calcContigBlocksFromZ(Z_n, returnStates=1)
    nBlocks = len(blockSizes)
    if 'blocks' not in PastAttemptLog:
        PastAttemptLog['blocks'] = dict()
    if 'strategy' not in PastAttemptLog:
        PastAttemptLog['strategy'] = 'byState'
        # PastAttemptLog['strategy'] = PRNG.choice(
        #     ['byState', 'bySize'])
    if PastAttemptLog['strategy'] == 'byState':
        # Extend the uID bookkeeping if new states appeared since last call.
        Kcur = blockStates.max() + 1
        Kextra = Kcur - PastAttemptLog['uIDs'].size
        if Kextra > 0:
            maxUID = PastAttemptLog['maxUID']
            uIDs = PastAttemptLog['uIDs']
            for extraPos in range(Kextra):
                maxUID += 1
                uIDs = np.append(uIDs, maxUID)
            PastAttemptLog['maxUID'] = maxUID
            PastAttemptLog['uIDs'] = uIDs
        # Candidate states = unique states present in this sequence,
        # identified by their stable uIDs.
        candidateStateUIDs = set()
        for state in np.unique(blockStates):
            uid = PastAttemptLog['uIDs'][state]
            candidateStateUIDs.add(uid)
        if 'nTryByStateUID' not in PastAttemptLog:
            PastAttemptLog['nTryByStateUID'] = dict()
        # minTry = fewest recorded attempts among candidates already in the log.
        minTry = np.inf
        for badState, nTry in list(PastAttemptLog['nTryByStateUID'].items()):
            if badState in candidateStateUIDs:
                if nTry < minTry:
                    minTry = nTry
        # Prefer states never tried before, or tied for the fewest tries.
        untriedList = [x for x in candidateStateUIDs
                       if x not in PastAttemptLog['nTryByStateUID'] or
                       PastAttemptLog['nTryByStateUID'][x] == minTry]
        if len(untriedList) > 0:
            candidateStateUIDs = untriedList
        else:
            # Keep only candidates that have been tried the least
            for badState, nTry in list(PastAttemptLog['nTryByStateUID'].items()):
                # Remove bad State from candidateStateUIDs
                if badState in candidateStateUIDs:
                    if nTry > minTry:
                        candidateStateUIDs.remove(badState)
            candidateStateUIDs = np.asarray([x for x in candidateStateUIDs])
        # Pick a state that we haven't tried yet,
        # uniformly at random
        if len(candidateStateUIDs) > 0:
            chosenStateUID = PRNG.choice(np.asarray(candidateStateUIDs))
            chosenState = np.flatnonzero(
                chosenStateUID == PastAttemptLog['uIDs'])[0]
            chosen_mask = blockStates == chosenState
            chosenBlockIDs = np.flatnonzero(chosen_mask)
            if chosenBlockIDs.size > 1:
                # Favor blocks assigned to this state that are larger
                # NOTE(review): in-place `/=` assumes blockSizes has a float
                # dtype; an integer array would raise a casting error here.
                p = blockSizes[chosen_mask].copy()
                p /= p.sum()
                # Keep at most Kfresh of the chosen state's blocks; blocks of
                # this state NOT selected drop out of `order` entirely.
                chosenBlockIDs = PRNG.choice(chosenBlockIDs,
                                             size=np.minimum(
                                                 Kfresh,
                                                 len(chosenBlockIDs)),
                                             p=p, replace=False)
            remBlockIDs = np.flatnonzero(np.logical_not(chosen_mask))
            PRNG.shuffle(remBlockIDs)
            # Visit the targeted state's blocks first, then the rest.
            order = np.hstack([
                chosenBlockIDs,
                remBlockIDs
            ])
        else:
            # Just use the block sizes and starts in random order
            order = PRNG.permutation(blockSizes.size)
        blockSizes = blockSizes[order]
        blockStarts = blockStarts[order]
        blockStates = blockStates[order]
    else:
        # 'bySize' strategy: visit blocks from largest to smallest.
        sortOrder = np.argsort(-1 * blockSizes)
        blockSizes = blockSizes[sortOrder]
        blockStarts = blockStarts[sortOrder]
        blockStates = blockStates[sortOrder]
    # Recompute: the reordering above may have dropped unselected blocks.
    nBlocks = len(blockSizes)
    kfresh = 0  # number of new states added
    for blockID in range(nBlocks):
        if kfresh >= Kfresh:
            break
        a = blockStarts[blockID]
        b = blockStarts[blockID] + blockSizes[blockID]
        # Avoid overlapping with previous attempts that failed
        maxOverlapWithPreviousFailure = 0.0
        for (preva, prevb), prevm in list(PastAttemptLog['blocks'].items()):
            # skip previous attempts that succeed
            if prevm > preva:
                continue
            Tunion = np.maximum(b, prevb) - np.minimum(a, preva)
            minb = np.minimum(b, prevb)
            maxa = np.maximum(a, preva)
            if maxa < minb:
                Tintersect = minb - maxa
            else:
                # Disjoint intervals: IoU is zero, skip straight to the next
                # logged attempt (the assignment below is effectively dead).
                Tintersect = 0
                continue
            IoU = Tintersect / float(Tunion)
            maxOverlapWithPreviousFailure = np.maximum(
                maxOverlapWithPreviousFailure, IoU)
        if maxOverlapWithPreviousFailure > 0.95:
            # print 'SKIPPING BLOCK %d,%d with overlap %.2f' % (
            #     a, b, maxOverlapWithPreviousFailure)
            continue
        # Search roughly 25 candidate cut points across the block.
        stride = int(np.ceil((b - a) / 25.0))
        stride = np.maximum(1, stride)
        # Random offset so repeated attempts probe different cut points.
        # NOTE(review): `a` is shifted AFTER the overlap test above, so the
        # logged key (a, b) includes the offset -- confirm intended.
        offset = PRNG.choice(np.arange(stride))
        a += offset
        bestm = findBestCutForBlock(Data_n, tempModel,
                                    a=a,
                                    b=b,
                                    stride=stride)
        # bestm == a is the sentinel for "no beneficial cut found".
        PastAttemptLog['blocks'][(a, b)] = bestm
        print('TARGETING UID: ', PastAttemptLog['uIDs'][blockStates[blockID]])
        print('BEST BISECTION CUT: [%4d, %4d, %4d] w/ stride %d' % (
            a, bestm, b, stride))
        curUID = PastAttemptLog['uIDs'][blockStates[blockID]]
        if bestm == a:
            # Failed attempt: bump this state's try-counter.
            if curUID in PastAttemptLog['nTryByStateUID']:
                PastAttemptLog['nTryByStateUID'][curUID] += 1
            else:
                PastAttemptLog['nTryByStateUID'][curUID] = 1
        else:
            PastAttemptLog['nTryByStateUID'][curUID] = 0  # success!
        if bestm == a:
            # No cut: replace the whole window with one new state.
            propResp[a:b, :origK] = 0
            propResp[a:b, origK + kfresh] = 1
            kfresh += 1
        else:
            # Bisect: left half gets one new state, right half another
            # (if the fresh-state budget allows).
            propResp[a:bestm, :origK] = 0
            propResp[a:bestm, origK + kfresh] = 1
            kfresh += 1
            if kfresh >= Kfresh:
                break
            propResp[bestm:b, :origK] = 0
            propResp[bestm:b, origK + kfresh] = 1
            kfresh += 1
    return propResp, origK + kfresh
def proposeNewResp_bisectGrownBlocks(Z_n, propResp,
Data_n=None,
tempModel=None,
origK=0,
PRNG=np.random.RandomState,
Kfresh=3,
growthBlockSize=10,
PastAttemptLog=dict(),
**kwargs):
''' Create new value of resp matrix with randomly-placed new blocks.
We create Kfresh new blocks in total.
Each one can potentially wipe out some (or all) of previous blocks.
Returns
-------
propResp : 2D array of size N x Kmax
propK : int
total number of states used in propResp array
'''
# Iterate over current contig blocks
blockSizes, blockStarts, blockStates = \
calcContigBlocksFromZ(Z_n, returnStates=1)
nBlocks = len(blockSizes)
if 'blocks' not in PastAttemptLog:
PastAttemptLog['blocks'] = dict()
if 'strategy' not in PastAttemptLog:
PastAttemptLog['strategy'] = 'byState'
# PastAttemptLog['strategy'] = PRNG.choice(
# ['byState', 'bySize'])
if PastAttemptLog['strategy'] == 'byState':
Kcur = blockStates.max() + 1
Kextra = Kcur - PastAttemptLog['uIDs'].size
if Kextra > 0:
maxUID = PastAttemptLog['maxUID']
uIDs = PastAttemptLog['uIDs']
for extraPos in range(Kextra):
maxUID += 1
uIDs = np.append(uIDs, maxUID)
PastAttemptLog['maxUID'] = maxUID
PastAttemptLog['uIDs'] = uIDs
candidateStateUIDs = set()
for state in np.unique(blockStates):
uid = PastAttemptLog['uIDs'][state]
candidateStateUIDs.add(uid)
if 'nTryByStateUID' not in PastAttemptLog:
PastAttemptLog['nTryByStateUID'] = dict()
minTry = np.inf
for badState, nTry in list(PastAttemptLog['nTryByStateUID'].items()):
if badState in candidateStateUIDs:
if nTry < minTry:
minTry = nTry
untriedList = [x for x in candidateStateUIDs
if x not in PastAttemptLog['nTryByStateUID'] or
PastAttemptLog['nTryByStateUID'][x] == 0]
if len(untriedList) > 0:
candidateStateUIDs = untriedList
else:
# Keep only candidates that have been tried the least
for badState, nTry in list(PastAttemptLog['nTryByStateUID'].items()):
# Remove bad State from candidateStateUIDs
if badState in candidateStateUIDs:
if nTry > minTry:
candidateStateUIDs.remove(badState)
candidateStateUIDs = np.asarray([x for x in candidateStateUIDs])
# Pick a state that we haven't tried yet,
# uniformly at random
if len(candidateStateUIDs) > 0:
chosenStateUID = PRNG.choice(np.asarray(candidateStateUIDs))
chosenState = np.flatnonzero(
chosenStateUID == PastAttemptLog['uIDs'])[0]
chosen_mask = blockStates == chosenState
chosenBlockIDs = np.flatnonzero(chosen_mask)
if chosenBlockIDs.size > 1:
# Favor blocks assigned to this state that are larger
p = blockSizes[chosen_mask].copy()
p /= p.sum()
chosenBlockIDs = PRNG.choice(chosenBlockIDs,
size=np.minimum(
Kfresh,
len(chosenBlockIDs)),
p=p, replace=False)
remBlockIDs = np.flatnonzero(np.logical_not(chosen_mask))
PRNG.shuffle(remBlockIDs)
order = np.hstack([
chosenBlockIDs,
remBlockIDs
])
else:
# Just use the block sizes and starts in random order
order = PRNG.permutation(blockSizes.size)
blockSizes = blockSizes[order]
blockStarts = blockStarts[order]
blockStates = blockStates[order]
else:
sortOrder = np.argsort(-1 * blockSizes)
blockSizes = blockSizes[sortOrder]
blockStarts = blockStarts[sortOrder]
blockStates = blockStates[sortOrder]
nBlocks = len(blockSizes)
kfresh = 0 # number of new states added
for blockID in range(nBlocks):
if kfresh >= Kfresh:
break
a = blockStarts[blockID]
b = blockStarts[blockID] + blockSizes[blockID]
# Avoid overlapping with previous attempts that failed
maxOverlapWithPreviousFailure = 0.0
for (preva, prevb), prevm in list(PastAttemptLog['blocks'].items()):
# skip previous attempts that succeed
if prevm > preva:
continue
Tunion = np.maximum(b, prevb) - np.minimum(a, preva)
minb = np.minimum(b, prevb)
maxa = np.maximum(a, preva)
if maxa < minb:
Tintersect = minb - maxa
else:
Tintersect = 0
continue
IoU = Tintersect / float(Tunion)
maxOverlapWithPreviousFailure = np.maximum(
maxOverlapWithPreviousFailure, IoU)
if maxOverlapWithPreviousFailure > 0.95:
continue
stride = int(np.ceil((b - a) / 25.0))
stride = np.maximum(1, stride)
# If we've tried this state before and FAILED,
# maybe its time to randomly grow this block outwards
curUID = PastAttemptLog['uIDs'][blockStates[blockID]]
if curUID in PastAttemptLog['nTryByStateUID']:
nFail = PastAttemptLog['nTryByStateUID'][curUID]
if nFail > 0:
growthPattern = PRNG.choice(
['left', 'right', 'leftandright', 'none'])
newa = a
newb = b
if growthPattern.count('left'):
newa = a - PRNG.randint(1, growthBlockSize)
newa = np.maximum(newa, 0)
if growthPattern.count('right'):
newb = b + PRNG.randint(1, growthBlockSize)
newb = np.minimum(newb, Data_n.nObs)
a = newa
b = newb
bestm = findBestCutForBlock(Data_n, tempModel,
a=a,
b=b,
stride=stride)
PastAttemptLog['blocks'][(a, b)] = bestm
print('TARGETING UID: ', PastAttemptLog['uIDs'][blockStates[blockID]])
| |
# py_vollib_vectorized/greeks.py (from marcdemers/py_vollib_vectorized)
import numpy as np
import pandas as pd
from ._numerical_greeks import numerical_delta_black, numerical_theta_black, \
numerical_vega_black, numerical_rho_black, numerical_gamma_black
from ._numerical_greeks import numerical_delta_black_scholes, numerical_theta_black_scholes, \
numerical_vega_black_scholes, numerical_rho_black_scholes, numerical_gamma_black_scholes
from ._numerical_greeks import numerical_delta_black_scholes_merton, numerical_theta_black_scholes_merton, \
numerical_vega_black_scholes_merton, numerical_rho_black_scholes_merton, numerical_gamma_black_scholes_merton
from .util.data_format import _preprocess_flags, maybe_format_data_and_broadcast, _validate_data
def delta(flag, S, K, t, r, sigma, q=None, *, model="black_scholes", return_as="dataframe", dtype=np.float64):
    """
    Compute the delta of each contract under the pricing model `model`.
    All inputs are broadcast against one another.

    :param flag: For each contract, `c` for a call option and `p` for a put option.
    :param S: The price of the underlying asset.
    :param K: The strike price.
    :param t: The annualized time to expiration. Must be positive. For small TTEs, use a small value (1e-3).
    :param r: The Interest Free Rate.
    :param sigma: The Implied Volatility.
    :param q: The annualized continuous dividend yield (required for 'black_scholes_merton').
    :param model: One of 'black', 'black_scholes' or 'black_scholes_merton'.
    :param return_as: "series" returns a :obj:`pd.Series`, "dataframe" a :obj:`pd.DataFrame`; any other value returns a :obj:`numpy.array`.
    :param dtype: Data type.
    :return: :obj:`pd.Series`, :obj:`pd.DataFrame` or :obj:`numpy.array` object containing the delta for each contract.
    :raises ValueError: on an unknown `model`, or if `q` is missing for 'black_scholes_merton'.

    >>> import py_vollib.black_scholes.greeks.numerical
    >>> import py_vollib_vectorized
    >>> flag = ['c', 'p']
    >>> S = 95
    >>> K = [100, 90]
    >>> t = .2
    >>> r = .2
    >>> sigma = .2
    >>> py_vollib.black_scholes.greeks.numerical.delta(flag, S, K, t, r, sigma, return_as='numpy')
    array([ 0.46750566, -0.1364465 ])
    >>> py_vollib_vectorized.vectorized_delta(flag, S, K, t, r, sigma, model='black_scholes', return_as='numpy')  # equivalent
    array([ 0.46750566, -0.1364465 ])
    """
    # Normalize flags, broadcast every input to a common shape, sanity-check.
    flag = _preprocess_flags(flag, dtype=dtype)
    S, K, t, r, sigma, flag = maybe_format_data_and_broadcast(S, K, t, r, sigma, flag, dtype=dtype)
    _validate_data(flag, S, K, t, r, sigma)

    # Dispatch on model; the final positional argument is the cost of carry b.
    if model == "black":
        values = numerical_delta_black(flag, S, K, t, r, sigma, 0)
    elif model == "black_scholes":
        values = numerical_delta_black_scholes(flag, S, K, t, r, sigma, r)
    elif model == "black_scholes_merton":
        if q is None:
            raise ValueError("Must pass a `q` to black scholes merton model (annualized continuous dividend yield).")
        q = maybe_format_data_and_broadcast(q, dtype=dtype)[0]
        # Re-broadcast so `q` matches the shape of the other inputs.
        S, K, t, r, sigma, q = maybe_format_data_and_broadcast(S, K, t, r, sigma, q,
                                                               dtype=dtype)
        _validate_data(r, q)
        values = numerical_delta_black_scholes_merton(flag, S, K, t, r, sigma, r - q)
    else:
        raise ValueError("Model must be one of: `black`, `black_scholes`, `black_scholes_merton`")

    values = np.ascontiguousarray(values)
    if return_as == "series":
        return pd.Series(values, name="delta")
    if return_as == "dataframe":
        return pd.DataFrame(values, columns=["delta"])
    return values
def theta(flag, S, K, t, r, sigma, q=None, *, model="black_scholes", return_as="dataframe", dtype=np.float64):
    """
    Compute the theta of each contract under the pricing model `model`.
    All inputs are broadcast against one another.

    :param flag: For each contract, `c` for a call option and `p` for a put option.
    :param S: The price of the underlying asset.
    :param K: The strike price.
    :param t: The annualized time to expiration. Must be positive. For small TTEs, use a small value (1e-3).
    :param r: The Interest Free Rate.
    :param sigma: The Implied Volatility.
    :param q: The annualized continuous dividend yield (required for 'black_scholes_merton').
    :param model: One of 'black', 'black_scholes' or 'black_scholes_merton'.
    :param return_as: "series" returns a :obj:`pd.Series`, "dataframe" a :obj:`pd.DataFrame`; any other value returns a :obj:`numpy.array`.
    :param dtype: Data type.
    :return: :obj:`pd.Series`, :obj:`pd.DataFrame` or :obj:`numpy.array` object containing the theta for each contract.
    :raises ValueError: on an unknown `model`, or if `q` is missing for 'black_scholes_merton'.

    >>> import py_vollib.black_scholes.greeks.numerical
    >>> import py_vollib_vectorized
    >>> flag = ['c', 'p']
    >>> S = 95
    >>> K = [100, 90]
    >>> t = .2
    >>> r = .2
    >>> sigma = .2
    >>> py_vollib.black_scholes.greeks.numerical.theta(flag, S, K, t, r, sigma, return_as='numpy')
    array([-0.04589963, -0.00533543])
    >>> py_vollib_vectorized.vectorized_theta(flag, S, K, t, r, sigma, model='black_scholes', return_as='numpy')  # equivalent
    array([-0.04589963, -0.00533543])
    """
    # Normalize flags, broadcast every input to a common shape, sanity-check.
    flag = _preprocess_flags(flag, dtype=dtype)
    S, K, t, r, sigma, flag = maybe_format_data_and_broadcast(S, K, t, r, sigma, flag, dtype=dtype)
    _validate_data(flag, S, K, t, r, sigma)

    # Dispatch on model; the final positional argument is the cost of carry b.
    if model == "black":
        values = numerical_theta_black(flag, S, K, t, r, sigma, 0)
    elif model == "black_scholes":
        values = numerical_theta_black_scholes(flag, S, K, t, r, sigma, r)
    elif model == "black_scholes_merton":
        if q is None:
            raise ValueError("Must pass a `q` to black scholes merton model (annualized continuous dividend yield).")
        # Re-broadcast so `q` matches the shape of the other inputs.
        S, K, t, r, sigma, q = maybe_format_data_and_broadcast(S, K, t, r, sigma, q,
                                                               dtype=dtype)
        _validate_data(r, q)
        values = numerical_theta_black_scholes_merton(flag, S, K, t, r, sigma, r - q)
    else:
        raise ValueError("Model must be one of: `black`, `black_scholes`, `black_scholes_merton`")

    values = np.ascontiguousarray(values)
    if return_as == "series":
        return pd.Series(values, name="theta")
    if return_as == "dataframe":
        return pd.DataFrame(values, columns=["theta"])
    return values
def vega(flag, S, K, t, r, sigma, q=None, *, model="black_scholes", return_as="dataframe", dtype=np.float64):
    """
    Compute the vega of each contract under the pricing model `model`.
    All inputs are broadcast against one another.

    :param flag: For each contract, `c` for a call option and `p` for a put option.
    :param S: The price of the underlying asset.
    :param K: The strike price.
    :param t: The annualized time to expiration. Must be positive. For small TTEs, use a small value (1e-3).
    :param r: The Interest Free Rate.
    :param sigma: The Implied Volatility.
    :param q: The annualized continuous dividend yield (required for 'black_scholes_merton').
    :param model: One of 'black', 'black_scholes' or 'black_scholes_merton'.
    :param return_as: "series" returns a :obj:`pd.Series`, "dataframe" a :obj:`pd.DataFrame`; any other value returns a :obj:`numpy.array`.
    :param dtype: Data type.
    :return: :obj:`pd.Series`, :obj:`pd.DataFrame` or :obj:`numpy.array` object containing the vega for each contract.
    :raises ValueError: on an unknown `model`, or if `q` is missing for 'black_scholes_merton'.

    >>> import py_vollib.black_scholes.greeks.numerical
    >>> import py_vollib_vectorized
    >>> flag = ['c', 'p']
    >>> S = 95
    >>> K = [100, 90]
    >>> t = .2
    >>> r = .2
    >>> sigma = .2
    >>> py_vollib.black_scholes.greeks.numerical.vega(flag, S, K, t, r, sigma, return_as='numpy')
    array([0.16892575, 0.0928379 ])
    >>> py_vollib_vectorized.vectorized_vega(flag, S, K, t, r, sigma, model='black_scholes', return_as='numpy')  # equivalent
    array([0.16892575, 0.0928379 ])
    """
    # Normalize flags, broadcast every input to a common shape, sanity-check.
    flag = _preprocess_flags(flag, dtype=dtype)
    S, K, t, r, sigma, flag = maybe_format_data_and_broadcast(S, K, t, r, sigma, flag, dtype=dtype)
    _validate_data(flag, S, K, t, r, sigma)

    # Dispatch on model; the final positional argument is the cost of carry b.
    if model == "black":
        values = numerical_vega_black(flag, S, K, t, r, sigma, 0)
    elif model == "black_scholes":
        values = numerical_vega_black_scholes(flag, S, K, t, r, sigma, r)
    elif model == "black_scholes_merton":
        if q is None:
            raise ValueError("Must pass a `q` to black scholes merton model (annualized continuous dividend yield).")
        # Re-broadcast so `q` matches the shape of the other inputs.
        S, K, t, r, sigma, q = maybe_format_data_and_broadcast(S, K, t, r, sigma, q,
                                                               dtype=dtype)
        _validate_data(r, q)
        values = numerical_vega_black_scholes_merton(flag, S, K, t, r, sigma, r - q)
    else:
        raise ValueError("Model must be one of: `black`, `black_scholes`, `black_scholes_merton`")

    values = np.ascontiguousarray(values)
    if return_as == "series":
        return pd.Series(values, name="vega")
    if return_as == "dataframe":
        return pd.DataFrame(values, columns=["vega"])
    return values
def rho(flag, S, K, t, r, sigma, q=None, *, model="black_scholes", return_as="dataframe", dtype=np.float64):
"""
Return the rho of a contract, as specified by the pricing model `model`.
Broadcasting is applied on the inputs.
:param flag: For each contract, this should be specified as `c` for a call option and `p` for a put option.
:param S: The price of the underlying asset.
:param K: The strike price.
:param t: The annualized time to expiration. Must be positive. For small TTEs, use a small value (1e-3).
:param r: The Interest Free Rate.
:param sigma: The Implied Volatility.
:param q: The annualized continuous dividend yield.
:param model: Must be one of 'black', 'black_scholes' or 'black_scholes_merton'.
:param return_as: To return as a :obj:`pd.Series` object, use "series". To return as a :obj:`pd.DataFrame` object, use "dataframe". Any other value will return a :obj:`numpy.array` object.
:param dtype: Data type.
:return: :obj:`pd.Series`, :obj:`pd.DataFrame` or :obj:`numpy.array` object containing the rho for each contract.
>>> import py_vollib.black_scholes.greeks.numerical
>>> import py_vollib_vectorized
>>> flag = ['c', 'p']
>>> S = 95
>>> K = [100, 90]
>>> t | |
import tkinter as tk
from p_types import PokemonType
from moves import Move
from pokemon import Pokemon
from battle import Battle
class Game:
    """Tracks overall progression through the battle tower."""

    def __init__(self):
        self.battle_num = 0           # battles started so far
        self.max_battles = 3          # wins required to become champion
        self.current_battle = None    # active Battle instance, if any

    def startBattle(self):
        """Advance to the next battle and return the freshly created Battle."""
        self.battle_num += 1
        battle = Battle(self.battle_num, self.max_battles)
        self.current_battle = battle
        return battle
class UI:
    """Base user-interface class.

    Holds a reference to the Game being presented and the catalogue of
    user-facing message templates shared by all concrete UIs.
    """

    def __init__(self, game):
        # Game instance whose state this UI displays.
        self.game = game
        # Message templates keyed by situation. "start_message" is filled in
        # here with the required win count; "choose_message" is format()-ed
        # later with the current battle number.
        self.messages = {
            "start_message": """Welcome to the Pokemon Battle Tower!
In order to become the Battle Tower Champion, you must win {} battles in a row.
Becoming the champion will not be easy, because every battle will be harder than last one.""".format(self.game.max_battles),
            "choose_message": """Battle #{}!
Please Select 3-6 pokemons as partners.
""",
            "battle_won": "You won the battle! Next battle will start in a few seconds.",
            "lose": "You lost the battle... Maybe you'll win next time :)",
            "champ": "You are now the Pokemon Battle Tower Champion!",
        }
class GUI(UI):
def __init__(self, game):
    """Build the main window, the start screen, and the (initially hidden)
    battle frame with its message log, move buttons, detail labels and
    animation bookkeeping."""
    UI.__init__(self, game)
    # main window
    self.top = tk.Tk()
    self.top.minsize(500, 400)
    self.top.resizable(0, 0)
    self.top.title("Pokemon Battle Tower")
    self.top.iconbitmap("icon.ico")
    # first/choose screen with tower background. use self.tower_img to make the image stay in memory!
    self.start_canvas = tk.Canvas(self.top, bg="#ddd")
    self.start_canvas.pack(fill = tk.BOTH, expand = True)
    self.tower_img = tk.PhotoImage(file = "./images/tower.png")
    self.start_canvas.create_image(-211, 0, anchor = tk.NW, image = self.tower_img)
    # first screen button and text
    button = tk.Button(self.start_canvas, text = "START GAME", fg = "blue", command = self.chooseScreen)
    button.pack(side = tk.BOTTOM, pady = 20)
    label = tk.Message(self.start_canvas, text = self.messages["start_message"], bg="#eee", width = 450, bd = 10)
    label.pack(side = tk.BOTTOM, pady = 0)
    # mark start_canvas as current_scr
    self.current_scr = self.start_canvas
    # battle_frame to be used later (not packed until a battle starts)
    self.battle_frame = tk.Frame(self.top, bd = 0)
    self.battle_canvas = tk.Canvas(self.battle_frame, height = 280, bg="#fff")
    self.battle_canvas.pack(fill = tk.X, expand = False, side = tk.TOP)
    # one background image per battle; kept on self so they stay in memory
    self.bg_img = [tk.PhotoImage(file = "./images/bg_{}.png".format(k)) for k in [1,2,3]]
    self.current_battle_back = None
    moves_and_messages = tk.Frame(self.battle_frame, bg = "#000")
    moves_and_messages.pack(fill = tk.BOTH, expand = True, padx = 2, pady = 2)
    # battle_frames: install messages
    message_block = tk.Frame(moves_and_messages, bg = "#ddd", width = 150)
    message_block.pack(fill = tk.Y, expand = False, pady = 2, padx = (2, 0), side = tk.RIGHT)
    # contain messages so they won't expand
    text_container = tk.Frame(message_block, bd = 0, width = 150)
    text_container.pack(side = tk.LEFT, fill = tk.BOTH, expand = True)
    text_container.grid_propagate(False)
    text_container.grid_rowconfigure(0, weight=1)
    text_container.grid_columnconfigure(0, weight=1)
    # read-only battle log; re-enabled temporarily whenever text is appended
    self.message_text = tk.Text(text_container, bg = "#ddd", relief = tk.FLAT, wrap = tk.WORD, font = ("Arial", 8))
    self.message_text.tag_configure('bold', font=('Arial', 8, 'bold'))
    self.message_text.grid(row=0, column=0, sticky="nsew")
    # scrollbar for messages
    message_scroll = tk.Scrollbar(message_block)
    self.message_text.configure(yscrollcommand = message_scroll.set, state = tk.DISABLED)
    message_scroll.configure(command = self.message_text.yview)
    message_scroll.pack(side = tk.RIGHT, fill = tk.Y)
    # battle_frames: install moves (4 radio buttons in a 2x2 grid)
    moves_block = tk.Frame(moves_and_messages, bg = "#ddd")
    moves_block.pack(fill = tk.BOTH, expand = True, pady = 2, padx = 0, side = tk.LEFT)
    self.moves = []
    self.selected_move = tk.IntVar()
    for i in range(4):
        self.moves.append( tk.Radiobutton(moves_block, text = " - ", value = i, variable = self.selected_move, state = tk.DISABLED, width = 18, height = 2, anchor = tk.W, bg = "#ddd", justify = tk.LEFT) )
        self.moves[i].grid(row = i%2, column = int(i/2))
    self.confirm_button = tk.Button(moves_block, text = "Go!", anchor = tk.S, state = tk.DISABLED, command = self.confirmMove)
    self.confirm_button.grid(row = 0, column = 2, rowspan = 2, padx = (5, 10))
    moves_block.grid_rowconfigure(0, weight=1)
    moves_block.grid_rowconfigure(1, weight=1)
    moves_block.grid_columnconfigure(0, weight=1)
    moves_block.grid_columnconfigure(1, weight=1)
    moves_block.grid_columnconfigure(2, weight=0)
    # battle_frames: pokemon details (partner bottom-left, enemy top-right)
    self.partner_details = tk.StringVar()
    self.enemy_details = tk.StringVar()
    details = [{'x': 20, 'y': 120, 'var': self.partner_details}, {'x': 370, 'y': 20, 'var': self.enemy_details}]
    for d in details:
        details_frame = tk.LabelFrame(self.battle_canvas, bg = "#000", relief = tk.FLAT)
        details_frame.place(x = d['x'], y = d['y'])
        details_label = tk.Label(details_frame, textvariable = d['var'], bg = "#f9f9fa", width = 18, anchor = tk.CENTER)
        details_label.pack(expand = True, side = tk.LEFT)
    # battle_frames: battle animations set up
    # per-side animation state: current PhotoImage ref, canvas image id,
    # whether the frame loop may run, and the pending after() callback id
    self.animation = dict()
    self.animation["partner"] = {'ref': None, 'img': None, 'allow': False, 'func': None}
    self.animation["enemy"] = {'ref': None, 'img': None, 'allow': False, 'func': None}
def chooseScreen(self):
    """Start the next battle in the Game and show the pokemon-selection screen."""
    battle = self.game.startBattle()
    # we came from a battle, just pack_forget the screen so it can be re-used later
    if self.current_scr != self.start_canvas:
        self.current_scr.pack_forget()
        self.start_canvas.pack(fill = tk.BOTH, expand = True)
    # get the first screen, destroy everything except for background...
    self.current_scr = self.start_canvas
    for child in self.current_scr.winfo_children():
        child.destroy()
    # just a button (its command is wired up below, after checkOptions exists)
    button = tk.Button(self.start_canvas, text = "CONFIRM SELECTION", fg = "blue")
    button.pack(side = tk.BOTTOM, pady = 20)
    # frame for message and pokemon options
    frame = tk.Frame(self.start_canvas, bg="#eee", width = 450, bd = 10)
    frame.pack(side = tk.BOTTOM, pady = 0)
    # put message in top row of frame
    message = self.messages["choose_message"].format(self.game.battle_num)
    label = tk.Message(frame, text = message, bg="#eee", width = 400, anchor = tk.NW)
    label.grid(row = 0, column = 0, columnspan = 3)
    # get pokemon options:
    options = battle.availableList()
    # put pokemon options in frame: six dropdown menus in a 2x3 grid
    menus = []
    intVars = []
    for i in range(6):
        # intVars[i] holds the chosen option index for slot i (-1 = unset)
        intVars.append(tk.IntVar(value = -1))
        txt = tk.StringVar(value = "Pokemon #{}".format(i+1))
        menus.append( tk.Menubutton(frame, textvariable = txt, width = 15, relief = tk.GROOVE, bg="#fff") )
        menus[i].grid(row = int(i/3)+1, column = i%3, padx = 1)
        menus[i].menu = tk.Menu(menus[i], tearoff = 0)
        menus[i]['menu'] = menus[i].menu
        for j in range(len(options)):
            # defaults (n=i, k=j, t=txt) pin loop variables at definition
            # time, avoiding the late-binding closure pitfall
            menus[i].menu.add_radiobutton(label = options[j]['name'], value = j, variable = intVars[i], command = lambda n = i, k=j, t = txt: t.set(options[k]['name']))
    # check if player chose enough pokemons, if so start the battle
    def checkOptions():
        selection = [options[var.get()] for var in intVars if var.get() > -1]
        partner = battle.selectPartners(selection)
        if partner == None:
            from tkinter.messagebox import showwarning
            showwarning("Not Enough Pokemons", "Please choose at least 3 pokemons for the battle.")
        else:
            self.startBattle(partner)
    button.configure(command = checkOptions)
def startBattle(self, partner):
    """Switch to the battle screen, reset per-battle UI state, send out both
    pokemons, and kick off the first turn."""
    # forget last screen, mark battle_frame as current
    self.current_scr.pack_forget()
    self.battle_frame.pack(fill = tk.BOTH, expand = True)
    self.current_scr = self.battle_frame
    # delete bg of last battle if there is one, and insert the new background
    if self.current_battle_back != None:
        self.battle_canvas.delete(self.current_battle_back)
    # battle #2's background art needs a different vertical offset
    bg_y_cord = -140 if self.game.battle_num != 2 else -80
    self.current_battle_back = self.battle_canvas.create_image(-200, bg_y_cord, anchor = tk.NW, image = self.bg_img[self.game.battle_num - 1])
    # clear details
    for d in [self.partner_details, self.enemy_details]:
        d.set("")
    # clear messages (widget is kept read-only; enable only while editing)
    self.message_text.configure(state = tk.NORMAL)
    self.message_text.delete(1.0, tk.END)
    self.message_text.configure(state = tk.DISABLED)
    # clear animations: stop the frame loop, cancel the pending after()
    # callback, and remove the sprite from the canvas
    for pl in ["enemy", "partner"]:
        if self.animation[pl]['ref'] != None:
            self.animation[pl]['allow'] = False
            self.battle_canvas.after_cancel(self.animation[pl]['func'])
            self.animation[pl]['ref'] = None
            self.battle_canvas.delete(self.animation[pl]['img'])
    # get the pokemons
    self.setPokemon(partner)
    enemy = self.game.current_battle.selectEnemies()
    self.setPokemon(enemy)
    return self.turn()
def battleMessage(self, message, bold = False):
    """Append `message` to the battle log and keep the view scrolled down.

    The log widget is normally disabled (read-only); it is enabled only for
    the duration of the insert. When `bold` is True the text is written with
    the 'bold' tag configured in __init__.
    """
    log = self.message_text
    log.configure(state = tk.NORMAL)
    tags = ("bold",) if bold else ()
    log.insert(tk.END, message + "\n", *tags)
    log.configure(state = tk.DISABLED)
    # scroll messages down
    log.yview(tk.END)
def load_gif_frames(self, pname, is_partner):
    """Load every frame of a pokemon's animated gif.

    Partner sprites use the "back" view, enemies the "front" view. Frames
    are read by increasing gif index until Tk refuses the next one.
    """
    side = "back" if is_partner else "front"
    path = "./images/{}/{}.gif".format(side, pname.lower())
    frames = []
    index = 0
    while True:
        try:
            frames.append(tk.PhotoImage(file = path, format = "gif -index {}".format(index)))
        except:
            # no more frames in the gif (or the file is missing entirely)
            break
        index += 1
    return frames
def setPokemon(self, pokemon):
    """Announce a pokemon entering battle, fill in its detail label, and
    slide its sprite onto the canvas before starting the frame-loop
    animation."""
    is_partner = not pokemon.isEnemy()
    self.battleMessage("{} sent out {}!".format("You" if is_partner else "Enemy", pokemon.getName()), True)
    if is_partner:
        self.battleMessage(pokemon.printStats())
    det = self.partner_details if is_partner else self.enemy_details
    det.set("{} (HP: {}%)\n{}".format(pokemon.getName(), 100, "/".join(format(tp.getName()) for tp in pokemon.getTypes())))
    pl = "partner" if is_partner else "enemy"
    img = self.load_gif_frames(pokemon.getName(), is_partner)
    self.animation[pl]['ref'] = img[0]
    img_width = img[0].width()
    # partner slides in from the left toward x=70 (anchored SW); enemy
    # slides in from the right (anchored SE)
    cord_y, x_start, x_end, img_anchor = (260, -img_width, 70, tk.SW) if is_partner else (180, 500+img_width, 500+img_width-180, tk.SE)
    self.animation[pl]['img'] = self.battle_canvas.create_image(x_start, cord_y, anchor = img_anchor, image = self.animation[pl]['ref'])
    # go through all frames of animation in a loop
    def animate():
        if self.animation[pl]['allow'] == True:
            # get next frame (rotate the frame list)
            selected_img = img.pop(0)
            img.append(selected_img)
            # put the next frame
            ready = self.battle_canvas.create_image(x_end, cord_y, anchor = img_anchor, image = selected_img)
            # delete previous frame
            self.battle_canvas.delete(self.animation[pl]['img'])
            self.animation[pl]['img'] = ready
            self.animation[pl]['ref'] = selected_img
            # return here in 20 ms
            self.animation[pl]['func'] = self.battle_canvas.after(20, animate)
    # animate pokemon into the screen, then turn animate() on
    def entrance(i):
        self.battle_canvas.delete(self.animation[pl]['img'])
        selected_img = img.pop(0)
        img.append(selected_img)
        self.animation[pl]['ref'] = selected_img
        # linear interpolation between x_start and x_end over 30 steps
        alpha = ((30-i)/30)*x_start + (i/30)*x_end
        self.animation[pl]['img'] = self.battle_canvas.create_image(alpha, cord_y, anchor = img_anchor, image = self.animation[pl]['ref'])
        if i < 30:
            self.battle_canvas.after(15, entrance, i+1)
        if i == 30:
            # entrance sequence is done, turn on animate()
            self.animation[pl]['allow'] = True
            animate()
    entrance(0)
    self.battleMessage("")
def faint(self, is_partner):
pl = "partner" if is_partner else "enemy"
# turn current animation off
self.animation[pl]['allow'] = False
self.battle_canvas.after_cancel(self.animation[pl]['func'])
# clear fainted pokemon's details
details = | |
0)
if self.useProgrammerScale:
self.loadScaleOld()
else:
self.loadScaleNew()
def __debugScale(self):
    """Dump local, scale-relative, and render-relative positions of the
    scale's moving parts to the debug log (diagnostic only; no state is
    modified)."""
    origin = Point3(0, 0, 0)
    # For each part: local pos, pos relative to the scale root, pos in render.
    probes = (
        (self.prosecutionPanNodePath,
         'prosecutionPanPos = %s',
         'prosecutionPanRelPos = %s',
         'panRenderPos = %s'),
        (self.prosecutionLocator,
         'prosecutionLocatorPos = %s ',
         'prosecutionLocatorRelPos = %s ',
         'locatorRenderPos = %s'),
        (self.beamNodePath,
         'beamPos = %s',
         'beamRelPos = %s',
         'beamRenderPos = %s'),
    )
    for nodePath, localFmt, relFmt, renderFmt in probes:
        self.notify.debug(localFmt % nodePath.getPos())
        self.notify.debug(relFmt % self.scaleNodePath.getRelativePoint(nodePath, origin))
        self.notify.debug(renderFmt % render.getRelativePoint(nodePath, origin))
    self.notify.debug('beamBoundsCenter = %s' % self.beamNodePath.getBounds().getCenter())
    self.notify.debug('beamLocatorPos = %s' % self.beamLocator.getBounds().getCenter())
    def loadScaleNew(self):
        """Load the art-asset justice scale model and wire up its beam, pans and collisions."""
        self.scaleNodePath = loader.loadModel('phase_11/models/lawbotHQ/scale')
        self.beamNodePath = self.scaleNodePath.find('**/scaleBeam')
        self.defensePanNodePath = self.scaleNodePath.find('**/defensePan')
        self.prosecutionPanNodePath = self.scaleNodePath.find('**/prosecutionPan')
        self.defenseColNodePath = self.scaleNodePath.find('**/DefenseCol')
        # Tag the pan collision solids so pie hits can be attributed to a side.
        self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan))
        self.prosecutionColNodePath = self.scaleNodePath.find('**/ProsecutionCol')
        self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan))
        self.standNodePath = self.scaleNodePath.find('**/scaleStand')
        self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr)
        self.defenseLocator = self.scaleNodePath.find('**/DefenseLocator')
        defenseLocBounds = self.defenseLocator.getBounds()
        defenseLocPos = defenseLocBounds.getCenter()
        self.notify.debug('defenseLocatorPos = %s' % defenseLocPos)
        # Snap each pan to its locator, then hang it off the beam so it tilts with it.
        self.defensePanNodePath.setPos(defenseLocPos)
        self.defensePanNodePath.reparentTo(self.beamNodePath)
        self.notify.debug('defensePanNodePath.getPos()=%s' % self.defensePanNodePath.getPos())
        self.prosecutionLocator = self.scaleNodePath.find('**/ProsecutionLocator')
        prosecutionLocBounds = self.prosecutionLocator.getBounds()
        prosecutionLocPos = prosecutionLocBounds.getCenter()
        self.notify.debug('prosecutionLocatorPos = %s' % prosecutionLocPos)
        self.prosecutionPanNodePath.setPos(prosecutionLocPos)
        self.prosecutionPanNodePath.reparentTo(self.beamNodePath)
        self.beamLocator = self.scaleNodePath.find('**/StandLocator1')
        beamLocatorBounds = self.beamLocator.getBounds()
        beamLocatorPos = beamLocatorBounds.getCenter()
        negBeamLocatorPos = -beamLocatorPos
        self.notify.debug('beamLocatorPos = %s' % beamLocatorPos)
        self.notify.debug('negBeamLocatorPos = %s' % negBeamLocatorPos)
        self.beamNodePath.setPos(beamLocatorPos)
        self.scaleNodePath.setScale(*ToontownGlobals.LawbotBossInjusticeScale)
        self.scaleNodePath.wrtReparentTo(self.geom)
        self.baseHighCol = self.scaleNodePath.find('**/BaseHighCol')
        oldBitMask = self.baseHighCol.getCollideMask()
        # Let pies and the camera pass through the tall base collision.
        newBitMask = oldBitMask & ~ToontownGlobals.PieBitmask
        newBitMask = newBitMask & ~ToontownGlobals.CameraBitmask
        self.baseHighCol.setCollideMask(newBitMask)
        self.defenseHighCol = self.scaleNodePath.find('**/DefenseHighCol')
        self.defenseHighCol.stash()
        self.defenseHighCol.setCollideMask(newBitMask)
        self.baseTopCol = self.scaleNodePath.find('**/Scale_base_top_collision')
        self.baseSideCol = self.scaleNodePath.find('**/Scale_base_side_col')
        # Locators are only reference points; never render them.
        self.defenseLocator.hide()
        self.prosecutionLocator.hide()
        self.beamLocator.hide()
    def loadScaleOld(self):
        """Build the placeholder (programmer-art) justice scale out of generated blocks."""
        startingTilt = 0
        self.scaleNodePath = NodePath('injusticeScale')
        # Horizontal beam both pans hang from.
        beamGeom = self.createBlock(0.25, 2, 0.125, -0.25, -2, -0.125, 0, 1.0, 0, 1.0)
        self.beamNodePath = NodePath('scaleBeam')
        self.beamNodePath.attachNewNode(beamGeom)
        self.beamNodePath.setPos(0, 0, 3)
        self.beamNodePath.reparentTo(self.scaleNodePath)
        defensePanGeom = self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2, 0, 0, 1.0, 0.25)
        self.defensePanNodePath = NodePath('defensePan')
        self.defensePanNodePath.attachNewNode(defensePanGeom)
        self.defensePanNodePath.setPos(0, -2, 0)
        self.defensePanNodePath.reparentTo(self.beamNodePath)
        # Tangible tube so pies register hits on the defense pan.
        defenseTube = CollisionTube(0, 0, -0.5, 0, 0, -1.5, 0.6)
        defenseTube.setTangible(1)
        defenseCollNode = CollisionNode('DefenseCol')
        defenseCollNode.addSolid(defenseTube)
        self.defenseColNodePath = self.defensePanNodePath.attachNewNode(defenseCollNode)
        self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan))
        prosecutionPanGeom = self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2, 1.0, 0, 0, 1.0)
        self.prosecutionPanNodePath = NodePath('prosecutionPan')
        self.prosecutionPanNodePath.attachNewNode(prosecutionPanGeom)
        self.prosecutionPanNodePath.setPos(0, 2, 0)
        self.prosecutionPanNodePath.reparentTo(self.beamNodePath)
        prosecutionTube = CollisionTube(0, 0, -0.5, 0, 0, -1.5, 0.6)
        prosecutionTube.setTangible(1)
        # NOTE(review): the defense collision node above uses a plain name while
        # this one is uniqueName()-qualified -- confirm the asymmetry is intended.
        prosecutionCollNode = CollisionNode(self.uniqueName('ProsecutionCol'))
        prosecutionCollNode.addSolid(prosecutionTube)
        self.prosecutionColNodePath = self.prosecutionPanNodePath.attachNewNode(prosecutionCollNode)
        self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan))
        standGeom = self.createBlock(0.25, 0.25, 0, -0.25, -0.25, 3)
        self.standNodePath = NodePath('scaleStand')
        self.standNodePath.attachNewNode(standGeom)
        self.standNodePath.reparentTo(self.scaleNodePath)
        self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr)
        self.scaleNodePath.setScale(5.0)
        self.scaleNodePath.wrtReparentTo(self.geom)
        self.setScaleTilt(startingTilt)
def setScaleTilt(self, tilt):
self.beamNodePath.setP(tilt)
if self.useProgrammerScale:
self.defensePanNodePath.setP(-tilt)
self.prosecutionPanNodePath.setP(-tilt)
else:
self.defensePanNodePath.setP(-tilt)
self.prosecutionPanNodePath.setP(-tilt)
def stashBaseCol(self):
if not self.baseColStashed:
self.notify.debug('stashBaseCol')
self.baseTopCol.stash()
self.baseSideCol.stash()
self.baseColStashed = True
def unstashBaseCol(self):
if self.baseColStashed:
self.notify.debug('unstashBaseCol')
self.baseTopCol.unstash()
self.baseSideCol.unstash()
self.baseColStashed = False
def makeScaleReflectDamage(self):
diffDamage = self.bossDamage - ToontownGlobals.LawbotBossInitialDamage
diffDamage *= 1.0
if diffDamage >= 0:
percentDamaged = diffDamage / (ToontownGlobals.LawbotBossMaxDamage - ToontownGlobals.LawbotBossInitialDamage)
tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt
else:
percentDamaged = diffDamage / (ToontownGlobals.LawbotBossInitialDamage - 0)
tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt
self.setScaleTilt(tilt)
if self.bossDamage < ToontownGlobals.LawbotBossMaxDamage * 0.85:
self.unstashBaseCol()
else:
self.stashBaseCol()
    def unloadEnvironment(self):
        """Free the courtroom geometry created for this boss battle."""
        self.notify.debug('----- unloadEnvironment')
        DistributedBossCog.DistributedBossCog.unloadEnvironment(self)
        self.geom.removeNode()
        del self.geom
def __loadMopaths(self):
self.notify.debug('----- __loadMopaths')
self.toonsEnterA = Mopath.Mopath()
self.toonsEnterA.loadFile('phase_9/paths/bossBattle-toonsEnterA')
self.toonsEnterA.fFaceForward = 1
self.toonsEnterA.timeScale = 35
self.toonsEnterB = Mopath.Mopath()
self.toonsEnterB.loadFile('phase_9/paths/bossBattle-toonsEnterB')
self.toonsEnterB.fFaceForward = 1
self.toonsEnterB.timeScale = 35
def __unloadMopaths(self):
self.notify.debug('----- __unloadMopaths')
self.toonsEnterA.reset()
self.toonsEnterB.reset()
def enterOff(self):
self.notify.debug('----- enterOff')
DistributedBossCog.DistributedBossCog.enterOff(self)
if self.witnessToon:
self.witnessToon.clearChat()
    def enterWaitForToons(self):
        """FSM state: waiting for all toons to arrive; keep the courtroom hidden."""
        self.notify.debug('----- enterWaitForToons')
        DistributedBossCog.DistributedBossCog.enterWaitForToons(self)
        self.geom.hide()
        self.witnessToon.removeActive()
    def exitWaitForToons(self):
        """Leave WaitForToons: reveal the courtroom and reactivate the witness toon."""
        self.notify.debug('----- exitWaitForToons')
        DistributedBossCog.DistributedBossCog.exitWaitForToons(self)
        self.geom.show()
        self.witnessToon.addActive()
    def enterElevator(self):
        """FSM state: toons ride the elevator into the courtroom."""
        self.notify.debug('----- enterElevator')
        DistributedBossCog.DistributedBossCog.enterElevator(self)
        self.witnessToon.removeActive()
        self.reparentTo(render)
        self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr)
        # Boss idles in his happy/raised pose while the elevator ride plays.
        self.happy = 1
        self.raised = 1
        self.forward = 1
        self.doAnimate()
        self.__hideWitnessToon()
        # Stash the courtroom doors (and the reflected copy) during the ride.
        if not self.mainDoor.isEmpty():
            self.mainDoor.stash()
        if not self.reflectedMainDoor.isEmpty():
            self.reflectedMainDoor.stash()
        camera.reparentTo(self.elevatorModel)
        camera.setPosHpr(0, 30, 8, 180, 0, 0)
    def exitElevator(self):
        """Leave the Elevator state."""
        self.notify.debug('----- exitElevator')
        DistributedBossCog.DistributedBossCog.exitElevator(self)
        self.witnessToon.removeActive()
    def enterIntroduction(self):
        """FSM state: the boss's introduction / promotion cutscene."""
        self.notify.debug('----- enterIntroduction')
        self.reparentTo(render)
        self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr)
        self.stopAnimate()
        self.__hideWitnessToon()
        DistributedBossCog.DistributedBossCog.enterIntroduction(self)
        base.playMusic(self.promotionMusic, looping=1, volume=0.9)
        # Keep the doors stashed while the cutscene plays.
        if not self.mainDoor.isEmpty():
            self.mainDoor.stash()
        if not self.reflectedMainDoor.isEmpty():
            self.reflectedMainDoor.stash()
    def exitIntroduction(self):
        """Leave Introduction: stop the music and restore the reflected door."""
        self.notify.debug('----- exitIntroduction')
        DistributedBossCog.DistributedBossCog.exitIntroduction(self)
        self.promotionMusic.stop()
        # NOTE(review): only the reflected door is unstashed here; the main-door
        # and elevator-entrance branches are no-ops -- confirm that is intended.
        if not self.mainDoor.isEmpty():
            pass
        if not self.reflectedMainDoor.isEmpty():
            self.reflectedMainDoor.unstash()
        if not self.elevatorEntrance.isEmpty():
            pass
def enterBattleOne(self):
self.notify.debug('----- LawbotBoss.enterBattleOne ')
DistributedBossCog.DistributedBossCog.enterBattleOne(self)
self.reparentTo(render)
self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr)
self.clearChat()
self.loop('Ff_neutral')
self.notify.debug('self.battleANode = %s' % self.battleANode)
self.__hideWitnessToon()
if self.battleA == None or self.battleB == None:
pass
return
    def exitBattleOne(self):
        """Leave BattleOne."""
        self.notify.debug('----- exitBattleOne')
        DistributedBossCog.DistributedBossCog.exitBattleOne(self)
    def stashBoss(self):
        # Hide the boss from the scene graph entirely.
        self.stash()
    def unstashBoss(self, task):
        # Task callback: re-show the boss and put him back under render.
        self.unstash()
        self.reparentTo(render)
    def enterRollToBattleTwo(self):
        """FSM state: the boss rolls from courtroom A to the battle-two position."""
        self.notify.debug('----- enterRollToBattleTwo')
        self.releaseToons(finalBattle=1)
        # Hide the boss for a beat while toons are positioned; a short task
        # scheduled below re-shows him.
        self.stashBoss()
        self.toonsToBattlePosition(self.involvedToons, self.battleANode)
        self.stickBossToFloor()
        intervalName = 'RollToBattleTwo'
        seq = Sequence(self.__makeRollToBattleTwoMovie(), Func(self.__onToPrepareBattleTwo), name=intervalName)
        seq.start()
        self.storeInterval(seq, intervalName)
        base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9)
        taskMgr.doMethodLater(0.01, self.unstashBoss, 'unstashBoss')
    def __onToPrepareBattleTwo(self):
        """Roll movie finished: release the boss and report through the barrier."""
        self.notify.debug('----- __onToPrepareBattleTwo')
        self.unstickBoss()
        self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr)
        self.doneBarrier('RollToBattleTwo')
    def exitRollToBattleTwo(self):
        """Leave RollToBattleTwo: stop the movie and the transition music."""
        self.notify.debug('----- exitRollToBattleTwo')
        self.unstickBoss()
        intervalName = 'RollToBattleTwo'
        self.clearInterval(intervalName)
        self.betweenBattleMusic.stop()
    def enterPrepareBattleTwo(self):
        """FSM state: the witness toon briefs the players before the cannon round."""
        self.notify.debug('----- enterPrepareBattleTwo')
        self.cleanupIntervals()
        self.controlToons()
        self.setToonsToNeutral(self.involvedToons)
        self.clearChat()
        self.reparentTo(render)
        self.__showWitnessToon()
        prepareBattleTwoMovie = self.__makePrepareBattleTwoMovie()
        intervalName = 'prepareBattleTwo'
        seq = Sequence(prepareBattleTwoMovie, name=intervalName)
        seq.start()
        self.storeInterval(seq, intervalName)
        # Once the chat pages are done, show the cannons rising.
        self.acceptOnce('doneChatPage', self.__showCannonsAppearing)
        base.playMusic(self.stingMusic, looping=0, volume=1.0)
    def __showCannonsAppearing(self, elapsedTime = 0):
        """Play the movie of one cannon appearing in front of each participating toon."""
        allCannonsAppear = Sequence(Func(self.__positionToonsInFrontOfCannons), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPosTwo()), Func(camera.lookAt, localAvatar))
        multiCannons = Parallel()
        index = 0
        self.involvedToons.sort()
        for toonId in self.involvedToons:
            toon = self.cr.doId2do.get(toonId)
            if toon:
                if index in self.cannons:
                    cannon = self.cannons[index]
                    cannonSeq = cannon.generateCannonAppearTrack(toon)
                    multiCannons.append(cannonSeq)
                    # index counts only toons actually present in the repository,
                    # so absent toons do not consume a cannon slot.
                    index += 1
                else:
                    self.notify.warning('No cannon %d but we have a toon =%d' % (index, toonId))
        allCannonsAppear.append(multiCannons)
        intervalName = 'prepareBattleTwoCannonsAppear'
        seq = Sequence(allCannonsAppear, Func(self.__onToBattleTwo), name=intervalName)
        seq.start()
        self.storeInterval(seq, intervalName)
    def __onToBattleTwo(self, elapsedTime = 0):
        """Cannon movie done: report through the barrier and queue the waiting banner."""
        self.notify.debug('----- __onToBattleTwo')
        self.doneBarrier('PrepareBattleTwo')
        taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage'))
    def exitPrepareBattleTwo(self):
        """Leave PrepareBattleTwo: clear UI hooks and stop the sting music."""
        self.notify.debug('----- exitPrepareBattleTwo')
        self.show()
        taskMgr.remove(self.uniqueName('WaitingMessage'))
        self.ignore('doneChatPage')
        self.__clearOnscreenMessage()
        self.stingMusic.stop()
    def enterBattleTwo(self):
        """FSM state: the cannon round in courtroom B."""
        self.notify.debug('----- enterBattleTwo')
        self.cleanupIntervals()
        # Battle two grants a credit multiplier on the local toon's inventory;
        # exitBattleTwo resets it to 1.
        mult = ToontownBattleGlobals.getBossBattleCreditMultiplier(2)
        localAvatar.inventory.setBattleCreditMultiplier(mult)
        self.reparentTo(render)
        self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr)
        self.clearChat()
        self.witnessToon.clearChat()
        self.releaseToons(finalBattle=1)
        self.__showWitnessToon()
        if not self.useCannons:
            self.toonsToBattlePosition(self.toonsA, self.battleANode)
            self.toonsToBattlePosition(self.toonsB, self.battleBNode)
        base.playMusic(self.battleTwoMusic, looping=1, volume=0.9)
        self.startJuryBoxMoving()
        # NOTE(review): assumes self.cannons is keyed 0..n-1 -- confirm.
        for index in xrange(len(self.cannons)):
            cannon = self.cannons[index]
            cannon.cannon.show()
    def getChairParent(self):
        """Return the node jury chairs should be parented to."""
        return self.juryBox
def startJuryBoxMoving(self):
if self.juryBoxIval:
self.juryBoxIval.finish()
self.juryBoxIval = None
self.juryBox.setPos(-30, 0, -12.645)
self.reflectedJuryBox.setPos(-30, 0, 0)
curPos = self.juryBox.getPos()
endingAbsPos = Point3(curPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2])
curReflectedPos = self.reflectedJuryBox.getPos()
reflectedEndingAbsPos = Point3(curReflectedPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curReflectedPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curReflectedPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2])
self.juryBoxIval = Parallel(self.juryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, endingAbsPos), self.reflectedJuryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, reflectedEndingAbsPos), SoundInterval(self.juryMovesSfx, node=self.chairs[2].nodePath, duration=ToontownGlobals.LawbotBossJuryBoxMoveTime, loop=1, volume=1.0))
self.juryBoxIval.start()
self.juryTimer = ToontownTimer.ToontownTimer()
self.juryTimer.posInTopRightCorner()
self.juryTimer.countdown(ToontownGlobals.LawbotBossJuryBoxMoveTime)
    def exitBattleTwo(self):
        """Leave BattleTwo: stop movies/music, reset the credit multiplier and jury props."""
        self.notify.debug('----- exitBattleTwo')
        intervalName = self.uniqueName('Drop')
        self.clearInterval(intervalName)
        self.cleanupBattles()
        self.battleTwoMusic.stop()
        localAvatar.inventory.setBattleCreditMultiplier(1)
        if self.juryTimer:
            self.juryTimer.destroy()
            # del then reassign keeps the attribute defined (as None) afterwards.
            del self.juryTimer
            self.juryTimer = None
        for chair in self.chairs.values():
            chair.stopCogsFlying()
        return
    def enterRollToBattleThree(self):
        """FSM state: the boss rolls to the battle-three (pie fight) position."""
        self.notify.debug('----- enterRollToBattleThree')
        self.reparentTo(render)
        self.stickBossToFloor()
        intervalName = 'RollToBattleThree'
        seq = Sequence(self.__makeRollToBattleThreeMovie(), Func(self.__onToPrepareBattleThree), name=intervalName)
        seq.start()
        self.storeInterval(seq, intervalName)
        base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9)
    def __onToPrepareBattleThree(self):
        """Roll movie done: release the boss and report through the barrier."""
        self.notify.debug('----- __onToPrepareBattleThree')
        self.unstickBoss()
        self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr)
        self.doneBarrier('RollToBattleThree')
    def exitRollToBattleThree(self):
        """Leave RollToBattleThree: stop the movie and the transition music."""
        self.notify.debug('----- exitRollToBattleThree')
        self.unstickBoss()
        intervalName = 'RollToBattleThree'
        self.clearInterval(intervalName)
        self.betweenBattleMusic.stop()
    def enterPrepareBattleThree(self):
        """FSM state: the witness toon explains the final pie round."""
        self.notify.debug('----- enterPrepareBattleThree')
        self.cleanupIntervals()
        self.controlToons()
        self.setToonsToNeutral(self.involvedToons)
        self.clearChat()
        self.reparentTo(render)
        base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9)
        self.__showWitnessToon()
        prepareBattleThreeMovie = self.__makePrepareBattleThreeMovie()
        self.acceptOnce('doneChatPage', self.__onToBattleThree)
        intervalName = 'prepareBattleThree'
        seq = Sequence(prepareBattleThreeMovie, name=intervalName)
        seq.start()
        self.storeInterval(seq, intervalName)
    def __onToBattleThree(self, elapsed):
        """Briefing done: report through the barrier and queue the waiting banner."""
        self.notify.debug('----- __onToBattleThree')
        self.doneBarrier('PrepareBattleThree')
        taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage'))
def exitPrepareBattleThree(self):
self.notify.debug('----- exitPrepareBattleThree')
self.show()
taskMgr.remove(self.uniqueName('WaitingMessage'))
self.ignore('doneChatPage')
intervalName = 'PrepareBattleThree'
self.clearInterval(intervalName)
self.__clearOnscreenMessage()
self.betweenBattleMusic.stop()
    def enterBattleThree(self):
        """FSM state: the final round -- toons pie the boss at the justice scale."""
        DistributedBossCog.DistributedBossCog.enterBattleThree(self)
        self.scaleNodePath.unstash()
        localAvatar.setPos(-3, 0, 0)
        base.localAvatar.orbitalCamera.start()
        self.clearChat()
        self.witnessToon.clearChat()
        self.reparentTo(render)
        self.happy = 1
        self.raised = 1
        self.forward = 1
        self.doAnimate()
        # Gameplay event hooks: witness stand touches, pie hits, pan collisions.
        self.accept('enterWitnessStand', self.__touchedWitnessStand)
        self.accept('pieSplat', self.__pieSplat)
        self.accept('localPieSplat', self.__localPieSplat)
        self.accept('outOfPies', self.__outOfPies)
        self.accept('begin-pie', self.__foundPieButton)
        self.accept('enterDefenseCol', self.__enterDefenseCol)
        self.accept('enterProsecutionCol', self.__enterProsecutionCol)
        localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
        # Nudge idle players toward the pies after 30 seconds.
        taskMgr.doMethodLater(30, self.__howToGetPies, self.uniqueName('PieAdvice'))
        self.stickBossToFloor()
        self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr)
        self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage
        base.playMusic(self.battleThreeMusic, looping=1, volume=0.9)
        self.__showWitnessToon()
        diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty]
        self.bossHealthBar.initialize(self.bossMaxDamage - self.bossDamage, self.bossMaxDamage)
        # diffSettings[4] gates the CJ speedchat menu on this difficulty.
        if diffSettings[4]:
            localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu()
            localAvatar.chatMgr.chatInputSpeedChat.addCJMenu(self.bonusWeight)
    def __doneBattleThree(self):
        """Battle three finished locally: advance the FSM to NearVictory."""
        self.notify.debug('----- __doneBattleThree')
        self.setState('NearVictory')
        self.unstickBoss()
    def exitBattleThree(self):
        """Leave BattleThree: unhook every gameplay event and remember the music position."""
        self.notify.debug('----- exitBattleThree')
        DistributedBossCog.DistributedBossCog.exitBattleThree(self)
        NametagGlobals.setMasterArrowsOn(1)
        bossDoneEventName = self.uniqueName('DestroyedBoss')
        self.ignore(bossDoneEventName)
        taskMgr.remove(self.uniqueName('StandUp'))
        self.ignore('enterWitnessStand')
        self.ignore('pieSplat')
        self.ignore('localPieSplat')
        self.ignore('outOfPies')
        self.ignore('begin-pie')
        self.ignore('enterDefenseCol')
        self.ignore('enterProsecutionCol')
        self.__clearOnscreenMessage()
        taskMgr.remove(self.uniqueName('PieAdvice'))
        localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
        if self.bossDamageMovie:
            self.bossDamageMovie.finish()
            self.bossDamageMovie = None
        self.unstickBoss()
        taskName = 'RecoverBossDamage'
        taskMgr.remove(taskName)
        # Remember where the music stopped so later states can resume it seamlessly.
        self.battleThreeMusicTime = self.battleThreeMusic.getTime()
        self.battleThreeMusic.stop()
        return
    def enterNearVictory(self):
        """FSM state: boss is nearly beaten -- dizzy and waiting for the final pie."""
        self.cleanupIntervals()
        self.reparentTo(render)
        self.setPos(*ToontownGlobals.LawbotBossDeathPos)
        self.setHpr(*ToontownGlobals.LawbotBossBattleThreeHpr)
        self.clearChat()
        self.releaseToons(finalBattle=1)
        # Only the final pie splat matters from here on.
        self.accept('pieSplat', self.__finalPieSplat)
        self.accept('localPieSplat', self.__localPieSplat)
        self.accept('outOfPies', self.__outOfPies)
        localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
        self.happy = 0
        self.raised = 0
        self.forward = 1
        self.doAnimate()
        self.setDizzy(1)
        base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)
    def exitNearVictory(self):
        """Leave NearVictory: unhook pie events and clear the dizzy pose."""
        self.notify.debug('----- exitNearVictory')
        self.ignore('pieSplat')
        self.ignore('localPieSplat')
        self.ignore('outOfPies')
        self.__clearOnscreenMessage()
        taskMgr.remove(self.uniqueName('PieAdvice'))
        localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
        self.setDizzy(0)
        self.battleThreeMusicTime = self.battleThreeMusic.getTime()
        self.battleThreeMusic.stop()
    def enterVictory(self):
        """FSM state: play the victory movie."""
        self.notify.debug('----- enterVictory')
        self.cleanupIntervals()
        self.reparentTo(render)
        self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr)
        self.loop('neutral')
        localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
        self.clearChat()
        self.witnessToon.clearChat()
        self.controlToons()
        self.setToonsToNeutral(self.involvedToons)
        self.happy = 1
        self.raised = 1
        self.forward = 1
        intervalName = 'VictoryMovie'
        seq = Sequence(self.makeVictoryMovie(), Func(self.__continueVictory), name=intervalName)
        seq.start()
        self.storeInterval(seq, intervalName)
        self.bossHealthBar.deinitialize()
        base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)
    def __continueVictory(self):
        """Victory movie done: report through the barrier."""
        self.notify.debug('----- __continueVictory')
        self.stopAnimate()
        self.doneBarrier('Victory')
    def exitVictory(self):
        """Leave Victory."""
        self.notify.debug('----- exitVictory')
        self.stopAnimate()
        self.unstash()
        localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
        self.battleThreeMusicTime = self.battleThreeMusic.getTime()
        self.battleThreeMusic.stop()
    def enterDefeat(self):
        """FSM state: the toons lost -- play the defeat movie."""
        self.notify.debug('----- enterDefeat')
        self.cleanupIntervals()
        localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
        self.reparentTo(render)
        self.clearChat()
        self.releaseToons(finalBattle=1)
        self.happy = 0
        self.raised = 0
        self.forward = 1
        intervalName = 'DefeatMovie'
        seq = Sequence(self.makeDefeatMovie(), Func(self.__continueDefeat), name=intervalName)
        seq.start()
        self.storeInterval(seq, intervalName)
        base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)
    def __continueDefeat(self):
        """Defeat movie done: report through the barrier."""
        self.notify.debug('----- __continueDefeat')
        self.stopAnimate()
        self.doneBarrier('Defeat')
    def exitDefeat(self):
        """Leave Defeat."""
        self.notify.debug('----- exitDefeat')
        self.stopAnimate()
        self.unstash()
        localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
        self.battleThreeMusicTime = self.battleThreeMusic.getTime()
        self.battleThreeMusic.stop()
def enterReward(self):
self.cleanupIntervals()
self.clearChat()
self.witnessToon.clearChat()
self.stash()
self.stopAnimate()
self.controlToons()
panelName = self.uniqueName('reward')
self.rewardPanel = RewardPanel.RewardPanel(panelName)
victory, camVictory, skipper = MovieToonVictory.doToonVictory(1, self.involvedToons, self.toonRewardIds, self.toonRewardDicts, self.deathList, self.rewardPanel, allowGroupShot=0, uberList=self.uberList, noSkip=True)
ival = Sequence(Parallel(victory, camVictory), Func(self.__doneReward))
intervalName = 'RewardMovie'
delayDeletes = []
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
| |
from input channels to output chanels.
bias: Boolean, whether the layer uses a bias vector.
Input shape:
3D tensor with shape: `(batch_size, steps, input_dim)`
Output shape:
3D tensor with shape: `(batch_size, new_steps, filters)`
`steps` value might have changed due to padding or strides.
"""
self.in_channels = in_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
super(Conv1D, self).__init__(in_layers, **kwargs)
def _build_layer(self):
return nn.Conv1D(
in_channels=self.in_channels,
out_channels=self.out_channels,
kernel_size=self.kernel_size,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
bias=self.bias)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
if len(inputs) != 1:
raise ValueError("Conv1D layer must have exactly one parent")
parent = inputs[0]
if len(parent.Size()) == 2:
parent = torch.unsqueeze(parent, 2)
elif len(parent.Size()) != 3:
raise ValueError("Parent tensor must be (batch, width, channel)")
if torch.cuda.is_available():
if not self._built:
self._layer = self._build_layer()
self._non_pickle_fields.append('_layer')
layer = self._layer
else:
layer = self._build_layer()
out_tensor = layer(parent)
if set_tensors:
self._record_variable_scope(self.name)
self.out_tensor = out_tensor
if torch.cuda.is_available() and not self._built:
self._built = True
self.variables = self._layer.variables
return out_tensor
class Dense(Layer):
  """Fully connected layer with optional activation.

  NOTE(review): this class appears to be mid-port from TensorFlow to
  PyTorch; several pieces look unfinished -- see inline notes.
  """

  def __init__(
      self,
      out_channels,
      activation_fn=None,
      biases_initializer=torch.zeros,
      weights_initializer=nn.init.kaiming_normal_,
      time_series=False,
      **kwargs):
    """Create a dense layer.
    The weight and bias initializers are specified by callable objects that construct
    and return a PyTorch initializer when invoked with no arguments. This will typically
    be either the initializer class itself (if the constructor does not require arguments),
    or a TorchWrapper (if it does).
    Parameters
    ----------
    out_channels: int
      the number of output values
    activation_fn: object
      the PyTorch activation function to apply to the output
    biases_initializer: callable object
      the initializer for bias values. This may be None, in which case the layer
      will not include biases.
    weights_initializer: callable object
      the initializer for weight values
    time_series: bool
      if True, the dense layer is applied to each element of a batch in sequence
    """
    super(Dense, self).__init__(**kwargs)
    self.out_channels = out_channels
    self.out_tensor = None
    self.activation_fn = activation_fn
    self.biases_initializer = biases_initializer
    self.weights_initializer = weights_initializer
    self.time_series = time_series
    # Best-effort static shape inference; parents without a known shape are ignored.
    try:
      parent_shape = self.in_layers[0].shape
      self._shape = tuple(parent_shape[:-1]) + (out_channels,)
    except:
      pass
  def _build_layer(self, reuse):
    # Translate the initializer callables into constructor arguments.
    if self.biases_initializer is None:
      biases_initializer = None
    else:
      biases_initializer = self.biases_initializer()
    # NOTE(review): `Denselayer` is not defined in this module, and `_reuse`
    # is a TensorFlow-style argument -- confirm this helper exists in the project.
    return Denselayer(
        self.out_channels,
        activation=self.activation_fn,
        use_bias=biases_initializer is not None,
        kernel_initializer=self.weights_initializer(),
        bias_initializer=biases_initializer,
        _reuse=reuse)
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """Apply the dense layer to the single parent tensor and cache the result."""
    inputs = self._get_input_tensors(in_layers)
    if len(inputs) != 1:
      raise ValueError("Dense layer can only have one input")
    parent = inputs[0]
    # NOTE(review): `self._reuse` is never assigned in this class and
    # `if torch:` is always truthy -- this looks like leftover TF reuse logic.
    for reuse in (self._reuse, False):
      if torch:
        if not self._built:
          self._layer = self._build_layer(False)
          self._non_pickle_fields.append('_layer')
        layer = self._layer
      else:
        layer = self._build_layer(reuse)
      try:
        if self.time_series:
          # NOTE(review): Tensor.map_ is an in-place elementwise op, not a
          # per-timestep map -- verify this branch against the TF original.
          out_tensor = torch.Tensor.map_(layer, parent)
        else:
          out_tensor = layer(parent)
        break
      except ValueError:
        if reuse:
          # This probably means the variable hasn't been created yet, so try again
          # with reuse set to false.
          continue
        raise
    if set_tensors:
      self._record_variable_scope(self._get_scope_name())
      self.out_tensor = out_tensor
    if torch.cuda.is_available() and not self._built:
      self._built = True
      self.variables = self._layer.variables
    return out_tensor
class Highway(Layer):
  """ Create a highway layer. y = H(x) * T(x) + x * (1 - T(x))
  H(x) = activation_fn(matmul(W_H, x) + b_H) is the non-linear transformed output
  T(x) = sigmoid(matmul(W_T, x) + b_T) is the transform gate
  reference: https://arxiv.org/pdf/1505.00387.pdf
  This layer expects its input to be a two dimensional tensor of shape (batch size, # input features).
  Outputs will be in the same shape.
  """
  def __init__(
      self,
      activation_fn=nn.ReLU,
      biases_initializer=torch.zeros,
      weights_initializer=nn.init.kaiming_normal_,
      **kwargs):
    """
    Parameters
    ----------
    activation_fn: object
      the PyTorch activation function to apply to the output
    biases_initializer: callable object
      the initializer for bias values. This may be None, in which case the layer
      will not include biases.
    weights_initializer: callable object
      the initializer for weight values
    """
    super(Highway, self).__init__(**kwargs)
    self.activation_fn = activation_fn
    self.biases_initializer = biases_initializer
    self.weights_initializer = weights_initializer
    # Output shape matches the input shape by construction.
    try:
      self._shape = self.in_layers[0].shape
    except:
      pass
  def _build_layers(self, out_channels):
    # H(x): the non-linear transform; T(x): the sigmoid transform gate.
    if self.biases_initializer is None:
      biases_initializer = None
    else:
      biases_initializer = self.biases_initializer()
    # NOTE(review): `Denselayer` is not defined in this module -- confirm it
    # exists in the project.
    dense_H = Denselayer(
        out_channels,
        activation=self.activation_fn,
        bias_initializer=biases_initializer,
        kernel_initializer=self.weights_initializer())
    # NOTE(review): nn.init.constant_ requires a tensor argument; calling it
    # with only -1 raises -- the intent is presumably a gate bias of -1.
    dense_T = Denselayer(
        out_channels,
        activation=torch.sigmoid,
        bias_initializer=nn.init.constant_(-1),
        kernel_initializer=self.weights_initializer())
    return (dense_H, dense_T)
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    inputs = self._get_input_tensors(in_layers)
    parent = inputs[0]
    # NOTE(review): get_shape().as_list() is the TensorFlow API; torch tensors
    # use .shape -- verify this path has ever run under torch.
    out_channels = parent.get_shape().as_list()[1]
    # NOTE(review): `if torch:` is always truthy -- leftover framework check?
    if torch:
      if not self._built:
        self._layers = self._build_layers(out_channels)
        self._non_pickle_fields.append('_layers')
      layers = self._layers
    else:
      layers = self._build_layers(out_channels)
    dense_H = layers[0](parent)
    dense_T = layers[1](parent)
    # Highway mix: transformed output gated against the untouched input.
    out_tensor = torch.mul(dense_H, dense_T) + torch.mul(
        parent, 1 - dense_T)
    if set_tensors:
      self.out_tensor = out_tensor
    if torch and not self._built:
      self._built = True
      self.variables = self._layers[0].variables + self._layers[1].variables
    return out_tensor
class Flatten(Layer):
  """Flatten every dimension except the first (the batch axis)."""

  def __init__(self, in_layers=None, **kwargs):
    super(Flatten, self).__init__(in_layers, **kwargs)
    # Best-effort static shape inference: collapse dims 1..n into one.
    try:
      parent_shape = self.in_layers[0].shape
      s = list(parent_shape[:2])
      for x in parent_shape[2:]:
        s[1] *= x
      self._shape = tuple(s)
    except:
      pass

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """Reshape the single parent tensor to (batch, product-of-other-dims).

    Bug fix: ``parent.get_shape()`` / ``dim.value`` are TensorFlow APIs and
    crash on torch tensors; torch exposes the shape directly via ``.shape``.
    """
    inputs = self._get_input_tensors(in_layers)
    if len(inputs) != 1:
      raise ValueError("Only One Parent to Flatten")
    parent = inputs[0]
    vector_size = 1
    for dim in parent.shape[1:]:
      vector_size *= int(dim)
    out_tensor = torch.reshape(parent, (-1, vector_size))
    if set_tensors:
      self.out_tensor = out_tensor
    return out_tensor
class Reshape(Layer):
  """Reshape the input tensor to a fixed target shape (None entries are inferred)."""

  def __init__(self, shape, **kwargs):
    super(Reshape, self).__init__(**kwargs)
    # torch.reshape uses -1 for the inferred dimension, so translate None -> -1.
    self._new_shape = tuple(-1 if dim is None else dim for dim in shape)
    # Best-effort static shape inference for downstream layers.
    try:
      parent_shape = self.in_layers[0].shape
      symbolic = tuple(None if dim == -1 else dim for dim in shape)
      if None in parent_shape or None not in symbolic:
        self._shape = symbolic
      else:
        # All parent dims are known: infer the single None entry so the
        # recorded shape is fully concrete.
        total = 1
        for dim in parent_shape:
          total *= dim
        for dim in symbolic:
          if dim is not None:
            total //= dim
        self._shape = tuple(total if dim is None else dim for dim in symbolic)
    except:
      pass

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """Reshape the single parent tensor to the configured shape."""
    parent_tensor = self._get_input_tensors(in_layers)[0]
    out_tensor = torch.reshape(parent_tensor, self._new_shape)
    if set_tensors:
      self.out_tensor = out_tensor
    return out_tensor
class Cast(Layer):
  """
  Wrapper around cast. Changes the dtype of a single layer
  """
  def __init__(self, in_layers=None, dtype=None, **kwargs):
    """
    Parameters
    ----------
    dtype: torch.DType
      the dtype to cast the in_layer to
      e.x. torch.int32
    """
    if dtype is None:
      raise ValueError("Must cast to a dtype")
    self.dtype = dtype
    super(Cast, self).__init__(in_layers, **kwargs)
    # Casting never changes the shape.
    try:
      parent_shape = self.in_layers[0].shape
      self._shape = parent_shape
    except:
      pass
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """Return the single parent tensor converted to the configured dtype.

    Bug fix: ``torch.unbind`` splits a tensor along a dimension -- it is not
    a cast. The torch dtype conversion is ``Tensor.to(dtype)``.
    """
    inputs = self._get_input_tensors(in_layers)
    parent_tensor = inputs[0]
    out_tensor = parent_tensor.to(self.dtype)
    if set_tensors:
      self.out_tensor = out_tensor
    return out_tensor
class Squeeze(Layer):
  """Remove size-1 dimensions from the input (all of them, or the listed axes)."""

  def __init__(self, in_layers=None, squeeze_dims=None, **kwargs):
    self.squeeze_dims = squeeze_dims
    super(Squeeze, self).__init__(in_layers, **kwargs)
    # Best-effort static shape inference mirroring the squeeze below.
    try:
      parent_shape = self.in_layers[0].shape
      if squeeze_dims is None:
        self._shape = [i for i in parent_shape if i != 1]
      else:
        self._shape = [
            parent_shape[i]
            for i in range(len(parent_shape))
            if i not in squeeze_dims
        ]
    except:
      pass

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """Squeeze the single parent tensor.

    Bug fix: ``squeeze_dims=`` is the TensorFlow keyword; torch.squeeze takes
    a single ``dim``, so a list of axes must be squeezed one at a time.
    """
    parent_tensor = self._get_input_tensors(in_layers)[0]
    if self.squeeze_dims is None:
      out_tensor = torch.squeeze(parent_tensor)
    else:
      dims = self.squeeze_dims
      if isinstance(dims, int):
        dims = [dims]
      out_tensor = parent_tensor
      # Squeeze from the highest axis down so earlier removals do not
      # renumber the remaining target axes.
      for dim in sorted(dims, reverse=True):
        out_tensor = torch.squeeze(out_tensor, dim)
    if set_tensors:
      self.out_tensor = out_tensor
    return out_tensor
class Transpose(Layer):
  """Permute the axes of the input tensor according to ``perm``."""

  def __init__(self, perm, **kwargs):
    super(Transpose, self).__init__(**kwargs)
    self.perm = perm
    # Best-effort static shape inference: reorder the parent's dims by perm.
    try:
      parent_shape = self.in_layers[0].shape
      self._shape = tuple(parent_shape[i] for i in perm)
    except:
      pass

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """Return the single parent tensor with its axes permuted by ``perm``.

    Bug fix: the old ``torch.t(x).permute(perm)`` first transposed the tensor
    (2-D only) and then permuted it again -- a double transform. A single
    ``permute`` applies ``perm`` directly.
    """
    inputs = self._get_input_tensors(in_layers)
    if len(inputs) != 1:
      raise ValueError("Only One Parent to Transpose over")
    out_tensor = inputs[0].permute(*self.perm)
    if set_tensors:
      self.out_tensor = out_tensor
    return out_tensor
class CombineMeanStd(Layer):
  """Generate Gaussian noise."""
  def __init__(self,
               in_layers=None,
               training_only=False,
               noise_epsilon=0.01,
               **kwargs):
    """Create a CombineMeanStd layer.
    This layer should have two inputs with the same shape, and its output also has the
    same shape. Each element of the output is a Gaussian distributed random number
    whose mean is the corresponding element of the first input, and whose standard
    deviation is the corresponding element of the second input.
    Parameters
    ----------
    in_layers: list
      the input layers. The first one specifies the mean, and the second one specifies
      the standard deviation.
    training_only: bool
      if True, noise is only generated during training. During prediction, the output
      is simply equal to the first input (that is, the mean of the distribution used
      during training).
    noise_epsilon: float
      The standard deviation of the random noise
    """
    super(CombineMeanStd, self).__init__(in_layers, **kwargs)
    self.training_only = training_only
    self.noise_epsilon = noise_epsilon
    # Output shape matches the mean input's shape.
    try:
      self._shape = self.in_layers[0].shape
    except:
      pass
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """Sample ``mean + exp(std * 0.5) * noise`` with noise ~ N(0, noise_epsilon).

    Bug fix: the old call passed TF-style arguments
    (``torch.randn(shape, 0, eps, ...)``) which is not a valid torch.randn
    signature, and used the TF-only ``get_shape()``; draw unit normals with
    ``torch.randn`` and scale by noise_epsilon instead.
    """
    inputs = self._get_input_tensors(in_layers)
    if len(inputs) != 2:
      raise ValueError("Must have two in_layers")
    mean_parent, std_parent = inputs[0], inputs[1]
    sample_noise = torch.randn(
        mean_parent.shape, dtype=torch.float32) * self.noise_epsilon
    if self.training_only and 'training' in kwargs:
      # Outside of training, kwargs['training'] is 0 and zeroes out the noise.
      sample_noise *= kwargs['training']
    out_tensor = mean_parent + torch.exp(std_parent * 0.5) * sample_noise
    if set_tensors:
      self.out_tensor = out_tensor
    return out_tensor
class Repeat(Layer):
def __init__(self, n_times, **kwargs):
self.n_times = n_times
super(Repeat, | |
# coding: utf-8
"""
DocumentStateApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class DocumentStateApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
    def create_document_state(self, document, **kwargs):
        """
        Create some documentStates
        Create one or more documentStates.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.create_document_state(document, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param DocumentState document: Create a document by sending the paths to be added in the request body. (required)
        :param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
        :param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
        :param str sort: Set the fields by which to sort. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#sort)
        :return: DocumentState
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['document', 'select', 'populate', 'sort']
        all_params.append('callback')
        # NOTE: locals() is snapshotted here (before any other assignments) so
        # the named arguments and the validated kwargs can be handled through
        # one dict; this ordering is load-bearing.
        params = locals()
        # Reject unknown keyword arguments, then merge the recognized ones in.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_document_state" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'document' is set
        if ('document' not in params) or (params['document'] is None):
            raise ValueError("Missing the required parameter `document` when calling `create_document_state`")
        resource_path = '/documentStates'.replace('{format}', 'json')
        path_params = {}
        # Optional baucis query-string parameters are passed through verbatim.
        query_params = {}
        if 'select' in params:
            query_params['select'] = params['select']
        if 'populate' in params:
            query_params['populate'] = params['populate']
        if 'sort' in params:
            query_params['sort'] = params['sort']
        header_params = {}
        form_params = []
        local_var_files = {}
        # The DocumentState payload is sent as the JSON request body.
        body_params = None
        if 'document' in params:
            body_params = params['document']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'text/html'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['apikey', 'basic']
        response = self.api_client.call_api(resource_path, 'POST',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='DocumentState',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
    def delete_by_ids(self, document, **kwargs):
        """
        Delete all the objects matching the ids provided.
        Delete a set of object in one shot.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.delete_by_ids(document, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param list[str] document: Array of Ids to delete. (required)
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['document']
        all_params.append('callback')
        # Snapshot of the named arguments; validated kwargs are merged below.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_by_ids" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'document' is set
        if ('document' not in params) or (params['document'] is None):
            raise ValueError("Missing the required parameter `document` when calling `delete_by_ids`")
        # Bulk delete is modelled as a POST to a dedicated sub-path, with the
        # id list sent as the JSON request body.
        resource_path = '/documentStates/deleteByIds'.replace('{format}', 'json')
        path_params = {}
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'document' in params:
            body_params = params['document']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'text/html'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['apikey', 'basic']
        response = self.api_client.call_api(resource_path, 'POST',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type=None,
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
    def delete_document_state_by_id(self, id, **kwargs):
        """
        Delete a documentState by its unique ID
        Deletes an existing documentState by its ID.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.delete_document_state_by_id(id, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param str id: The identifier of the resource. (required)
        :param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
        :param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
        :return: DocumentState
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['id', 'select', 'populate']
        all_params.append('callback')
        # Snapshot of the named arguments; validated kwargs are merged below.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_document_state_by_id" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `delete_document_state_by_id`")
        resource_path = '/documentStates/{id}'.replace('{format}', 'json')
        # `id` is substituted into the URL path by the api_client.
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        query_params = {}
        if 'select' in params:
            query_params['select'] = params['select']
        if 'populate' in params:
            query_params['populate'] = params['populate']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'text/html'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['apikey', 'basic']
        response = self.api_client.call_api(resource_path, 'DELETE',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='DocumentState',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
def delete_document_state_by_query(self, **kwargs):
"""
Delete some documentStates by query
Delete all documentStates matching the specified query.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_document_state_by_query(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
:param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
:param str sort: Set the fields by which to sort. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#sort)
:param int skip: How many documents to skip. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#skip)
:param int limit: The maximum number of documents to send. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#limit)
:param str conditions: Set the conditions used to find or remove the document(s). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#conditions)
:param str distinct: Set to a path name to retrieve an array of distinct values. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#distinct)
:param str hint: Add an index hint to the query (must be enabled per controller). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#hint)
:param str comment: Add a comment to a query (must be enabled per controller). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#comment)
:return: list[DocumentState]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['select', 'populate', 'sort', 'skip', 'limit', 'conditions', 'distinct', 'hint', 'comment']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_document_state_by_query" % key
)
params[key] = val
del params['kwargs']
resource_path = '/documentStates'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'select' in params:
query_params['select'] = params['select']
if 'populate' in params:
query_params['populate'] = params['populate']
if 'sort' in params:
query_params['sort'] = params['sort']
if 'skip' in params:
query_params['skip'] = params['skip']
if 'limit' in params:
query_params['limit'] = params['limit']
if 'conditions' in params:
query_params['conditions'] = params['conditions']
if 'distinct' in params:
query_params['distinct'] = params['distinct']
if 'hint' in params:
query_params['hint'] = params['hint']
if 'comment' in params:
query_params['comment'] = params['comment']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
| |
0:
if self.hdr.rhr_xcsi >= 0:
ndims += 1
if self.hdr.rhr_ycsi >= 0:
ndims += 1
if self.hdr.rhr_zcsi >= 0:
ndims += 1
if ndims == 2:
is2D = True
return is2D
@property
def is_chop_on(self):
""" Is data chopped? """
chop = False
nex = self.hdr.rhi_nex
necho = self.hdr.rhi_numecho
if ( math.ceil(nex) * necho ) <= 1:
chop = True
return chop
@property
def get_frequency_offset(self):
""" Returns the spectral frquency offset """
if self.version > 9:
return 0.0
else:
return self.hdr.rhr_rh_user13
@property
def get_center_from_raw_file(self):
"""
Gets the center of the acquisition grid. May vary between sequences.
"""
center = np.array([0.0, 0.0, 0.0])
if self.version < 11:
center[0] = 0
center[1] = 0
center[2] = self.hdr.rhi_user13
else:
center[0] = -1 * self.hdr.rhi_user11
center[1] = -1 * self.hdr.rhi_user12
center[2] = self.hdr.rhi_user13
return center
@property
def get_num_coils(self):
""" Determine number of coils of data in the PFile. """
ncoils = 0
for i in range(4):
start_rcv = getattr(self.hdr, "rhr_rh_dab["+str(i)+"]_start_rcv")
stop_rcv = getattr(self.hdr, "rhr_rh_dab["+str(i)+"]_stop_rcv")
if ( start_rcv != 0) or (stop_rcv != 0):
ncoils += ( stop_rcv - start_rcv ) + 1
# Otherwise 1
if ncoils == 0:
ncoils = 1
return int(ncoils)
@property
def get_num_time_points(self):
"""
Determine number of time points in the PFile.
Number of time points is determined from the file size,
number of voxels and number of coils.
"""
passSize = float(self.hdr.rhr_rh_raw_pass_size)
numCoils = float(self.get_num_coils)
numVoxels = float(self.get_num_voxels_in_vol)
dataWordSize = float(self.hdr.rhr_rh_point_size)
numFreqPoints = float(self.hdr.rhr_rh_frame_size)
kSpacePoints = float(self.get_num_kspace_points)
numTimePoints = int( ( passSize ) / ( numCoils * 2 * dataWordSize * numFreqPoints ) - 1 ) / kSpacePoints
# bjs - added this after Pom's fidcsi 13C data came up with 0 here
if numTimePoints <= 0:
numTimePoints = 1
return int(numTimePoints)
@property
def get_num_dummy_scans(self):
"""
Determine number of dummy scans (FIDs) in the data block.
This is the difference between the raw pass size and the
expected size of the data based on numCoils, numTimePts, numKSpacePts
and numFreqPts.
"""
passSize = self.hdr.rhr_rh_raw_pass_size
numCoils = self.get_num_coils
numTimePoints = self.get_num_time_points
numSampledVoxels = self.get_num_kspace_points
numFreqPoints = self.hdr.rhr_rh_frame_size
dataWordSize = self.hdr.rhr_rh_point_size
dataRepresentation = "COMPLEX" # this was hard set in DcmHeader code
if ( dataRepresentation == "COMPLEX" ):
numComponents = 2
else:
numComponents = 1
# Calc the diff between the size of the data buffer and the number of real data points
# then divide by the number of bytes in a single fid to get the number of dummy FIDs
numDummyScans = passSize - ( numCoils * numTimePoints * numSampledVoxels * numFreqPoints * numComponents * dataWordSize )
numDummyScans = numDummyScans / ( numFreqPoints * numComponents * dataWordSize)
return int(numDummyScans)
@property
def get_num_frames(self):
""" Number of frames is number of slices * numCoils * numTimePoints """
nvox = self.get_num_voxels
nframes = nvox[2] * self.get_num_coils * self.get_num_time_points
return int(nframes)
@property
def get_num_voxels_in_vol(self):
nvox = self.get_num_voxels
return int(nvox[0] * nvox[1] * nvox[2])
@property
def get_num_kspace_points(self):
"""
Determine the number of sampled k-space points in the data set.
This may differ from the number of voxels in the rectalinear grid,
for example if elliptical or another non rectangular acquisition
sampling strategy was employed. GE product sequences pad the
reduced k-space data with zeros so the number of k-space points
is the same as the number of voxels, but that may not be true for
custom sequences.
"""
return int(self.get_num_voxels_in_vol)
@property
def was_index_sampled(self):
"""
Determines whether a voxel (index) was sampled (or a zero padded
point is present in the data set), or not, i.e. was it within
the elliptical sampling volume if reduced k-space elliptical sampling
was used. Could be extended to support other sparse sampling
trajectories. Note that for product sequences this always returns true
since GE zero-pads reduced k-space data to a full rectilinear grid.
"""
return True
@property
def get_number_unsuppressed_acquisitions(self):
"""
For single voxel acquisitions, return the number of
unsuppressed acquisitions.
"""
nex = self.hdr.rhi_nex
return int(16 / nex)
@property
def get_number_suppressed_acquisitions(self):
"""
For single voxel acquisitions, return the number of
suppressed acquisitions.
"""
nex = self.hdr.rhi_nex
user4 = self.hdr.rhi_user4
return int( user4 / nex )
def add_dummy(self, offset, coilNum, timePt):
"""
Determine whether to add a dummy scan. The assumption is that
the number of dummy scans should be equal to the number of coils
or numCoils * numTimePts (e.g. for a spectral editing sequence).
If true, then the an FID worth of data should be skipped over when
reading data (e.g. frame_size * numComponents, or numFreqPts * numComponents)
"""
numDummyScans = self.get_num_dummy_scans
numCoils = self.get_num_coils
numTimePoints = self.get_num_time_points
numSampledVoxels = self.get_num_kspace_points
numFreqPoints = self.hdr.rhr_rh_frame_size
numComponents = 2
numPointsPerFID = numFreqPoints * numComponents
# subtract the number of dummy words from the current offset to see if another
# dummy scan should be skipped or not
if numDummyScans == numCoils:
numWordsBetweenDummies = numSampledVoxels * numPointsPerFID * numTimePoints
offset = offset - (coilNum * numPointsPerFID)
# additional time points have an offset that includes the per-coil dummy
if timePt > 1:
offset = offset - numPointsPerFID
elif ( numDummyScans == (numCoils * numTimePoints) ):
numWordsBetweenDummies = numSampledVoxels * numPointsPerFID
offset = offset - (coilNum * numPointsPerFID) - ( ( coilNum + timePt ) * numPointsPerFID )
elif numDummyScans == 0: # bjs - added for fidcsi 13C data from Pom
return False
else:
pass
# "ERROR: Can not determine placement of dummy scans in raw file reader. \n"
addDummy = False
if ( ( offset % numWordsBetweenDummies ) == 0 ):
addDummy = True
return addDummy
def get_xyz_indices(self, dataIndex):
"""
If swapping is turned on, the data will need to get mapped correctly
from the input data buffer read from disk (specData) to the correct
svkImageData arrays. If swap is true, then the data indices are swapped
and ky is flipped.
"""
numVoxels = self.get_num_voxels
z = int( dataIndex/(numVoxels[0] * numVoxels[1]) )
if self.is_swap_on:
# If swap is on use numVoxels[1] for x dimension and numVoxels[0] for y dimension
x = int((dataIndex - (z * numVoxels[0] * numVoxels[1]))/numVoxels[1])
# In addition to swapping reverse the y direction
y = numVoxels[1] - int( dataIndex % numVoxels[1] ) - 1
else:
x = int( dataIndex % numVoxels[0] )
y = int((dataIndex - (z * numVoxels[0] * numVoxels[1]))/numVoxels[0])
return x, y, z
def get_center_from_origin(self, origin, numVoxels, voxelSpacing, dcos):
"""
Calculates the LPS center from the origin(toplc).
"""
center = np.array([0.0, 0.0, 0.0])
for i in range(3):
center[i] = origin[i]
for j in range(3):
center[i] += dcos[j][i] * voxelSpacing[j] * ( numVoxels[j] / 2.0 - 0.5 )
def get_origin_from_center(self, center, numVoxels, voxelSpacing, dcos):
"""
Calculates the LPS origin (toplc) from the center.
"""
origin = np.array([0.0, 0.0, 0.0])
for i in range(3):
origin[i] = center[i]
for j in range(3):
origin[i] -= dcos[j][i] * voxelSpacing[j] * ( numVoxels[j] / 2.0 - 0.5 )
def read_data(self):
"""
This method reads data from the pfile and puts the data into
the CellData arrays. If elliptical k-space sampling was used,
the data is zero-padded. Other reduced k-space sampling
strategies aren't supported yet.
"""
numCoils = self.get_num_coils
numTimePts = self.get_num_time_points
numSpecPts = self.hdr.rhr_rh_frame_size
numFreqPts = numSpecPts
numComponents = 2
dataWordSize = self.hdr.rhr_rh_point_size
numBytesInVol = self.get_num_kspace_points * numSpecPts * numComponents * dataWordSize
numBytesPerCoil = numBytesInVol * numTimePts
numPtsPerSpectrum = numSpecPts * numComponents
# one dummy spectrum per volume/coil:
numDummyBytes = self.get_num_dummy_scans * numPtsPerSpectrum * dataWordSize
numDummyBytesPerCoil = int(numDummyBytes/numCoils)
numBytesPerCoil += numDummyBytesPerCoil
# Only read in one coil at a time | |
the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._add_disks_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
add_disks_input=add_disks_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_add_disks.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/addDisks'} # type: ignore
    def _apply_recovery_point_initial(
        self,
        fabric_name,  # type: str
        protection_container_name,  # type: str
        replicated_protected_item_name,  # type: str
        apply_recovery_point_input,  # type: "_models.ApplyRecoveryPointInput"
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.ReplicationProtectedItem"]
        """Issue the initial applyRecoveryPoint POST of the long-running operation.

        Returns the deserialized ReplicationProtectedItem on a 200 response,
        or None on a 202 (accepted, still running); raises HttpResponseError
        for any other status code.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._apply_recovery_point_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
            'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
            'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
            'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the input model as the JSON request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(apply_recovery_point_input, 'ApplyRecoveryPointInput')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 202 carries no body; only a 200 response is deserialized.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _apply_recovery_point_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/applyRecoveryPoint'}  # type: ignore
    def begin_apply_recovery_point(
        self,
        fabric_name,  # type: str
        protection_container_name,  # type: str
        replicated_protected_item_name,  # type: str
        apply_recovery_point_input,  # type: "_models.ApplyRecoveryPointInput"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
        """Change or apply recovery point.
        The operation to change the recovery point of a failed over replication protected item.
        :param fabric_name: The ARM fabric name.
        :type fabric_name: str
        :param protection_container_name: The protection container name.
        :type protection_container_name: str
        :param replicated_protected_item_name: The replicated protected item name.
        :type replicated_protected_item_name: str
        :param apply_recovery_point_input: The ApplyRecoveryPointInput.
        :type apply_recovery_point_input: ~azure.mgmt.recoveryservicessiterecovery.models.ApplyRecoveryPointInput
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ReplicationProtectedItem"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # The initial request is made only when not resuming from a saved state.
        if cont_token is None:
            raw_result = self._apply_recovery_point_initial(
                fabric_name=fabric_name,
                protection_container_name=protection_container_name,
                replicated_protected_item_name=replicated_protected_item_name,
                apply_recovery_point_input=apply_recovery_point_input,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These were consumed by the initial call; drop them before polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
            'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
            'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
            'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_apply_recovery_point.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/applyRecoveryPoint'}  # type: ignore
    def _failover_cancel_initial(
        self,
        fabric_name,  # type: str
        protection_container_name,  # type: str
        replicated_protected_item_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.ReplicationProtectedItem"]
        """Send the initial POST that starts the cancel-failover long-running operation.

        Returns the deserialized ``ReplicationProtectedItem`` when the service
        answers 200, or ``None`` on 202 (operation accepted, result pending).
        Raises ``HttpResponseError`` for any other status code.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
        # Default mapping of HTTP statuses to typed exceptions; callers may
        # extend/override it by passing an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"
        # Construct URL from the operation metadata template below.
        url = self._failover_cancel_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
            'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
            'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
            'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 carries the protected item in the body; 202 means accepted with no body yet.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
        if cls:
            # Custom response hook supplied by the caller.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _failover_cancel_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/failoverCancel'}  # type: ignore
    def begin_failover_cancel(
        self,
        fabric_name,  # type: str
        protection_container_name,  # type: str
        replicated_protected_item_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
        """Execute cancel failover.

        Operation to cancel the failover of the replication protected item.
        Starts the long-running operation (or resumes it from a continuation
        token) and returns a poller for its final result.

        :param fabric_name: Unique fabric name.
        :type fabric_name: str
        :param protection_container_name: Protection container name.
        :type protection_container_name: str
        :param replicated_protected_item_name: Replication protected item name.
        :type replicated_protected_item_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ReplicationProtectedItem"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Kick off the operation. The lambda overrides 'cls' so the raw
            # pipeline response is returned for the poller to drive the LRO.
            raw_result = self._failover_cancel_initial(
                fabric_name=fabric_name,
                protection_container_name=protection_container_name,
                replicated_protected_item_name=replicated_protected_item_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call and must not leak into
        # the polling method constructed below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Final deserialization once the LRO reaches a terminal state.
            deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
            'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
            'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
            'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
        }
        # polling=True -> default ARM polling; False -> no polling; otherwise a
        # caller-supplied PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_failover_cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/failoverCancel'}  # type: ignore
def _failover_commit_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self._failover_commit_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == | |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import json
import random
import argparse
# our
import copy
import gc
from src.slurm import init_signal_handler, init_distributed_mode
from src.data.loader import check_data_params, load_data
from src.utils import bool_flag, initialize_exp, set_sampling_probs, shuf_order
from src.model import check_model_params, build_model
from src.model.memory import HashingMemory
from src.trainer import SingleTrainer, EncDecTrainer
from src.evaluation.evaluator import SingleEvaluator, EncDecEvaluator
def get_parser():
    """
    Build and return the argparse parser holding every experiment parameter.

    Note: some argument groups (HashingMemory options, adaptive-softmax
    cutoffs) are registered conditionally — a first pass over the already
    known arguments with ``parse_known_args`` decides whether to add them.
    """
    # parse parameters
    parser = argparse.ArgumentParser(description="Language transfer")

    # main parameters
    parser.add_argument("--dump_path", type=str, default="./dumped/",
                        help="Experiment dump path")
    parser.add_argument("--exp_name", type=str, default="",
                        help="Experiment name")
    parser.add_argument("--save_periodic", type=int, default=0,
                        help="Save the model periodically (0 to disable)")
    parser.add_argument("--exp_id", type=str, default="",
                        help="Experiment ID")

    # float16 / AMP API
    parser.add_argument("--fp16", type=bool_flag, default=False,
                        help="Run model with float16")
    parser.add_argument("--amp", type=int, default=-1,
                        help="Use AMP wrapper for float16 / distributed / gradient accumulation. Level of optimization. -1 to disable.")

    # only use an encoder (use a specific decoder for machine translation)
    parser.add_argument("--encoder_only", type=bool_flag, default=True,
                        help="Only use an encoder")

    # model parameters
    parser.add_argument("--emb_dim", type=int, default=512,
                        help="Embedding layer size")
    parser.add_argument("--n_layers", type=int, default=4,
                        help="Number of Transformer layers")
    parser.add_argument("--n_heads", type=int, default=8,
                        help="Number of Transformer heads")
    parser.add_argument("--dropout", type=float, default=0,
                        help="Dropout")
    parser.add_argument("--attention_dropout", type=float, default=0,
                        help="Dropout in the attention layer")
    parser.add_argument("--gelu_activation", type=bool_flag, default=False,
                        help="Use a GELU activation instead of ReLU")
    parser.add_argument("--share_inout_emb", type=bool_flag, default=True,
                        help="Share input and output embeddings")
    parser.add_argument("--sinusoidal_embeddings", type=bool_flag, default=False,
                        help="Use sinusoidal embeddings")
    parser.add_argument("--use_lang_emb", type=bool_flag, default=True,
                        help="Use language embedding")

    # memory parameters: only registered when --use_memory is set, detected
    # via a first pass over the currently known arguments
    parser.add_argument("--use_memory", type=bool_flag, default=False,
                        help="Use an external memory")
    if parser.parse_known_args()[0].use_memory:
        HashingMemory.register_args(parser)
        parser.add_argument("--mem_enc_positions", type=str, default="",
                            help="Memory positions in the encoder ('4' for inside layer 4, '7,10+' for inside layer 7 and after layer 10)")
        parser.add_argument("--mem_dec_positions", type=str, default="",
                            help="Memory positions in the decoder. Same syntax as `mem_enc_positions`.")

    # adaptive softmax (cutoff options registered only when --asm is set)
    parser.add_argument("--asm", type=bool_flag, default=False,
                        help="Use adaptive softmax")
    if parser.parse_known_args()[0].asm:
        parser.add_argument("--asm_cutoffs", type=str, default="8000,20000",
                            help="Adaptive softmax cutoffs")
        parser.add_argument("--asm_div_value", type=float, default=4,
                            help="Adaptive softmax cluster sizes ratio")

    # causal language modeling task parameters
    parser.add_argument("--context_size", type=int, default=0,
                        help="Context size (0 means that the first elements in sequences won't have any context)")

    # masked language modeling task parameters
    parser.add_argument("--word_pred", type=float, default=0.15,
                        help="Fraction of words for which we need to make a prediction")
    parser.add_argument("--sample_alpha", type=float, default=0,
                        help="Exponent for transforming word counts to probabilities (~word2vec sampling)")
    parser.add_argument("--word_mask_keep_rand", type=str, default="0.8,0.1,0.1",
                        help="Fraction of words to mask out / keep / randomize, among the words to predict")

    # input sentence noise
    parser.add_argument("--word_shuffle", type=float, default=0,
                        help="Randomly shuffle input words (0 to disable)")
    parser.add_argument("--word_dropout", type=float, default=0,
                        help="Randomly dropout input words (0 to disable)")
    parser.add_argument("--word_blank", type=float, default=0,
                        help="Randomly blank input words (0 to disable)")

    # data
    parser.add_argument("--data_path", type=str, default="",
                        help="Data path")
    parser.add_argument("--lgs", type=str, default="",
                        help="Languages (lg1-lg2-lg3 .. ex: en-fr-es-de)")
    parser.add_argument("--max_vocab", type=int, default=-1,
                        help="Maximum vocabulary size (-1 to disable)")
    parser.add_argument("--min_count", type=int, default=0,
                        help="Minimum vocabulary count")
    parser.add_argument("--lg_sampling_factor", type=float, default=-1,
                        help="Language sampling factor")

    # batch parameters
    parser.add_argument("--bptt", type=int, default=256,
                        help="Sequence length")
    parser.add_argument("--max_len", type=int, default=100,
                        help="Maximum length of sentences (after BPE)")
    parser.add_argument("--group_by_size", type=bool_flag, default=True,
                        help="Sort sentences by size during the training")
    parser.add_argument("--batch_size", type=int, default=32,
                        help="Number of sentences per batch")
    parser.add_argument("--max_batch_size", type=int, default=0,
                        help="Maximum number of sentences per batch (used in combination with tokens_per_batch, 0 to disable)")
    parser.add_argument("--tokens_per_batch", type=int, default=-1,
                        help="Number of tokens per batch")

    # training parameters
    parser.add_argument("--split_data", type=bool_flag, default=False,
                        help="Split data across workers of a same node")
    parser.add_argument("--optimizer", type=str, default="adam,lr=0.0001",
                        help="Optimizer (SGD / RMSprop / Adam, etc.)")
    parser.add_argument("--clip_grad_norm", type=float, default=5,
                        help="Clip gradients norm (0 to disable)")
    parser.add_argument("--epoch_size", type=int, default=100000,
                        help="Epoch size / evaluation frequency (-1 for parallel data size)")
    parser.add_argument("--max_epoch", type=int, default=100000,
                        help="Maximum epoch size")
    parser.add_argument("--stopping_criterion", type=str, default="",
                        help="Stopping criterion, and number of non-increase before stopping the experiment")
    parser.add_argument("--validation_metrics", type=str, default="",
                        help="Validation metrics")
    parser.add_argument("--accumulate_gradients", type=int, default=1,
                        help="Accumulate model gradients over N iterations (N times larger batch sizes)")

    # training coefficients (per-objective loss weights)
    parser.add_argument("--lambda_mlm", type=str, default="1",
                        help="Prediction coefficient (MLM)")
    parser.add_argument("--lambda_clm", type=str, default="1",
                        help="Causal coefficient (LM)")
    parser.add_argument("--lambda_pc", type=str, default="1",
                        help="PC coefficient")
    parser.add_argument("--lambda_ae", type=str, default="1",
                        help="AE coefficient")
    parser.add_argument("--lambda_mt", type=str, default="1",
                        help="MT coefficient")
    parser.add_argument("--lambda_bt", type=str, default="1",
                        help="BT coefficient")

    # training steps
    parser.add_argument("--clm_steps", type=str, default="",
                        help="Causal prediction steps (CLM)")
    parser.add_argument("--mlm_steps", type=str, default="",
                        help="Masked prediction steps (MLM / TLM)")
    parser.add_argument("--mt_steps", type=str, default="",
                        help="Machine translation steps")
    parser.add_argument("--ae_steps", type=str, default="",
                        help="Denoising auto-encoder steps")
    parser.add_argument("--bt_steps", type=str, default="",
                        help="Back-translation steps")
    parser.add_argument("--pc_steps", type=str, default="",
                        help="Parallel classification steps")

    # reload pretrained embeddings / pretrained model / checkpoint
    parser.add_argument("--reload_emb", type=str, default="",
                        help="Reload pretrained word embeddings")
    parser.add_argument("--reload_model", type=str, default="",
                        help="Reload a pretrained model")
    parser.add_argument("--reload_checkpoint", type=str, default="",
                        help="Reload a checkpoint")

    # beam search (for MT only)
    parser.add_argument("--beam_size", type=int, default=1,
                        help="Beam size, default = 1 (greedy decoding)")
    parser.add_argument("--length_penalty", type=float, default=1,
                        help="Length penalty, values < 1.0 favor shorter sentences, while values > 1.0 favor longer ones.")
    parser.add_argument("--early_stopping", type=bool_flag, default=False,
                        help="Early stopping, stop as soon as we have `beam_size` hypotheses, although longer ones may have better scores.")

    # evaluation
    parser.add_argument("--eval_bleu", type=bool_flag, default=False,
                        help="Evaluate BLEU score during MT training")
    parser.add_argument("--eval_only", type=bool_flag, default=False,
                        help="Only run evaluations")

    # debug
    parser.add_argument("--debug_train", type=bool_flag, default=False,
                        help="Use valid sets for train sets (faster loading)")
    parser.add_argument("--debug_slurm", type=bool_flag, default=False,
                        help="Debug multi-GPU / multi-node within a SLURM job")
    parser.add_argument("--debug", help="Enable all debug flags",
                        action="store_true")

    # multi-gpu / multi-node
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="Multi-GPU - Local rank")
    parser.add_argument("--master_port", type=int, default=-1,
                        help="Master port (for multi-node SLURM jobs)")

    # project-specific parameters.
    # The next three sample counts are always rounded to an integer number of
    # batches, so the effective values may differ from the ones provided.
    parser.add_argument("--train_n_samples", type=int, default=0,
                        help="Just consider train_n_sample train data")
    parser.add_argument("--valid_n_samples", type=int, default=0,
                        help="Just consider valid_n_sample validation data")
    parser.add_argument("--test_n_samples", type=int, default=0,
                        help="Just consider test_n_sample test data for")
    parser.add_argument("--remove_long_sentences_train", type=bool_flag, default=True,
                        help="remove long sentences in train dataset")
    parser.add_argument("--remove_long_sentences_valid", type=bool_flag, default=False,
                        help="remove long sentences in valid dataset")
    parser.add_argument("--remove_long_sentences_test", type=bool_flag, default=False,
                        help="remove long sentences in test dataset")
    parser.add_argument("--same_data_path", type=bool_flag, default=True,
                        help="In the case of metalearning, this parameter, when passed to False, the data are searched for each task in a folder with the name of the task and located in data_path, otherwise all the data are searched in data_path.")
    parser.add_argument("--meta_learning", type=bool_flag, default=False,
                        help="meta_learning")

    return parser
def main(params):
# initialize the multi-GPU / multi-node training
init_distributed_mode(params)
# initialize the experiment
meta_params = copy.deepcopy(params).meta_params
params.meta_params = "..." # to long to be log
logger = initialize_exp(params)
params.meta_params = meta_params
# initialize SLURM signal handler for time limit / pre-emption
init_signal_handler()
# load data
data = load_data(params)
print(params.meta_params.keys())
print(data.keys())
# todo : good params.n_words (We take the one from the first task have this parameter for the moment.)
"""
But we think that if all the task data are based on the same vocabulary, all these parameters will be the same,
and therefore no problem if we choose one at random.
"""
p = params.meta_params[data['key']]
# build model
if params.encoder_only:
model = build_model(params = p, dico = data['dico'])
else:
encoder, decoder = build_model(params = p, dico = data['dico'])
# todo : good pad_index and eos_index and ... (I'll take the one from the first task for the moment.)
"""
But we think that if all the task data are based on the same vocabulary, all these parameters will be the same,
and therefore no problem if we choose one at random.
"""
params.n_words = p.n_words
params.bos_index = p.bos_index
params.eos_index = p.eos_index
params.pad_index = p.pad_index
params.unk_index = p.unk_index
params.mask_index = p.mask_index
# build trainer, reload potential checkpoints / build evaluator
if params.encoder_only:
trainer = SingleTrainer(model, data, params)
evaluator = SingleEvaluator(trainer, data, params)
else:
trainer = EncDecTrainer(encoder, decoder, data, params)
evaluator = EncDecEvaluator(trainer, data, params)
# evaluation
if params.eval_only:
scores = evaluator.run_all_evals(trainer)
if not params.meta_learning :
for k, v in scores.items():
logger.info("%s -> %.6f" % (k, v))
else :
for lgs in params.meta_params.keys() :
logger.info("============ task : %s " % lgs)
for k, v in scores[lgs].items():
if k != "epoch":
logger.info("%s -> %.6f" % (k, v))
logger.info("============ all")
for k, v in scores.items():
if not (k in (list(params.meta_params.keys())+['epoch'])) :
logger.info("%s -> %.6f" % (k, v))
logger.info("__log__:%s" % json.dumps(scores))
exit()
# set sampling probabilities for training
set_sampling_probs(data, params)
# language model training
for _ in range(params.max_epoch):
logger.info("============ | |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class CloudError(Model):
    """CloudError.

    Empty ARM cloud-error model: it declares no serialized attributes of its
    own.
    """

    _attribute_map = {
    }
class Column(Model):
    """Descriptor for a single column of a query result table.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Column name.
    :type name: str
    :param type: Required. Column data type. Possible values include:
     'string', 'integer', 'number', 'boolean', 'object'
    :type type: str or ~azure.mgmt.resourcegraph.models.ColumnDataType
    """

    _validation = {'name': {'required': True}, 'type': {'required': True}}

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'ColumnDataType'},
    }

    def __init__(self, *, name: str, type, **kwargs) -> None:
        super().__init__(**kwargs)
        self.type = type
        self.name = name
class Error(Model):
    """Error info.

    Top-level error details returned by the service.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. Error code identifying the specific error.
    :type code: str
    :param message: Required. A human readable error message.
    :type message: str
    :param details: Error details
    :type details: list[~azure.mgmt.resourcegraph.models.ErrorDetails]
    """

    _validation = {'code': {'required': True}, 'message': {'required': True}}

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorDetails]'},
    }

    def __init__(self, *, code: str, message: str, details=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.details = details
        self.message = message
        self.code = code
class ErrorDetails(Model):
    """Detailed information about a single error.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are
     deserialized this collection
    :type additional_properties: dict[str, object]
    :param code: Required. Error code identifying the specific error.
    :type code: str
    :param message: Required. A human readable error message.
    :type message: str
    """

    _validation = {'code': {'required': True}, 'message': {'required': True}}

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, *, code: str, message: str, additional_properties=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.message = message
        self.code = code
        self.additional_properties = additional_properties
class ErrorFieldContract(Model):
    """Property-level error description.

    :param code: Property level error code.
    :type code: str
    :param message: Human-readable representation of property-level error.
    :type message: str
    :param target: Property name.
    :type target: str
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
    }

    def __init__(self, *, code: str=None, message: str=None, target: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.target = target
        self.message = message
        self.code = code
class ErrorResponse(Model):
    """An error response returned from the API.

    All required parameters must be populated in order to send to Azure.

    :param error: Required. Error information.
    :type error: ~azure.mgmt.resourcegraph.models.Error
    """

    _validation = {'error': {'required': True}}

    _attribute_map = {'error': {'key': 'error', 'type': 'Error'}}

    def __init__(self, *, error, **kwargs) -> None:
        super().__init__(**kwargs)
        self.error = error
class ErrorResponseException(HttpOperationError):
    """Raised when the server responds with an exception of type 'ErrorResponse'.

    :param deserialize: A deserializer
    :param response: Server response to be deserialized.
    """

    def __init__(self, deserialize, response, *args):
        super().__init__(deserialize, response, 'ErrorResponse', *args)
class Facet(Model):
    """Additional statistics computed over the response of a query.

    Polymorphic base type: concrete payloads are FacetResult (success) and
    FacetError (failure). You probably want to use those sub-classes rather
    than this class directly.

    All required parameters must be populated in order to send to Azure.

    :param expression: Required. Facet expression, same as in the
     corresponding facet request.
    :type expression: str
    :param result_type: Required. Constant filled by server.
    :type result_type: str
    """

    _validation = {'expression': {'required': True}, 'result_type': {'required': True}}

    _attribute_map = {
        'expression': {'key': 'expression', 'type': 'str'},
        'result_type': {'key': 'resultType', 'type': 'str'},
    }

    # msrest uses this map to pick the concrete subclass from 'result_type'.
    _subtype_map = {
        'result_type': {'FacetResult': 'FacetResult', 'FacetError': 'FacetError'}
    }

    def __init__(self, *, expression: str, **kwargs) -> None:
        super().__init__(**kwargs)
        self.expression = expression
        # Discriminator; filled in by the server or by concrete subclasses.
        self.result_type = None
class FacetError(Facet):
    """A facet whose evaluation failed on the server.

    All required parameters must be populated in order to send to Azure.

    :param expression: Required. Facet expression, same as in the
     corresponding facet request.
    :type expression: str
    :param result_type: Required. Constant filled by server.
    :type result_type: str
    :param errors: Required. An array containing detected facet errors with
     details.
    :type errors: list[~azure.mgmt.resourcegraph.models.ErrorDetails]
    """

    _validation = {
        'expression': {'required': True},
        'result_type': {'required': True},
        'errors': {'required': True},
    }

    _attribute_map = {
        'expression': {'key': 'expression', 'type': 'str'},
        'result_type': {'key': 'resultType', 'type': 'str'},
        'errors': {'key': 'errors', 'type': '[ErrorDetails]'},
    }

    def __init__(self, *, expression: str, errors, **kwargs) -> None:
        super().__init__(expression=expression, **kwargs)
        self.errors = errors
        # Fixed discriminator value for this subclass.
        self.result_type = 'FacetError'
class FacetRequest(Model):
    """A request to compute facets (additional statistics) over query results.

    All required parameters must be populated in order to send to Azure.

    :param expression: Required. The column or list of columns to summarize by
    :type expression: str
    :param options: The options for facet evaluation
    :type options: ~azure.mgmt.resourcegraph.models.FacetRequestOptions
    """

    _validation = {'expression': {'required': True}}

    _attribute_map = {
        'expression': {'key': 'expression', 'type': 'str'},
        'options': {'key': 'options', 'type': 'FacetRequestOptions'},
    }

    def __init__(self, *, expression: str, options=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.options = options
        self.expression = expression
class FacetRequestOptions(Model):
    """Options controlling how a facet is evaluated.

    :param sort_by: The column name or query expression to sort on. Defaults
     to count if not present.
    :type sort_by: str
    :param sort_order: The sorting order by the selected column (count by
     default). Possible values include: 'asc', 'desc'. Default value: "desc" .
    :type sort_order: str or ~azure.mgmt.resourcegraph.models.FacetSortOrder
    :param filter: Specifies the filter condition for the 'where' clause which
     will be run on main query's result, just before the actual faceting.
    :type filter: str
    :param top: The maximum number of facet rows that should be returned.
    :type top: int
    """

    # Server-side constraint on the number of returned facet rows.
    _validation = {'top': {'maximum': 1000, 'minimum': 1}}

    _attribute_map = {
        'sort_by': {'key': 'sortBy', 'type': 'str'},
        'sort_order': {'key': 'sortOrder', 'type': 'FacetSortOrder'},
        'filter': {'key': 'filter', 'type': 'str'},
        'top': {'key': '$top', 'type': 'int'},
    }

    def __init__(self, *, sort_by: str=None, sort_order="desc", filter: str=None, top: int=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.top = top
        self.filter = filter
        self.sort_order = sort_order
        self.sort_by = sort_by
class FacetResult(Facet):
    """A successfully evaluated facet with its result table.

    All required parameters must be populated in order to send to Azure.

    :param expression: Required. Facet expression, same as in the
     corresponding facet request.
    :type expression: str
    :param result_type: Required. Constant filled by server.
    :type result_type: str
    :param total_records: Required. Number of total records in the facet
     results.
    :type total_records: long
    :param count: Required. Number of records returned in the facet response.
    :type count: int
    :param data: Required. A table containing the desired facets. Only present
     if the facet is valid.
    :type data: object
    """

    _validation = {
        'expression': {'required': True},
        'result_type': {'required': True},
        'total_records': {'required': True},
        'count': {'required': True},
        'data': {'required': True},
    }

    _attribute_map = {
        'expression': {'key': 'expression', 'type': 'str'},
        'result_type': {'key': 'resultType', 'type': 'str'},
        'total_records': {'key': 'totalRecords', 'type': 'long'},
        'count': {'key': 'count', 'type': 'int'},
        'data': {'key': 'data', 'type': 'object'},
    }

    def __init__(self, *, expression: str, total_records: int, count: int, data, **kwargs) -> None:
        super().__init__(expression=expression, **kwargs)
        self.data = data
        self.count = count
        self.total_records = total_records
        # Fixed discriminator value for this subclass.
        self.result_type = 'FacetResult'
class GraphQueryError(Model):
    """Error message body indicating why the operation failed.

    :param code: Service-defined error code. This code serves as a sub-status
     for the HTTP error code specified in the response.
    :type code: str
    :param message: Human-readable representation of the error.
    :type message: str
    :param details: The list of invalid fields send in request, in case of
     validation error.
    :type details: list[~azure.mgmt.resourcegraph.models.ErrorFieldContract]
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorFieldContract]'},
    }

    def __init__(self, *, code: str=None, message: str=None, details=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.details = details
        self.message = message
        self.code = code
class GraphQueryErrorException(HttpOperationError):
"""Server responsed with exception of type: 'GraphQueryError'.
:param deserialize: A deserializer
:param response: Server response | |
{\n'
' nextToken\n'
' }\n'
' }\n'
' }\n',
'getconnectors': '\n'
' query GetConnectors($id: ID!) {\n'
' getConnectors(id: $id) {\n'
' id\n'
' createdAt\n'
' updatedAt\n'
' category\n'
' name\n'
' owner\n'
' description\n'
' detaileddescription\n'
' iconPath\n'
' version\n'
' md5\n'
' source\n'
' configuration\n'
' commands\n'
' commandsType\n'
' script\n'
' scriptType\n'
' scriptPath\n'
' scope\n'
' PeopleCanView\n'
' PeopleCanAccess\n'
' GroupsCanView\n'
' GroupsCanAccess\n'
' }\n'
' }\n',
'getdashboardconfig': '\n'
' query GetDashboardConfig($id: ID!) {\n'
' getDashboardConfig(id: $id) {\n'
' id\n'
' url\n'
' labels {\n'
' nextToken\n'
' }\n'
' author\n'
' createdAt\n'
' updatedAt\n'
' }\n'
' }\n',
'getdashboardconfiglabellink': '\n'
' query GetDashboardConfigLabelLink($id: ID!) '
'{\n'
' getDashboardConfigLabelLink(id: $id) {\n'
' id\n'
' dashboardConfigID\n'
' labelID\n'
' dashboardConfig {\n'
' id\n'
' url\n'
' author\n'
' createdAt\n'
' updatedAt\n'
' }\n'
' label {\n'
' id\n'
' name\n'
' color\n'
' type\n'
' }\n'
' }\n'
' }\n',
'getecubesandboxexecution': '\n'
' query GetEcubeSandboxExecution($id: ID!) {\n'
' getEcubeSandboxExecution(id: $id) {\n'
' id\n'
' owner\n'
' output\n'
' outputType\n'
' returnCode\n'
' status\n'
' createdAt\n'
' updatedAt\n'
' E3One\n'
' E3Two\n'
' E3Three\n'
' }\n'
' }\n',
'getepicautocomplete': '\n'
' query GetEpicAutoComplete($id: ID!) {\n'
' getEpicAutoComplete(id: $id) {\n'
' id\n'
' author\n'
' cliCommand\n'
' command\n'
' createdAt\n'
' updatedAt\n'
' investigationId\n'
' times\n'
' }\n'
' }\n',
'getepiclicommand': '\n'
' query GetEpicliCommand($id: ID!) {\n'
' getEPICLICommand(id: $id) {\n'
' id\n'
' createdAt\n'
' updatedAt\n'
' author\n'
' content\n'
' type\n'
' }\n'
' }\n',
'getexecutedrunbooks': '\n'
' query GetExecutedRunbooks($id: ID!) {\n'
' getExecutedRunbooks(id: $id) {\n'
' id\n'
' createdAt\n'
' updatedAt\n'
' runbookID\n'
' investigationId\n'
' output\n'
' outputType\n'
' title\n'
' state\n'
' author\n'
' rbVars\n'
' callbackUrl\n'
' callbackData\n'
' source\n'
' investigation {\n'
' id\n'
' title\n'
' from\n'
' createdAt\n'
' currentState\n'
' HumanID\n'
' currentStateID\n'
' severity\n'
' priority\n'
' location\n'
' channelId\n'
' updatedAt\n'
' resolvedTime\n'
' resolvedComment\n'
' Symptoms\n'
' meetingType\n'
' meetingId\n'
' }\n'
' runbook {\n'
' id\n'
' name\n'
' description\n'
' content\n'
' author\n'
' commands\n'
' createdAt\n'
' updatedAt\n'
' RunBookConnectors\n'
' type\n'
' longSave\n'
' rbVars\n'
' scope\n'
' PeopleCanView\n'
' PeopleCanAccess\n'
' GroupsCanView\n'
' GroupsCanAccess\n'
' }\n'
' }\n'
' }\n',
'getexecutedworkflowstate': '\n'
' query GetExecutedWorkflowState($id: ID!) {\n'
' getExecutedWorkflowState(id: $id) {\n'
' id\n'
' createdAt\n'
' updatedAt\n'
' name\n'
' state\n'
' output\n'
' type\n'
' allworkflowID\n'
' investigationID\n'
' investigation {\n'
' id\n'
' title\n'
' from\n'
' createdAt\n'
' currentState\n'
' HumanID\n'
' currentStateID\n'
' severity\n'
' priority\n'
' location\n'
' channelId\n'
' updatedAt\n'
' resolvedTime\n'
' resolvedComment\n'
' Symptoms\n'
' meetingType\n'
' meetingId\n'
' }\n'
' }\n'
' }\n',
'getgroupmember': '\n'
' query GetGroupMember($id: ID!) {\n'
' getGroupMember(id: $id) {\n'
' id\n'
' hostId\n'
' groupId\n'
' host {\n'
' id\n'
' inventoryId\n'
' createdAt\n'
' updatedAt\n'
' author\n'
' host\n'
' port\n'
' vars\n'
' }\n'
' group {\n'
' id\n'
' inventoryId\n'
' createdAt\n'
' updatedAt\n'
' author\n'
' name\n'
' groups\n'
' port\n'
' vars\n'
' }\n'
' }\n'
' }\n',
'getguardrail': '\n'
' query GetGuardRail($id: ID!) {\n'
' getGuardRail(id: $id) {\n'
' id\n'
' timeOut\n'
' message\n'
' labels {\n'
' nextToken\n'
' }\n'
' author\n'
' active\n'
' createdAt\n'
' updatedAt\n'
' }\n'
' }\n',
'getguardraillabellink': '\n'
' query GetGuardRailLabelLink($id: ID!) {\n'
' getGuardRailLabelLink(id: $id) {\n'
' id\n'
' guardRailID\n'
' labelID\n'
' guardRail {\n'
' id\n'
' timeOut\n'
' message\n'
' author\n'
' active\n'
' createdAt\n'
' updatedAt\n'
' }\n'
' label {\n'
' id\n'
' name\n'
' color\n'
' type\n'
' }\n'
' }\n'
' }\n',
'gethint': '\n'
' query GetHint($id: ID!) {\n'
' getHint(id: $id) {\n'
' id\n'
' investigationId\n'
' createdAt\n'
' updatedAt\n'
' type\n'
' connection\n'
' pastInvestigationID\n'
' title\n'
' importance\n'
' starrted\n'
' applied\n'
' url\n'
' why\n'
' }\n'
' }\n',
'getinventory': '\n'
' query GetInventory($id: ID!) {\n'
' getInventory(id: $id) {\n'
' id\n'
' createdAt\n'
' updatedAt\n'
' author\n'
' name\n'
' all {\n'
' nextToken\n'
' }\n'
' groups {\n'
' nextToken\n'
' }\n'
' vars\n'
' }\n'
' }\n',
'getinventorygroup': '\n'
' query GetInventoryGroup($id: ID!) {\n'
' getInventoryGroup(id: $id) {\n'
' id\n'
' inventoryId\n'
' createdAt\n'
' updatedAt\n'
' author\n'
' name\n'
' hosts {\n'
' nextToken\n'
' }\n'
' groups\n'
' port\n'
' vars\n'
' inventory {\n'
' id\n'
' createdAt\n'
' updatedAt\n'
' author\n'
' name\n'
' vars\n'
' }\n'
' }\n'
' }\n',
'getinventoryhost': '\n'
' query GetInventoryHost($id: ID!) {\n'
' getInventoryHost(id: $id) {\n'
' id\n'
' inventoryId\n'
' createdAt\n'
' updatedAt\n'
' author\n'
' host\n'
' port\n'
' vars\n'
' groups {\n'
' nextToken\n'
' }\n'
' inventory {\n'
' id\n'
' createdAt\n'
' updatedAt\n'
' author\n'
' name\n'
' vars\n'
' }\n'
' }\n'
' }\n',
'getinvestigation': '\n'
' query GetInvestigation($id: ID!) {\n'
' getInvestigation(id: $id) {\n'
' id\n'
' title\n'
' from\n'
' createdAt\n'
' currentState\n'
' HumanID\n'
' currentStateID\n'
' severity\n'
' priority\n'
' location\n'
' labels {\n'
' nextToken\n'
' }\n'
' channelId\n'
' investigationChannel {\n'
' id\n'
' name\n'
' title\n'
' members\n'
' membersState\n'
' createdAt\n'
' updatedAt\n'
' memberLocation\n'
' }\n'
' updatedAt\n'
' resolvedTime\n'
' resolvedComment\n'
' Symptoms\n'
' MMNodeLinks {\n'
' nextToken\n'
' }\n'
' CheckListInvLink {\n'
' nextToken\n'
' }\n'
' meetingType\n'
' meetingId\n'
' }\n'
' }\n',
'getinvestigationevent': '\n'
' query GetInvestigationEvent($id: ID!) {\n'
' getInvestigationEvent(id: $id) {\n'
' id\n'
' createdAt\n'
' updatedAt\n'
' author\n'
' content\n'
' type\n'
' links\n'
' investigationId\n'
' investigation {\n'
' id\n'
' title\n'
' from\n'
' createdAt\n'
' currentState\n'
' HumanID\n'
' currentStateID\n'
' severity\n'
' priority\n'
' location\n'
' channelId\n'
' updatedAt\n'
' resolvedTime\n'
' resolvedComment\n'
' Symptoms\n'
' meetingType\n'
' meetingId\n'
' }\n'
' }\n'
' }\n',
'getinvestigationguestuserlink': '\n'
' query GetInvestigationGuestUserLink($id: '
'ID!) {\n'
' getInvestigationGuestUserLink(id: $id) '
'{\n'
' id\n'
' userName\n'
' createdAt\n'
' updatedAt\n'
' investigationID\n'
' channelID\n'
' }\n'
' }\n',
'getlabel': '\n'
' query GetLabel($id: ID!) {\n'
' getLabel(id: $id) {\n'
' id\n'
' name\n'
' color\n'
' type\n'
' investigations {\n'
' nextToken\n'
' }\n'
' userSettings {\n'
' nextToken\n'
' }\n'
' }\n'
' }\n',
'getlabellink': '\n'
' query GetLabelLink($id: ID!) {\n'
' getLabelLink(id: $id) {\n'
' id\n'
' investigationID\n'
' labelID\n'
' label {\n'
' id\n'
' name\n'
' color\n'
' type\n'
' }\n'
' investigation {\n'
' id\n'
' title\n'
' from\n'
' createdAt\n'
' currentState\n'
' HumanID\n'
' currentStateID\n'
' severity\n'
' priority\n'
' location\n'
' channelId\n'
' updatedAt\n'
' resolvedTime\n'
' resolvedComment\n'
' Symptoms\n'
' meetingType\n'
' meetingId\n'
' }\n'
' }\n'
' }\n',
'getleadboard': '\n'
' query GetLeadBoard($id: ID!) {\n'
' getLeadBoard(id: $id) {\n'
' id\n'
' investigationId\n'
' content\n'
' createdAt\n'
' updatedAt\n'
' investigation {\n'
' id\n'
' title\n'
' from\n'
' createdAt\n'
' currentState\n'
' HumanID\n'
' currentStateID\n'
' severity\n'
' priority\n'
' location\n'
' channelId\n'
' updatedAt\n'
' resolvedTime\n'
' resolvedComment\n'
' Symptoms\n'
' meetingType\n'
' meetingId\n'
' }\n'
' }\n'
' }\n',
'getleadboardnode': '\n'
' query GetLeadBoardNode($id: ID!) {\n'
' getLeadBoardNode(id: $id) {\n'
' id\n'
' localID\n'
' createdAt\n'
' updatedAt\n'
' author\n'
' content\n'
' title\n'
' notes\n'
' associatedPlaybooks\n'
' associatedCommands\n'
' InvestLinks {\n'
' nextToken\n'
' }\n'
' }\n'
' }\n',
'getmessage': '\n'
' query GetMessage($id: ID!) {\n'
' getMessage(id: $id) {\n'
' id\n'
' authorId\n'
' content\n'
' type\n'
' channel {\n'
' id\n'
' name\n'
' title\n'
' members\n'
' membersState\n'
' createdAt\n'
' updatedAt\n'
' memberLocation\n'
' }\n'
' messageChannelId\n'
' createdAt\n'
' updatedAt\n'
' author {\n'
' id\n'
' username\n'
' email\n'
' firstname\n'
' lastname\n'
' avatar\n'
' mood\n'
' name\n'
' role\n'
' status\n'
' timezone\n'
' phone\n'
' address\n'
' location\n'
' createdAt\n'
' updatedAt\n'
' owner\n'
' }\n'
' | |
= dr
, start = '16:00'
, end = '18:30'
, work_location = '1'
, wp = '8'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-01-27')
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '14:00'
, work_location = '1'
, wp = '10'
)
db.time_record.create \
( daily_record = dr
, start = '09:00'
, end = '13:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '14:00'
, end = '18:00'
, work_location = '1'
, wp = '6'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-01-28')
)
db.time_record.create \
( daily_record = dr
, start = '12:30'
, end = '18:30'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '09:15'
, end = '12:00'
, work_location = '1'
, wp = '6'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-01-29')
)
db.time_record.create \
( daily_record = dr
, start = '09:30'
, end = '13:00'
, work_location = '1'
, wp = '6'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-01-30')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-01-31')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-01')
)
db.time_record.create \
( daily_record = dr
, start = '09:30'
, end = '12:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '09:00'
, end = '09:30'
, work_location = '1'
, wp = '11'
)
db.time_record.create \
( daily_record = dr
, start = '12:30'
, end = '18:00'
, work_location = '1'
, wp = '6'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-02')
)
db.time_record.create \
( daily_record = dr
, start = '09:00'
, end = '11:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '11:30'
, end = '17:15'
, work_location = '5'
, wp = '2'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-03')
)
db.time_record.create \
( daily_record = dr
, duration = 7.75
, work_location = '5'
, wp = '2'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-04')
)
db.time_record.create \
( daily_record = dr
, duration = 7.75
, work_location = '5'
, wp = '2'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-05')
)
db.time_record.create \
( daily_record = dr
, duration = 7.5
, work_location = '5'
, wp = '2'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-06')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-07')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-08')
)
db.time_record.create \
( daily_record = dr
, start = '12:30'
, end = '18:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '09:00'
, end = '12:00'
, work_location = '1'
, wp = '6'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-09')
)
db.time_record.create \
( daily_record = dr
, start = '14:30'
, end = '19:30'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '09:30'
, end = '14:00'
, work_location = '1'
, wp = '6'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-10')
)
db.time_record.create \
( daily_record = dr
, start = '12:30'
, end = '17:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '08:30'
, end = '12:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '17:00'
, end = '17:45'
, work_location = '1'
, wp = '7'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-11')
)
db.time_record.create \
( daily_record = dr
, start = '12:30'
, end = '15:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '08:15'
, end = '12:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '15:00'
, end = '17:00'
, work_location = '1'
, wp = '9'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-12')
)
db.time_record.create \
( daily_record = dr
, start = '08:45'
, end = '12:15'
, work_location = '1'
, wp = '6'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-13')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-14')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-15')
)
db.time_record.create \
( daily_record = dr
, start = '12:30'
, end = '18:15'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '08:15'
, end = '12:00'
, work_location = '1'
, wp = '6'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-16')
)
db.time_record.create \
( daily_record = dr
, start = '12:30'
, end = '16:45'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '08:15'
, end = '12:00'
, work_location = '1'
, wp = '6'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-17')
)
db.time_record.create \
( daily_record = dr
, start = '14:45'
, end = '19:15'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '09:30'
, end = '14:00'
, work_location = '1'
, wp = '6'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-18')
)
db.time_record.create \
( daily_record = dr
, start = '12:30'
, end = '16:30'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '08:30'
, end = '12:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '16:30'
, end = '17:45'
, work_location = '1'
, wp = '12'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-19')
)
db.time_record.create \
( daily_record = dr
, start = '09:15'
, end = '12:30'
, work_location = '1'
, wp = '6'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-20')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-21')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-22')
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '16:30'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '08:15'
, end = '13:00'
, work_location = '1'
, wp = '5'
)
db.time_record.create \
( daily_record = dr
, start = '16:30'
, end = '18:00'
, work_location = '1'
, wp = '9'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2010-02-23')
)
db.time_record.create \
( daily_record = dr
, | |
# source: d-amien-b/simple-getwordpress — Collections-a-installer/community-general-2.4.0/plugins/modules/gitlab_user.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, <NAME> (<EMAIL>)
# Copyright: (c) 2015, <NAME> (<EMAIL>)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: gitlab_user
short_description: Creates/updates/deletes/blocks/unblocks GitLab Users
description:
- When the user does not exist in GitLab, it will be created.
- When the user exists and state=absent, the user will be deleted.
- When the user exists and state=blocked, the user will be blocked.
- When changes are made to user, the user will be updated.
notes:
- From community.general 0.2.0 and onwards, name, email and password are optional while deleting the user.
author:
- <NAME> (@dj-wasabi)
- <NAME> (@Lunik)
requirements:
- python >= 2.7
- python-gitlab python module
- administrator rights on the GitLab server
extends_documentation_fragment:
- community.general.auth_basic
options:
api_token:
description:
- GitLab token for logging in.
type: str
name:
description:
- Name of the user you want to create.
- Required only if C(state) is set to C(present).
type: str
username:
description:
- The username of the user.
required: true
type: str
password:
description:
- The password of the user.
- GitLab server enforces minimum password length to 8, set this value with 8 or more characters.
- Required only if C(state) is set to C(present).
type: str
email:
description:
- The email that belongs to the user.
- Required only if C(state) is set to C(present).
type: str
sshkey_name:
description:
- The name of the sshkey
type: str
sshkey_file:
description:
- The ssh key itself.
type: str
group:
description:
- Id or Full path of parent group in the form of group/name.
- Add user as an member to this group.
type: str
access_level:
description:
- The access level to the group. One of the following can be used.
- guest
- reporter
- developer
- master (alias for maintainer)
- maintainer
- owner
default: guest
type: str
choices: ["guest", "reporter", "developer", "master", "maintainer", "owner"]
state:
description:
- Create, delete or block a user.
default: present
type: str
choices: ["present", "absent", "blocked", "unblocked"]
confirm:
description:
- Require confirmation.
type: bool
default: yes
isadmin:
description:
- Grant admin privileges to the user.
type: bool
default: no
external:
description:
- Define external parameter for this user.
type: bool
default: no
'''
EXAMPLES = '''
- name: "Delete GitLab User"
community.general.gitlab_user:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
validate_certs: False
username: myusername
state: absent
- name: "Create GitLab User"
community.general.gitlab_user:
api_url: https://gitlab.example.com/
validate_certs: True
api_username: dj-wasabi
api_password: "<PASSWORD>"
name: My Name
username: myusername
password: <PASSWORD>
email: <EMAIL>
sshkey_name: MySSH
sshkey_file: ssh-rsa AAAAB3NzaC1yc...
state: present
group: super_group/mon_group
access_level: owner
- name: "Block GitLab User"
community.general.gitlab_user:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
validate_certs: False
username: myusername
state: blocked
- name: "Unblock GitLab User"
community.general.gitlab_user:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
validate_certs: False
username: myusername
state: unblocked
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: str
sample: "Success"
result:
description: json parsed response from the server
returned: always
type: dict
error:
description: the error message returned by the GitLab API
returned: failed
type: str
sample: "400: path is already in use"
user:
description: API object
returned: always
type: dict
'''
import traceback
GITLAB_IMP_ERR = None
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except Exception:
GITLAB_IMP_ERR = traceback.format_exc()
HAS_GITLAB_PACKAGE = False
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, gitlabAuthentication
class GitLabUser(object):
    """Manage a single GitLab user through an authenticated python-gitlab handle.

    Supports create/update (including ssh-key and group-membership
    assignment), delete, block and unblock.  The user object found or
    created last is cached on ``self.userObject``.
    """
    def __init__(self, module, gitlab_instance):
        # module: the AnsibleModule driving this run (check_mode, exit/fail).
        # gitlab_instance: an authenticated gitlab.Gitlab client.
        self._module = module
        self._gitlab = gitlab_instance
        self.userObject = None

        # Translate the module's access_level choice into python-gitlab
        # constants; 'master' is kept as an alias for 'maintainer'.
        self.ACCESS_LEVEL = {
            'guest': gitlab.GUEST_ACCESS,
            'reporter': gitlab.REPORTER_ACCESS,
            'developer': gitlab.DEVELOPER_ACCESS,
            'master': gitlab.MAINTAINER_ACCESS,
            'maintainer': gitlab.MAINTAINER_ACCESS,
            'owner': gitlab.OWNER_ACCESS}

    '''
    @param username Username of the user
    @param options User options
    '''
    def createOrUpdateUser(self, username, options):
        # Returns True when anything changed, False otherwise.  In check
        # mode it leaves the module early via exit_json once a change is
        # detected.
        changed = False

        # existsUser() has already run in main(), so self.userObject is
        # None exactly when the user does not exist yet.
        if self.userObject is None:
            user = self.createUser({
                'name': options['name'],
                'username': username,
                'password': options['password'],
                'email': options['email'],
                'skip_confirmation': not options['confirm'],
                'admin': options['isadmin'],
                'external': options['external']})
            changed = True
        else:
            changed, user = self.updateUser(self.userObject, {
                'name': options['name'],
                'email': options['email'],
                'is_admin': options['isadmin'],
                'external': options['external']})

        # Assign ssh keys
        # NOTE(review): in check mode createUser() returns True (a bool),
        # so addSshKeyToUser would dereference a bool here — confirm
        # against upstream behavior.
        if options['sshkey_name'] and options['sshkey_file']:
            key_changed = self.addSshKeyToUser(user, {
                'name': options['sshkey_name'],
                'file': options['sshkey_file']})
            changed = changed or key_changed

        # Assign group
        if options['group_path']:
            group_changed = self.assignUserToGroup(user, options['group_path'], options['access_level'])
            changed = changed or group_changed

        self.userObject = user
        if changed:
            if self._module.check_mode:
                self._module.exit_json(changed=True, msg="Successfully created or updated the user %s" % username)

            try:
                user.save()
            except Exception as e:
                self._module.fail_json(msg="Failed to update user: %s " % to_native(e))
            return True
        else:
            return False

    '''
    @param group User object
    '''
    def getUserId(self, user):
        # Numeric id of a user object, or None when no user is given.
        if user is not None:
            return user.id
        return None

    '''
    @param user User object
    @param sshkey_name Name of the ssh key
    '''
    def sshKeyExists(self, user, sshkey_name):
        # Only the key title is compared, not the key material itself.
        keyList = map(lambda k: k.title, user.keys.list())

        return sshkey_name in keyList

    '''
    @param user User object
    @param sshkey Dict containing sshkey infos {"name": "", "file": ""}
    '''
    def addSshKeyToUser(self, user, sshkey):
        # Returns True when a key was (or would be) added, False when a key
        # with that title already exists.
        if not self.sshKeyExists(user, sshkey['name']):
            if self._module.check_mode:
                return True

            try:
                user.keys.create({
                    'title': sshkey['name'],
                    'key': sshkey['file']})
            except gitlab.exceptions.GitlabCreateError as e:
                self._module.fail_json(msg="Failed to assign sshkey to user: %s" % to_native(e))
            return True
        return False

    '''
    @param group Group object
    @param user_id Id of the user to find
    '''
    def findMember(self, group, user_id):
        # Membership object of user_id in group, or None when not a member.
        try:
            member = group.members.get(user_id)
        except gitlab.exceptions.GitlabGetError:
            return None
        return member

    '''
    @param group Group object
    @param user_id Id of the user to check
    '''
    def memberExists(self, group, user_id):
        member = self.findMember(group, user_id)

        return member is not None

    '''
    @param group Group object
    @param user_id Id of the user to check
    @param access_level GitLab access_level to check
    '''
    def memberAsGoodAccessLevel(self, group, user_id, access_level):
        # Only call after memberExists(): findMember() may return None and
        # the attribute access below would raise otherwise.
        member = self.findMember(group, user_id)

        return member.access_level == access_level

    '''
    @param user User object
    @param group_path Complete path of the Group including parent group path. <parent_path>/<group_path>
    @param access_level GitLab access_level to assign
    '''
    def assignUserToGroup(self, user, group_identifier, access_level):
        group = findGroup(self._gitlab, group_identifier)

        # NOTE(review): in check mode a change is reported before the
        # group-existence check below — confirm this is intended.
        if self._module.check_mode:
            return True

        if group is None:
            return False

        if self.memberExists(group, self.getUserId(user)):
            # Already a member: only adjust the access level when needed.
            member = self.findMember(group, self.getUserId(user))
            if not self.memberAsGoodAccessLevel(group, member.id, self.ACCESS_LEVEL[access_level]):
                member.access_level = self.ACCESS_LEVEL[access_level]
                member.save()
                return True
        else:
            try:
                group.members.create({
                    'user_id': self.getUserId(user),
                    'access_level': self.ACCESS_LEVEL[access_level]})
            except gitlab.exceptions.GitlabCreateError as e:
                self._module.fail_json(msg="Failed to assign user to group: %s" % to_native(e))
            return True
        return False

    '''
    @param user User object
    @param arguments User attributes
    '''
    def updateUser(self, user, arguments):
        # Mutates the passed user object in place; the caller is responsible
        # for persisting via user.save().  Returns (changed, user).
        # None values in arguments mean "leave this attribute untouched".
        changed = False

        for arg_key, arg_value in arguments.items():
            if arguments[arg_key] is not None:
                if getattr(user, arg_key) != arguments[arg_key]:
                    setattr(user, arg_key, arguments[arg_key])
                    changed = True

        return (changed, user)

    '''
    @param arguments User attributes
    '''
    def createUser(self, arguments):
        # In check mode nothing is created and True is returned instead of a
        # user object.
        if self._module.check_mode:
            return True

        try:
            user = self._gitlab.users.create(arguments)
        except (gitlab.exceptions.GitlabCreateError) as e:
            self._module.fail_json(msg="Failed to create user: %s " % to_native(e))

        return user

    '''
    @param username Username of the user
    '''
    def findUser(self, username):
        # users.list(search=...) matches partially, so keep only the exact
        # username match.  Implicitly returns None when nothing matches.
        users = self._gitlab.users.list(search=username)
        for user in users:
            if (user.username == username):
                return user

    '''
    @param username Username of the user
    '''
    def existsUser(self, username):
        # When user exists, object will be stored in self.userObject.
        user = self.findUser(username)
        if user:
            self.userObject = user
            return True
        return False

    '''
    @param username Username of the user
    '''
    def isActive(self, username):
        # True when the user's GitLab state is 'active' (i.e. not blocked).
        user = self.findUser(username)

        return user.attributes['state'] == 'active'

    def deleteUser(self):
        # Caller guarantees the user exists; skipped in check mode.
        if self._module.check_mode:
            return True

        user = self.userObject

        return user.delete()

    def blockUser(self):
        # Blocks the cached user; skipped in check mode.
        if self._module.check_mode:
            return True

        user = self.userObject

        return user.block()

    def unblockUser(self):
        # Unblocks the cached user; skipped in check mode.
        if self._module.check_mode:
            return True

        user = self.userObject

        return user.unblock()
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
api_token=dict(type='str', no_log=True),
name=dict(type='str'),
state=dict(type='str', default="present", choices=["absent", "present", "blocked", "unblocked"]),
username=dict(type='str', required=True),
password=dict(type='str', no_log=True),
email=dict(type='str'),
sshkey_name=dict(type='str'),
sshkey_file=dict(type='str'),
group=dict(type='str'),
access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]),
confirm=dict(type='bool', default=True),
isadmin=dict(type='bool', default=False),
external=dict(type='bool', default=False),
))
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['api_username', 'api_token'],
['api_password', 'api_token'],
],
required_together=[
['api_username', 'api_password'],
],
required_one_of=[
['api_username', 'api_token']
],
supports_check_mode=True,
required_if=(
('state', 'present', ['name', 'email', 'password']),
)
)
user_name = module.params['name']
state = module.params['state']
user_username = module.params['username'].lower()
user_password = module.params['password']
user_email = module.params['email']
user_sshkey_name = module.params['sshkey_name']
user_sshkey_file = module.params['sshkey_file']
group_path = module.params['group']
access_level = module.params['access_level']
confirm = module.params['confirm']
user_isadmin = module.params['isadmin']
user_external = module.params['external']
if not HAS_GITLAB_PACKAGE:
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
gitlab_instance = gitlabAuthentication(module)
gitlab_user = GitLabUser(module, gitlab_instance)
user_exists = gitlab_user.existsUser(user_username)
if user_exists:
user_is_active = gitlab_user.isActive(user_username)
else:
user_is_active = False
if state == 'absent':
if user_exists:
gitlab_user.deleteUser()
module.exit_json(changed=True, | |
x_ac.shape[1]
x_af = torch.zeros(B*N_a, dim).cuda()
idx_a = idx_a.reshape(-1,1).repeat(1,dim)
x_af.scatter_(dim=0, index=idx_a, src=self.linear_a(x_a.F))
x_af = x_af.reshape([B, N_a, dim])
x_bc, mask_b, idx_b = separate_batch(x_b.C)
B = x_bc.shape[0]
N_b = x_bc.shape[1]
x_bf = torch.zeros(B*N_b, dim).cuda()
idx_b = idx_b.reshape(-1,1).repeat(1,dim)
x_bf.scatter_(dim=0, index=idx_b, src=self.linear_b(x_b.F))
x_bf = x_bf.reshape([B, N_b, dim])
dists, idx = three_nn(x_bc.float(), x_ac.float())
mask = (dists.sum(dim=-1)>0).unsqueeze(-1).repeat(1,1,3)
dist_recip = 1.0 / (dists + 1e-1)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
weight = weight*mask # mask the zeros part
interpolated_points = three_interpolate(x_af.transpose(1,2).contiguous(), idx, weight).transpose(1,2) # [B, N_b, dim]
out = interpolated_points + x_bf
out = torch.gather(out.reshape(B*N_b,dim), dim=0, index=idx_b) # should be the same size with x_a.F
x = ME.SparseTensor(features = out, coordinate_map_key=x_b.coordinate_map_key, coordinate_manager=x_b.coordinate_manager)
else:
if self.SUM_FEATURE:
x_a = self.conv_a(x_a)
x_b = self.conv_b(x_b)
x = x_a + x_b
else:
x_a = self.conv(x_a)
x_a = self.bn(x_a)
x_a = self.relu(x_a)
x = me.cat(x_a, x_b)
x = self.out_conv(x)
x = self.out_bn(x)
x = self.out_relu(x)
return x
def index_points(points, idx):
    """Gather point features by (possibly multi-dimensional) index.

    Input:
        points: input points data, [B, N, C]
        idx: sample index data, [B, S, [K]]
    Return:
        new_points: indexed points data, [B, S, [K], C]
    """
    idx_shape = idx.size()
    # Flatten all trailing index dimensions so a single gather suffices.
    flat_idx = idx.reshape(idx_shape[0], -1)
    channels = points.size(-1)
    gather_idx = flat_idx.unsqueeze(-1).expand(-1, -1, channels)
    gathered = torch.gather(points, 1, gather_idx)
    # Restore the original index shape, with the channel axis appended.
    return gathered.reshape(*idx_shape, -1)
class StackedPTBlock(nn.Module):
    """Two PTBlocks applied back to back with identical hyper-parameters."""

    def __init__(self, in_dim, hidden_dim, is_firstlayer=False, n_sample=16, r=10, skip_knn=False, kernel_size=1):
        super().__init__()
        # Keep the sub-blocks as the named attributes block1/block2 so
        # checkpoint (state-dict) keys stay stable.
        self.block1 = PTBlock(in_dim, hidden_dim, is_firstlayer, n_sample, r, skip_knn, kernel_size)
        self.block2 = PTBlock(in_dim, hidden_dim, is_firstlayer, n_sample, r, skip_knn, kernel_size)

    def forward(self, x: ME.SparseTensor):
        # Sequentially apply both blocks.
        return self.block2(self.block1(x))
class PTBlock(nn.Module):
    def __init__(self, in_dim, hidden_dim, is_firstlayer=False, n_sample=16, r=10, skip_knn=False, kernel_size=1):
        super().__init__()
        '''
        Point Transformer Layer

        in_dim: feature dimension of the input feature x
        out_dim: feature dimension of the Point Transformer Layer(currently same with hidden-dim)
        '''
        self.r = r # neighborhood cube radius
        self.skip_knn = skip_knn
        self.kernel_size = kernel_size
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.out_dim = self.hidden_dim
        # NOTE(review): '// 1' makes vector_dim == out_dim; looks like a
        # leftover tunable grouping divisor — confirm intent.
        self.vector_dim = self.out_dim // 1
        self.n_sample = n_sample

        # Hard-coded behaviour switches (not exposed as constructor args):
        self.KS_1 = True  # force kernel_size to 1 (overrides the argument below)
        self.USE_KNN = True  # kNN neighborhood query instead of pure cube radius
        self.use_vector_attn = True # whether to use the vector att or the original attention
        self.WITH_POSE_ENCODING = True
        self.SKIP_ATTN=False

        if self.KS_1:
            self.kernel_size = 1
        if not self.use_vector_attn:
            # Head count for the scalar (multi-head) attention variant.
            self.nhead = 4

        # Pre-attention feature lift: in_dim -> hidden_dim.
        self.linear_top = nn.Sequential(
            ME.MinkowskiConvolution(in_dim, self.hidden_dim, kernel_size=self.kernel_size, dimension=3),
            ME.MinkowskiBatchNorm(self.hidden_dim),
        )
        # Output projection back to the input width: out_dim -> in_dim.
        self.linear_down = nn.Sequential(
            ME.MinkowskiConvolution(self.out_dim, self.in_dim, kernel_size=self.kernel_size, dimension=3),
            ME.MinkowskiBatchNorm(self.in_dim),
        )

        # feature transformations
        # phi/psi: presumably the query/key branches of the attention —
        # confirm against the (truncated here) forward() implementation.
        self.phi = nn.Sequential(
            ME.MinkowskiConvolution(self.hidden_dim, self.out_dim, kernel_size=self.kernel_size, dimension=3)
        )
        self.psi = nn.Sequential(
            ME.MinkowskiConvolution(self.hidden_dim, self.out_dim, kernel_size=self.kernel_size, dimension=3)
        )

        if self.SKIP_ATTN:
            # Attention disabled: alpha becomes a plain point-wise MLP.
            KERNEL_SIZE = 1
            self.alpha = nn.Sequential(
                nn.Conv1d(self.in_dim, self.in_dim, KERNEL_SIZE),
                nn.BatchNorm1d(self.in_dim),
                nn.ReLU(),
                nn.Conv1d(self.in_dim, self.hidden_dim, KERNEL_SIZE),
                nn.BatchNorm1d(self.hidden_dim),
                nn.ReLU(),
            )
        else:
            self.alpha = nn.Sequential(
                ME.MinkowskiConvolution(self.hidden_dim, self.out_dim, kernel_size=self.kernel_size, dimension=3)
            )

        # gamma: maps attention features to per-neighbor weights
        # (out_dim -> vector_dim).
        self.gamma = nn.Sequential(
            nn.Conv1d(self.out_dim, self.hidden_dim, 1),
            nn.BatchNorm1d(self.hidden_dim),
            nn.ReLU(),
            nn.Conv1d(self.hidden_dim, self.vector_dim, 1),
            nn.BatchNorm1d(self.vector_dim),
        )
        # delta: positional-encoding MLP; 3 input channels match the
        # relative xyz offsets computed in forward() (3 -> out_dim).
        self.delta = nn.Sequential(
            nn.Conv2d(3, self.hidden_dim, 1),
            nn.BatchNorm2d(self.hidden_dim),
            nn.ReLU(),
            nn.Conv2d(self.hidden_dim, self.out_dim, 1),
            nn.BatchNorm2d(self.out_dim),
        )
    def forward(self, x : ME.SparseTensor, aux=None):
        '''
        Vector self-attention over each voxel's k nearest / in-radius neighbors.

        Args:
            x: input sparse tensor; x.F is (nvoxel, in_dim) features and x.C is
               (nvoxel, 4) coordinates laid out as (batch, x, y, z).
            aux: optional sparse tensor of per-voxel labels; when given, the
                 attention weight of any neighbor whose label differs from its
                 query voxel's is zeroed out.

        Returns:
            ME.SparseTensor on the same coordinate map as ``x`` with in_dim
            channels: linear_down(attention output) + residual input.
        '''
        PT_begin = time.perf_counter()  # NOTE(review): start time is never read afterwards -- presumably a leftover profiling hook
        self.B = (x.C[:,0]).max().item() + 1 # batch size
        npoint, in_dim = tuple(x.F.size())
        self.k = min(self.n_sample, npoint)  # cannot take more neighbors than voxels exist
        if not self.use_vector_attn:
            h = self.nhead  # NOTE(review): unused below -- the scalar-attention branch never reads it
        res = x  # kept for the residual connection on every return path
        if self.skip_knn:
            # --- for debugging only: bypass attention, keep the linear stack ---
            x = self.linear_top(x)
            y = self.linear_down(x)
            return y+res
        else:
            self.cube_query = cube_query(r=self.r, k=self.k, knn=self.USE_KNN)
            # neighbor: [B*npoint, k, bxyz]
            # mask: [B*npoint, k]
            # idx: [B_nq], used for scatter/gather
            neighbor, mask, idx_ = self.cube_query.get_neighbor(x, x)
        # stored as buffers so the last neighborhood can be inspected from the state-dict
        self.register_buffer('neighbor_map', neighbor)
        self.register_buffer('input_map', x.C)
        x = self.linear_top(x) # project in_dim -> hidden_dim, e.g. [16, 32, 4096]
        '''
        illustration on dimension notations:
            - B: batch size
            - nvoxel: number of all voxels of the whole batch
            - k: k neighbors
            - feat_dim: feature dimension, or channel as others call it
            - nvoxel_batch: the maximum voxel number of a single SparseTensor in the current batch
        '''
        '''Gene the pos_encoding'''
        relative_xyz = neighbor - x.C[:,None,:].repeat(1,self.k,1) # (nvoxel, k, bxyz), we later pad it to [B, xyz, nvoxel_batch, k]
        '''
        mask the neighbor when not in the same instance-class
        '''
        if aux is not None:
            # label of every neighbor, fetched at its coordinates: [N, k]
            neighbor_mask = aux.features_at_coordinates(neighbor.reshape(-1,4).float()).reshape(-1,self.k) # [N, k]
            # 1 where a neighbor's label differs from the center voxel's (column 0)
            neighbor_mask = (neighbor_mask - neighbor_mask[:,0].unsqueeze(-1) != 0).int()
            # invert: 1 keeps a same-label neighbor, 0 drops it
            neighbor_mask = torch.ones_like(neighbor_mask) - neighbor_mask
        else:
            neighbor_mask = None
        if self.WITH_POSE_ENCODING:
            relative_xyz[:,0,0] = x.C[:,0] # get back the correct batch index, because we messed batch index in the subtraction above
            relative_xyz = pad_zero(relative_xyz, mask) # [B, xyz, nvoxel_batch, k]
            pose_tensor = self.delta(relative_xyz.float()) # (B, feat_dim, nvoxel_batch, k)
            pose_tensor = make_position_tensor(pose_tensor, mask, idx_, x.C.shape[0]) # (nvoxel, k, feat_dim)
        if self.SKIP_ATTN:
            # PointNet-style ablation: pointwise MLP on (feature + position),
            # then channel-wise max over the k neighbors -- no attention at all.
            grouped_x = get_neighbor_feature(neighbor, x) # (nvoxel, k, feat_dim)
            if self.WITH_POSE_ENCODING:
                alpha = self.alpha((grouped_x + pose_tensor).transpose(1,2))
            else:
                alpha = self.alpha((grouped_x).transpose(1,2))
            y = alpha.max(dim=-1)[0]
            y = ME.SparseTensor(features = y, coordinate_map_key=x.coordinate_map_key, coordinate_manager=x.coordinate_manager)
            y = self.linear_down(y)
            return y+res
        phi = self.phi(x).F # (nvoxel, feat_dim), query projection
        phi = phi[:,None,:].repeat(1,self.k,1) # (nvoxel, k, feat_dim)
        psi = get_neighbor_feature(neighbor, self.psi(x)) # (nvoxel, k, feat_dim), key projection of the neighbors
        alpha = get_neighbor_feature(neighbor, self.alpha(x)) # (nvoxel, k, feat_dim), value projection of the neighbors
        '''The Self-Attn Part'''
        if self.use_vector_attn:
            '''
            the attn_map: [vector_dim];
            the alpha: [out_dim]
            attn_map = F.softmax(self.gamma(phi - psi + pos_encoding), dim=-1) # [B, in_dim, npoint, k], such as [16, 32, 4096, 16]
            y = attn_map.repeat(1, self.out_dim // self.vector_dim,1,1)*(alpha + pos_encoding) # multiplies attention weight
            self.out_dim and self.vector_dim are all 32 here, so y is still [16, 32, 4096, 16]
            y = y.sum(dim=-1) # feature aggregation, y becomes [B, out_dim, npoint]
            '''
            if self.WITH_POSE_ENCODING:
                attn_map = F.softmax(self.gamma((phi - psi + pose_tensor).transpose(1,2)), dim=-1)
            else:
                attn_map = F.softmax(self.gamma((phi - psi).transpose(1,2)), dim=-1)
            if self.WITH_POSE_ENCODING:
                self_feat = (alpha + pose_tensor).permute(0,2,1) # (nvoxel, k, feat_dim) -> (nvoxel, feat_dim, k)
            else:
                self_feat = (alpha).permute(0,2,1) # (nvoxel, k, feat_dim) -> (nvoxel, feat_dim, k)
            # use aux info and mask the attn_map
            if neighbor_mask is not None:
                attn_map = attn_map*(neighbor_mask.unsqueeze(1))
            # broadcast the vector_dim attention groups over the out_dim value
            # channels (with vector_dim == out_dim the repeat factor is 1)
            y = attn_map.repeat(1, self.out_dim // self.vector_dim, 1, 1) * self_feat # (nvoxel, feat_dim, k)
            y = y.sum(dim=-1).view(x.C.shape[0], -1) # feature aggregation, y becomes (nvoxel, feat_dim)
            y = ME.SparseTensor(features = y, coordinate_map_key=x.coordinate_map_key, coordinate_manager=x.coordinate_manager)
        else:
            # scalar (dot-product) multi-head style attention over the neighborhood
            phi = phi.permute([2,1,0]) # [out_dim, k, npoint]
            psi = psi.permute([2,0,1]) # [out_dim, npoint, k]
            attn_map = F.softmax(torch.matmul(phi,psi), dim=0) # [out_dim, k, k]
            alpha = (alpha+pose_tensor).permute([2,0,1]) # [out_dim, npoint, k]
            y = torch.matmul(alpha, attn_map) # [out_dim, npoint, k]
            y = y.sum(-1).transpose(0,1) # [out_dim, npoint]
            y = ME.SparseTensor(features = y, coordinate_map_key=x.coordinate_map_key, coordinate_manager=x.coordinate_manager)
        y = self.linear_down(y)
        self.register_buffer('attn_map', attn_map.detach().cpu().data) # pack it with nn parameter to save in state-dict
        return y+res
def make_position_tensor(pose_encoding : torch.Tensor, mask : torch.Tensor, idx_: torch.Tensor, nvoxel : int):
    """Scatter batch-padded positional encodings back to the flat voxel layout.

    Args:
        pose_encoding: (B, feat_dim, nvoxel_batch, k) padded encodings.
        mask: (B, nvoxel_batch) validity mask; kept for interface
            compatibility -- not read here.
        idx_: (nvoxel,) gather indices into the flattened padded layout.
        nvoxel: total number of voxels across the whole batch.

    Returns:
        (nvoxel, k, feat_dim) per-voxel positional encodings.
    """
    assert idx_.shape[0] == nvoxel  # exactly one gather index per voxel
    _, feat_dim, _, k = pose_encoding.shape
    # (B, feat_dim, nvoxel_batch, k) -> (B, nvoxel_batch, k, feat_dim)
    channels_last = pose_encoding.permute(0, 2, 3, 1)
    # flatten the batch/padding dims so idx_ can address rows directly
    flat = channels_last.reshape(-1, k, feat_dim)
    gather_idx = idx_.reshape(-1, 1, 1).repeat(1, k, feat_dim)
    return torch.gather(flat, 0, gather_idx).reshape(nvoxel, k, feat_dim)
def get_neighbor_feature(neighbor: torch.Tensor, x: ME.SparseTensor):
    """Fetch the feature vector of every neighbor voxel.

    Args:
        neighbor: (B*npoint, k, 4) neighbor coordinates as (batch, x, y, z).
        x: sparse tensor queried through ``features_at_coordinates``.

    Returns:
        (B*npoint, k, feat_dim) neighbor features.
    """
    _, k, _ = neighbor.shape
    # features_at_coordinates expects a flat float (N, 4) coordinate list
    flat_coords = neighbor.reshape(-1, 4).float()
    feats = x.features_at_coordinates(flat_coords)
    return feats.reshape(-1, k, feats.shape[-1])
def pad_zero(tensor : torch.Tensor, mask: torch.Tensor):
'''
input is [B*npoint, k, bxyz], we want [B, xyz, npoint, k]
need to pad zero because each batch may have different voxel number
B = int(max(tensor[:,0,0]).item() + 1)
k = tuple(tensor.shape)[1]
'''
B, N = mask.shape
_, k, bxyz = tensor.shape
result = torch.zeros([B, N, k, 4], dtype=torch.int, device=tensor.device)
pointer = 0
for b_idx in range(B):
nvoxel = mask.sum(-1)[b_idx]
result[b_idx, :nvoxel, :, :] = tensor[pointer:pointer+nvoxel, :, :]
pointer += nvoxel
result = result[:,:,:,1:] # (B, N, k, 3)
result = result.permute(0, 3, 1, 2) | |
# Databricks notebook source
# MAGIC %md
# MAGIC # Putting it all together: Managing the Machine Learning Lifecycle
# MAGIC
# MAGIC Create a workflow that includes pre-processing logic, the optimal ML algorithm and hyperparameters, and post-processing logic.
# MAGIC
# MAGIC ## Instructions
# MAGIC
# MAGIC In this course, we've primarily used Random Forest in `sklearn` to model the Airbnb dataset. In this exercise, perform the following tasks:
# MAGIC <br><br>
# MAGIC 0. Create custom pre-processing logic to featurize the data
# MAGIC 0. Try a number of different algorithms and hyperparameters. Choose the most performant solution
# MAGIC 0. Create related post-processing logic
# MAGIC 0. Package the results and execute it as its own run
# MAGIC
# MAGIC ## Prerequisites
# MAGIC - Web browser: Chrome
# MAGIC - A cluster configured with **8 cores** and **DBR 7.0 ML**
# COMMAND ----------
# MAGIC %md
# MAGIC ##  Classroom-Setup
# MAGIC
# MAGIC For each lesson to execute correctly, please make sure to run the **`Classroom-Setup`** cell at the<br/>
# MAGIC start of each lesson (see the next cell) and the **`Classroom-Cleanup`** cell at the end of each lesson.
# COMMAND ----------
# MAGIC %run "./Includes/Classroom-Setup"
# COMMAND ----------
# Adjust our working directory from what DBFS sees to what python actually sees
# (local file APIs access DBFS through the /dbfs fuse mount)
working_path = workingDir.replace("dbfs:", "/dbfs")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Pre-processing
# MAGIC
# MAGIC Take a look at the dataset and notice that there are plenty of strings and `NaN` values present. Our end goal is to train a sklearn regression model to predict the price of an airbnb listing.
# MAGIC
# MAGIC
# MAGIC Before we can start training, we need to pre-process our data to be compatible with sklearn models by making all features purely numerical.
# COMMAND ----------
import pandas as pd

# Load the SF Airbnb listings (parquet, already correctly typed) and collect
# them to the driver as a pandas DataFrame; `display` renders it in the notebook.
airbnbDF = spark.read.parquet("/mnt/training/airbnb/sf-listings/sf-listings-correct-types.parquet").toPandas()
display(airbnbDF)
# COMMAND ----------
# MAGIC %md
# MAGIC In the following cells we will walk you through the most basic pre-processing step necessary. Feel free to add additional steps afterwards to improve your model performance.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC First, convert the `price` from a string to a float since the regression model will be predicting numerical values.
# COMMAND ----------
# TODO
import numpy as np

# "price" arrives as a formatted string like "$1,234.00": strip the currency
# symbol and the thousands separators, then cast to float for regression.
# NOTE: "$" is the end-of-string anchor in a regex, so with regex=True the
# replace matches an empty position and the dollar sign survives, making the
# float cast below fail -- both replaces must be literal (regex=False).
airbnbDF["price"] = airbnbDF["price"].str.replace("$", "", regex=False)
airbnbDF["price"] = airbnbDF["price"].str.replace(",", "", regex=False)
airbnbDF["price"] = airbnbDF.price.astype('float32')
print(airbnbDF["price"])
# COMMAND ----------
# MAGIC %md
# MAGIC Take a look at our remaining columns with strings (or numbers) and decide if you would like to keep them as features or not.
# MAGIC
# MAGIC Remove the features you decide not to keep.
# COMMAND ----------
# TODO
# Coarsen lat/long to 2 decimals so they act as location buckets, and collapse
# the six review sub-scores into a single mean column.
airbnbDF["trunc_lat"] = airbnbDF.latitude.round(decimals=2)
airbnbDF["trunc_long"] = airbnbDF.longitude.round(decimals=2)
airbnbDF["review_scores_sum"] = airbnbDF[['review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication', 'review_scores_location', 'review_scores_value']].mean(axis=1)
# Drop the raw columns that were just aggregated, plus the high-cardinality
# string columns we chose not to encode.
airbnbDF = airbnbDF.drop(["latitude", "longitude", 'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication', 'review_scores_location', 'review_scores_value', "neighbourhood_cleansed", "property_type", "zipcode"], axis=1)

# COMMAND ----------

# Inspect which columns remain after the drops
airbnbDF.columns
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC For the string columns that you've decided to keep, pick a numerical encoding for the string columns. Don't forget to deal with the `NaN` entries in those columns first.
# COMMAND ----------
# TODO
from sklearn.impute import SimpleImputer

# Binary flags: map the "t"/"f" strings to digits, then to numbers.
# NOTE(review): "t" (true) becomes 0 and "f" becomes 1, so these columns are
# *inverted* indicators -- harmless for tree models, but confirm before
# interpreting feature importances.
airbnbDF["host_is_superhost"] = airbnbDF["host_is_superhost"].str.replace("t", "0", regex=True)
airbnbDF["host_is_superhost"] = airbnbDF["host_is_superhost"].str.replace("f", "1", regex=True)
airbnbDF["instant_bookable"] = airbnbDF["instant_bookable"].str.replace("t", "0", regex=True)
airbnbDF["instant_bookable"] = airbnbDF["instant_bookable"].str.replace("f", "1", regex=True)
airbnbDF["host_is_superhost"] = pd.to_numeric(airbnbDF["host_is_superhost"])
airbnbDF["instant_bookable"] = pd.to_numeric(airbnbDF["instant_bookable"])
# One-vs-rest encodings for bed and room type.
airbnbDF["bed_type"] = np.where(airbnbDF["bed_type"] == "Real Bed", 0, 1)
airbnbDF["room_type"] = np.where(airbnbDF["room_type"] == "Entire home/apt", 0, 1)
# Ordinal encoding of cancellation policy (0 = most lenient). The two
# "super_strict_*" values must be replaced *before* plain "strict", because
# "strict" is a substring of both.
airbnbDF["cancellation_policy"] = airbnbDF["cancellation_policy"].str.replace("flexible", "0", regex=True)
airbnbDF["cancellation_policy"] = airbnbDF["cancellation_policy"].str.replace("moderate", "1", regex=True)
airbnbDF["cancellation_policy"] = airbnbDF["cancellation_policy"].str.replace("super_strict_30", "3", regex=True)
airbnbDF["cancellation_policy"] = airbnbDF["cancellation_policy"].str.replace("super_strict_60", "3", regex=True)
airbnbDF["cancellation_policy"] = airbnbDF["cancellation_policy"].str.replace("strict", "2", regex=True)
airbnbDF["cancellation_policy"] = pd.to_numeric(airbnbDF["cancellation_policy"])
# Coerce every remaining column to a numeric dtype.
airbnbDF = airbnbDF.apply(pd.to_numeric)
# Mean-impute the NaNs. fit_transform returns a bare ndarray, so the frame is
# rebuilt with an explicit column list below.
impute = SimpleImputer(missing_values=np.nan, strategy='mean')
airbnbDF = impute.fit_transform(airbnbDF)
# NOTE(review): this hard-coded list must match the column order of the frame
# fed to the imputer -- re-verify if any upstream cell changes the columns.
airbnbDF = pd.DataFrame(airbnbDF, columns=['host_is_superhost', 'cancellation_policy', 'instant_bookable',
       'host_total_listings_count', 'room_type', 'accommodates', 'bathrooms',
       'bedrooms', 'beds', 'bed_type', 'minimum_nights', 'number_of_reviews',
       'review_scores_rating', 'price', 'trunc_lat', 'trunc_long',
       'review_scores_sum'])
print(airbnbDF.head())
# COMMAND ----------
# Sanity-check the cleaned target: element dtype and value range.
print(type(airbnbDF["price"][1]))
print(max(airbnbDF["price"]))
print(min(airbnbDF["price"]))
# COMMAND ----------
# MAGIC %md
# MAGIC Before we create a train test split, check that all your columns are numerical. Remember to drop the original string columns after creating numerical representations of them.
# MAGIC
# MAGIC Make sure to drop the price column from the training data when doing the train test split.
# COMMAND ----------
# TODO
from sklearn.model_selection import train_test_split

# Default 75/25 split; the target is dropped from the features and flattened
# to a 1-D array, with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(airbnbDF.drop(["price"], axis=1), airbnbDF[["price"]].values.ravel(), random_state=42)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Model
# MAGIC
# MAGIC After cleaning our data, we can start creating our model!
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC Firstly, if there are still `NaN`'s in your data, you may want to impute these values instead of dropping those entries entirely. Make sure that any further processing/imputing steps after the train test split is part of a model/pipeline that can be saved.
# MAGIC
# MAGIC In the following cell, create and fit a single sklearn model.
# COMMAND ----------
# TODO
from sklearn.ensemble import RandomForestRegressor

# Baseline model: a random forest fit on the numeric feature matrix.
# (A stale commented-out draft of an RF_with_preprocess pyfunc wrapper was
# removed here; the final wrapper lives in the post-processing section below.)
rfmodel = RandomForestRegressor(n_estimators=100, max_depth=25)
rfmodel.fit(X_train, y_train)
# COMMAND ----------
# MAGIC %md
# MAGIC Pick and calculate a regression metric for evaluating your model.
# COMMAND ----------
# TODO
from sklearn.metrics import mean_squared_error

# Regression metric: MSE of the baseline forest on the held-out test set.
rf_mse = mean_squared_error(y_test, rfmodel.predict(X_test))
rf_mse
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC Log your model on MLflow with the same metric you calculated above so we can compare all the different models you have tried! Make sure to also log any hyperparameters that you plan on tuning!
# COMMAND ----------
# TODO
import mlflow.sklearn

# Hyperparameters for this run -- passed to the model *and* logged, so the
# values shown in the MLflow UI describe what was actually trained.
params = {
  "n_estimators": 100,
  "max_depth": 30,
  "random_state": 42}

with mlflow.start_run(run_name="RF Model") as run:
  # Train with the logged hyperparameters. (Previously the model was built
  # with RandomForestRegressor() defaults while different values were logged,
  # so the logged params did not match the logged model.)
  rf = RandomForestRegressor(**params)
  rf.fit(X_train, y_train)

  mlflow.sklearn.log_model(rf, "random-forest-model")

  # Evaluate the model trained in *this* run. (Previously the metric was
  # computed from `rfmodel`, the baseline of an earlier cell, so the logged
  # MSE did not describe the logged model.)
  rf_mse = mean_squared_error(y_test, rf.predict(X_test))
  mlflow.log_metric("mse", rf_mse)
  mlflow.log_params(params)

  # Remembered for the model-selection cell below
  experimentID = run.info.experiment_id
  artifactURI = mlflow.get_artifact_uri()
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC Change and re-run the above 3 code cells to log different models and/or models with different hyperparameters until you are satisfied with the performance of at least 1 of them.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC Look through the MLflow UI for the best model. Copy its `URI` so you can load it as a `pyfunc` model.
# COMMAND ----------
# TODO
import mlflow.pyfunc
from mlflow.tracking import MlflowClient

client = MlflowClient()
# Best run = the one with the lowest test MSE within this experiment.
runs = client.search_runs(experimentID, order_by=["metrics.mse asc"], max_results=1)
# Build a runs:/ URI pointing at the model artifact logged by that run.
artifactURI = 'runs:/'+runs[0].info.run_id+"/random-forest-model"
model = mlflow.sklearn.load_model(artifactURI)
# Quick sanity check that a fitted sklearn forest came back
model.feature_importances_
# COMMAND ----------
# MAGIC %md
# MAGIC ## Post-processing
# MAGIC
# MAGIC Our model currently gives us the predicted price per night for each Airbnb listing. Now we would like our model to tell us what the price per person would be for each listing, assuming the number of renters is equal to the `accommodates` value.
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC Fill in the following model class to add in a post-processing step which will get us from total price per night to **price per person per night**.
# MAGIC
# MAGIC <img alt="Side Note" title="Side Note" style="vertical-align: text-bottom; position: relative; height:1.75em; top:0.05em; transform:rotate(15deg)" src="https://files.training.databricks.com/static/images/icon-note.webp"/> Check out <a href="https://www.mlflow.org/docs/latest/models.html#id13" target="_blank">the MLFlow docs for help.</a>
# COMMAND ----------
# TODO
class Airbnb_Model(mlflow.pyfunc.PythonModel):
    """Wraps a trained regressor and post-processes nightly price into
    price per person per night."""

    def __init__(self, model):
        self.model = model

    def predict(self, context, model_input):
        """Return the predicted price per person for each input listing."""
        nightly = self.model.predict(model_input.copy())
        occupants = (model_input['accommodates'].iloc[:].copy()).tolist()
        # Divide each listing's nightly price by how many people it sleeps.
        return [price / people for price, people in zip(nightly, occupants)]
# COMMAND ----------
# MAGIC %md
# MAGIC Construct and save the model to the given `final_model_path`.
# COMMAND ----------
# TODO
final_model_path = f"{working_path}/final-model"
# FILL_IN
dbutils.fs.rm(final_model_path, True) # remove folder if already exists
rf_model = Airbnb_Model(rf)
mlflow.pyfunc.save_model(path=final_model_path.replace("dbfs:", "/dbfs"), | |
(i.e. RichIPythonWidget +
our additions = SMIPythonWidget), which becomes the ipywidget attribute
of this class. We are doing this for several reasons:
1. To add more variables and methods needed to connect the widget to other
    Spyder plugins and also increase its functionality.
2. To make it clear what has been added by us to IPython widgets.
3. To avoid possible name conflicts between our widgets and theirs (e.g.
self.history and self._history, respectively)
"""
CONF_SECTION = 'ipython'
SEPARATOR = '%s##---(%s)---' % (os.linesep*2, time.ctime())
    def __init__(self, plugin, connection_file, kernel_widget_id, client_name,
                 ipywidget, history_filename, menu_actions=None):
        """Build a client widget wrapping *ipywidget*.

        Args:
            plugin: parent IPython console plugin (also the Qt parent).
            connection_file: kernel connection file of this client.
            kernel_widget_id: id of the kernel widget for internal kernels
                (None for external/remote kernels).
            client_name: display name, used in the tab title.
            ipywidget: the SMIPythonWidget this client hosts.
            history_filename: file name (resolved under the conf dir) where
                the command history is persisted.
            menu_actions: optional extra actions for the options menu.
        """
        super(IPythonClient, self).__init__(plugin)
        SaveHistoryMixin.__init__(self)
        self.options_button = None
        self.connection_file = connection_file
        self.kernel_widget_id = kernel_widget_id
        self.client_name = client_name
        self.ipywidget = ipywidget
        self.menu_actions = menu_actions
        self.history_filename = get_conf_path(history_filename)
        self.history = []
        self.namespacebrowser = None
        # Layout: a (currently empty) toolbar row above the console widget
        vlayout = QVBoxLayout()
        toolbar_buttons = self.get_toolbar_buttons()
        hlayout = QHBoxLayout()
        for button in toolbar_buttons:
            hlayout.addWidget(button)
        vlayout.addLayout(hlayout)
        vlayout.setContentsMargins(0, 0, 0, 0)
        vlayout.addWidget(self.ipywidget)
        self.setLayout(vlayout)
        self.exit_callback = lambda: plugin.close_console(client=self)
        # Connect the IPython widget to this IPython client:
        # (see SMlib/widgets/ipython.py for more details about this)
        ipywidget.set_ipyclient(self)
        # To save history
        self.ipywidget.executing.connect(
                                      lambda c: self.add_to_history(command=c))
        # To update history after execution
        self.ipywidget.executed.connect(self.update_history)
        # To update the Variable Explorer after execution
        self.ipywidget.executed.connect(self.auto_refresh_namespacebrowser)
#------ Public API --------------------------------------------------------
def get_name(self):
"""Return client name"""
return _("Console") + " " + self.client_name
def get_control(self):
"""Return the text widget (or similar) to give focus to"""
# page_control is the widget used for paging
page_control = self.ipywidget._page_control
if page_control and page_control.isVisible():
return page_control
else:
return self.ipywidget._control
    def get_options_menu(self):
        """Build and return the action list for the client's options menu."""
        # Kernel control actions
        self.interrupt_action = create_action(self, _("Interrupt kernel"),
                                              icon=get_icon('terminate.png'),
                                              triggered=self.interrupt_kernel)
        self.restart_action = create_action(self, _("Restart kernel"),
                                            icon=get_icon('restart.png'),
                                            triggered=self.restart_kernel)
        # Help actions, grouped under an "IPython Help" submenu
        self.intro_action = create_action(self, _("Intro to IPython"),
                                          triggered=self._show_intro)
        self.quickref_action = create_action(self, _("Quick Reference"),
                                             triggered=self._show_quickref)
        self.guiref_action = create_action(self, _("Console help"),
                                           triggered=self._show_guiref)
        help_menu = QMenu(_("Help"), self)
        help_action = create_action(self, _("IPython Help"),
                                    icon=get_std_icon('DialogHelpButton'))
        help_action.setMenu(help_menu)
        add_actions(help_menu, (self.intro_action, self.guiref_action,
                                self.quickref_action))
        # Main menu: kernel actions first, then any externally supplied
        # actions, then the help submenu (None inserts a separator)
        if self.menu_actions is not None:
            actions = [self.interrupt_action, self.restart_action, None] +\
                      self.menu_actions + [None, help_menu]
        else:
            actions = [self.interrupt_action, self.restart_action, None,
                       help_menu]
        return actions
    def get_toolbar_buttons(self):
        """Return toolbar buttons list"""
        #TODO: Eventually add some buttons (Empty for now)
        # (see for example: SMlib/widgets/externalshell/baseshell.py)
        buttons = []
        # Build the options button lazily, the first time it is requested
        if self.options_button is None:
            options = self.get_options_menu()
            if options:
                self.options_button = create_toolbutton(self,
                        text=_("Options"), icon=get_icon('tooloptions.png'))
                self.options_button.setPopupMode(QToolButton.InstantPopup)
                menu = QMenu(self)
                add_actions(menu, options)
                self.options_button.setMenu(menu)
        if self.options_button is not None:
            buttons.append(self.options_button)
        return buttons
    def add_actions_to_context_menu(self, menu):
        """Add this client's actions to the IPython widget context menu
        and return the (mutated) menu."""
        # See SMlib/widgets/ipython.py for more details on this method
        inspect_action = create_action(self, _("Inspect current object"),
                                    QKeySequence("Ctrl+I"),
                                    icon=get_std_icon('MessageBoxInformation'),
                                    triggered=self.inspect_object)
        clear_line_action = create_action(self, _("Clear line or block"),
                                          QKeySequence("Shift+Escape"),
                                          icon=get_icon('eraser.png'),
                                          triggered=self.clear_line)
        clear_console_action = create_action(self, _("Clear console"),
                                             QKeySequence("Ctrl+L"),
                                             icon=get_icon('clear.png'),
                                             triggered=self.clear_console)
        quit_action = create_action(self, _("&Quit"), icon='exit.png',
                                    triggered=self.exit_callback)
        # None entries insert separators between the action groups
        add_actions(menu, (None, inspect_action, clear_line_action,
                           clear_console_action, None, quit_action))
        return menu
    def set_font(self, font):
        """Set IPython widget's font (applied to the whole console view)"""
        self.ipywidget.font = font
    def interrupt_kernel(self):
        """Interrupt the associated Spyder kernel if it's running"""
        self.ipywidget.request_interrupt_kernel()
    def restart_kernel(self):
        """Restart the associated Spyder kernel"""
        self.ipywidget.request_restart_kernel()
    def inspect_object(self):
        """Show how to inspect an object with our object inspector"""
        # Delegate to the console control, which handles the object lookup
        self.ipywidget._control.inspect_current_object()
    def clear_line(self):
        """Clear a console line via the widget's keyboard-quit handler"""
        self.ipywidget._keyboard_quit()
    def clear_console(self):
        """Clear the whole console by running IPython's %clear magic"""
        self.ipywidget.execute("%clear")
    def if_kernel_dies(self, t):
        """
        Show a message in the console if the kernel dies.
        t is the time in seconds between the death and showing the message
        (supplied by the caller; not used in the message itself).
        """
        message = _("It seems the kernel died unexpectedly. Use "
                    "'Restart kernel' to continue using this console.")
        self.ipywidget._append_plain_text(message + '\n')
    def update_history(self):
        """Mirror the widget's command history after each execution."""
        self.history = self.ipywidget._history
    def interrupt_message(self):
        """
        Print an interrupt message when the client is connected to an external
        kernel
        """
        # We do not own external/remote kernel processes, so interrupting
        # them is impossible -- just inform the user.
        message = _("Kernel process is either remote or unspecified. "
                    "Cannot interrupt")
        QMessageBox.information(self, "IPython", message)
    def restart_message(self):
        """
        Print a restart message when the client is connected to an external
        kernel
        """
        # Same limitation as interrupt_message: external kernels cannot be
        # restarted from here.
        message = _("Kernel process is either remote or unspecified. "
                    "Cannot restart.")
        QMessageBox.information(self, "IPython", message)
    def set_namespacebrowser(self, namespacebrowser):
        """Set namespace browser widget (refreshed after each execution)"""
        self.namespacebrowser = namespacebrowser
    def auto_refresh_namespacebrowser(self):
        """Refresh namespace browser after an execution, if one is attached"""
        if self.namespacebrowser:
            self.namespacebrowser.refresh_table()
#------ Private API -------------------------------------------------------
    def _show_rich_help(self, text):
        """Use our Object Inspector to show IPython help texts in rich mode"""
        # Imported lazily so the sphinx machinery is only loaded when needed
        from SMlib.utils.inspector import sphinxify as spx
        # Render the raw help text to HTML through the sphinxify pipeline
        context = spx.generate_context(name='', argspec='', note='',
                                       math=False)
        html_text = spx.sphinxify(text, context)
        inspector = self.get_control().inspector
        inspector.switch_to_rich_text()
        inspector.set_rich_text_html(html_text,
                                     QUrl.fromLocalFile(spx.CSS_PATH))
    def _show_plain_help(self, text):
        """Use our Object Inspector to show IPython help texts in plain mode"""
        inspector = self.get_control().inspector
        inspector.switch_to_plain_text()
        # is_code=False: help text is prose, not code to syntax-highlight
        inspector.set_plain_text(text, is_code=False)
    def _show_intro(self):
        """Show intro to IPython help"""
        # Imported lazily so IPython is only required when help is requested
        from IPython.core.usage import interactive_usage
        self._show_rich_help(interactive_usage)
    def _show_guiref(self):
        """Show qtconsole help"""
        # NOTE(review): gui_reference was dropped from newer IPython
        # releases -- confirm the supported IPython versions still export it
        from IPython.core.usage import gui_reference
        self._show_rich_help(gui_reference)
    def _show_quickref(self):
        """Show IPython Cheat Sheet"""
        # Imported lazily so IPython is only required when help is requested
        from IPython.core.usage import quick_reference
        self._show_plain_help(quick_reference)
#---- Qt methods ----------------------------------------------------------
    def closeEvent(self, event):
        """Reimplement Qt method to stop sending the custom_restart_kernel_died
        signal"""
        # IPython >= 1.0: pause the heartbeat channel so no dead-kernel
        # notifications fire for a client that is being closed
        if programs.is_module_installed('IPython', '>=1.0'):
            kc = self.ipywidget.kernel_client
            kc.hb_channel.pause()
        else:
            # Older IPython: disable the custom restart machinery instead
            self.ipywidget.custom_restart = False
class IPythonConsole(SMPluginWidget):
"""
IPython Console plugin
This is a widget with tabs where each one is an IPythonClient
"""
CONF_SECTION = 'ipython_console'
CONFIGWIDGET_CLASS = IPythonConsoleConfigPage
DISABLE_ACTIONS_WHEN_HIDDEN = False
    def __init__(self, parent):
        """Create the console plugin widget: a tab bar of IPythonClient
        instances plus a find/replace bar."""
        SMPluginWidget.__init__(self, parent)
        self.tabwidget = None
        self.menu_actions = None
        # References to sibling plugins, filled in by register_plugin()
        self.extconsole = None         # External console plugin
        self.inspector = None          # Object inspector plugin
        self.historylog = None         # History log plugin
        self.variableexplorer = None   # Variable explorer plugin
        self.clients = []
        # Initialize plugin
        self.initialize_plugin()
        layout = QVBoxLayout()
        self.tabwidget = Tabs(self, self.menu_actions)
        if hasattr(self.tabwidget, 'setDocumentMode')\
           and not sys.platform == 'darwin':
            # Don't set document mode to true on OSX because it generates
            # a crash when the console is detached from the main window
            # Fixes Issue 561
            self.tabwidget.setDocumentMode(True)
        self.connect(self.tabwidget, SIGNAL('currentChanged(int)'),
                     self.refresh_plugin)
        self.connect(self.tabwidget, SIGNAL('move_data(int,int)'),
                     self.move_tab)
        self.tabwidget.set_close_function(self.close_console)
        layout.addWidget(self.tabwidget)
        # Find/replace widget
        self.find_widget = FindReplace(self)
        self.find_widget.hide()
        self.register_widget_shortcuts("Editor", self.find_widget)
        layout.addWidget(self.find_widget)
        self.setLayout(layout)
        # Accepting drops
        self.setAcceptDrops(True)
#------ SpyderPluginWidget API --------------------------------------------
    def get_plugin_title(self):
        """Return the (localized) widget title"""
        return _('IPython console')
    def get_plugin_icon(self):
        """Return widget icon loaded from the plugin's image resources"""
        return get_icon('ipython_console.png')
def get_focus_widget(self):
"""
Return the widget to give focus to when
this plugin's dockwidget is raised on top-level
"""
client = self.tabwidget.currentWidget()
if client is not None:
return client.get_control()
def get_current_client(self):
"""
Return the currently selected client
"""
client = self.tabwidget.currentWidget()
if client is not None:
return client
    def run_script_in_current_client(self, filename, wdir, args, debug):
        """Run script in current client, if any.

        Builds either a runfile()/debugfile() call (internal kernels) or a
        %run magic line (external kernels) and executes it in the client.
        """
        norm = lambda text: remove_backslashes(unicode(text))
        client = self.get_current_client()
        if client is not None:
            # Internal kernels, use runfile
            if client.kernel_widget_id is not None:
                line = "%s('%s'" % ('debugfile' if debug else 'runfile',
                                    norm(filename))
                if args:
                    line += ", args='%s'" % norm(args)
                if wdir:
                    line += ", wdir='%s'" % norm(wdir)
                line += ")"
            else: # External kernels, use %run
                line = "%run "
                if debug:
                    line += "-d "
                line += "\"%s\"" % unicode(filename)
                if args:
                    line += " %s" % norm(args)
            self.execute_python_code(line)
            # Bring the console to the front so the user sees the output
            self.visibility_changed(True)
            self.raise_()
        else:
            #XXX: not sure it can really happen
            QMessageBox.warning(self, _('Warning'),
                _("No IPython console is currently available to run <b>%s</b>."
                  "<br><br>Please open a new one and try again."
                  ) % osp.basename(filename), QMessageBox.Ok)
    def execute_python_code(self, lines):
        """Run the given code in the current client and focus its console."""
        client = self.get_current_client()
        if client is not None:
            client.ipywidget.execute(unicode(lines))
            self.activateWindow()
            client.get_control().setFocus()
def write_to_stdin(self, line):
client = self.get_current_client()
if client is not None:
client.ipywidget.write_to_stdin(line)
def get_plugin_actions(self):
"""Return a list of actions related to plugin"""
create_client_action = create_action(self,
_("Open an IPython console"),
None, 'ipython_console.png',
triggered=self.main.extconsole.start_ipykernel)
connect_to_kernel_action = create_action(self,
_("Connect to an existing kernel"),
None, 'ipython_console.png',
_("Open a new IPython client connected to an external kernel"),
triggered=self.new_client)
# Add the action to the 'Interpreters' menu on the main window
interact_menu_actions = [create_client_action, None,
connect_to_kernel_action]
self.main.interact_menu_actions += interact_menu_actions
# Plugin actions
self.menu_actions = [create_client_action, connect_to_kernel_action]
return self.menu_actions
def on_first_registration(self):
"""Action to be performed on first plugin registration"""
self.main.tabify_plugins(self.main.extconsole, self)
def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.main.add_dockwidget(self)
self.extconsole = self.main.extconsole
self.inspector = self.main.inspector
self.historylog = self.main.historylog
self.variableexplorer = self.main.variableexplorer
self.connect(self, SIGNAL('focus_changed()'),
self.main.plugin_focus_changed)
if self.main.editor is not None:
self.connect(self, SIGNAL("edit_goto(QString,int,QString)"),
self.main.editor.load)
self.connect(self.main.editor,
SIGNAL('run_in_current_ipyclient(QString,QString,QString,bool)'),
self.run_script_in_current_client)
def closing_plugin(self, cancelable=False):
"""Perform actions before parent main window | |
engineer=6)),
("cm_buy_nephrite_earrings",spr_buy_item_flags(17),"nephrite_earrings","bo_pw_weapon_small", spr_buy_item_triggers("itm_nephrite_earrings", resources=["itm_gold_nugget"], engineer=6)),
("cm_buy_black_pearl_earrings",spr_buy_item_flags(18),"black_pearl_earrings","bo_pw_weapon_small", spr_buy_item_triggers("itm_black_pearl_earrings", resources=["itm_gold_nugget"], engineer=6)),
("cm_buy_ruby_earrings",spr_buy_item_flags(18),"ruby_earrings","bo_pw_weapon_small", spr_buy_item_triggers("itm_ruby_earrings", resources=["itm_gold_nugget"], engineer=6)),
("cm_import_piglet",spr_use_time(20),"sack","bo_sack_fixed", spr_import_item_triggers("itm_piglet", pos_offset=(0,50,50), price_multiplier=300, check_script="script_cf_can_spawn_herd_animal")),
("cm_buy_byrnie_a_tunic",spr_buy_item_flags(2),"byrnie_a_tunic","bo_pw_armor_body", spr_buy_item_triggers("itm_byrnie_a_tunic", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_byrnie_a_tunic_c",spr_buy_item_flags(2),"byrnie_a_tunic_c","bo_pw_armor_body", spr_buy_item_triggers("itm_byrnie_a_tunic_c", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_rich_tunic_e",spr_buy_item_flags(2),"rich_tunic_e","bo_pw_armor_body", spr_buy_item_triggers("itm_rich_tunic_e", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_sar_pants",spr_buy_item_flags(2),"sar_pants","bo_pw_armor_body", spr_buy_item_triggers("itm_sar_pants", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_shirt_e",spr_buy_item_flags(2),"shirt_e","bo_pw_armor_body", spr_buy_item_triggers("itm_shirt_e", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_ribaude_dress",spr_buy_item_flags(2),"ribaude_dress","bo_pw_armor_body", spr_buy_item_triggers("itm_ribaude_dress", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_tavern_keep_shirt",spr_buy_item_flags(2),"tavern_keep_shirt","bo_pw_armor_body", spr_buy_item_triggers("itm_tavern_keep_shirt", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_red_dress2",spr_buy_item_flags(2),"red_dress2","bo_pw_armor_body", spr_buy_item_triggers("itm_red_dress2", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_rich_blue_dress",spr_buy_item_flags(2),"rich_blue_dress","bo_pw_armor_body", spr_buy_item_triggers("itm_rich_blue_dress", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_nobleman_outfit_charles",spr_buy_item_flags(2),"nobleman_outfit_charles","bo_pw_armor_body", spr_buy_item_triggers("itm_nobleman_outfit_charles", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_nobleman_outfit_anglais",spr_buy_item_flags(2),"nobleman_outfit_anglais","bo_pw_armor_body", spr_buy_item_triggers("itm_nobleman_outfit_anglais", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_new_noble_tunic_a",spr_buy_item_flags(2),"new_noble_tunic_a","bo_pw_armor_body", spr_buy_item_triggers("itm_new_noble_tunic_a", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_new_noble_tunic_b",spr_buy_item_flags(2),"new_noble_tunic_b","bo_pw_armor_body", spr_buy_item_triggers("itm_new_noble_tunic_b", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_maid_dress",spr_buy_item_flags(2),"maid_dress","bo_pw_armor_body", spr_buy_item_triggers("itm_maid_dress", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_peasant_dress_c",spr_buy_item_flags(2),"peasant_dress_c","bo_pw_armor_body", spr_buy_item_triggers("itm_peasant_dress_c", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_executioners_axe",spr_buy_item_flags(11),"executioners_axe","bo_pw_weapon", spr_buy_item_triggers("itm_executioners_axe", resources=["itm_wood_pole_short", "itm_iron_bar"], engineer=5)),
("cm_buy_practice_arrows",spr_buy_item_flags(2),"arrow","bo_pw_weapon", spr_buy_item_triggers("itm_bent_arrows", resources=["itm_stick"], engineer=1)),
("cm_buy_practice_bolts",spr_buy_item_flags(2),"bolt","bo_pw_weapon", spr_buy_item_triggers("itm_crude_bolts", resources=["itm_stick"], engineer=1)),
("cm_buy_letter",spr_buy_item_flags(6),"letter","bo_pw_weapon_small", spr_buy_item_triggers("itm_letter", pos_offset=(0,0,15), resources=["itm_leather_roll"], engineer=3)),
# Adornos scene props
("cm_ado_wood_arena_barrier" ,0,"cm_ado_wood_arena_barrier","bo_cm_ado_wood_arena_barrier",[]),
("cm_ado_wood_arena_training" ,0,"cm_ado_wood_arena_training","bo_cm_ado_wood_arena_training",[]),
("cm_ado_wood_barn" ,0,"cm_ado_wood_barn","bo_cm_ado_wood_barn",[]),
("cm_sitable_ado_wood_bench_fine" ,spr_use_time(1),"cm_ado_wood_bench_fine","bo_cm_ado_wood_bench_fine", spr_chairs("anim_sitting_leg_crossed", "anim_sitting")),
("cm_sitable_ado_wood_bench_plain" ,spr_use_time(1),"cm_ado_wood_bench_plain","bo_cm_ado_wood_bench_plain", spr_chairs("anim_sitting_leg_crossed", "anim_sitting")),
("cm_sitable_ado_stone_bench_roman" ,spr_use_time(1),"cm_ado_stone_bench_roman","bo_cm_ado_stone_bench_roman", spr_chairs("anim_sitting_leg_crossed", "anim_sitting")),
("cm_ado_wood_bridge_long" ,0,"cm_ado_wood_bridge_long","bo_cm_ado_wood_bridge_long",[]),
("cm_ado_wood_bridge_long_ramp" ,0,"cm_ado_wood_bridge_long_ramp","bo_cm_ado_wood_bridge_long_ramp",[]),
("cm_ado_wood_bridge_tall" ,0,"cm_ado_wood_bridge_tall","bo_cm_ado_wood_bridge_tall",[]),
("cm_ado_wood_cabin_log" ,0,"cm_ado_wood_cabin_log","bo_cm_ado_wood_cabin_log",[]),
("cm_ado_wood_cart" ,0,"cm_ado_wood_cart","bo_cm_ado_wood_cart",[]),
("cm_ado_wood_cart_2" ,0,"cm_ado_wood_cart_2","bo_cm_ado_wood_cart_2",[]),
("cm_ado_wood_cart_2_broken" ,0,"cm_ado_wood_cart_2_broken","bo_cm_ado_wood_cart_2_broken",[]),
("cm_ado_wood_cart_3" ,0,"cm_ado_wood_cart_3","bo_cm_ado_wood_cart_3",[]),
("cm_ado_wood_cart_wheel" ,0,"cm_ado_wood_cart_wheel","bo_cm_ado_wood_cart_wheel",[]),
("cm_sitable_ado_wood_chair1" ,spr_use_time(1),"cm_ado_wood_chair1","bo_cm_ado_wood_chair1", spr_chairs("anim_sitting_leg_crossed", "anim_sitting")),
("cm_sitable_ado_wood_chair2" ,spr_use_time(1),"cm_ado_wood_chair2","bo_cm_ado_wood_chair2", spr_chairs("anim_sitting_leg_crossed", "anim_sitting")),
("cm_sitable_ado_wood_chair_fine" ,spr_use_time(1),"cm_ado_wood_chair_fine","bo_cm_ado_wood_chair_fine", spr_chairs("anim_sitting_leg_crossed", "anim_sitting")),
("cm_ado_wood_coffin_closed" ,0,"cm_ado_wood_coffin_closed","bo_cm_ado_wood_coffin_closed",[]),
("cm_ado_wood_coffin_lid" ,0,"cm_ado_wood_coffin_lid","bo_cm_ado_wood_coffin_lid",[]),
("cm_ado_wood_coffin_no_lid" ,0,"cm_ado_wood_coffin_no_lid","bo_cm_ado_wood_coffin_no_lid",[]),
("cm_ado_wood_coffin_with_lid_off" ,0,"cm_ado_wood_coffin_with_lid_off","bo_cm_ado_wood_coffin_with_lid_off",[]),
("cm_ado_wood_cradle" ,0,"cm_ado_wood_cradle","bo_cm_ado_wood_cradle",[]),
("cm_ado_wood_crate_1" ,0,"cm_ado_wood_crate_1","bo_cm_ado_wood_crate_1",[]),
("cm_ado_wood_crate_2" ,0,"cm_ado_wood_crate_2","bo_cm_ado_wood_crate_2",[]),
("cm_ado_wood_crate_broken" ,0,"cm_ado_wood_crate_broken","bo_cm_ado_wood_crate_broken",[]),
("cm_ado_wood_crate_closed" ,0,"cm_ado_wood_crate_closed","bo_cm_ado_wood_crate_closed",[]),
("cm_ado_wood_crate_lid" ,0,"cm_ado_wood_crate_lid","bo_cm_ado_wood_crate_lid",[]),
("cm_ado_wood_crate_tall" ,0,"cm_ado_wood_crate_tall","bo_cm_ado_wood_crate_tall",[]),
("cm_ado_wood_desk" ,0,"cm_ado_wood_desk","bo_cm_ado_wood_desk",[]),
("cm_ado_wood_desk_tall" ,0,"cm_ado_wood_desk_tall","bo_cm_ado_wood_desk_tall",[]),
("cm_ado_wood_dovecote_tall" ,0,"cm_ado_wood_dovecote_tall","bo_cm_ado_wood_dovecote_tall",[]),
("cm_ado_wood_fence_short" ,0,"cm_ado_wood_fence_short","bo_cm_ado_wood_fence_short",[]),
("cm_ado_wood_fence_long" ,0,"cm_ado_wood_fence_long","bo_cm_ado_wood_fence_long",[]),
("cm_ado_wood_gatehouse" ,0,"cm_ado_wood_gatehouse","bo_cm_ado_wood_gatehouse",[]),
("cm_ado_wood_gatehouse_large" ,0,"cm_ado_wood_gatehouse_large","bo_cm_ado_wood_gatehouse_large",[]),
("cm_ado_wood_gate_left" ,0,"cm_ado_wood_gate_left","bo_cm_ado_wood_gate_left",[]),
("cm_ado_wood_gate_right" ,0,"cm_ado_wood_gate_right","bo_cm_ado_wood_gate_right",[]),
("cm_ado_wood_gate_lock" ,0,"cm_ado_wood_gate_lock","bo_cm_ado_wood_gate_lock",[]),
("cm_ado_wood_gates_locked" ,0,"cm_ado_wood_gates_locked","bo_cm_ado_wood_gates_locked",[]),
("cm_ado_wood_gatehouse_twins" ,0,"cm_ado_wood_gatehouse_twins","bo_cm_ado_wood_gatehouse_twins",[]),
("cm_ado_wood_house" ,0,"cm_ado_wood_house","bo_cm_ado_wood_house",[]),
("cm_ado_wood_house_planks" ,0,"cm_ado_wood_house_planks","bo_cm_ado_wood_house_planks",[]),
("cm_ado_wood_house_small" ,0,"cm_ado_wood_house_small","bo_cm_ado_wood_house_small",[]),
("cm_ado_wood_house_small_v2" ,0,"cm_ado_wood_house_small_v2","bo_cm_ado_wood_house_small_v2",[]),
("cm_ado_wood_house_small_shingles" ,0,"cm_ado_wood_house_small_shingles","bo_cm_ado_wood_house_small_shingles",[]),
("cm_ado_wood_judas_cradle" ,0,"cm_ado_wood_judas_cradle","bo_cm_ado_wood_judas_cradle",[]),
("cm_ado_wood_ladder_simple" ,0,"cm_ado_wood_ladder_simple","bo_cm_ado_wood_ladder_simple",[]),
("cm_ado_wood_ladder_simple_broken" ,0,"cm_ado_wood_ladder_simple_broken","bo_cm_ado_wood_ladder_simple_broken",[]),
("cm_ado_wood_log_1" ,0,"cm_ado_wood_log_1","bo_cm_ado_wood_log_1",[]),
("cm_ado_wood_log_2" ,0,"cm_ado_wood_log_2","bo_cm_ado_wood_log_2",[]),
("cm_ado_wood_log_3" ,0,"cm_ado_wood_log_3","bo_cm_ado_wood_log_3",[]),
("cm_ado_wood_log_4" ,0,"cm_ado_wood_log_4","bo_cm_ado_wood_log_4",[]),
("cm_ado_wood_logs_pile" ,0,"cm_ado_wood_logs_pile","bo_cm_ado_wood_logs_pile",[]),
("cm_ado_wood_logs_pile_2" ,0,"cm_ado_wood_logs_pile_2","bo_cm_ado_wood_logs_pile_2",[]),
("cm_ado_wood_palisade_long" ,0,"cm_ado_wood_palisade_long","bo_cm_ado_wood_palisade_long",[]),
("cm_ado_wood_palisade_long_decay" ,0,"cm_ado_wood_palisade_long_decay","bo_cm_ado_wood_palisade_long_decay",[]),
("cm_ado_wood_palisade_long_destroyed" ,0,"cm_ado_wood_palisade_long_destroyed","bo_cm_ado_wood_palisade_long_destroyed",[]),
("cm_ado_wood_palisade_plain" ,0,"cm_ado_wood_palisade_plain","bo_cm_ado_wood_palisade_plain",[]),
("cm_ado_wood_palisade_short" ,0,"cm_ado_wood_palisade_short","bo_cm_ado_wood_palisade_short",[]),
("cm_ado_wood_palisade_stake" ,0,"cm_ado_wood_palisade_stake","bo_cm_ado_wood_palisade_stake",[]),
("cm_ado_wood_palisade_stake_broken" ,0,"cm_ado_wood_palisade_stake_broken","bo_cm_ado_wood_palisade_stake_broken",[]),
("cm_ado_wood_pile" ,0,"cm_ado_wood_pile","bo_cm_ado_wood_pile",[]),
("cm_ado_wood_pillory" ,0,"cm_ado_wood_pillory","bo_cm_ado_wood_pillory",[]),
("cm_ado_wood_platform" ,0,"cm_ado_wood_platform","bo_cm_ado_wood_platform",[]),
("cm_ado_wood_platform_raised" ,0,"cm_ado_wood_platform_raised","bo_cm_ado_wood_platform_raised",[]),
("cm_ado_wood_spanish_donkey" ,0,"cm_ado_wood_spanish_donkey","bo_cm_ado_wood_spanish_donkey",[]),
("cm_ado_wood_stable" ,0,"cm_ado_wood_stable","bo_cm_ado_wood_stable",[]),
("cm_ado_wood_stable_simple" ,0,"cm_ado_wood_stable_simple","bo_cm_ado_wood_stable_simple",[]),
("cm_ado_wood_stairs" ,0,"cm_ado_wood_stairs","bo_cm_ado_wood_stairs",[]),
("cm_ado_wood_stairs_long" ,0,"cm_ado_wood_stairs_long","bo_cm_ado_wood_stairs_long",[]),
("cm_ado_wood_stairs_short" ,0,"cm_ado_wood_stairs_short","bo_cm_ado_wood_stairs_short",[]),
("cm_ado_wood_stockade_bending" ,0,"cm_ado_wood_stockade_bending","bo_cm_ado_wood_stockade_bending",[]),
("cm_ado_wood_stockade_long" ,0,"cm_ado_wood_stockade_long","bo_cm_ado_wood_stockade_long",[]),
("cm_ado_wood_stockade_long_broken" ,0,"cm_ado_wood_stockade_long_broken","bo_cm_ado_wood_stockade_long_broken",[]),
("cm_ado_wood_stockade_short" ,0,"cm_ado_wood_stockade_short","bo_cm_ado_wood_stockade_short",[]),
("cm_ado_wood_stockade_plank" ,0,"cm_ado_wood_stockade_plank","bo_cm_ado_wood_stockade_plank",[]),
("cm_ado_wood_stockade_plank_broken" ,0,"cm_ado_wood_stockade_plank_broken","bo_cm_ado_wood_stockade_plank_broken",[]),
("cm_ado_wood_stool" ,0,"cm_ado_wood_stool","bo_cm_ado_wood_stool",[]),
("cm_ado_wood_table_plain" ,0,"cm_ado_wood_table_plain","bo_cm_ado_wood_table_plain",[]),
("cm_ado_wood_table_round" ,0,"cm_ado_wood_table_round","bo_cm_ado_wood_table_round",[]),
("cm_ado_wood_table_round_2" ,0,"cm_ado_wood_table_round_2","bo_cm_ado_wood_table_round_2",[]),
("cm_ado_wood_table_round_small" ,0,"cm_ado_wood_table_round_small","bo_cm_ado_wood_table_round_small",[]),
("cm_ado_wood_tourney_arch" ,0,"cm_ado_wood_tourney_arch","bo_cm_ado_wood_tourney_arch",[]),
("cm_ado_wood_tourney_arch_painted" ,0,"cm_ado_wood_tourney_arch_painted","bo_cm_ado_wood_tourney_arch_painted",[]),
("cm_ado_wood_tourney_arch_painted_dark" ,0,"cm_ado_wood_tourney_arch_painted_dark","bo_cm_ado_wood_tourney_arch_painted_dark",[]),
("cm_ado_wood_tourney_barrier" ,0,"cm_ado_wood_tourney_barrier","bo_cm_ado_wood_tourney_barrier",[]),
("cm_ado_wood_tourney_barrier_painted" ,0,"cm_ado_wood_tourney_barrier_painted","bo_cm_ado_wood_tourney_barrier_painted",[]),
("cm_ado_wood_tourney_barrier_painted_dark" ,0,"cm_ado_wood_tourney_barrier_painted_dark","bo_cm_ado_wood_tourney_barrier_painted_dark",[]),
("cm_ado_wood_tourney_barrier_post" ,0,"cm_ado_wood_tourney_barrier_post","bo_cm_ado_wood_tourney_barrier_post",[]),
("cm_ado_wood_tourney_barrier_short" ,0,"cm_ado_wood_tourney_barrier_short","bo_cm_ado_wood_tourney_barrier_short",[]),
("cm_ado_wood_tourney_barrier_short_painted" ,0,"cm_ado_wood_tourney_barrier_short_painted","bo_cm_ado_wood_tourney_barrier_short_painted",[]),
("cm_ado_wood_tourney_barrier_short_painted_dark",0,"cm_ado_wood_tourney_barrier_short_painted_dark","bo_cm_ado_wood_tourney_barrier_short_painted_dark",[]),
("cm_ado_wood_tourney_fence" ,0,"cm_ado_wood_tourney_fence","bo_cm_ado_wood_tourney_fence",[]),
("cm_ado_wood_tourney_fence_post" ,0,"cm_ado_wood_tourney_fence_post","bo_cm_ado_wood_tourney_fence_post",[]),
("cm_ado_wood_tourney_fence_short" ,0,"cm_ado_wood_tourney_fence_short","bo_cm_ado_wood_tourney_fence_short",[]),
("cm_ado_wood_tourney_podium" ,0,"cm_ado_wood_tourney_podium","bo_cm_ado_wood_tourney_podium",[]),
("cm_ado_wood_tourney_podium_painted" ,0,"cm_ado_wood_tourney_podium_painted","bo_cm_ado_wood_tourney_podium_painted",[]),
("cm_ado_wood_tourney_podium_painted_dark" ,0,"cm_ado_wood_tourney_podium_painted_dark","bo_cm_ado_wood_tourney_podium_painted_dark",[]),
("cm_ado_wood_tourney_post" ,0,"cm_ado_wood_tourney_post","bo_cm_ado_wood_tourney_post",[]),
("cm_ado_wood_tourney_scaffold_short" ,0,"cm_ado_wood_tourney_scaffold_short","bo_cm_ado_wood_tourney_scaffold_short",[]),
("cm_ado_wood_tourney_scaffold_short_combined",0,"cm_ado_wood_tourney_scaffold_short_combined","bo_cm_ado_wood_tourney_scaffold_short_combined",[]),
("cm_ado_wood_tourney_scaffold_tall" ,0,"cm_ado_wood_tourney_scaffold_tall","bo_cm_ado_wood_tourney_scaffold_tall",[]),
("cm_ado_wood_tourney_scaffold_tall_painted" ,0,"cm_ado_wood_tourney_scaffold_tall_painted","bo_cm_ado_wood_tourney_scaffold_tall_painted",[]),
("cm_ado_wood_tourney_scaffold_tall_painted_dark",0,"cm_ado_wood_tourney_scaffold_tall_painted_dark","bo_cm_ado_wood_tourney_scaffold_tall_painted_dark",[]),
("cm_ado_wood_tower_lookout_defence" ,0,"cm_ado_wood_tower_lookout_defence","bo_cm_ado_wood_tower_lookout_defence",[]),
("cm_ado_wood_wall_section" ,0,"cm_ado_wood_wall_section","bo_cm_ado_wood_wall_section",[]),
("cm_ado_wood_wall_section_2" ,0,"cm_ado_wood_wall_section_2","bo_cm_ado_wood_wall_section_2",[]),
("cm_ado_wood_wall_section_destroyed" ,0,"cm_ado_wood_wall_section_destroyed","bo_cm_ado_wood_wall_section_destroyed",[]),
("cm_ado_wood_wall_section_bending" ,0,"cm_ado_wood_wall_section_bending","bo_cm_ado_wood_wall_section_bending",[]),
("cm_ado_wood_wall_section_corner" ,0,"cm_ado_wood_wall_section_corner","bo_cm_ado_wood_wall_section_corner",[]),
("cm_ado_wood_wall_section_short" ,0,"cm_ado_wood_wall_section_short","bo_cm_ado_wood_wall_section_short",[]),
("cm_ado_wood_wall_tower" ,0,"cm_ado_wood_wall_tower","bo_cm_ado_wood_wall_tower",[]),
("cm_ado_wood_well_stone_village" ,spr_use_time(5),"cm_ado_wood_well_stone_village","bo_cm_ado_wood_well_stone_village",spr_well_triggers()),
("cm_ado_wood_well_village" ,spr_use_time(5),"cm_ado_wood_well_village","bo_cm_ado_wood_well_village",spr_well_triggers()),
("cm_ado_wood_wheelbarrow" ,0,"cm_ado_wood_wheelbarrow","bo_cm_ado_wood_wheelbarrow",[]),
("cm_ado_wood_windmill_post" ,0,"cm_ado_wood_windmill_post","bo_cm_ado_wood_windmill_post",[]),
("cm_ado_wood_windmill_post_full_sail" ,0,"cm_ado_wood_windmill_post_full_sail","bo_cm_ado_wood_windmill_post_full_sail",[]),
("cm_ado_wood_windmill_post_full_sail_red" ,0,"cm_ado_wood_windmill_post_full_sail_red","bo_cm_ado_wood_windmill_post_full_sail_red",[]),
("cm_ado_wood_windmill_post_sails_off" ,0,"cm_ado_wood_windmill_post_sails_off","bo_cm_ado_wood_windmill_post_sails_off",[]),
# Simple geometric props
("cm_basic_1_corner" ,0,"cm_basic_1_corner","bo_basic_corner",[]),
("cm_basic_1_cylinder" ,0,"cm_basic_1_cylinder","bo_basic_cylinder",[]),
("cm_basic_1_cylinder_flat" ,0,"cm_basic_1_cylinder_flat","bo_basic_cylinder_flat",[]),
("cm_basic_1_flat_cube" ,0,"cm_basic_1_flat_cube","bo_basic_flat_cube",[]),
("cm_basic_1_long_cube" ,0,"cm_basic_1_long_cube","bo_basic_long_cube",[]),
("cm_basic_1_small_wall" ,0,"cm_basic_1_small_wall","bo_basic_small_wall",[]),
("cm_basic_1_stairs" ,0,"cm_basic_1_stairs","bo_basic_stairs",[]),
("cm_basic_1_stairs_arch" ,0,"cm_basic_1_stairs_arch","bo_basic_stairs_arch",[]),
("cm_basic_1_trapezoid" ,0,"cm_basic_1_trapezoid","bo_basic_trapezoid",[]),
("cm_basic_1_triangle" ,0,"cm_basic_1_triangle","bo_basic_triangle",[]),
("cm_basic_2_corner" ,0,"cm_basic_2_corner","bo_basic_corner",[]),
("cm_basic_2_cylinder" ,0,"cm_basic_2_cylinder","bo_basic_cylinder",[]),
("cm_basic_2_cylinder_flat" ,0,"cm_basic_2_cylinder_flat","bo_basic_cylinder_flat",[]),
("cm_basic_2_flat_cube" ,0,"cm_basic_2_flat_cube","bo_basic_flat_cube",[]),
("cm_basic_2_long_cube" ,0,"cm_basic_2_long_cube","bo_basic_long_cube",[]),
("cm_basic_2_small_wall" ,0,"cm_basic_2_small_wall","bo_basic_small_wall",[]),
("cm_basic_2_stairs" ,0,"cm_basic_2_stairs","bo_basic_stairs",[]),
("cm_basic_2_stairs_arch" ,0,"cm_basic_2_stairs_arch","bo_basic_stairs_arch",[]),
("cm_basic_2_trapezoid" ,0,"cm_basic_2_trapezoid","bo_basic_trapezoid",[]),
("cm_basic_2_triangle" ,0,"cm_basic_2_triangle","bo_basic_triangle",[]),
# BeefBacon's Terrain and Landscaping Pack
("cm_terrain_forest_plane_01" ,0,"terrain_forest_plane_01","bo_terrain_plane_01",[]),
("cm_terrain_forest_cliff_01" ,0,"terrain_forest_cliff_01","bo_terrain_cliff_01",[]),
("cm_terrain_forest_cliff_02" ,0,"terrain_forest_cliff_02","bo_terrain_cliff_02",[]),
("cm_terrain_forest_cliff_03" ,0,"terrain_forest_cliff_03","bo_terrain_cliff_03",[]),
("cm_terrain_forest_cliff_04" ,0,"terrain_forest_cliff_04","bo_terrain_cliff_04",[]),
("cm_terrain_forest_cliff_05" ,0,"terrain_forest_cliff_05","bo_terrain_cliff_05",[]),
("cm_terrain_forest_cliff_06" ,0,"terrain_forest_cliff_06","bo_terrain_cliff_06",[]),
("cm_terrain_forest_rockpile_01" ,0,"terrain_forest_rockpile_01","bo_terrain_rockpile_01",[]),
("cm_terrain_forest_mound_01" ,0,"terrain_forest_mound_01","bo_terrain_mound_01",[]),
("cm_terrain_earth_plane_01" ,0,"terrain_earth_plane_01","bo_terrain_plane_01",[]),
("cm_terrain_earth_cliff_01" ,0,"terrain_earth_cliff_01","bo_terrain_cliff_01",[]),
("cm_terrain_earth_cliff_02" ,0,"terrain_earth_cliff_02","bo_terrain_cliff_02",[]),
("cm_terrain_earth_cliff_03" ,0,"terrain_earth_cliff_03","bo_terrain_cliff_03",[]),
("cm_terrain_earth_cliff_04" ,0,"terrain_earth_cliff_04","bo_terrain_cliff_04",[]),
("cm_terrain_earth_cliff_05" ,0,"terrain_earth_cliff_05","bo_terrain_cliff_05",[]),
("cm_terrain_earth_cliff_06" ,0,"terrain_earth_cliff_06","bo_terrain_cliff_06",[]),
("cm_terrain_earth_rockpile_01" ,0,"terrain_earth_rockpile_01","bo_terrain_rockpile_01",[]),
("cm_terrain_earth_mound_01" ,0,"terrain_earth_mound_01","bo_terrain_mound_01",[]),
("cm_terrain_steppe_plane_01" ,0,"terrain_steppe_plane_01","bo_terrain_plane_01",[]),
("cm_terrain_steppe_cliff_01" ,0,"terrain_steppe_cliff_01","bo_terrain_cliff_01",[]),
("cm_terrain_steppe_cliff_02" ,0,"terrain_steppe_cliff_02","bo_terrain_cliff_02",[]),
("cm_terrain_steppe_cliff_03" ,0,"terrain_steppe_cliff_03","bo_terrain_cliff_03",[]),
("cm_terrain_steppe_cliff_04" ,0,"terrain_steppe_cliff_04","bo_terrain_cliff_04",[]),
("cm_terrain_steppe_cliff_05" ,0,"terrain_steppe_cliff_05","bo_terrain_cliff_05",[]),
("cm_terrain_steppe_cliff_06" ,0,"terrain_steppe_cliff_06","bo_terrain_cliff_06",[]),
("cm_terrain_steppe_rockpile_01" ,0,"terrain_steppe_rockpile_01","bo_terrain_rockpile_01",[]),
("cm_terrain_steppe_mound_01" ,0,"terrain_steppe_mound_01","bo_terrain_mound_01",[]),
("cm_terrain_desert_plane_01" ,0,"terrain_desert_plane_01","bo_terrain_plane_01",[]),
("cm_terrain_desert_cliff_01" ,0,"terrain_desert_cliff_01","bo_terrain_cliff_01",[]),
("cm_terrain_desert_cliff_02" ,0,"terrain_desert_cliff_02","bo_terrain_cliff_02",[]),
("cm_terrain_desert_cliff_03" ,0,"terrain_desert_cliff_03","bo_terrain_cliff_03",[]),
("cm_terrain_desert_cliff_04" ,0,"terrain_desert_cliff_04","bo_terrain_cliff_04",[]),
("cm_terrain_desert_cliff_05" ,0,"terrain_desert_cliff_05","bo_terrain_cliff_05",[]),
("cm_terrain_desert_cliff_06" ,0,"terrain_desert_cliff_06","bo_terrain_cliff_06",[]),
("cm_terrain_desert_rockpile_01" ,0,"terrain_desert_rockpile_01","bo_terrain_rockpile_01",[]),
("cm_terrain_desert_mound_01" ,0,"terrain_desert_mound_01","bo_terrain_mound_01",[]),
("cm_terrain_desert_drift_01" ,0,"terrain_desert_drift_01","bo_terrain_drift_01",[]),
("cm_terrain_desert_drift_02" ,0,"terrain_desert_drift_02","bo_terrain_drift_02",[]),
("cm_terrain_desert_drift_03" ,0,"terrain_desert_drift_03","bo_terrain_drift_03",[]),
("cm_terrain_roots_01a" ,0,"terrain_roots_01a","0",[]),
("cm_terrain_roots_01b" ,0,"terrain_roots_01b","0",[]),
("cm_terrain_roots_01c" ,0,"terrain_roots_01c","0",[]),
("cm_terrain_roots_02a" ,0,"terrain_roots_02a","0",[]),
("cm_terrain_roots_02b" ,0,"terrain_roots_02b","0",[]),
("cm_terrain_roots_02c" ,0,"terrain_roots_02c","0",[]),
("cm_terrain_rock_large_01" ,0,"terrain_rock_large_01","bo_terrain_rock_large_01",[]),
("cm_terrain_rock_large_02" ,0,"terrain_rock_large_02","bo_terrain_rock_large_02",[]),
("cm_terrain_rock_large_03" ,0,"terrain_rock_large_03","bo_terrain_rock_large_03",[]),
("cm_terrain_rockside_01" ,0,"terrain_rockside_01","bo_terrain_rockside_01",[]),
("cm_terrain_rockside_02" ,0,"terrain_rockside_02","bo_terrain_rockside_02",[]),
("cm_terrain_rock_huge_01" ,0,"terrain_rock_huge_01","bo_terrain_rock_huge_01",[]),
("cm_terrain_rock_huge_02" ,0,"terrain_rock_huge_02","bo_terrain_rock_huge_02",[]),
("cm_terrain_rocks_small_01a" ,0,"terrain_rocks_small_01a","0",[]),
("cm_terrain_rocks_small_02a" ,0,"terrain_rocks_small_02a","0",[]),
("cm_terrain_path_plane_01" ,0,"terrain_path_plane_01","bo_terrain_plane_01",[]),
("cm_terrain_village_plane_01" ,0,"terrain_village_plane_01","bo_terrain_plane_01",[]),
("cm_terrain_rock_plane_01" ,0,"terrain_rock_plane_01","bo_terrain_plane_01",[]),
("cm_terrain_rock_plane_02" ,0,"terrain_rock_plane_02","bo_terrain_plane_01",[]),
("cm_terrain_grass_cluster_01" ,0,"terrain_grass_cluster_01","0",[]),
("cm_terrain_grass_plane_01" ,0,"terrain_grass_plane_01","bo_terrain_plane_01",[]),
("cm_terrain_grass_cliff_01" ,0,"terrain_grass_cliff_01","bo_terrain_cliff_01",[]),
("cm_terrain_grass_cliff_02" ,0,"terrain_grass_cliff_02","bo_terrain_cliff_02",[]),
("cm_terrain_grass_cliff_03" ,0,"terrain_grass_cliff_03","bo_terrain_cliff_03",[]),
("cm_terrain_grass_cliff_04" ,0,"terrain_grass_cliff_04","bo_terrain_cliff_04",[]),
("cm_terrain_grass_cliff_05" ,0,"terrain_grass_cliff_05","bo_terrain_cliff_05",[]),
("cm_terrain_grass_cliff_06" ,0,"terrain_grass_cliff_06","bo_terrain_cliff_06",[]),
("cm_terrain_grass_rockpile_01" ,0,"terrain_grass_rockpile_01","bo_terrain_rockpile_01",[]),
("cm_terrain_grass_mound_01" ,0,"terrain_grass_mound_01","bo_terrain_mound_01",[]),
("cm_terrain_snow_plane_01" ,0,"terrain_snow_plane_01","bo_terrain_plane_01",[]),
("cm_terrain_snow_cliff_01" ,0,"terrain_snow_cliff_01","bo_terrain_cliff_01",[]),
("cm_terrain_snow_cliff_02" ,0,"terrain_snow_cliff_02","bo_terrain_cliff_02",[]),
("cm_terrain_snow_cliff_03" ,0,"terrain_snow_cliff_03","bo_terrain_cliff_03",[]),
("cm_terrain_snow_cliff_04" ,0,"terrain_snow_cliff_04","bo_terrain_cliff_04",[]),
("cm_terrain_snow_cliff_05" ,0,"terrain_snow_cliff_05","bo_terrain_cliff_05",[]),
("cm_terrain_snow_cliff_06" ,0,"terrain_snow_cliff_06","bo_terrain_cliff_06",[]),
("cm_terrain_snow_rockpile_01" ,0,"terrain_snow_rockpile_01","bo_terrain_rockpile_01",[]),
("cm_terrain_snow_mound_01" ,0,"terrain_snow_mound_01","bo_terrain_mound_01",[]),
("cm_terrain_snow_drift_01" ,0,"terrain_snow_drift_01","bo_terrain_drift_01",[]),
("cm_terrain_snow_drift_02" ,0,"terrain_snow_drift_02","bo_terrain_drift_02",[]),
("cm_terrain_snow_drift_03" ,0,"terrain_snow_drift_03","bo_terrain_drift_03",[]),
("pw_destroy_heap",spr_use_time(2),"destroy_heap","bo_destroy_heap", spr_destroy_heap_triggers()),
("pw_rest_bed_a",spr_use_time(30),"bed_a","bo_bed_a", spr_rest_triggers(40, min_health_pct=35, use_time=30)),
("pw_rest_bed_b",spr_use_time(18),"bed_b","bo_bed_b", spr_rest_triggers(20, min_health_pct=50, use_time=18)),
("pw_rest_bed_c",spr_use_time(22),"bed_c","bo_bed_c", spr_rest_triggers(30, min_health_pct=40, use_time=22)),
("pw_rest_bed_e",spr_use_time(30),"bed_e","bo_bed_e", spr_rest_triggers(50, min_health_pct=30, use_time=30)),
("pw_rest_bed_f",spr_use_time(15),"bed_f","bo_bed_f", spr_rest_triggers(15, min_health_pct=55, use_time=15)),
("pw_rest_dungeon_bed_a",spr_use_time(15),"dungeon_bed_a","bo_bed_b", spr_rest_triggers(10, min_health_pct=60, use_time=15)),
("pw_rest_dungeon_bed_b",spr_use_time(15),"dungeon_bed_b","bo_dungeon_bed_b", spr_rest_triggers(8, min_health_pct=70, use_time=15)),
("pw_rest_pillow_a",spr_use_time(18),"pillow_a","bo_pillow", spr_rest_triggers(20, min_health_pct=45, use_time=18)),
("pw_rest_pillow_b",spr_use_time(24),"pillow_b","bo_pillow", spr_rest_triggers(30, min_health_pct=40, use_time=24)),
("pw_rest_invisible",sokf_invisible|spr_use_time(15),"pw_invisible_door","bo_pw_invisible_door", spr_rest_triggers(10, min_health_pct=60, use_time=15)),
("pw_rest_horse_trough",spr_use_time(15),"feeding_trough_a","bo_feeding_trough_a", spr_rest_triggers(30, min_health_pct=30, horse=1, use_string="str_rest_horse", use_time=15)),
("pw_rest_horse_hay",spr_use_time(30),"pw_horse_hay","bo_pw_horse_hay", spr_rest_triggers(70, min_health_pct=30, horse=1, use_string="str_rest_horse", use_time=30)),
("pw_rest_horse_manger",spr_use_time(22),"wall_manger_a","bo_wall_manger_a", spr_rest_triggers(60, min_health_pct=25, horse=1, use_string="str_rest_horse", use_time=22)),
("pw_clean_blood",spr_use_time(3),"cloth_a","bo_cloth_a", spr_clean_blood_triggers()),
("code_spawn_marker",0,"0","0", []),
("pw_change_troop_peasant",spr_use_time(15),"wooden_staff","bo_pw_weapon_big", spr_change_troop_triggers("trp_peasant", cost=50, after_respawn=True, use_string="str_troop_become")),
("pw_change_troop_serf",spr_use_time(30),"trident","bo_pw_weapon_big", spr_change_troop_triggers("trp_serf", cost=150)),
("pw_change_troop_militia",spr_use_time(30),"practice_sword","bo_pw_weapon", spr_change_troop_triggers("trp_militia", cost=500)),
("pw_change_troop_huntsman",spr_use_time(30),"short_bow","bo_pw_weapon", spr_change_troop_triggers("trp_huntsman", cost=500)),
("pw_change_troop_craftsman",spr_use_time(50),"pw_repair_hammer","bo_pw_weapon_small", spr_change_troop_triggers("trp_craftsman", cost=800)),
("pw_change_troop_healer",spr_use_time(60),"package","bobaggage", spr_change_troop_triggers("trp_healer", cost=3000)),
("pw_change_troop_footman",spr_use_time(60),"shield_kite_i","bo_pw_shield_kite_small", spr_change_troop_triggers("trp_footman", cost=3000)),
("pw_change_troop_archer",spr_use_time(60),"hunting_bow","bo_pw_weapon", spr_change_troop_triggers("trp_archer", cost=3500)),
("pw_change_troop_crossbowman",spr_use_time(60),"crossbow_a","bo_pw_weapon", spr_change_troop_triggers("trp_crossbowman", cost=3500)),
("pw_change_troop_lancer",spr_use_time(60),"shield_heater_c","bo_pw_shield_kite_small", spr_change_troop_triggers("trp_lancer", cost=3000)),
("pw_change_troop_man_at_arms",spr_use_time(60),"shield_heater_c","bo_pw_shield_kite_small", spr_change_troop_triggers("trp_man_at_arms", cost=5000)),
("pw_change_troop_knight",spr_use_time(60),"shield_heater_c","bo_pw_shield_kite_small", spr_change_troop_triggers("trp_knight", cost=7000)),
("pw_change_troop_sergeant",spr_use_time(60),"shield_kite_i","bo_pw_shield_kite_small", spr_change_troop_triggers("trp_sergeant", cost=5000)),
("pw_change_troop_engineer",spr_use_time(60),"pw_repair_hammer","bo_pw_weapon_small", spr_change_troop_triggers("trp_engineer", cost=2500)),
("pw_change_troop_master_smith",spr_use_time(60),"pw_repair_hammer","bo_pw_weapon_small", spr_change_troop_triggers("trp_master_smith", cost=7500)),
("pw_change_troop_doctor",spr_use_time(60),"package","bobaggage", spr_change_troop_triggers("trp_doctor", cost=3500)),
("pw_change_troop_sailor",spr_use_time(60),"scimeter","bo_pw_weapon", spr_change_troop_triggers("trp_sailor", cost=2000)),
("pw_change_troop_traveler",spr_use_time(60),"quarter_staff","bo_pw_weapon_big", spr_change_troop_triggers("trp_traveler", cost=1300, use_string="str_troop_become")),
("pw_change_troop_herdsman",spr_use_time(40),"quarter_staff","bo_pw_weapon_big", spr_change_troop_triggers("trp_herdsman", cost=900)),
("pw_change_troop_lord",spr_use_time(60),"gothic_chair","bogothic_chair", spr_change_troop_triggers("trp_lord", cost=500, use_string="str_troop_assume_role")),
("pw_change_troop_ruffian",spr_use_time(40),"sledgehammer","bo_pw_weapon", spr_change_troop_triggers("trp_ruffian", cost=2000, after_respawn=True, use_string="str_troop_become")),
("pw_change_troop_brigand",spr_use_time(50),"spiked_club","bo_pw_weapon", spr_change_troop_triggers("trp_brigand", cost=1000, after_respawn=True, use_string="str_troop_become")),
("pw_change_troop_mercenary",spr_use_time(50),"spiked_mace","bo_pw_weapon", spr_change_troop_triggers("trp_mercenary", cost=700, mercenary=True, after_respawn=True, use_string="str_troop_become_for")),
("pw_event_change_troop_peasant",spr_use_time(1),"wooden_staff","bo_pw_weapon_big", spr_change_troop_triggers("trp_peasant", cost=50, after_respawn=True, use_string="str_troop_become")),
("pw_event_change_troop_serf",spr_use_time(1),"trident","bo_pw_weapon_big", spr_change_troop_triggers("trp_serf", cost=150)),
("pw_event_change_troop_militia",spr_use_time(1),"practice_sword","bo_pw_weapon", spr_change_troop_triggers("trp_militia", cost=500)),
("pw_event_change_troop_huntsman",spr_use_time(1),"short_bow","bo_pw_weapon", spr_change_troop_triggers("trp_huntsman", cost=500)),
("pw_event_change_troop_craftsman",spr_use_time(1),"pw_repair_hammer","bo_pw_weapon_small", spr_change_troop_triggers("trp_craftsman", cost=800)),
("pw_event_change_troop_healer",spr_use_time(1),"package","bobaggage", spr_change_troop_triggers("trp_healer", cost=3000)),
("pw_event_change_troop_footman",spr_use_time(1),"shield_kite_i","bo_pw_shield_kite_small", spr_change_troop_triggers("trp_footman", cost=3000)),
("pw_event_change_troop_archer",spr_use_time(1),"hunting_bow","bo_pw_weapon", spr_change_troop_triggers("trp_archer", cost=3500)),
("pw_event_change_troop_crossbowman",spr_use_time(1),"crossbow_a","bo_pw_weapon", spr_change_troop_triggers("trp_crossbowman", cost=3500)),
("pw_event_change_troop_lancer",spr_use_time(1),"shield_heater_c","bo_pw_shield_kite_small", spr_change_troop_triggers("trp_lancer", cost=3000)),
("pw_event_change_troop_man_at_arms",spr_use_time(1),"shield_heater_c","bo_pw_shield_kite_small", spr_change_troop_triggers("trp_man_at_arms", cost=5000)),
("pw_event_change_troop_knight",spr_use_time(1),"shield_heater_c","bo_pw_shield_kite_small", spr_change_troop_triggers("trp_knight", cost=7000)),
("pw_event_change_troop_sergeant",spr_use_time(1),"shield_kite_i","bo_pw_shield_kite_small", spr_change_troop_triggers("trp_sergeant", cost=5000)),
("pw_event_change_troop_engineer",spr_use_time(1),"pw_repair_hammer","bo_pw_weapon_small", spr_change_troop_triggers("trp_engineer", cost=2500)),
("pw_event_change_troop_master_smith",spr_use_time(1),"pw_repair_hammer","bo_pw_weapon_small", spr_change_troop_triggers("trp_master_smith", cost=7500)),
("pw_event_change_troop_doctor",spr_use_time(1),"package","bobaggage", spr_change_troop_triggers("trp_doctor", cost=3500)),
("pw_event_change_troop_sailor",spr_use_time(1),"scimeter","bo_pw_weapon", spr_change_troop_triggers("trp_sailor", cost=2000)),
("pw_event_change_troop_traveler",spr_use_time(1),"quarter_staff","bo_pw_weapon_big", spr_change_troop_triggers("trp_traveler", cost=1300, use_string="str_troop_become")),
("pw_event_change_troop_herdsman",spr_use_time(1),"quarter_staff","bo_pw_weapon_big", spr_change_troop_triggers("trp_herdsman", cost=900)),
("pw_event_change_troop_lord",spr_use_time(1),"gothic_chair","bogothic_chair", spr_change_troop_triggers("trp_lord", cost=500, use_string="str_troop_assume_role")),
("pw_event_change_troop_ruffian",spr_use_time(1),"sledgehammer","bo_pw_weapon", spr_change_troop_triggers("trp_ruffian", cost=2000, after_respawn=True, use_string="str_troop_become")),
("pw_event_change_troop_brigand",spr_use_time(1),"spiked_club","bo_pw_weapon", spr_change_troop_triggers("trp_brigand", cost=1000, after_respawn=True, use_string="str_troop_become")),
("pw_event_change_troop_mercenary",spr_use_time(1),"spiked_mace","bo_pw_weapon", spr_change_troop_triggers("trp_mercenary", cost=700, mercenary=True, after_respawn=True, use_string="str_troop_become_for")),
("pw_door_teleport_small_arch_a",sokf_dynamic_physics|sokf_missiles_not_attached|spr_use_time(1),"tutorial_door_a","bo_tutorial_door_a", spr_teleport_door_triggers(pos_offset=(-55,50,-98))),
("pw_door_teleport_square_a",sokf_dynamic_physics|sokf_missiles_not_attached|spr_use_time(1),"tutorial_door_b","bo_tutorial_door_b", spr_teleport_door_triggers(pos_offset=(70,50,0))),
("pw_door_teleport_arch_a",sokf_dynamic_physics|sokf_missiles_not_attached|spr_use_time(1),"dungeon_door_direction_a","bo_dungeon_door_direction_a", spr_teleport_door_triggers(pos_offset=(100,0,-230))),
("pw_door_teleport_roof",sokf_dynamic_physics|sokf_missiles_not_attached|spr_use_time(1),"house_roof_door","bo_house_roof_door", spr_teleport_door_triggers(pos_offset=(0,0,100))),
("pw_door_teleport_inset_a",sokf_dynamic_physics|sokf_missiles_not_attached|spr_use_time(1),"pw_teleport_door_a","bo_pw_teleport_door_a", spr_teleport_door_triggers(pos_offset=(0,50,0))),
("pw_door_teleport_inset_b",sokf_dynamic_physics|sokf_missiles_not_attached|spr_use_time(1),"pw_teleport_door_b","bo_pw_teleport_door_a", spr_teleport_door_triggers(pos_offset=(0,50,0))),
("pw_door_teleport_inset_c",sokf_dynamic_physics|sokf_missiles_not_attached|spr_use_time(1),"pw_teleport_door_c","bo_pw_teleport_door_a", spr_teleport_door_triggers(pos_offset=(0,50,0))),
("pw_door_teleport_invisible",sokf_dynamic_physics|sokf_missiles_not_attached|sokf_invisible|spr_use_time(1),"pw_invisible_door","bo_pw_invisible_door", spr_teleport_door_triggers(pos_offset=(0,50,0))),
("pw_door_teleport_invisible_not_pickable",sokf_dynamic_physics|sokf_missiles_not_attached|sokf_invisible|spr_use_time(1),"pw_invisible_door","bo_pw_invisible_door", spr_teleport_door_triggers(pos_offset=(0,50,0), pickable=0)),
("pw_door_teleport_small_arch_a_horse",sokf_dynamic_physics|sokf_missiles_not_attached|spr_use_time(1),"tutorial_door_a","bo_tutorial_door_a", spr_teleport_door_triggers(pos_offset=(-55,50,-98), horse_can_tp=1)),
("pw_door_teleport_square_a_horse",sokf_dynamic_physics|sokf_missiles_not_attached|spr_use_time(1),"tutorial_door_b","bo_tutorial_door_b", spr_teleport_door_triggers(pos_offset=(70,50,0), horse_can_tp=1)),
("pw_door_teleport_arch_a_horse",sokf_dynamic_physics|sokf_missiles_not_attached|spr_use_time(1),"dungeon_door_direction_a","bo_dungeon_door_direction_a", spr_teleport_door_triggers(pos_offset=(100,0,-230), horse_can_tp=1)),
("pw_door_teleport_roof_horse",sokf_dynamic_physics|sokf_missiles_not_attached|spr_use_time(1),"house_roof_door","bo_house_roof_door", spr_teleport_door_triggers(pos_offset=(0,0,100), horse_can_tp=1)),
("pw_door_teleport_inset_a_horse",sokf_dynamic_physics|sokf_missiles_not_attached|spr_use_time(1),"pw_teleport_door_a","bo_pw_teleport_door_a", spr_teleport_door_triggers(pos_offset=(0,50,0), horse_can_tp=1)),
("pw_door_teleport_inset_b_horse",sokf_dynamic_physics|sokf_missiles_not_attached|spr_use_time(1),"pw_teleport_door_b","bo_pw_teleport_door_a", spr_teleport_door_triggers(pos_offset=(0,50,0), horse_can_tp=1)),
("pw_door_teleport_inset_c_horse",sokf_dynamic_physics|sokf_missiles_not_attached|spr_use_time(1),"pw_teleport_door_c","bo_pw_teleport_door_a", spr_teleport_door_triggers(pos_offset=(0,50,0), horse_can_tp=1)),
("pw_door_teleport_invisible_horse",sokf_dynamic_physics|sokf_missiles_not_attached|sokf_invisible|spr_use_time(1),"pw_invisible_door","bo_pw_invisible_door", spr_teleport_door_triggers(pos_offset=(0,50,0), horse_can_tp=1)),
("pw_door_teleport_invisible_not_pickable_horse",sokf_dynamic_physics|sokf_missiles_not_attached|sokf_invisible|spr_use_time(1),"pw_invisible_door","bo_pw_invisible_door", spr_teleport_door_triggers(pos_offset=(0,50,0), pickable=0, horse_can_tp=1)),
("pw_door_rotate_a",spr_rotate_door_flags(1),"castle_f_sally_door_a","bo_castle_f_sally_door_a", spr_rotate_door_triggers(hit_points=5000)),
("pw_door_rotate_b",spr_rotate_door_flags(1),"castle_e_sally_door_a","bo_castle_e_sally_door_a_fixed", spr_rotate_door_triggers(hit_points=5000)),
("pw_door_rotate_c",spr_rotate_door_flags(1),"castle_f_door_a","bo_castle_f_door_a_fixed", spr_rotate_door_triggers(hit_points=5000)),
("pw_door_rotate_d",spr_rotate_door_flags(1),"pw_door_d","bo_pw_door_d", spr_rotate_door_triggers(hit_points=5000)),
("pw_door_rotate_a_indestructible",spr_rotate_door_no_hit_flags(1),"castle_f_sally_door_a","bo_castle_f_sally_door_a", spr_rotate_door_no_hit_triggers()),
("pw_door_rotate_b_indestructible",spr_rotate_door_no_hit_flags(1),"castle_e_sally_door_a","bo_castle_e_sally_door_a_fixed", spr_rotate_door_no_hit_triggers()),
("pw_door_rotate_c_indestructible",spr_rotate_door_no_hit_flags(1),"castle_f_door_a","bo_castle_f_door_a_fixed", spr_rotate_door_no_hit_triggers()),
("pw_door_rotate_d_indestructible",spr_rotate_door_no_hit_flags(1),"pw_door_d","bo_pw_door_d", spr_rotate_door_no_hit_triggers()),
("pw_door_rotate_viking_left",spr_rotate_door_flags(1),"viking_keep_destroy_sally_door_left","bo_viking_keep_destroy_sally_door_left_fixed", spr_rotate_door_triggers(hit_points=5000, left=1)),
("pw_door_rotate_viking_right",spr_rotate_door_flags(1),"viking_keep_destroy_sally_door_right","bo_viking_keep_destroy_sally_door_right_fixed", spr_rotate_door_triggers(hit_points=5000)),
("pw_door_rotate_gatehouse_left",spr_rotate_door_flags(1),"pw_gatehouse_door_left","bo_pw_gatehouse_door_left", spr_rotate_door_triggers(hit_points=7000, left=1)),
("pw_door_rotate_gatehouse_right",spr_rotate_door_flags(1),"pw_gatehouse_door_right","bo_pw_gatehouse_door_right", spr_rotate_door_triggers(hit_points=7000)),
("pw_door_rotate_dungeon_cell_a",spr_rotate_door_no_hit_flags(2),"dungeon_door_cell_a","bo_dungeon_door_cell_a_fixed", spr_rotate_door_no_hit_triggers()),
("pw_door_rotate_dungeon_cell_b",spr_rotate_door_no_hit_flags(2),"dungeon_door_cell_b_fixed","bo_dungeon_door_cell_b_fixed", spr_rotate_door_no_hit_triggers()),
("pw_door_rotate_dungeon_cell_c",spr_rotate_door_no_hit_flags(2),"dungeon_door_cell_c","bo_dungeon_door_cell_c", spr_rotate_door_no_hit_triggers()),
("pw_door_rotate_dungeon_a",spr_rotate_door_flags(1),"pw_dungeon_door_a","bo_pw_dungeon_door_a", spr_rotate_door_triggers(hit_points=5000)),
("pw_door_rotate_dungeon_b",spr_rotate_door_flags(1),"pw_dungeon_door_b","bo_pw_dungeon_door_a", spr_rotate_door_triggers(hit_points=5000)),
("pw_door_rotate_dungeon_c",spr_rotate_door_flags(1),"pw_dungeon_door_c","bo_pw_dungeon_door_a", spr_rotate_door_triggers(hit_points=5000)),
("pw_door_rotate_e_left",spr_rotate_door_flags(1),"pw_door_e_left","bo_pw_door_left", spr_rotate_door_triggers(hit_points=5000, left=1)),
("pw_door_rotate_e_right",spr_rotate_door_flags(1),"pw_door_e_right","bo_pw_door_right", spr_rotate_door_triggers(hit_points=5000)),
("pw_door_rotate_f_left",spr_rotate_door_flags(1),"pw_door_f_left","bo_pw_door_left", spr_rotate_door_triggers(hit_points=5000, left=1)),
("pw_door_rotate_f_right",spr_rotate_door_flags(1),"pw_door_f_right","bo_pw_door_right", spr_rotate_door_triggers(hit_points=5000)),
("pw_door_rotate_h_left",spr_rotate_door_flags(1),"pw_door_g_left","bo_pw_door_left", spr_rotate_door_triggers(hit_points=5000, left=1)),
("pw_door_rotate_h_right",spr_rotate_door_flags(1),"pw_door_g_right","bo_pw_door_right", spr_rotate_door_triggers(hit_points=5000)),
("pw_door_rotate_towngate_left",spr_rotate_door_flags(2),"towngate_rectangle_door_left","bo_towngate_rectangle_door_left_fixed", spr_rotate_door_triggers(hit_points=10000, left=1)),
("pw_door_rotate_towngate_right",spr_rotate_door_flags(2),"towngate_rectangle_door_right","bo_towngate_rectangle_door_right_fixed", spr_rotate_door_triggers(hit_points=10000)),
("pw_door_rotate_earth_left",spr_rotate_door_flags(2),"earth_sally_gate_left","bo_earth_sally_gate_left", spr_rotate_door_triggers(hit_points=10000, left=1)),
("pw_door_rotate_earth_right",spr_rotate_door_flags(2),"earth_sally_gate_right","bo_earth_sally_gate_right", spr_rotate_door_triggers(hit_points=10000)),
("pw_door_rotate_stable",spr_rotate_door_flags(1),"pw_full_stable_door_a","bo_pw_full_stable_door_a", spr_rotate_door_triggers(hit_points=1000, left=1)),
("pw_door_rotate_village_a",spr_rotate_door_flags(1),"pw_village_door_a","bo_pw_village_door_a", spr_rotate_door_triggers(hit_points=2000)),
("pw_door_rotate_village_b",spr_rotate_door_flags(1),"pw_village_door_b","bo_pw_village_door_a", spr_rotate_door_triggers(hit_points=2000)),
("cm_door_rotate_akra_left",spr_rotate_door_flags(1),"cm_akra_vorota_b","bo_cm_akra_vorota_b", spr_rotate_door_triggers(hit_points=5000, left=1)),
("cm_door_rotate_akra_right",spr_rotate_door_flags(1),"cm_akra_vorota_a","bo_cm_akra_vorota_a", spr_rotate_door_triggers(hit_points=5000)),
("cm_door_rotate_palisade_gate_left",spr_rotate_door_flags(1),"cm_palisade_gate_b","bo_cm_palisade_gate_b", spr_rotate_door_triggers(hit_points=5000, left=1)),
("cm_door_rotate_palisade_gate_right",spr_rotate_door_flags(1),"cm_palisade_gate_a","bo_cm_palisade_gate_a", spr_rotate_door_triggers(hit_points=5000)),
("pw_wooden_bridge_a",spr_structure_flags(),"bridge_wooden","bo_bridge_wooden_fixed", spr_bridge_triggers("pw_wooden_bridge_a_footing", hit_points=15000)),
("pw_wooden_bridge_a_footing",spr_build_flags(),"pw_build_bridge","bo_pw_build", spr_bridge_footing_triggers()),
("pw_snowy_bridge_a",spr_structure_flags(),"bridge_wooden_snowy","bo_bridge_wooden_fixed", spr_bridge_triggers("pw_snowy_bridge_a_footing", hit_points=15000)),
("pw_snowy_bridge_a_footing",spr_build_flags(),"pw_build_bridge","bo_pw_build", spr_bridge_footing_triggers()),
("pw_rope_bridge",spr_structure_flags(),"rope_bridge_15m","bo_rope_bridge_15m", spr_bridge_triggers("pw_rope_bridge_footing", hit_points=2000)),
("pw_rope_bridge_footing",spr_build_flags(),"castle_f_wall_way_a","bo_castle_f_wall_way_a", spr_bridge_footing_triggers()),
("pw_wooden_palisade",spr_structure_flags(),"pw_wooden_palisade_a","bo_arena_palisade_a", spr_wall_triggers("pw_wooden_palisade_build", hit_points=15000, height=1600)),
("pw_wooden_palisade_b",spr_structure_flags(),"pw_wooden_palisade_b","bo_pw_wooden_palisade_b", spr_wall_triggers("pw_wooden_palisade_build", hit_points=15000, height=1600)),
("pw_wooden_palisade_tower",spr_structure_flags(),"arena_tower_c","bo_arena_tower_c_fixed", spr_wall_triggers("pw_wooden_palisade_build", hit_points=15000, height=2500)),
("pw_wooden_palisade_build",spr_build_flags(),"pw_build_wall","bo_pw_build", spr_build_wall_triggers()),
("pw_siege_stairs_a",spr_structure_flags(),"pw_siege_stairs_a","bo_pw_siege_stairs_a", spr_wall_triggers("pw_siege_stairs_build", hit_points=3000, height=340)),
("pw_siege_stairs_build",spr_build_flags(),"pw_build_wall","bo_pw_build", spr_build_wall_triggers()),
("pw_siege_wall_a",spr_structure_flags(),"siege_wall_a","bo_siege_wall_a_fixed", spr_wall_triggers("pw_siege_wall_a_build", hit_points=5000, height=320)),
("pw_siege_wall_a_build",spr_build_flags(),"pw_build_wall","bo_pw_build", spr_build_wall_triggers()),
("pw_siege_wall_b",spr_structure_flags(),"pw_siege_wall_b","bo_pw_siege_wall_b", spr_wall_triggers("pw_siege_wall_b_build", hit_points=6000, height=560)),
("pw_siege_wall_b2",spr_structure_flags(),"pw_siege_wall_b2","bo_pw_siege_wall_b2", spr_wall_triggers("pw_siege_wall_b_build", hit_points=6000, height=560)),
("pw_siege_wall_b_build",spr_build_flags(),"pw_build_wall","bo_pw_build", spr_build_wall_triggers()),
("pw_siege_shield_a",spr_structure_flags(),"siege_large_shield_a","bo_siege_large_shield_a_fixed", spr_wall_triggers("pw_siege_shield_a_build", hit_points=2000, height=280)),
("pw_siege_shield_a_build",spr_build_flags(),"pw_build_wall","bo_pw_build", spr_build_wall_triggers()),
("pw_siege_ramp_14m",spr_structure_flags(),"pw_siege_ramp_14m","bo_pw_siege_ramp_14m", spr_wall_triggers("pw_siege_ramp_build", hit_points=1500, height=1400, no_move_physics=True)),
("pw_siege_ramp_build",spr_build_flags(),"pw_build_bridge","bo_pw_build", spr_build_wall_triggers()),
("pw_ladder_6m",spr_ladder_flags(),"siege_ladder_move_6m","bo_siege_ladder_move_6m_fixed", spr_wall_triggers("pw_ladder_build", hit_points=600, height=600, no_move_physics=True)),
("pw_ladder_8m",spr_ladder_flags(),"siege_ladder_move_8m","bo_siege_ladder_move_8m_fixed", spr_wall_triggers("pw_ladder_build", hit_points=660, height=800, no_move_physics=True)),
("pw_ladder_10m",spr_ladder_flags(),"siege_ladder_move_10m","bo_siege_ladder_move_10m_fixed", spr_wall_triggers("pw_ladder_build", hit_points=720, height=1000, no_move_physics=True)),
("pw_ladder_12m",spr_ladder_flags(),"siege_ladder_move_12m","bo_siege_ladder_move_12m_fixed", spr_wall_triggers("pw_ladder_build", hit_points=840, height=1200, no_move_physics=True)),
("pw_ladder_14m",spr_ladder_flags(),"siege_ladder_move_14m","bo_siege_ladder_move_14m_fixed", spr_wall_triggers("pw_ladder_build", hit_points=900, height=1400, no_move_physics=True)),
("pw_ladder_build",spr_build_flags(),"pw_build_ladder","bo_pw_build_ladder", spr_build_wall_triggers()),
("pw_construction_box",sokf_static_movement|sokf_destructible,"pw_construction_box","bo_pw_construction_box", spr_construction_box_triggers()),
("pw_winch_frame",0,"winch_stabilizer_a","bo_winch_stabilizer_a", []),
("pw_portcullis_winch",spr_use_time(1),"winch","bo_winch_fixed", spr_portcullis_winch_triggers("pw_portcullis")),
("pw_portcullis",sokf_static_movement,"portculis_new","bo_portculis_new_fixed", []),
("pw_portcullis_winch_a",spr_use_time(1),"winch","bo_winch_fixed", spr_portcullis_winch_triggers("pw_portcullis_a")),
("pw_portcullis_a",sokf_static_movement,"portcullis_a","bo_portcullis_a_fixed", []),
("pw_drawbridge_winch_a",spr_use_time(2),"winch_b","bo_winch_fixed", spr_drawbridge_winch_triggers("pw_drawbridge_a")),
("pw_drawbridge_a",sokf_moveable,"drawbridge","bo_drawbridge", []),
("pw_drawbridge_winch_b",spr_use_time(2),"winch_b","bo_winch_fixed", spr_drawbridge_winch_triggers("pw_drawbridge_b")),
("pw_drawbridge_b",sokf_moveable,"castle_drawbridges_open","bo_castle_drawbridges_open", []),
("pw_trapdoor_winch_a",spr_use_time(1),"winch","bo_winch_fixed", spr_drawbridge_winch_triggers("pw_trapdoor_a", rotation_steps=2, step_size=45, animation_time=50)),
("pw_trapdoor_a",sokf_static_movement,"belfry_b_platform_a","bo_belfry_b_platform_a", []),
("pw_sliding_door_winch_a",spr_use_time(1),"winch","bo_winch_fixed", spr_sliding_door_winch_triggers("pw_sliding_door_a", move_steps=1, step_size=150)),
("pw_sliding_door_a",sokf_moveable,"castle_e_sally_door_a","bo_castle_e_sally_door_a", []),
("pw_lift_platform_winch",spr_use_time(1),"winch_b","bo_winch_fixed", spr_lift_platform_winch_triggers()),
("pw_lift_platform",sokf_moveable,"pw_lift_platform","bo_pw_lift_platform", spr_lift_platform_triggers("pw_lift_platform_winch")),
("pw_cart_a",sokf_static_movement|spr_use_time(1),"pw_cart_a","bo_pw_cart_a", spr_cart_triggers(horse="itm_cart_horse", detach_offset=60, detach_rotation=-20, inventory_count=48, max_item_length=250, access_distance=-180)),
("pw_cart_b",sokf_static_movement|spr_use_time(1),"pw_cart_b","bo_pw_cart_b", spr_cart_triggers(horse="itm_cart_horse", detach_offset=110, detach_rotation=-6, inventory_count=42, max_item_length=250, access_distance=-170)),
("pw_wheelbarrow",sokf_static_movement|spr_use_time(1),"pw_hand_cart_a","bo_pw_hand_cart_a", spr_cart_triggers(detach_offset=47, detach_rotation=15, inventory_count=12, max_item_length=120, access_distance=110)),
("pw_hand_cart",sokf_static_movement|spr_use_time(1),"pw_hand_cart_b","bo_pw_hand_cart_b", spr_cart_triggers(detach_offset=90, inventory_count=24, max_item_length=150, access_distance=-170)),
("pw_back_basket",sokf_static_movement|spr_use_time(2),"pw_back_basket","bo_pw_back_basket", spr_cart_triggers(detach_offset=-12, inventory_count=5, max_item_length=95, access_distance=-60)),
("pw_back_box",sokf_static_movement|spr_use_time(3),"pw_back_box","bo_pw_back_box", spr_cart_triggers(detach_offset=-13, inventory_count=10, max_item_length=80, access_distance=-80)),
("pw_horse_pack",sokf_static_movement|spr_use_time(2),"pw_horse_pack","bo_pw_horse_pack", spr_cart_triggers(horse=1, detach_offset=49, inventory_count=20, max_item_length=100, access_distance=90)),
("cm_civ_cart",sokf_static_movement|spr_use_time(1),"cm_civ_cart","bo_cm_civ_cart", spr_cart_triggers(horse="itm_cart_horse", detach_offset=40, detach_rotation=-18, inventory_count=48, max_item_length=250, access_distance=-220)),
("cm_war_cart",sokf_static_movement|spr_use_time(1),"cm_war_cart","bo_cm_war_cart", spr_cart_triggers(horse="itm_cart_horse", detach_offset=120, inventory_count=24, max_item_length=250, access_distance=-240, store_ammo=1)),
("pw_ship_a",sokf_moveable|sokf_destructible|sokf_show_hit_point_bar,"pw_ship_a","bo_pw_ship_a", spr_ship_triggers(hit_points=5000, length=800, width=150, height=-20, speed=6, sail="pw_ship_a_sail", sail_off="pw_ship_a_sail_off", collision="pw_ship_a_cd")),
("pw_ship_a_sail",sokf_moveable,"pw_ship_a_sail","bo_pw_ship_a_sail", []),
("pw_ship_a_sail_off",sokf_moveable,"pw_ship_a_sail_off","bo_pw_ship_a_sail_off", []),
("pw_ship_a_cd",sokf_invisible|sokf_dont_move_agent_over,"0","bo_pw_ship_a_cd", []),
("pw_ship_b",sokf_moveable|sokf_destructible|sokf_show_hit_point_bar,"pw_ship_b","bo_pw_ship_b", spr_ship_triggers(hit_points=8000, length=1400, width=230, height=100, speed=4, sail="pw_ship_b_sail", collision="pw_ship_b_cd")),
("pw_ship_b_sail",sokf_moveable,"pw_ship_b_sail","bo_pw_ship_b_sail", []),
("pw_ship_b_cd",sokf_invisible|sokf_dont_move_agent_over,"0","bo_pw_ship_b_cd", []),
("pw_ship_c",sokf_moveable|sokf_destructible|sokf_show_hit_point_bar,"pw_ship_c","bo_pw_ship_c", spr_ship_triggers(hit_points=10000, length=1400, width=300, height=300, speed=4, sail="pw_ship_c_sail", sail_off="pw_ship_c_sail_off", ramp="pw_ship_c_ramp", hold="pw_ship_c_hold", collision="pw_ship_c_cd")),
("pw_ship_c_sail",sokf_moveable,"pw_ship_c_sail","bo_pw_ship_c_sail", []),
("pw_ship_c_sail_off",sokf_moveable,"pw_ship_c_sail_off","bo_pw_ship_c_sail_off", []),
("pw_ship_c_ramp",sokf_moveable|spr_use_time(1),"pw_ship_c_ramp","bo_pw_ship_c_ramp", spr_ship_ramp_triggers()),
("pw_ship_c_hold",sokf_moveable|sokf_invisible|spr_use_time(2),"0","bo_pw_ship_c_hold", spr_item_storage_triggers(inventory_count=90, max_item_length=500)),
("pw_ship_c_cd",sokf_invisible|sokf_dont_move_agent_over,"0","bo_pw_ship_c_cd", []),
("pw_ship_d",sokf_moveable|sokf_destructible|sokf_show_hit_point_bar,"pw_ship_d","bo_pw_ship_d", spr_ship_triggers(hit_points=7000, length=900, width=250, height=120, speed=5, sail="pw_ship_d_sail", hold="pw_ship_d_hold", collision="pw_ship_d_cd")),
("pw_ship_d_sail",sokf_moveable,"pw_ship_d_sail","bo_pw_ship_d_sail", []),
("pw_ship_d_hold",sokf_moveable|sokf_invisible|spr_use_time(2),"0","bo_pw_ship_d_hold", spr_item_storage_triggers(inventory_count=64, max_item_length=500)),
("pw_ship_d_cd",sokf_invisible|sokf_dont_move_agent_over,"0","bo_pw_ship_d_cd", []),
("pw_ferry_boat",sokf_moveable,"pw_ferry_boat","bo_pw_ferry_boat", spr_ferry_triggers(platform="pw_ferry_platform", winch="code_ferry_winch", length=470, winch_height=70)),
("code_ferry_winch",sokf_moveable|spr_use_time(2),"pw_ferry_winch","bo_pw_ferry_winch", spr_ferry_winch_triggers()),
("pw_ferry_platform",spr_use_time(2),"pw_ferry_platform","bo_pw_ferry_platform", spr_ferry_winch_triggers(is_platform=1)),
("pw_ferry_chain_10m",0,"pw_ferry_chain_10m","0", []),
("pw_ferry_chain_20m",0,"pw_ferry_chain_20m","0", []),
("pw_ferry_chain_30m",0,"pw_ferry_chain_30m","0", []),
("cm_boat",sokf_moveable|sokf_destructible|sokf_show_hit_point_bar,"cm_boat","bo_cm_boat", spr_ship_triggers(hit_points=3500, length=160, width=50, height=-18, speed=2, sail="cm_boat_oar", sail_off="cm_boat_oar_off", collision="cm_boat_cd")),
("cm_boat_oar",sokf_moveable,"cm_boat_oar","0", []),
("cm_boat_oar_off",sokf_moveable,"cm_boat_oar_off","0", []),
("cm_boat_cd",sokf_invisible|sokf_dont_move_agent_over,"0","bo_cm_boat_cd", []),
("pw_castle_sign",0,"tree_house_guard_a","bo_tree_house_guard_a", [(ti_on_scene_prop_use, [])]),
("pw_castle_capture_point",sokf_dynamic_physics|sokf_missiles_not_attached|spr_use_time(capture_point_use_time),"pw_castle_flag_post","bo_pw_castle_flag_post", spr_capture_castle_triggers()),
("pw_castle_wall_banner",0,"pw_banner_wall_rail","bo_pw_banner_wall_rail", []),
("pw_castle_money_chest",sokf_dynamic_physics|sokf_missiles_not_attached|spr_chest_flags(use_time=2),"pw_chest_b","bo_pw_chest_b", spr_castle_money_chest_triggers(hit_points=6000)),
("pw_item_chest_a",sokf_dynamic_physics|sokf_missiles_not_attached|spr_chest_flags(use_time=1),"pw_chest_c","bo_pw_chest_c", spr_item_chest_triggers(hit_points=7000, inventory_count=48, max_item_length=180)),
("pw_item_chest_b",sokf_dynamic_physics|sokf_missiles_not_attached|spr_chest_flags(use_time=1),"pw_chest_b","bo_pw_chest_b", spr_item_chest_triggers(hit_points=5000, inventory_count=32, max_item_length=100)),
("pw_item_chest_invisible",sokf_dynamic_physics|sokf_missiles_not_attached|sokf_invisible|spr_chest_flags(use_time=1, destructible=False),"pw_invisible_chest","bo_pw_invisible_chest", spr_bank()),
("cm_arrow_holder_bucket",sokf_dynamic_physics|sokf_missiles_not_attached|spr_chest_flags(use_time=1, destructible=False),"pk_arrow_holder_bucket","bo_pk_arrow_holder_bucket", spr_item_chest_triggers(inventory_count=12, store_ammo=1, store_only_ammo=1, destructible=False)),
("pw_signpost_castle",0,"pw_signpost_castle","bo_pw_signpost", []),
("pw_signpost_docks",0,"pw_signpost_docks","bo_pw_signpost", []),
("pw_signpost_market",0,"pw_signpost_market","bo_pw_signpost", []),
("pw_signpost_tavern",0,"pw_signpost_tavern","bo_pw_signpost", []),
("pw_signpost_town",0,"pw_signpost_town","bo_pw_signpost", []),
("pw_dart_board",0,"pw_dart_board","bo_pw_dart_board", []),
("pw_scene_day_time",sokf_invisible,"barrier_box","0", []),
("pw_scene_cloud_haze",sokf_invisible,"barrier_box","0", []),
("pw_scene_skybox",sokf_invisible,"barrier_box","0", []),
("pw_scene_ambient_sound",sokf_invisible,"barrier_cone","0",
[(ti_on_scene_prop_init,
[(store_trigger_param_1, ":instance_id"),
(prop_instance_get_variation_id_2, ":probability", ":instance_id"),
(eq, ":probability", 127),
(prop_instance_get_variation_id, ":sound_id", ":instance_id"),
(val_add, ":sound_id", ambient_sounds_begin),
(is_between, ":sound_id", ambient_sounds_begin, ambient_sounds_end),
(prop_instance_play_sound, ":instance_id", ":sound_id", sf_looping),
]),
]),
("pw_scene_light",sokf_invisible,"light_sphere","0",
[(ti_on_scene_prop_init,
[(store_trigger_param_1, ":instance_id"),
(set_fixed_point_multiplier, 100),
(prop_instance_get_scale, pos1, ":instance_id"),
(position_get_scale_x, ":red", pos1),
(position_get_scale_y, ":green", pos1),
(position_get_scale_z, ":blue", pos1),
| |
# <gh_stars>0  -- scraper artifact; commented out because it is not valid Python
#!/usr/bin/env python
"""
LPU output visualization.
"""
import collections
from collections import OrderedDict
import itertools
import os
import h5py
from future.utils import itervalues, iteritems
import matplotlib
from matplotlib import cm
from matplotlib.colors import Normalize
import matplotlib.pyplot as plt
plt.ioff() # interactive mode can interfere with frame updates
from matplotlib.animation import FFMpegFileWriter, AVConvFileWriter
from matplotlib.colors import hsv_to_rgb
import networkx as nx
import numpy as np
from scipy.interpolate import griddata
from shutilwhich import which
from . import simpleio as sio
class visualizer(object):
"""
Visualize the output produced by LPU models.
Examples
--------
>>> import neurokernel.LPU.utils.visualizer as vis
>>> V = vis.visualizer()
>>> config1 = {}
>>> config1['type'] = 'image'
>>> config1['shape'] = [32,24]
>>> config1['clim'] = [-0.6,0.5]
>>> config2 = config1.copy()
>>> config2['clim'] = [-0.55,-0.45]
>>> V.add_LPU('lamina_output.h5', 'lamina.gexf.gz','lamina')
>>> V.add_plot(config1, 'lamina', 'R1')
>>> V.add_plot(config2, 'lamina', 'L1')
>>> V.update_interval = 50
>>> V.out_filename = 'test.avi'
>>> V.run()
"""
def __init__(self):
self._xlim = [0,1]
self._ylim = [-1,1]
self._imlim = [-1, 1]
self._update_interval = -1
self._out_file = None
self._fps = 5
self._codec = 'libtheora'
self._config = OrderedDict()
self._rows = 0
self._cols = 0
self._figsize = (16,9)
self._fontsize = 18
self._t = 0
self._dts = {}
self._sample_intervals = {}
self._start_times = {}
self._data = {}
self._uids = {}
self._maxt = None
self._title = None
self._FFMpeg = None
    def add_LPU(self, data_file, LPU='', win=None, is_input=False,gexf_file=None,
                sample_interval=1, start_time=0, dt=1e-4, transpose_axes = [1,0]):
        """
        Add data associated with a specific LPU to a visualization.

        To add a plot containing neurons from a particular LPU,
        the LPU needs to be added to the visualization using this
        function. Note that outputs from multiple neurons can
        be visualized using the same visualizer object. The IDs
        specified in the arguments passed to `add_plot()` are assumed to be
        indices into the array stored in the HDF5 file.

        Parameters
        ----------
        data_file : str
            Location of the HDF5 file generated by neurokernel
            containing the output of the LPU.
        LPU : str
            Name of the LPU. Will be used as identifier to add plots.
            For input signals (``is_input=True``), the name of the LPU will
            be prepended with 'input_'. For example::

                V.add_LPU('vision_in.h5', LPU='vision', is_input=True)

            will create the LPU identifier 'input_vision'.
            Therefore, adding a plot depicting this input can be done by::

                V.add_plot({'type':'image', 'imlim':[-0.5,0.5]}, LPU='input_vision')
        win : slice/list (Optional)
            Can be used to limit the visualization to a specific time window.
        gexf_file : str (Optional)
            Location of the gexf file containing the graph of the LPU
            configuration.
        is_input : bool (Optional)
            Set to True if the data_file represents input.
        dt, start_time, sample_interval : double, double, int (All Optional)
            These arguments will only be used to set these attributes for
            input h5 files. For all other cases, these will be read from
            the 'metadata' group of the h5 file.

        All arguments beyond LPU should be considered strictly keyword only.
        """
        if is_input:
            # Input files carry no 'metadata' group, so timing attributes
            # come from the keyword arguments rather than the file itself.
            LPU = 'input_' + str(LPU)
            self._sample_intervals[LPU] = sample_interval
            self._dts[LPU] = dt * sample_interval
            self._start_times[LPU] = start_time
            f = h5py.File(data_file)
            self._uids[LPU] = {}
            self._data[LPU] = {}
            for k, d in iteritems(f):
                # NOTE(review): h5py's `.value` accessor is deprecated; newer
                # h5py versions require `f[k]['uids'][()]` instead -- confirm
                # which h5py version this project pins.
                self._uids[LPU][k] = [a.decode('utf8') for a in f[k]['uids'].value]
                # Data is transposed with axes=[1,0] by default; axis 1 is
                # treated as time below (see the .shape[1] uses).
                self._data[LPU][k] = np.transpose(f[k]['data'].value,
                                                  axes = transpose_axes)
                self._config[LPU] = []
                # Track the shortest duration over all added data sources so
                # the animation stops when any source runs out of samples.
                if self._maxt:
                    self._maxt = min(self._maxt,
                                     (self._data[LPU][k].shape[1]-1)*self._dts[LPU])
                else:
                    self._maxt = (self._data[LPU][k].shape[1]-1)*self._dts[LPU]
            f.close()
            return
        self._config[LPU] = []
        f = h5py.File(data_file)
        # Output files written by neurokernel store timing information in a
        # 'metadata' group; read it instead of trusting the keyword arguments.
        self._sample_intervals[LPU] = f['metadata'].attrs['sample_interval']
        self._dts[LPU] = f['metadata'].attrs['dt'] * self._sample_intervals[LPU]
        self._start_times[LPU] = f['metadata'].attrs['start_time']
        self._uids[LPU] = {}
        self._data[LPU] = {}
        for k, d in iteritems(f):
            if k=='metadata': continue
            self._uids[LPU][k] = [a.decode('utf8') for a in f[k]['uids'].value]
            self._data[LPU][k] = np.transpose(f[k]['data'].value,
                                              axes = transpose_axes)
        if win is not None:
            # Restrict every stored variable to the requested time window.
            for k in self._data[LPU]:
                self._data[LPU][k] = self._data[LPU][k][:,win]
        # Use the first variable's length to update the global end time.
        k = list(self._data[LPU])[0]
        if self._maxt:
            self._maxt = min(self._maxt,
                             (self._data[LPU][k].shape[1]-1)*self._dts[LPU])
        else:
            self._maxt = (self._data[LPU][k].shape[1]-1)*self._dts[LPU]
        f.close()
def run(self, final_frame_name=None, dpi=300):
"""
Starts the visualization process.
If the property `out_filename` is set, the visualization is saved as a
video to the disk; if not, the animation is displayed on screen.
Please refer to documentation of `add_LPU`, `add_plot`
and the properties of this class on how to configure the visualizer
before calling this method. An example can be found in the class doc
string.
Parameters
----------
final_frame_name : str, optional
If specified, the final frame of the animation is saved
to disk.
dpi : int, default=300
Resolution at which final frame is saved to disk if
`final_frame_name` is specified.
Notes
-----
If `update_interval` is set to 0 or None, it will be replaced by the
index of the final time step. As a result, the visualizer will only
generate and save the final frame if `final_frame_name` is set.
"""
self.final_frame_name = final_frame_name
self._initialize()
if not self._update_interval:
self._update_interval = self._maxt
self._t = self._maxt
if self._update_interval == -1:
self._update_interval = max(np.asarray(list(self._dts.values())))*50
for _ in np.arange(self._t,self._maxt*(1+np.finfo(float).eps),
self._update_interval):
self._update()
if final_frame_name is not None:
self.f.savefig(final_frame_name, dpi=dpi)
if self.out_filename:
self._close()
def _set_wrapper(self, obj, name, value):
name = name.lower()
func = getattr(obj, 'set_'+name, None)
if func:
try:
func(value, fontsize=self._fontsize, weight='bold')
except:
try:
func(value)
except:
pass
def _initialize(self):
# Count number of plots to create:
num_plots = 0
for config in itervalues(self._config):
num_plots += len(config)
# Set default grid of plot positions:
if not self._rows*self._cols == num_plots:
self._cols = int(np.ceil(np.sqrt(num_plots)))
self._rows = int(np.ceil(num_plots/float(self._cols)))
self.f, self.axarr = plt.subplots(self._rows, self._cols,
figsize=self._figsize)
# Remove unused subplots:
for i in range(num_plots, self._rows*self._cols):
plt.delaxes(self.axarr[np.unravel_index(i, (self._rows, self._cols))])
cnt = 0
self.handles = []
self.types = []
keywds = ['handle', 'ydata', 'fmt', 'type', 'ids', 'shape', 'norm']
# TODO: Irregular grid in U will make the plot better
U, V = np.mgrid[0:np.pi/2:complex(0, 60),
0:2*np.pi:complex(0, 60)]
X = np.cos(V)*np.sin(U)
Y = np.sin(V)*np.sin(U)
Z = np.cos(U)
self._dome_pos_flat = (X.flatten(), Y.flatten(), Z.flatten())
self._dome_pos = (X, Y, Z)
self._dome_arr_shape = X.shape
if not isinstance(self.axarr, np.ndarray):
self.axarr = np.asarray([self.axarr])
for LPU, configs in iteritems(self._config):
dt = self._dts[LPU]
for plt_id, config in enumerate(configs):
var = config['variable']
ind = np.unravel_index(cnt, self.axarr.shape)
cnt+=1
# Some plot types require specific numbers of
# neuron ID arrays:
if 'type' in config:
if config['type'] == 'quiver':
assert len(config['ids'])==2
config['type'] = 0
elif config['type'] == 'hsv':
assert len(config['ids'])==2
config['type'] = 1
elif config['type'] == 'image':
assert len(config['ids'])==1
config['type'] = 2
elif config['type'] == 'waveform':
config['type'] = 3
elif config['type'] == 'raster':
config['type'] = 4
elif config['type'] == 'rate':
config['type'] = 5
elif config['type'] == 'dome':
config['type'] = 6
assert('lat' in config and 'long' in config)
else:
raise ValueError('Plot type not supported')
else:
if (str(LPU).startswith('input') and not
self._graph[LPU].node[str(config['ids'][0][0])]['spiking']):
config['type'] = 2
else:
config['type'] = 3
if config['type'] < 3:
if not 'shape' in config:
num_neurons = len(config['ids'][0])
config['shape'] = [int(np.ceil(np.sqrt(num_neurons)))]
config['shape'].append(int(np.ceil(num_neurons/float(config['shape'][0]))))
if config['type'] == 0:
config['handle'] = self.axarr[ind].quiver(\
np.reshape(self._data[LPU][var][config['ids'][0],0],config['shape']),\
np.reshape(self._data[LPU][var][config['ids'][1],0],config['shape']))
elif config['type'] == 1:
X = np.reshape(self._data[LPU][var][config['ids'][0],0],config['shape'])
Y = np.reshape(self._data[LPU][var][config['ids'][1],0],config['shape'])
V = (X**2 + Y**2)**0.5
H = (np.arctan2(X,Y)+np.pi)/(2*np.pi)
S = np.ones_like(V)
HSV = np.dstack((H,S,V))
RGB = hsv_to_rgb(HSV)
config['handle'] = self.axarr[ind].imshow(RGB)
elif config['type'] == 2:
if 'trans' in config:
if config['trans'] is True:
to_transpose = True
else:
to_transpose = False
else:
to_transpose = False
config['trans'] = False
if to_transpose:
temp = self.axarr[ind].imshow(np.transpose(np.reshape(\
self._data[LPU][var][config['ids'][0],0], config['shape'])))
else:
temp = self.axarr[ind].imshow(np.reshape(\
self._data[LPU][var][config['ids'][0],0], config['shape']))
temp.set_clim(self._imlim)
temp.set_cmap(plt.cm.gist_gray)
config['handle'] = temp
elif config['type'] == 3:
fmt = config['fmt'] if 'fmt' in config else ''
self.axarr[ind].set_xlim(self._xlim)
self.axarr[ind].set_ylim(self._ylim)
if len(config['ids'][0])==1:
config['handle'] = self.axarr[ind].plot([0], \
[self._data[LPU][var][config['ids'][0][0],0]], fmt)[0]
config['ydata'] = [self._data[LPU][var][config['ids'][0][0],0]]
else:
config['handle'] = self.axarr[ind].plot(self._data[LPU][var][config['ids'][0],0])[0]
elif config['type'] == 4:
config['handle'] = self.axarr[ind]
config['handle'].vlines(0, 0, 0.01)
config['handle'].set_ylim([.5, len(config['ids'][0]) + .5])
config['handle'].set_ylabel('Neurons',
fontsize=self._fontsize-1, weight='bold')
config['handle'].set_xlabel('Time (s)',fontsize=self._fontsize-1, weight='bold')
config['handle'].set_xlim([0,self._data[LPU][var].shape[1]*dt])
config['handle'].axes.set_yticks([])
eps = np.finfo(float).eps
config['handle'].axes.set_xticks(\
np.linspace(0+self._start_times[LPU],
self._start_times[LPU]+self._maxt,11))
elif config['type'] == 6:
self.axarr[ind].axes.set_yticks([])
self.axarr[ind].axes.set_xticks([])
self.axarr[ind] = self.f.add_subplot(self._rows,
self._cols,
cnt,
projection='3d')
config['handle' ] = self.axarr[ind]
config['handle'].axes.set_yticks([])
config['handle'].axes.set_xticks([])
config['handle'].xaxis.set_ticks([])
config['handle'].yaxis.set_ticks([])
config['handle'].zaxis.set_ticks([])
if 'norm' not in config:
config['norm'] = Normalize(vmin=-70, vmax=0, clip=True)
elif config['norm'] == 'auto':
if self._data[LPU][var].shape[1] > 100:
config['norm'] = Normalize(vmin = np.min(self._data[LPU][var][config['ids'][0],100:]),
vmax = np.max(self._data[LPU][var][config['ids'][0],100:]),
clip = True)
else:
config['norm'] = Normalize(vmin = np.min(self._data[LPU][var][config['ids'][0],:]),
vmax = np.max(self._data[LPU][var][config['ids'][0],:]),
clip = True)
node_dict = self._graph[LPU].node
latpositions = config['lat']
longpositions = config['long']
xx = np.cos(longpositions) * np.sin(latpositions)
yy = np.sin(longpositions) * np.sin(latpositions)
zz = np.cos(latpositions)
config['positions'] = (xx, yy, zz)
colors = griddata(config['positions'], self._data[LPU][var][config['ids'][0],0],
self._dome_pos_flat, 'nearest').reshape(self._dome_arr_shape)
colors = config['norm'](colors).data
colors = np.tile(np.reshape(colors,
[self._dome_arr_shape[0],self._dome_arr_shape[1],1])
,[1,1,4])
colors[:,:,3] = 1.0
config['handle'].plot_surface(self._dome_pos[0], self._dome_pos[1],
self._dome_pos[2], rstride=1, cstride=1,
facecolors=colors, antialiased=False,
shade=False)
for key | |
""" @package forcebalance.parser Input file parser for ForceBalance jobs. Additionally, the location for all default options.
Although I will do my best to write good documentation,
for many programs the input parser becomes the most up-to-date
source for documentation. So this is a great place to write
lots of comments for those who implement new functionality.
There are two types of sections for options - GENERAL and TARGET.
Since there can be many fitting targets within a single job (i.e. we
may wish to fit water trimers and hexamers, which constitutes two
fitting targets) the input is organized into sections, like so:
$options\n
gen_option_1 Big\n
gen_option_2 Mao\n
$target\n
tgt_option_1 Sniffy\n
tgt_option_2 Schmao\n
$target\n
tgt_option_1 Nifty\n
tgt_option_2 Jiffy\n
$end
In this case, two sets of target options are generated in addition to the general option.
(Note: "Target" used to be called "Simulation". Backwards compatibility is maintained.)
Each option is meant to be parsed as a certain variable type.
- String option values are read in directly; note that only the first two words in the line are processed
- Some strings are capitalized when they are read in; this is mainly for function tables like OptTab and TgtTab
- List option types will pick up all of the words on the line and use them as values,
plus if the option occurs more than once it will aggregate all of the values.
- Integer and float option types are read in a pretty straightforward way
- Boolean option types are always set to true, unless the second word is '0', 'no', or 'false' (not case sensitive)
- Section option types are meant to treat more elaborate inputs, such
as the user pasting in output parameters from a previous job as input,
or a specification of internal coordinate system. I imagine that for
every section type I would have to write my own parser. Maybe a
ParsTab of parsing functions would work. :)
To add a new option, simply add it to the dictionaries below and give it a default value if desired.
If you add an entirely new type, make sure to implement the interpretation of that type in the parse_inputs function.
@author <NAME>
@date 11/2012
"""
from __future__ import absolute_import
from builtins import str
import os
import re
import sys
import itertools
import traceback
from .nifty import printcool, printcool_dictionary, which, isfloat
from copy import deepcopy
from collections import OrderedDict
from forcebalance.output import getLogger
logger = getLogger(__name__)
## Default general options.
## Note that the documentation is included in part of the key; this will aid in automatic doc-extraction. :)
## In the 5-tuple we have: Default value, priority (larger number means printed first), short docstring, description of scope, list of filter strings for pulling out pertinent targets (MakeInputFile.py)
gen_opts_types = {
'strings' : {"gmxpath" : (which('mdrun'), 60, 'Path for GROMACS executables (if not the default)', 'All targets that use GROMACS', ['GMX']),
"gmxsuffix" : ('', 60, 'The suffix of GROMACS executables', 'All targets that use GROMACS', ['GMX']),
"tinkerpath" : (which('testgrad'), 60, 'Path for TINKER executables (if not the default)', 'All targets that use TINKER', ['TINKER']),
"penalty_type" : ("L2", 100, 'Type of the penalty: L2, L1 or Box', 'All optimizations'),
"scan_vals" : (None, -100, 'Values to scan in the parameter space, given like this: -0.1:0.1:11', 'Job types scan_mvals and scan_pvals'),
"readchk" : (None, -50, 'Name of the restart file we read from', 'Restart jobtype "newton" with "writechk" set'),
"writechk" : (None, -50, 'Name of the restart file we write to (can be same as readchk)', 'Main optimizer'),
"ffdir" : ('forcefield', 100, 'Directory containing force fields, relative to project directory', 'All'),
"amoeba_pol" : (None, 0, 'The AMOEBA polarization type, either direct, mutual, or nonpolarizable.', 'Targets in OpenMM / TINKER that use the AMOEBA force field', ['OPENMM','TINKER']),
"amberhome" : (None, -10, 'Path to AMBER installation directory (leave blank to use AMBERHOME environment variable.', 'Targets that use AMBER', 'AMBER'),
},
'allcaps' : {"jobtype" : ("single", 200, 'The calculation type, defaults to a single-point evaluation of objective function.',
'All (important); choose "single", "gradient", "hessian", "newton" (Main Optimizer), "bfgs", "powell", "simplex", "anneal", "genetic", "conjugategradient", "scan_mvals", "scan_pvals", "fdcheck[gh]"'),
},
'lists' : {"forcefield" : ([], 200, 'The names of force fields, corresponding to directory forcefields/file_name.(itp,xml,prm,frcmod,mol2)', 'All (important)'),
"scanindex_num" : ([], -100, 'Numerical index of the parameter to scan over', 'Job types scan_mvals and scan_pvals'),
"scanindex_name" : ([], -100, 'Parameter name to scan over (should convert to a numerical index)', 'Job types scan_mvals and scan_pvals')
},
'ints' : {"maxstep" : (100, 50, 'Maximum number of steps in an optimization', 'Main Optimizer'),
"objective_history" : (2, 20, 'Number of good optimization steps to average over when checking the objective convergence criterion', 'Main Optimizer (jobtype "newton")'),
"wq_port" : (0, 0, 'The port number to use for Work Queue', 'Targets that use Work Queue (advanced usage)'),
"criteria" : (1, 160, 'The number of convergence criteria that must be met for main optimizer to converge', 'Main Optimizer'),
"rpmd_beads" : (0, -160, 'Number of beads in ring polymer MD (zero to disable)', 'Condensed phase property targets (advanced usage)', 'liquid_openmm'),
"zerograd" : (-1, 0, 'Set to a nonnegative number to turn on zero gradient skipping at that optimization step.', 'All'),
},
'bools' : {"backup" : (1, 10, 'Write temp directories to backup before wiping them'),
"writechk_step" : (1, -50, 'Write the checkpoint file at every optimization step'),
"retain_micro_outputs" : (1, 10, 'Whether to retain the output files of completed micro iterations'),
"converge_lowq" : (0, -50, 'Allow convergence on "low quality" steps'),
"have_vsite" : (0, -150, 'Specify whether there are virtual sites in the simulation (being fitted or not). Enforces calculation of vsite positions.', 'Experimental feature in ESP fitting', ['ABINITIO']),
"constrain_charge" : (0, 10, 'Specify whether to constrain the charges on the molecules.', 'Printing the force field (all calculations)'),
"print_gradient" : (1, 20, 'Print the objective function gradient at every step', 'Main Optimizer'),
"logarithmic_map" : (0, -150, 'Optimize in the space of log-variables', 'Creating the force field (all calculations, advanced usage)'),
"print_hessian" : (0, 20, 'Print the objective function Hessian at every step', 'Main Optimizer'),
"print_parameters" : (1, 20, 'Print the mathematical and physical parameters at every step', 'Main Optimizer'),
"normalize_weights": (1, 100, 'Normalize the weights for the fitting targets', 'Objective function (all calculations)'),
"verbose_options" : (0, 150, 'Set to false to suppress printing options that are equal to their defaults', 'Printing output'),
"rigid_water" : (0, -150, 'Perform calculations using rigid water molecules.', 'Currently used in AMOEBA parameterization (advanced usage)', ['OPENMM','TINKER']),
"constrain_h" : (0, -150, 'Perform calculations with contrained hydrogen bond lengths.', 'Used in liquid-OpenMM', ['OPENMM']),
"vsite_bonds" : (0, -150, 'Generate bonds from virtual sites to host atom bonded atoms.', 'Currently used in AMOEBA parameterization (advanced usage)', ['OPENMM','TINKER']),
"use_pvals" : (0, -150, 'Bypass the transformation matrix and use the physical parameters directly', 'Creating the force field; advanced usage, be careful.'),
"asynchronous" : (0, 0, 'Execute Work Queue tasks and local calculations asynchronously for improved speed', 'Targets that use Work Queue (advanced usage)'),
"reevaluate" : (None, 0, 'Re-evaluate the objective function and gradients when the step is rejected (for noisy objective functions).', 'Main Optimizer'),
"continue" : (0, 140, 'Continue the current run from where we left off (supports mid-iteration recovery).', 'Main Optimizer'),
"duplicate_pnames" : (0, -150, 'Allow duplicate parameter names (only if you know what you are doing!', 'Force Field Parser'),
},
'floats' : {"trust0" : (1e-1, 100, 'Levenberg-Marquardt trust radius; set to negative for nonlinear search', 'Main Optimizer'),
"mintrust" : (0.0, 10, 'Minimum trust radius (if the trust radius is tiny, then noisy optimizations become really gnarly)', 'Main Optimizer'),
"convergence_objective" : (1e-4, 100, 'Convergence criterion of objective function (in MainOptimizer this is the stdev of X2 over [objective_history] steps)', 'Main Optimizer'),
"convergence_gradient" : (1e-3, 100, 'Convergence criterion of gradient norm', 'Main Optimizer'),
"convergence_step" : (1e-4, 100, 'Convergence criterion of step size (just needs to fall below this threshold)', 'Main Optimizer'),
"eig_lowerbound" : (1e-4, 10, 'Minimum eigenvalue for applying steepest descent correction', 'Main Optimizer'),
"step_lowerbound" : (1e-6, 10, 'Optimization will "fail" if step falls below this size', 'Main Optimizer'),
"lm_guess" : (1.0, 9, 'Guess value for bracketing line search in trust radius | |
1
if counter > 10:
raise Exception('Not all velocity vectors are able to be fixed to origin. Are the all joints linked?')
def fix_acceleration(self):
    """
    Fixes the accelerations of all the joints assuming that all vectors are defined locally, meaning that the
    vector's length, angle, r_dot, omega, r_ddot, and alpha are known.

    Raises
    ------
    Exception
        If the acceleration vectors cannot all be linked back to the origin
        within the iteration limit (10 passes).
    """
    origin = self.origin
    origin.fix_acceleration(0, 0)
    attached_to_origin = []
    # Work on a copy so removals below do not mutate self.accelerations.
    vectors = self.accelerations[:]
    # First pass: fix every vector that touches the origin directly.
    for v in vectors:
        if v.joints[0] == origin:
            v.fix_global_acceleration()
            attached_to_origin.append(v)
        elif v.joints[1] == origin:
            v_rev = v.reverse()
            v_rev.fix_global_acceleration()
            # NOTE(review): the original (un-reversed) vector is the one
            # recorded here, mirroring fix_velocity; presumably get_sum()
            # handles orientation -- confirm against the Vector class.
            attached_to_origin.append(v)
    for v in attached_to_origin:
        vectors.remove(v)
    # Repeatedly chain the remaining vectors onto ones already linked to
    # the origin until every joint's acceleration is known.
    counter = 0
    while not self.acceleration_is_fixed():
        for v in vectors:
            if self.acceleration_is_fixed():
                break
            for r in attached_to_origin:
                sum_ = get_sum(r, v)
                if sum_:
                    attached_to_origin.append(sum_)
                    sum_.fix_global_acceleration()
                    break
        counter += 1
        if counter > 10:
            # Bug fix: the message previously said "velocity vectors"
            # (copy-pasted from fix_velocity) and had a word-order typo.
            raise Exception('Not all acceleration vectors are able to be fixed to origin. '
                            'Are all the joints linked?')
def position_is_fixed(self):
    """
    :return: True if all the positions of the joints are fixed.
    """
    # Idiomatic replacement for the manual loop; all() short-circuits on
    # the first unfixed joint exactly like the original early return.
    return all(joint.position_is_fixed() for joint in self.joints)
def velocity_is_fixed(self):
    """
    :return: True if all the velocities of the joints are fixed.
    """
    # Idiomatic replacement for the manual loop; all() short-circuits on
    # the first unfixed joint exactly like the original early return.
    return all(joint.velocity_is_fixed() for joint in self.joints)
def acceleration_is_fixed(self):
    """
    :return: True if all the accelerations of the joints are fixed.
    """
    # Idiomatic replacement for the manual loop; all() short-circuits on
    # the first unfixed joint exactly like the original early return.
    return all(joint.acceleration_is_fixed() for joint in self.joints)
def tables(self, position=False, velocity=False, acceleration=False, to_five=False):
    """
    Prints a specified data table.

    Each selected section prints a vector table and a joint table (velocity
    and acceleration additionally print an omega/alpha-and-slip table).
    Joints are always listed alphabetically by name. Angles are printed in
    degrees.

    :param position: bool; Print position data if set to True
    :param velocity: bool; Print velocity data if set to True
    :param acceleration: bool; Print acceleration data if set to True
    :param to_five: bool; Print all data to five decimal places if set to True.
    """
    if position:
        print('POSITION')
        print('--------\n')
        if not to_five:
            # Raw values: vector polar (R, theta) and cartesian components,
            # plus joint coordinates.
            mechanism_data = [[v, v.r, np.rad2deg(v.theta), v.x, v.y] for v in self.positions]
            joint_data = [[j, j.x_pos, j.y_pos] for j in sorted(self.joints, key=lambda x: x.name)]
        else:
            # Same rows, pre-formatted to five decimal places.
            mechanism_data = [[v, f'{v.r:.5f}', f'{np.rad2deg(v.theta):.5f}', f'{v.x:.5f}', f'{v.y:.5f}'] for v
                              in self.positions]
            joint_data = [[j, f'{j.x_pos:.5f}', f'{j.y_pos:.5f}'] for j in
                          sorted(self.joints, key=lambda x: x.name)]
        Data(mechanism_data, headers=['Vector', 'R', 'Theta', 'x', 'y']).print(table=True)
        print('')
        Data(joint_data, headers=['Joint', 'x', 'y']).print(table=True)
        print('')
    if velocity:
        print('VELOCITY')
        print('--------\n')
        if not to_five:
            # get_mag()/vel_mag() return (magnitude, angle) pairs.
            mechanism_data = [[v, v.get_mag()[0], np.rad2deg(v.get_mag()[1]), v.x, v.y] for v in
                              self.velocities]
            omega_slip_data = [[v, v.omega, v.r_dot] for v in self.velocities]
            joint_data = [[j, j.vel_mag()[0], np.rad2deg(j.vel_mag()[1]), j.x_vel, j.y_vel] for j in
                          sorted(self.joints, key=lambda x: x.name)]
        else:
            mechanism_data = [[v, f'{v.get_mag()[0]:.5f}', f'{np.rad2deg(v.get_mag()[1]):.5f}', f'{v.x:.5f}',
                               f'{v.y:.5f}'] for v in self.velocities]
            omega_slip_data = [[v, f'{v.omega:.5f}', f'{v.r_dot:.5f}'] for v in self.velocities]
            joint_data = [[j, f'{j.vel_mag()[0]:.5f}', f'{np.rad2deg(j.vel_mag()[1]):.5f}', f'{j.x_vel:.5f}',
                           f'{j.y_vel:.5f}'] for j in sorted(self.joints, key=lambda x: x.name)]
        Data(mechanism_data, headers=['Vector', 'Mag', 'Angle', 'x', 'y']).print(table=True)
        print('')
        Data(omega_slip_data, headers=['Vector', 'Omega', 'R_dot']).print(table=True)
        print('')
        Data(joint_data, headers=['Joint', 'Mag', 'Angle', 'x', 'y']).print(table=True)
        print('')
    if acceleration:
        print('ACCELERATION')
        print('------------\n')
        if not to_five:
            mechanism_data = [[v, v.get_mag()[0], np.rad2deg(v.get_mag()[1]), v.x, v.y] for v in
                              self.accelerations]
            alpha_slip_data = [[v, v.alpha, v.r_ddot] for v in self.accelerations]
            joint_data = [[j, j.acc_mag()[0], np.rad2deg(j.acc_mag()[1]), j.x_acc, j.y_acc] for j in
                          sorted(self.joints, key=lambda x: x.name)]
        else:
            mechanism_data = [
                [v, f'{v.get_mag()[0]:.5f}', f'{np.rad2deg(v.get_mag()[1]):.5f}', f'{v.x:.5f}', f'{v.y:.5f}'] for v
                in self.accelerations]
            alpha_slip_data = [[v, f'{v.alpha:.5f}', f'{v.r_ddot:.5f}'] for v in self.accelerations]
            joint_data = [[j, f'{j.acc_mag()[0]:.5f}', f'{np.rad2deg(j.acc_mag()[1]):.5f}', f'{j.x_acc:.5f}',
                           f'{j.y_acc:.5f}'] for j in sorted(self.joints, key=lambda x: x.name)]
        Data(mechanism_data, headers=['Vector', 'Mag', 'Angle', 'x', 'y']).print(table=True)
        print('')
        Data(alpha_slip_data, headers=['Vector', 'Alpha', 'R_ddot']).print(table=True)
        print('')
        Data(joint_data, headers=['Joint', 'Mag', 'Angle', 'x', 'y']).print(table=True)
def plot(self, velocity=False, acceleration=False, show_joints=True, grid=True, cushion=1):
    """
    Plots the instance of the mechanism; calculate() method must be called before calling this method.

    :param velocity: bool; Plots velocity vectors if True
    :param acceleration: bool; Plots acceleration vectors if True
    :param show_joints: Adds joint labels to the plot (only if velocity=False and acceleration=False)
    :param grid: bool; Add the grid if true.
    :param cushion: int, float; The thickness of the cushion around the plot.
    :return: The figure and axes objects.
    """
    fig, ax = plt.subplots()
    ax.set_aspect('equal')
    if grid:
        ax.grid(zorder=1)
    # Frame the view around the extreme joint coordinates plus a cushion.
    xs = [joint.x_pos for joint in self.joints]
    ys = [joint.y_pos for joint in self.joints]
    ax.set_xlim(min(xs) - cushion, max(xs) + cushion)
    ax.set_ylim(min(ys) - cushion, max(ys) + cushion)
    # Draw each visible position vector as a line segment between its joints.
    for vector in self.positions:
        if not vector.show:
            continue
        start, end = vector.joints
        ax.plot((start.x_pos, end.x_pos), (start.y_pos, end.y_pos), **vector.kwargs)
    # Joint labels only appear when no arrow overlays are requested.
    label_joints = show_joints and not velocity and not acceleration
    for joint in self.joints:
        if velocity:
            ax.quiver(joint.x_pos, joint.y_pos, joint.x_vel, joint.y_vel, angles='xy',
                      scale_units='xy', color='deepskyblue', zorder=3)
        if acceleration:
            ax.quiver(joint.x_pos, joint.y_pos, joint.x_acc, joint.y_acc, angles='xy',
                      scale_units='xy', color='orange', zorder=3)
        if label_joints:
            ax.annotate(joint.name, (joint.x_pos, joint.y_pos), size='large', zorder=5)
    return fig, ax
def test(self):
"""
Checks the distances between joints.
"""
print('Distances:')
for v in self.vectors:
j1, j2 = v.joints
print(f'- {j1} to {j2}: {np.sqrt((j1.x_pos - j2.x_pos)**2 + (j1.y_pos - j2.y_pos)**2)}')
def calculate(self):
    """
    Fixes the position of all the joints and vectors. Also fixes the velocity and acceleration data for all the
    vectors and joints if vel and acc for the mechanism is given.
    """
    # Solve the position loop equations first; fix_position then propagates
    # the solution to every joint.
    fsolve(self.loops, self.guess[0], args=(self.pos,))
    self.fix_position()
    if self.vel is not None:
        # Re-point each vector's `get` at its velocity accessor BEFORE
        # re-solving the same loop equations for velocities -- the order
        # of these assignments relative to fsolve matters.
        for v in self.vectors:
            v.get = v.vel.get
            v.update_velocity()
        fsolve(self.loops, self.guess[1], args=(self.vel,))
        self.fix_velocity()
    if self.acc is not None:
        # Accelerations require the velocity solution computed above.
        assert self.vel is not None, "vel input not defined, but necessary to solve for accelerations."
        for v in self.vectors:
            v.get = v.acc.get
            v.update_acceleration()
        fsolve(self.loops, self.guess[2], args=(self.acc,))
        self.fix_acceleration()
def iterate(self):
    """
    Iterates over each pos, vel, and acc input, solving at each instance. Must be called before creating
    an animation. This method must also only be used if pos, vel, and acc are ndarrays. pos argument is a
    minimum requirement.
    """
    assert isinstance(self.pos, np.ndarray), "pos input is not an ndarray."
    # Seed the solver guesses; each solve's result becomes the next guess so
    # the solution tracks continuously from one instance to the next.
    guess1 = self.guess[0]
    guess2, guess3 = None, None
    if self.vel is not None:
        guess2 = self.guess[1]
    if self.vel is not None and self.acc is not None:
        guess3 = self.guess[2]
    for i in range(self.pos.shape[0]):
        # --- positions for instance i ---
        for v in self.vectors:
            v.get = v.pos.get
        pos = fsolve(self.loops, guess1, args=(self.pos[i],))
        guess1 = pos
        self.fix_position()
        for v in self.vectors:
            v.set_position_data(i)
        for j in self.joints:
            j.set_position_data(i)
        if self.vel is not None:
            # --- velocities for instance i ---
            for v in self.vectors:
                v.get = v.vel.get
                v.update_velocity()
            vel = fsolve(self.loops, guess2, args=(self.vel[i],))
            guess2 = vel
            self.fix_velocity()
            for v in self.vectors:
                v.set_velocity_data(i)
            for j in self.joints:
                j.set_velocity_data(i)
        if self.acc is not None:
            # --- accelerations for instance i (need velocities) ---
            assert self.vel is not None, "vel input not defined, but necessary to solve for accelerations."
            for v in self.vectors:
                v.get = v.acc.get
                v.update_acceleration()
            acc = fsolve(self.loops, guess3, args=(self.acc[i],))
            guess3 = acc
            self.fix_acceleration()
            for v in self.vectors:
                v.set_acceleration_data(i)
            for j in self.joints:
                j.set_acceleration_data(i)
        # Reset the joints' "fixed" state so the next instance can be solved
        # from scratch; the per-instance data was already stored above.
        self.clear_joints()
def clear_joints(self):
    """
    Clears the stored joint data; call this between two different
    calculate() invocations so stale state does not leak across solves.
    """
    for j in self.joints:
        j.clear()
def get_bounds(self):
    """
    :return: Two tuples; the first is the minimum and maximum x position of the mechanism, and the second is the
        minimum and maximum y position of the mechanism.
    """
    # Gather every joint's recorded coordinate history, then take the
    # global extrema across all joints at once.
    all_x = [joint.x_positions for joint in self.joints]
    all_y = [joint.y_positions for joint in self.joints]
    return (np.amin(all_x), np.amax(all_x)), (np.amin(all_y), np.amax(all_y))
def get_animation(self, grid=True, cushion=1):
    # Todo: A step value could be added here to adjust speed
    """
    :param: cushion: int; Add a cushion around the plot.
    :param: grid: bool; Add the grid if true.
    :return: An animation, figure, and axes object.
    """
    fig, ax = plt.subplots()
    ax.set_aspect('equal')
    # Axis limits come from the full position history so the mechanism
    # never leaves the frame during the animation.
    x_limits, y_limits = self.get_bounds()
    if grid:
        ax.grid(zorder=1)
    ax.set_xlim(x_limits[0] - cushion, x_limits[1] + cushion)
    ax.set_ylim(y_limits[0] - cushion, y_limits[1] + cushion)
    # Map each visible position vector to the (initially empty) line artist
    # that animate() will update every frame.
    plot_dict = {}
    for v in self.vectors:
        if not v.pos.show:
            continue
        plot_dict.update({v.pos: ax.plot([], [], **v.pos.kwargs)[0]})
    # Joints flagged with `follow` get their whole trajectory drawn once as
    # a static trace.
    for j in self.joints:
        if j.follow:
            ax.plot(j.x_positions, j.y_positions, **j.kwargs)
    def init():
        # Blank every animated line (required by blitting).
        for line in plot_dict.values():
            line.set_data([], [])
        return list(plot_dict.values())
    def animate(i):
        # Frame i: move each line segment to its joints' i-th positions.
        for vec, line in plot_dict.items():
            j1, j2 = vec.joints
            line.set_data((j1.x_positions[i], j2.x_positions[i]), (j1.y_positions[i], j2.y_positions[i]))
        return list(plot_dict.values())
    # noinspection PyTypeChecker
    return FuncAnimation(fig, animate, frames=range(self.pos.shape[0]), interval=50, blit=True,
                         init_func=init), fig, ax
def __getitem__(self, item):
return self.dic[item]
def get_joints(names):
    """
    :param names: str; A string with the joint names separated by spaces.
    :return: A list of joint objects.
    """
    # One Joint per whitespace-separated token, in input order.
    return list(map(Joint, names.split()))
def get_sum(v1, v2):
"""
This function returns the sum of two vectors. It will reverse the vector(s) in | |
= Var(within=Binary,bounds=(0,1),initialize=0)
# --- Auto-generated Pyomo model section: variable declarations. ---
# Binary (0/1) decision variables b598-b607.
m.b598 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b599 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b600 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b601 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b602 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b603 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b604 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b605 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b606 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b607 = Var(within=Binary,bounds=(0,1),initialize=0)
# Continuous variables: x608/x609 capped at 40, x630/x631 at 30,
# all others nonnegative and unbounded above.
m.x608 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x609 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x610 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x611 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x612 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x613 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x614 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x615 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x616 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x617 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x618 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x619 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x620 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x621 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x622 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x623 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x624 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x625 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x626 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x627 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x628 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x629 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x630 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x631 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x632 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x633 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x634 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x635 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x636 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x637 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x638 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x639 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x640 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x641 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x642 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x643 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x644 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x645 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x646 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x647 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x648 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x649 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x650 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x651 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x652 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x653 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x654 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x655 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x656 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x657 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x658 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x659 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x660 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x661 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x662 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x663 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x664 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x665 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x666 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x667 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x668 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x669 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x670 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x671 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x672 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x673 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x674 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x675 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x676 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x677 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x678 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x679 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x680 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x681 = Var(within=Reals,bounds=(0,None),initialize=0)
# Binary (0/1) decision variables b682-b701.
m.b682 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b683 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b684 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b685 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b686 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b687 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b688 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b689 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b690 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b691 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b692 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b693 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b694 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b695 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b696 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b697 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b698 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b699 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b700 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b701 = Var(within=Binary,bounds=(0,1),initialize=0)
# Objective: maximize the given linear expression over the continuous x
# variables (flows) and the binary b variables (fixed charges).
m.obj = Objective(expr= - 20*m.x2 - 17*m.x3 - 20*m.x12 - 21*m.x13 - 18*m.x20 - 20*m.x21 - 16*m.x44 - 19*m.x45 + 26*m.x52
+ 31*m.x53 + 30*m.x56 + 29*m.x57 - 20*m.x58 - 18*m.x59 + 2*m.x64 + 2*m.x65 + 3*m.x66 + 2*m.x67
+ 30*m.x68 + 31*m.x69 + 24*m.x70 + 22*m.x71 - 6*m.b546 - 4*m.b547 - 40*m.b548 - 35*m.b549
- 46*m.b550 - 39*m.b551 - 7*m.b554 - 4*m.b555 - 30*m.b556 - 25*m.b557 - 37*m.b558 - 29*m.b559
- 7*m.b562 - 5*m.b563 - 15*m.b564 - 5*m.b565 - 22*m.b566 - 10*m.b567 - 11*m.b570 - 8*m.b571
- 13*m.b572 - 8*m.b573 - 24*m.b574 - 16*m.b575 - 10*m.b578 - 7*m.b579 - 13*m.b580 - 8*m.b581
- 23*m.b582 - 15*m.b583 - 9*m.b586 - 9*m.b587 - 30*m.b588 - 30*m.b589 - 39*m.b590 - 39*m.b591
- 8*m.b594 - 7*m.b595 - 20*m.b596 - 15*m.b597 - 28*m.b598 - 22*m.b599 - 8*m.b602 - 6*m.b603
- 15*m.b604 - 10*m.b605 - 23*m.b606 - 16*m.b607 - m.x608 - m.x609 + 5*m.x620 + 10*m.x621
- 2*m.x630 - m.x631 + 80*m.x632 + 90*m.x633 + 285*m.x634 + 390*m.x635 + 290*m.x636 + 405*m.x637
- 5*m.b692 - 4*m.b693 - 8*m.b694 - 7*m.b695 - 6*m.b696 - 9*m.b697 - 10*m.b698 - 9*m.b699
- 6*m.b700 - 10*m.b701, sense=maximize)
# Auto-generated constraints.
# c2-c71: linear coupling constraints, each of the form x_i == coeff * x_j.
m.c2 = Constraint(expr= m.x2 - 0.2*m.x72 == 0)
m.c3 = Constraint(expr= m.x3 - 0.2*m.x73 == 0)
m.c4 = Constraint(expr= m.x4 - 0.2*m.x74 == 0)
m.c5 = Constraint(expr= m.x5 - 0.2*m.x75 == 0)
m.c6 = Constraint(expr= m.x6 - 0.2*m.x76 == 0)
m.c7 = Constraint(expr= m.x7 - 0.2*m.x77 == 0)
m.c8 = Constraint(expr= m.x8 - 0.2*m.x78 == 0)
m.c9 = Constraint(expr= m.x9 - 0.2*m.x79 == 0)
m.c10 = Constraint(expr= m.x10 - 0.2*m.x80 == 0)
m.c11 = Constraint(expr= m.x11 - 0.2*m.x81 == 0)
m.c12 = Constraint(expr= m.x12 - 0.5*m.x82 == 0)
m.c13 = Constraint(expr= m.x13 - 0.5*m.x83 == 0)
m.c14 = Constraint(expr= m.x14 - 0.5*m.x84 == 0)
m.c15 = Constraint(expr= m.x15 - 0.5*m.x85 == 0)
m.c16 = Constraint(expr= m.x16 - 0.7*m.x86 == 0)
m.c17 = Constraint(expr= m.x17 - 0.7*m.x87 == 0)
m.c18 = Constraint(expr= m.x18 - 0.7*m.x88 == 0)
m.c19 = Constraint(expr= m.x19 - 0.7*m.x89 == 0)
m.c20 = Constraint(expr= m.x20 - 1.2*m.x90 == 0)
m.c21 = Constraint(expr= m.x21 - 1.2*m.x91 == 0)
m.c22 = Constraint(expr= m.x22 - 1.2*m.x92 == 0)
m.c23 = Constraint(expr= m.x23 - 1.2*m.x93 == 0)
m.c24 = Constraint(expr= m.x24 - 0.5*m.x94 == 0)
m.c25 = Constraint(expr= m.x25 - 0.5*m.x95 == 0)
m.c26 = Constraint(expr= m.x26 - 0.7*m.x96 == 0)
m.c27 = Constraint(expr= m.x27 - 0.7*m.x97 == 0)
m.c28 = Constraint(expr= m.x28 - 1.2*m.x98 == 0)
m.c29 = Constraint(expr= m.x29 - 1.2*m.x99 == 0)
m.c30 = Constraint(expr= m.x30 - 1.2*m.x100 == 0)
m.c31 = Constraint(expr= m.x31 - 1.2*m.x101 == 0)
m.c32 = Constraint(expr= m.x32 - 1.2*m.x102 == 0)
m.c33 = Constraint(expr= m.x33 - 1.2*m.x103 == 0)
m.c34 = Constraint(expr= m.x34 - 1.2*m.x104 == 0)
m.c35 = Constraint(expr= m.x35 - 1.2*m.x105 == 0)
m.c36 = Constraint(expr= m.x36 - 0.3*m.x106 == 0)
m.c37 = Constraint(expr= m.x37 - 0.3*m.x107 == 0)
m.c38 = Constraint(expr= m.x38 - 0.9*m.x108 == 0)
m.c39 = Constraint(expr= m.x39 - 0.9*m.x109 == 0)
m.c40 = Constraint(expr= m.x40 - 0.3*m.x110 == 0)
m.c41 = Constraint(expr= m.x41 - 0.3*m.x111 == 0)
m.c42 = Constraint(expr= m.x42 - 0.9*m.x112 == 0)
m.c43 = Constraint(expr= m.x43 - 0.9*m.x113 == 0)
m.c44 = Constraint(expr= m.x44 - 0.4*m.x114 == 0)
m.c45 = Constraint(expr= m.x45 - 0.4*m.x115 == 0)
m.c46 = Constraint(expr= m.x46 - 0.4*m.x116 == 0)
m.c47 = Constraint(expr= m.x47 - 0.4*m.x117 == 0)
m.c48 = Constraint(expr= m.x48 - 0.4*m.x118 == 0)
m.c49 = Constraint(expr= m.x49 - 0.4*m.x119 == 0)
m.c50 = Constraint(expr= m.x50 - 1.6*m.x120 == 0)
m.c51 = Constraint(expr= m.x51 - 1.6*m.x121 == 0)
m.c52 = Constraint(expr= m.x52 - 1.6*m.x122 == 0)
m.c53 = Constraint(expr= m.x53 - 1.6*m.x123 == 0)
m.c54 = Constraint(expr= m.x54 - 1.1*m.x124 == 0)
m.c55 = Constraint(expr= m.x55 - 1.1*m.x125 == 0)
m.c56 = Constraint(expr= m.x56 - 1.1*m.x126 == 0)
m.c57 = Constraint(expr= m.x57 - 1.1*m.x127 == 0)
m.c58 = Constraint(expr= m.x58 - 0.7*m.x128 == 0)
m.c59 = Constraint(expr= m.x59 - 0.7*m.x129 == 0)
m.c60 = Constraint(expr= m.x60 - 0.7*m.x130 == 0)
m.c61 = Constraint(expr= m.x61 - 0.7*m.x131 == 0)
m.c62 = Constraint(expr= m.x62 - 0.7*m.x132 == 0)
m.c63 = Constraint(expr= m.x63 - 0.7*m.x133 == 0)
m.c64 = Constraint(expr= m.x64 - 0.2*m.x134 == 0)
m.c65 = Constraint(expr= m.x65 - 0.2*m.x135 == 0)
m.c66 = Constraint(expr= m.x66 - 0.7*m.x136 == 0)
m.c67 = Constraint(expr= m.x67 - 0.7*m.x137 == 0)
m.c68 = Constraint(expr= m.x68 - 0.3*m.x138 == 0)
m.c69 = Constraint(expr= m.x69 - 0.3*m.x139 == 0)
m.c70 = Constraint(expr= m.x70 - 0.9*m.x140 == 0)
m.c71 = Constraint(expr= m.x71 - 0.9*m.x141 == 0)
# c72-c83: simple lower bounds on selected x variables.
m.c72 = Constraint(expr= m.x52 >= 1.2)
m.c73 = Constraint(expr= m.x53 >= 1.15)
m.c74 = Constraint(expr= m.x56 >= 1.2)
m.c75 = Constraint(expr= m.x57 >= 1.15)
m.c76 = Constraint(expr= m.x64 >= 1.1)
m.c77 = Constraint(expr= m.x65 >= 1.1)
m.c78 = Constraint(expr= m.x66 >= 1.1)
m.c79 = Constraint(expr= m.x67 >= 1.1)
m.c80 = Constraint(expr= m.x68 >= 1.4)
m.c81 = Constraint(expr= m.x69 >= 1.3)
m.c82 = Constraint(expr= m.x70 >= 1.3)
m.c83 = Constraint(expr= m.x71 >= 1.2)
# c84-c93: simple upper bounds on selected x variables.
m.c84 = Constraint(expr= m.x2 <= 55)
m.c85 = Constraint(expr= m.x3 <= 40)
m.c86 = Constraint(expr= m.x12 <= 46)
m.c87 = Constraint(expr= m.x13 <= 41)
m.c88 = Constraint(expr= m.x20 <= 45)
m.c89 = Constraint(expr= m.x21 <= 62)
m.c90 = Constraint(expr= m.x44 <= 54)
m.c91 = Constraint(expr= m.x45 <= 51)
m.c92 = Constraint(expr= m.x58 <= 40)
m.c93 = Constraint(expr= m.x59 <= 45)
# c94-c126: equality constraints linking groups of x variables
# (balance-style sums equal to zero).
m.c94 = Constraint(expr= m.x2 - m.x4 - m.x6 == 0)
m.c95 = Constraint(expr= m.x3 - m.x5 - m.x7 == 0)
m.c96 = Constraint(expr= m.x8 - m.x10 == 0)
m.c97 = Constraint(expr= m.x9 - m.x11 == 0)
m.c98 = Constraint(expr= m.x12 - m.x14 + m.x24 == 0)
m.c99 = Constraint(expr= m.x13 - m.x15 + m.x25 == 0)
m.c100 = Constraint(expr= m.x16 - m.x18 + m.x26 == 0)
m.c101 = Constraint(expr= m.x17 - m.x19 + m.x27 == 0)
m.c102 = Constraint(expr= m.x20 - m.x22 - m.x28 == 0)
m.c103 = Constraint(expr= m.x21 - m.x23 - m.x29 == 0)
m.c104 = Constraint(expr= m.x30 - m.x32 - m.x34 == 0)
m.c105 = Constraint(expr= m.x31 - m.x33 - m.x35 == 0)
m.c106 = Constraint(expr= m.x36 - m.x40 == 0)
m.c107 = Constraint(expr= m.x37 - m.x41 == 0)
m.c108 = Constraint(expr= m.x38 - m.x42 == 0)
m.c109 = Constraint(expr= m.x39 - m.x43 == 0)
m.c110 = Constraint(expr= m.x44 - m.x46 - m.x48 == 0)
m.c111 = Constraint(expr= m.x45 - m.x47 - m.x49 == 0)
m.c112 = Constraint(expr= m.x50 - m.x52 == 0)
m.c113 = Constraint(expr= m.x51 - m.x53 == 0)
m.c114 = Constraint(expr= m.x54 - m.x56 == 0)
m.c115 = Constraint(expr= m.x55 - m.x57 == 0)
m.c116 = Constraint(expr= m.x58 - m.x60 == 0)
m.c117 = Constraint(expr= m.x59 - m.x61 == 0)
m.c118 = Constraint(expr= m.x4 - m.x8 - m.x142 == 0)
m.c119 = Constraint(expr= m.x5 - m.x9 - m.x143 == 0)
m.c120 = Constraint(expr= m.x6 + m.x14 - m.x16 - m.x144 == 0)
m.c121 = Constraint(expr= m.x7 + m.x15 - m.x17 - m.x145 == 0)
m.c122 = Constraint(expr= m.x22 - m.x24 - m.x26 - m.x146 == 0)
m.c123 = Constraint(expr= m.x23 - m.x25 - m.x27 - m.x147 == 0)
m.c124 = Constraint(expr= m.x28 - m.x30 - m.x148 == 0)
m.c125 = Constraint(expr= m.x29 - m.x31 - m.x149 == 0)
m.c126 = Constraint(expr= m.x34 - m.x36 - m.x38 - m.x150 == 0)
m.c127 | |
<gh_stars>1-10
import numpy as np
import os, sys
import copy
#from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
#import palettable as pal
from matplotlib import cm
from astroML.plotting import hist
from scipy import stats, special
#from visualize import *
import mcmc
#set seed so we get predictable results
np.random.seed( 12 )
from mpl_toolkits.mplot3d import Axes3D
#Profiling / Timing
# make ../tools importable so the local Timer helper can be found
sys.path.insert(0, '../tools')
from timer import Timer
#sigma_T = 8.0/3.0 # Thomson cross-section = 8 \pi r_e^2 / 3, in units of \pi r_e^2
class Params:
    """Simulation domain parameters; attributes are assigned by the caller."""
    mins = None  # per-axis lower bounds of the domain (indexed [0], [1], ...)
    maxs = None  # per-axis upper bounds of the domain
    lens = None  # per-axis domain lengths -- not used in this chunk, TODO confirm
def randab(a, b):
    """Return a uniform random float in the half-open interval [a, b)."""
    width = b - a
    return a + width * np.random.rand()
#random 2d location
def randLoc(params):
    """Draw a uniformly random (x, y) point inside the domain given by params."""
    lo, hi = params.mins, params.maxs
    x = randab(lo[0], hi[0])
    y = randab(lo[1], hi[1])
    return (x, y)
#random 3D direction in terms of angles phi and theta
def randUnitSphere():
    """Draw a random direction as an (azimuth, latitude) angle pair.

    NOTE(review): uniform in latitude is not uniform on the sphere
    (no cos(theta) weighting) -- confirm this is intended.
    """
    vphi = randab(0.0, 2.0 * np.pi)         # azimuth in [0, 2*pi)
    vthe = randab(-np.pi / 2.0, np.pi / 2.0)  # latitude in [-pi/2, pi/2]
    return (vphi, vthe)
def sph2cart(vr, vphi, vthe):
    """Convert spherical coordinates (radius, azimuth vphi, angle vthe) to Cartesian.

    NOTE(review): vthe is used here as a polar (colatitude) angle, while
    randUnitSphere() draws a latitude -- confirm the intended convention.
    """
    s = np.sin(vthe)
    return vr * s * np.cos(vphi), vr * s * np.sin(vphi), vr * np.cos(vthe)
def pol2cart(vr, vphi, vthe):
    """Convert polar coordinates (radius, angle vphi) to Cartesian; z is always 0.

    The vthe argument is accepted for call-compatibility with sph2cart but ignored.
    """
    return vr * np.cos(vphi), vr * np.sin(vphi), 0.0
def randVel(vabs):
    """Return Cartesian components of a randomly oriented vector of magnitude vabs."""
    vphi, vthe = randUnitSphere()
    return sph2cart(vabs, vphi, vthe)
def unitVecX(vec):
    """Return the 2-element array [0, vec_x] (x component anchored at the origin)."""
    return np.asarray((0.0, vec[0]))
def unitVecY(vec):
    """Return the 2-element array [0, vec_y] (y component anchored at the origin)."""
    return np.asarray((0.0, vec[1]))
def unitVecZ(vec):
    """Return the 2-element array [0, vec_z] (z component anchored at the origin)."""
    return np.asarray((0.0, vec[2]))
def scaleVecX(vec):
    """Return [0, vec_x] as an array.

    NOTE(review): currently identical to unitVecX -- confirm whether a scale
    factor was intended here.
    """
    return np.array((0.0, vec[0]))
def scaleVecY(vec):
    """Return [0, vec_y] as an array.

    NOTE(review): currently identical to unitVecY -- confirm whether a scale
    factor was intended here.
    """
    return np.array((0.0, vec[1]))
def scaleVecZ(vec):
    """Return [0, vec_z] as an array.

    NOTE(review): currently identical to unitVecZ -- confirm whether a scale
    factor was intended here.
    """
    return np.array((0.0, vec[2]))
def norm(vec):
    """Euclidean length of a 3-vector."""
    vx, vy, vz = vec[0], vec[1], vec[2]
    return np.sqrt(vx * vx + vy * vy + vz * vz)
#unit vector into direction a x b
def uCross(vecA, vecB):
    """Return the unit vector along vecA x vecB, or 0.0 if the cross product vanishes."""
    cross = np.cross(vecA, vecB)
    n = norm(cross)
    if n == 0.0:
        return 0.0
    return cross / n
def printVec(vec, name):
    """Debug helper: print a labelled 3-vector and its Euclidean length.

    :param vec: indexable with components vec[0], vec[1], vec[2]
    :param name: label printed in front of the components
    """
    # print() call form works under both Python 2 and 3; the original used a
    # Python-2-only print statement.
    print(" {}: ({}, {}, {}) |v| = {} ".format(name, vec[0], vec[1], vec[2], norm(vec)))
#######################################
#### Sample from blackbody distribution
def bbodySample(kTbb):
    """Draw a photon energy from a blackbody distribution of temperature kTbb.

    Implements the sampling recipe of Pozdnyakov, Sobol & Sunyaev (1983):
    hv = -kTbb * ln(xi2*xi3*xi4) / alpha, where alpha is the smallest
    integer j such that sum_{i=1..j} i**-3 >= 1.202*xi1 (1.202 ~ zeta(3)).

    :param kTbb: blackbody temperature (same energy units as the returned hv)
    :return: sampled photon energy hv > 0
    """
    xi1 = np.random.rand()
    xi2 = np.random.rand()
    xi3 = np.random.rand()
    xi4 = np.random.rand()
    if 1.202 * xi1 < 1:
        xi = 1.0
    else:
        # Accumulate sum_{i=1..jj} i**-3 until it reaches 1.202*xi1.
        # The original loop condition was self-contradictory
        # (x <= cum AND x > cum + positive), so the loop body never ran and
        # xi was always 2.0 in this branch.
        jj = 2.0
        cum = 1.0 + jj ** (-3)
        while 1.202 * xi1 > cum:
            jj = jj + 1.0
            cum = cum + jj ** (-3)
        xi = jj
    hv = - kTbb * np.log(xi2 * xi3 * xi4) / xi
    return hv
### Sample from Maxwellian distribution
def maxwellSample(kTe):
    """Draw an electron momentum from a Maxwellian of temperature kTe.

    Uses the rejection samplers of Pozdnyakov, Sobol & Sunyaev (1983):
    a non-relativistic scheme for kTe < 0.29 and the relativistic one above.

    :param kTe: electron temperature in units of m_e c^2
    :return: sampled momentum pel (in units of m_e c)
    """
    done = False
    if kTe < 0.29:
        # non-relativistic branch
        while (not done):
            xi1 = np.random.rand()
            xi2 = np.random.rand()
            xip = -1.5 * np.log(xi1)
            xi_limit = 0.151 * (1. + kTe * xip)**2 * xip * (2. + kTe * xip) * xi1
            if xi2**2 < xi_limit:
                pel = np.sqrt( kTe * xip * (2. + kTe * xip) )
                done = True
    else:
        # relativistic branch
        while (not done):
            xi1 = np.random.rand()
            xi2 = np.random.rand()
            xi3 = np.random.rand()
            xi4 = np.random.rand()
            # Original code assigned eta twice (first value overwritten) and
            # tested an undefined variable zeta, raising NameError whenever
            # kTe >= 0.29; restored the PSS83 zeta/eta pair below.
            zeta = - kTe * np.log( xi1 * xi2 * xi3 )
            eta = - kTe * np.log( xi1 * xi2 * xi3 * xi4 )
            if (eta**2 - zeta**2) > 1.0:
                pel = zeta
                done = True
    return pel
########################################
# Monte Carlo Compton scattering according to Sobol 1977
# The reference frame where the scattering is calculated is chosen as:
# k = Omega (direction of incoming photon)
# j = k x beta (incoming electron is in the (i,k) plane)
# i - complementing the right-handed basis
def comptonScatter(e, p, axs, plot):
    """Perform one Monte Carlo Compton scattering of photon p off electron e.

    Uses rejection sampling of the scattering angle (Sobol 1977) in the
    (i, j, k) frame described above, then transforms back to the lab frame.

    :param e: electron object (mcmc.electron-like: vx/vy/vz/vmod/gamma accessors)
    :param p: photon object (mcmc.photon-like: vx/vy/vz direction, hv energy)
    :param axs: sequence of matplotlib 3D axes; axs[3] is drawn on when plot is True
    :param plot: bool; if True, draw the scattered photon/electron vectors
    :return: (es, phs) -- the scattered electron and scattered photon
    """
    # notes about units
    # p.hv = hv/m_e c^2
    # e.v = v/c
    beta0 = np.array([ e.vx(), e.vy(), e.vz() ]) / e.vmod() # unit vector in direction of electron motion in lab frame
    omega = np.array([ p.vx(), p.vy(), p.vz() ]) # unit vector in photon direction in lab frame
    #choose scattering frame (i,j,k)
    kvec = omega # k vector along photon direction omega
    jvec = uCross(kvec, beta0)
    ivec = uCross(jvec, kvec)
    M = np.array([ ivec, jvec, kvec ]) # transformation matrix between lab frame and ijk frame
    #
    # To add exception when beta || omega
    #
    # if norm(jvec) == 0.0:
    #     jvec = uCross(kvec,beta0) ## TO BE CHANGED!
    #     ivec = uCross(jvec, kvec)
    cosalpha = np.dot( kvec, beta0 ) # cosine of the angle between electron and k-vector
    sinalpha = np.sqrt( 1.0 - cosalpha**2 ) # sine of the same angle
    ##################################################
    mu = cosalpha # in ijk frame angle between e and ph equals to the angle between e and k vector
    y = p.hv() * e.gamma() * (1.0 - mu*e.vmod() ) # initial photon and electron 4-product
    #scatter: rejection-sample the outgoing direction until accepted
    done = False
    OmegaOmegap = 0.0
    while not(done):
        z1 = np.random.rand()
        z2 = np.random.rand()
        z3 = np.random.rand()
        # draw new possible angles
        mup = (e.vmod() + 2.0*z1 - 1.0)/(1.0 + e.vmod()*(2.0*z1 - 1.0)) # cos(alpha') = k \dot Omega'
        phip = 2.0*np.pi*z2 # azimuthal angle calculated from (-j)
        sinalphap = np.sqrt( 1.0 - mup**2 ) #
        OmegaOmegap = mu * mup - sinalphap * np.sin(phip) * sinalpha # angle between incoming and outgoing photons
        yp = y / (1.0 + p.hv() * (1.0 - OmegaOmegap) / ( e.gamma() * (1.0 - mup*e.vmod()) ))
        # acceptance weight from the Klein-Nishina-like cross-section factor
        YY = yp/y + (yp/y)**3 + (yp/y)**2 *( (1.0/yp - 1.0/y)**2 - 2.0*( 1.0/yp - 1.0/y) )
        if YY > 2.0*z3:
            done = True
    #now we have scattered successfully
    #new energy
    hvp = yp / ( e.gamma()*(1.0 - mup*e.vmod()) )
    #hvp2 = p.hv() * (1. - e.v() * mu) / (1. - e.v() * mup + p.hv() * (1. - OmegaOmegap)/ e.gamma()) # energy test
    #print"hv = {}, hvp2 ={}".format(hvp,hvp2) # compare new photon energy calculated different ways
    #print"hvp*p.hv*(1-OOp) = {}, y-yp = {}".format(hvp*p.hv()*(1.-OmegaOmegap),y-yp) # check if the quantities are conserved
    #new direction in ijk coordinate system
    Omegap_ijk = np.array( [mup*sinalpha + sinalphap * np.sin(phip) * mu, -sinalphap * np.cos(phip),
                            mup * cosalpha - sinalphap * np.sin(phip) * sinalpha] )
    Omegap = np.dot( np.linalg.inv(M), Omegap_ijk ) # transferring back to lab system
    phs = mcmc.photon( hvp, Omegap[0], Omegap[1], Omegap[2] )
    # scattered electron parameters from energy/momentum conservation
    gammaes = e.gamma() + p.hv() - hvp
    vxes = ( e.gamma() * e.vx() + p.hv() * omega[0] - hvp * Omegap[0] ) / gammaes
    vyes = ( e.gamma() * e.vy() + p.hv() * omega[1] - hvp * Omegap[1] ) / gammaes
    vzes = ( e.gamma() * e.vz() + p.hv() * omega[2] - hvp * Omegap[2] ) / gammaes
    es = mcmc.electron()
    es.loadVelComponents( vxes, vyes, vzes )
    ves= np.array([ vxes, vyes, vzes ])
    #### Plotting 3D vectors of scattered particles
    if plot:
        axs[3].plot( scaleVecX(hvp*Omegap), scaleVecY(hvp*Omegap), scaleVecZ(hvp*Omegap), alpha=0.2, linestyle='solid', color='red')#, label='Omegap' )
        axs[3].plot( scaleVecX(ves), scaleVecY(ves), scaleVecZ(ves), alpha=0.2, linestyle='dashed', color='blue')#, label='Omegap' )
    return es, phs ### ! return scattered electron and photon
    # return e, phs # return scattered photon and initial electron
# Lorentz transformation to electron rest frame (sign=1) or back to lab frame (sign=-1)
# Lorentz transformation of 4-vector vec1 to the electron frame with 4-vector vec2
# def lorentz(vec1, vec2, intt)
#
# gamma = vec2[0]
# eph = vec1[0]
#
# vec1spacial = (vec1[1], vec1[2], vec1[3])
# vec2spacial = (vec2[1], vec2[2], vec2[3])
#
# pv = matmul(np.transpose(vec1spacial),vec2spacial)
# t = (gamma - 1.0) * pv - eph * np.sqrt(gamma**2 - 1.0)
#
# eph = gamma*eph + pv * np.sqrt(gamma**2 - 1.0)
# vec1spacial = vec1spacial + t * vec2spacial
#
# vec1 = (eph, vec1spacial[0], vec1spacial[1], vec1spacial[2])
#
# return vec1
#
# # Transformation (rotation) of vec2 in the reference frame connected with vec1 to lab frame
# def transfToLab(vec1, vec2)
#
# t = np.sqrt( ve1[0]**2 + vec1[1]**2 )
#
# if t == 0.0:
# mtransf = np.array([0., 0., -1.], [0., 1., 0.], [1., 0., 0.])
# else:
# a=vec[0]
# b=vec[0]
# c=vec[0]
# mtransf = np.array(a, -a*c/t, b/t], [b, -b*c/p, -a/p], [c, p, 0.])
#
# vec2 = np.matmul(mtransf,vec2)
#
# return vec2
# Monte Carlo Compton scattering by Boris
def comptonScatterBoris(e, p, ax, plot):
lorentz(p,e,1) # transformation of the photon vector from lab to electron rest frame
# perform Compton scattering in the electron rest frame
#
omega0 # unit vector in the direction of the incoming photon
omegap # unit vector in the direction of the outgoing photon
vel= np.array( vxes, vyes, vzes )
omegap=transfToLab(omega0,omegap) # transformation (rotation) of the scattered photon vector to lab frame
vel=transfToLab(omega0,vel) # transformation (rotation) of the scattered electron vector to the | |
import numpy as np
import ast
import eccpy.tools as tools
def judge_fit(dfe, sLet, settings):
''' Analyses the fit of the experimental data to a sigmoidal curve.
Parameters
----------
dfe : pandas dataframe
Dataframe for evaluation of a single curve.
The index is a long list of parameter names, e.g. EC50, hill_constants, etc.
There are three columns. A) sLet (the relevant sample letter),
B) sLet_okay (column for labeling if data seems okay)
C) sLet_colour (column for defining text colour in output figures (red for low-quality)
sLet : string
Sample letter.
settings : pandas Series
User-defined settings derived from the settings excel sheet.
Includes all the relevant user-defined thresholds for the judge_fit scripts.
Returns
-------
dfe : pandas dataframe
Original dataframe, with added annotations in sLet_okay and sLet_colour columns concerning data quality.
Notes
-----
How are low-quality EC50 values identified?
1) Does the hillslope parameter from the fitted curve approach 0?
Indicates an exponential rather than a sigmoid curve.
2) Does the curve begin or end with a negative value on the y-axis?
Indicates an exponential curve rather than a sharp Z-shaped curve
3) Is the r_squared value below the user-defined threshold?
Indicates a poor fit to data.
4) Are the number of datapoints before (or after) the EC50 below a user-defined threshold?
Suggests that the dose range is not optimal, curve may not be sigmoidal.
5) Does the standard deviation of the datapoints to the left or right of the EC50 exceed a user-defined value?
Suggests a poor fit to data, or the presence of an outlier.
6) Does the Slope At X-axis Extremes (SAXE) exceed a user-defined threshold?
High slopes at the initial or final datapoints suggest non-sigmoidal curves, usually due to an
inappropriate range of doses used in that experiment.
7) Does the dose stepsize at the EC50 exceed a user-defined threshold?
The higher the dose steps, the less accurate the final EC50 value.
What happens when a low-quality EC50 value or curve is identified?
1) Each of the filters has a neighbouring "_okay" or "_colour" parameter
If the EC50 value seems to be okay, for that particular filter
"_okay" will be labelled True
"_colour" will be labelled as "k" (black)
If the EC50 value seems to be of low quality
"_okay" will be labelled False
"_colour" will be labelled as "r" (red)
2) There will be a final parameter for each EC50 value, "data_seems_okay"
if there are no False values in "_okay" column for all tested filters :
data_seems_okay = True
the EC50 will be coloured black in the output graph with the curve, and in the final barchart
with all data for that day/experiment
else if there is at least one False value in the "_okay" column :
data_seems_okay = False
The EC50 will be coloured red in the output graph with the curve, and in the final barchart
with all data for that day/experiment.
The EC50 value WILL BE IGNORED by the "analysis" scripts that compare results for different days/experiments.
'''
# setup cutoffs for judging data quality
# number datapoints neighbouring the EC50 that are excluded from the highdose and lowdose data selection
# set higher if you use a large number of dose concentrations
n_neighb = settings["n_neighb"]
# maximum standard deviation of the response datapoints at high dose concentration
max_std_resp_highdose_dp = settings["max_std_resp_highdose_dp"]
# maximum standard deviation of the response datapoints at low dose concentration
max_std_resp_lowdose_dp = settings["max_std_resp_lowdose_dp"]
min_flat_lowdose_dp = settings["min_flat_lowdose_dp"]
min_flat_highdose_dp = settings["min_flat_highdose_dp"]
# minimum rsquared of the fit from sigmoidal curve to the data
min_rsquared = settings["min_rsquared"]
# minimum acceptable dase concentration stepsizes. Smaller stepsizes give more accurate EC50 values!
max_acceptable_doseconc_stepsize_at_EC50 = settings["max_acceptable_doseconc_stepsize_at_EC50"]
max_recommended_doseconc_stepsize_at_EC50 = settings["max_recommended_doseconc_stepsize_at_EC50"]
# minimum hillslope of the fit from sigmoidal curve to the data (below 1, tends not to be sigmoidal)
weak_hillslope_range = ast.literal_eval(settings["weak_hillslope_range"])
# minimum value for the end of the curve, on the y-axis (below -1, tends not to be sigmoidal)
min_curve_lowresp = settings["min_curve_lowresp"]
# create a list that contains the database suffixes (_orig for original, _ful for fixed upper limit)
# datasets = ["_orig", "_ful"]
datasets = ast.literal_eval(settings["datasets"])
for d in datasets:
x = np.array(dfe.loc["x{}".format(d), sLet])
y = np.array(dfe.loc["y{}".format(d), sLet])
# identify the datapoints at high dose concentrations
dfe.loc["indices_highdose_datapoints{}".format(d),sLet] = np.where(x > dfe.loc["EC50{}".format(d), sLet])[0]
# remove the datapoint closest to EC50
dfe.loc["indices_highdose_datapoints_excl_nearest_EC50{}".format(d),sLet] = dfe.loc["indices_highdose_datapoints{}".format(d),sLet][n_neighb:]
# slice using the indices to yield the OD600 values for the highdose datapoints
dfe.loc["response_highdose_datapoints{}".format(d),sLet] = y[dfe.loc["indices_highdose_datapoints_excl_nearest_EC50{}".format(d),sLet]]
# count the number of highdose datapoint
dfe.loc["n_highdose_datapoints{}".format(d),sLet] = len(dfe.loc["response_highdose_datapoints{}".format(d),sLet])
# identify the lowdose datapoints, count and measure standard deviation
# identify the lowdose datapoints (x < EC50)
dfe.loc["indices_lowdose_datapoints{}".format(d),sLet] = np.where(x < dfe.loc["EC50{}".format(d), sLet])[0]
# exclude datapoint closest to the EC50
dfe.loc["indices_lowdose_datapoints_excl_nearest_EC50{}".format(d),sLet] = dfe.loc["indices_lowdose_datapoints{}".format(d),sLet][:-n_neighb]
# use index to select the y-axis (response) data
dfe.loc["response_lowdose_datapoints{}".format(d),sLet] = y[dfe.loc["indices_lowdose_datapoints_excl_nearest_EC50{}".format(d),sLet]]
# count the datapoints
dfe.loc["n_lowdose_datapoints{}".format(d),sLet] = len(dfe.loc["response_lowdose_datapoints{}".format(d),sLet])
# indices_highdose_datapoints_excl_nearest_EC50_orig = indices_highdose_datapoints_orig[1:]
# response_highdose_datapoints_orig = y_orig[indices_highdose_datapoints_excl_nearest_EC50_orig]
# # count the number of highdose datapoints
# dfe.loc["n_highdose_datapoints_orig",sLet] = len(response_highdose_datapoints_orig)
# # identify the ful datapoints at high dose concentrations, ignoring datapoint closest to EC50
# indices_highdose_datapoints_ful = np.where(x_orig > EC50_ful)[0]
# indices_highdose_datapoints_excl_nearest_EC50_ful = indices_highdose_datapoints_ful[1:]
# response_highdose_datapoints_ful = y_orig[indices_highdose_datapoints_excl_nearest_EC50_ful]
# # count the number of highdose datapoints
# dfe.loc["n_highdose_datapoints_ful",sLet] = len(response_highdose_datapoints_ful)
#######################################################################################################
# #
# Are the number of datapoints before (or after) the EC50 below a user-defined threshold? #
# #
#######################################################################################################
# judge whether the data contains enough high and lowdose datapoints
if dfe.loc["n_highdose_datapoints{}".format(d),sLet] >= min_flat_highdose_dp:
dfe.loc["n_highdose_datapoints{}".format(d),"%s_okay" % sLet] = True
dfe.loc["n_highdose_datapoints{}".format(d),"%s_colour" % sLet] = 'k'
else:
dfe.loc["n_highdose_datapoints{}".format(d),"%s_okay" % sLet] = False
dfe.loc["n_highdose_datapoints{}".format(d),"%s_colour" % sLet] = 'r'
# evaluate as "okay" if number of highdose or lowdose datapoints is more than two
if dfe.loc["n_lowdose_datapoints{}".format(d),sLet] >= min_flat_lowdose_dp:
dfe.loc["n_lowdose_datapoints{}".format(d),"%s_okay" % sLet] = True
dfe.loc["n_lowdose_datapoints{}".format(d),"%s_colour" % sLet] = 'k'
else:
dfe.loc["n_lowdose_datapoints{}".format(d),"%s_okay" % sLet] = False
dfe.loc["n_lowdose_datapoints{}".format(d),"%s_colour" % sLet] = 'r'
#############################################################################################################
# #
# Does the standard deviation of datapoints to the left or right of the EC50 exceed a user-defined value? #
# #
#############################################################################################################
# judge whether the standard deviation of the high and lowdose datapoints is acceptable
if dfe.loc["n_highdose_datapoints{}".format(d),sLet] > 1:
# calculate std of highdose datapoints
dfe.loc["std_resp_highdose_datapoints{}".format(d),sLet] = np.std(dfe.loc["response_highdose_datapoints{}".format(d),sLet])
# evaluate as "okay" if std of highdose datapoints is less than a cutoff value (max_std_resp_highdose_dp)
if dfe.loc["std_resp_highdose_datapoints{}".format(d),sLet] < max_std_resp_highdose_dp:
dfe.loc["std_resp_highdose_datapoints{}".format(d),"%s_okay" % sLet] = True
dfe.loc["std_resp_highdose_datapoints{}".format(d),"%s_colour" % sLet] = 'k'
else:
dfe.loc["std_resp_highdose_datapoints{}".format(d),"%s_okay" % sLet] = False
dfe.loc["std_resp_highdose_datapoints{}".format(d),"%s_colour" % sLet] = 'r'
else:
# there is either insufficient lowresponse or highresponse datapoints(insuff_lowresp_dp, or insuff_highresp_dp).
# Replace std with 0, and colour black on the figure.
dfe.loc["std_resp_highdose_datapoints{}".format(d),sLet] = 0
dfe.loc["std_resp_highdose_datapoints{}".format(d),"%s_colour" % sLet] = 'k'
if dfe.loc["n_lowdose_datapoints{}".format(d),sLet] > 1:
# calculate std of lowdose datapoints
dfe.loc["std_resp_lowdose_datapoints{}".format(d),sLet] = np.std(dfe.loc["response_lowdose_datapoints{}".format(d),sLet])
# evaluate as "okay" if std of lowdose datapoints is less than a cutoff value
if dfe.loc["std_resp_lowdose_datapoints{}".format(d),sLet] < max_std_resp_lowdose_dp:
dfe.loc["std_resp_lowdose_datapoints{}".format(d),"%s_okay" % sLet] = True
dfe.loc["std_resp_lowdose_datapoints{}".format(d),"%s_colour" % sLet] = 'k'
else:
dfe.loc["std_resp_lowdose_datapoints{}".format(d),"%s_okay" % sLet] = False
dfe.loc["std_resp_lowdose_datapoints{}".format(d),"%s_colour" % sLet] = 'r'
else:
# there is either insufficient lowresponse or highresponse datapoints(insuff_lowresp_dp, or insuff_highresp_dp).
# Replace std with 0, and colour black.
dfe.loc["std_resp_lowdose_datapoints{}".format(d),sLet] = 0
dfe.loc["std_resp_lowdose_datapoints{}".format(d),"%s_colour" % sLet] = 'k'
#############################################################################################################
# #
# Does the dose stepsize at the EC50 exceed a user-defined threshold? #
# #
#############################################################################################################
# identify the tested dose concentration below the EC50
indices_lowdose_datapoints = np.where(x < dfe.loc["EC50{}".format(d),sLet])[0]
if indices_lowdose_datapoints.size != 0:
doseconc_before_EC50 = x[indices_lowdose_datapoints[-1]]
# identify the tested dose concentration after the EC50
doseconc_after_EC50 = x[dfe.loc["indices_highdose_datapoints{}".format(d),sLet][0]]
# add values to output dataframe, so that the plot can be annotated
dfe.loc["doseconc_steps_at_EC50{}".format(d),sLet] = (doseconc_before_EC50, doseconc_after_EC50)
# calculate the stepsize at the EC50. Smaller is better!
doseconc_stepsize_at_EC50 = doseconc_after_EC50 - doseconc_before_EC50
dfe.loc["doseconc_stepsize_at_EC50{}".format(d),sLet] = doseconc_stepsize_at_EC50
# evaluate as "okay" if the stepsize at the EC50 is smaller than the min acceptable value
if doseconc_stepsize_at_EC50 <= max_acceptable_doseconc_stepsize_at_EC50:
dfe.loc["doseconc_stepsize_at_EC50{}".format(d),"%s_okay" % sLet] = True
# if the stepsize is small, colour to dark red as a warning that the doseconc should | |
<reponame>columbia-robovision/dsr
import os.path as osp
import collections
import math
import os
import shutil
import cv2
import imageio
import numpy as np
import dominate
from dominate.tags import *
import queue
import threading
def euler2rotm(theta):
    """Build a 3x3 rotation matrix from XYZ Euler angles (radians).

    Rotations are applied in X, then Y, then Z order: R = Rz @ Ry @ Rx.
    """
    cx, sx = math.cos(theta[0]), math.sin(theta[0])
    cy, sy = math.cos(theta[1]), math.sin(theta[1])
    cz, sz = math.cos(theta[2]), math.sin(theta[2])
    rot_x = np.array([[1, 0, 0],
                      [0, cx, -sx],
                      [0, sx, cx]])
    rot_y = np.array([[cy, 0, sy],
                      [0, 1, 0],
                      [-sy, 0, cy]])
    rot_z = np.array([[cz, -sz, 0],
                      [sz, cz, 0],
                      [0, 0, 1]])
    return rot_z.dot(rot_y).dot(rot_x)
def transform_points(pts, transform):
    """Apply a rigid transform to a [3, N] point array.

    `transform` is a [3, 4] (or larger) matrix: the first three columns
    rotate, the fourth column translates. Returns a new [3, N] array.
    """
    rotation = transform[0:3, 0:3]
    translation = transform[0:3, 3:]
    return np.dot(rotation, pts) + np.tile(translation, (1, pts.shape[1]))
def project_pts_to_2d(pts, camera_view_matrix, camera_intrisic):
    """Project [3, N] world points into virtual-camera pixel coordinates.

    camera_intrisic is the [3, 3] matrix [[f, 0, 0], [0, f, 0], [0, 0, 1]]
    (f = focal length). Returns a [3, N] array: rows 0-1 are pixel coords,
    row 2 is the camera-space depth.
    """
    # World -> camera frame.
    cam_pts = transform_points(pts, camera_view_matrix[0:3, :])
    # Flip y/z axes to match the virtual-camera convention.
    axis_flip = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0]])
    cam_pts = transform_points(cam_pts, axis_flip)
    coord_2d = np.dot(camera_intrisic, cam_pts)
    # Perspective divide for the first two rows; keep depth in the third.
    coord_2d[0:2, :] = coord_2d[0:2, :] / np.tile(coord_2d[2, :], (2, 1))
    coord_2d[2, :] = cam_pts[2, :]
    return coord_2d
def project_pts_to_3d(color_image, depth_image, camera_intr, camera_pose):
    """Back-project a depth image into a channel-first world-coordinate map.

    Returns a [3, rows, cols] array of world XYZ per pixel.
    NOTE(review): the original named the depth dimensions (W, H); they are
    actually (rows, cols) of `depth_image` — confirm against callers.
    """
    n_rows, n_cols = depth_image.shape
    cam_pts, rgb_pts = get_pointcloud(color_image, depth_image, camera_intr)
    # Camera frame -> world frame.
    world_pts = np.transpose(
        np.dot(camera_pose[0:3, 0:3], np.transpose(cam_pts)) + np.tile(camera_pose[0:3, 3:], (1, cam_pts.shape[0])))
    pts = world_pts.reshape([n_rows, n_cols, 3])
    return np.transpose(pts, [2, 0, 1])
def get_pointcloud(color_img, depth_img, camera_intrinsics):
    """Back-project an RGB-D image into camera-frame 3-D points.

    Returns (cam_pts, rgb_pts), both [H*W, 3] in row-major pixel order.
    """
    im_h = depth_img.shape[0]
    im_w = depth_img.shape[1]
    # Pixel-coordinate grids.
    pix_x, pix_y = np.meshgrid(np.linspace(0, im_w - 1, im_w),
                               np.linspace(0, im_h - 1, im_h))
    # Pinhole back-projection: (u - cx) * z / fx, (v - cy) * z / fy.
    pts_x = (pix_x - camera_intrinsics[0, 2]) * (depth_img / camera_intrinsics[0, 0])
    pts_y = (pix_y - camera_intrinsics[1, 2]) * (depth_img / camera_intrinsics[1, 1])
    pts_z = depth_img.copy()
    cam_pts = np.stack((pts_x.ravel(), pts_y.ravel(), pts_z.ravel()), axis=1)
    # Flatten colors pixel-by-pixel into matching [H*W, 3] rows.
    rgb_pts = color_img.reshape(im_h * im_w, 3).copy()
    return cam_pts, rgb_pts
def get_heightmap(color_img, depth_img, cam_intrinsics, cam_pose, workspace_limits, heightmap_resolution):
    """Project an RGB-D image into an orthographic top-down RGB-D heightmap.

    Args:
        color_img: [H, W, 3] color image (uint8 channels are kept as-is).
        depth_img: [H, W] depth image in the same camera frame.
        cam_intrinsics: [3, 3] camera intrinsic matrix.
        cam_pose: camera-to-robot transform; uses its [0:3, 0:4] part.
        workspace_limits: [[x_min, x_max], [y_min, y_max], [z_min, z_max]]
            in robot coordinates.
        heightmap_resolution: meters per heightmap pixel.

    Returns:
        (color_heightmap, depth_heightmap): the color map is [Ny, Nx, 3]
        uint8; the depth map is [Ny, Nx] heights above z_min, clamped at 0.
    """
    # Compute heightmap size
    # NOTE: first dimension comes from the y-range, second from the x-range,
    # matching the [row=y, col=x] indexing used below.
    heightmap_size = np.round(((workspace_limits[1][1] - workspace_limits[1][0]) / heightmap_resolution,
                               (workspace_limits[0][1] - workspace_limits[0][0]) / heightmap_resolution)).astype(int)
    # Get 3D point cloud from RGB-D images
    surface_pts, color_pts = get_pointcloud(color_img, depth_img, cam_intrinsics)
    # Transform 3D point cloud from camera coordinates to robot coordinates
    surface_pts = np.transpose(
        np.dot(cam_pose[0:3, 0:3], np.transpose(surface_pts)) + np.tile(cam_pose[0:3, 3:], (1, surface_pts.shape[0])))
    # Sort surface points by z value (ascending), so that when several points
    # fall into the same heightmap cell the later (higher) write prevails.
    sort_z_ind = np.argsort(surface_pts[:, 2])
    surface_pts = surface_pts[sort_z_ind]
    color_pts = color_pts[sort_z_ind]
    # Filter out surface points outside heightmap boundaries
    heightmap_valid_ind = np.logical_and(np.logical_and(np.logical_and(
        np.logical_and(surface_pts[:, 0] >= workspace_limits[0][0], surface_pts[:, 0] < workspace_limits[0][1]),
        surface_pts[:, 1] >= workspace_limits[1][0]), surface_pts[:, 1] < workspace_limits[1][1]),
        surface_pts[:, 2] < workspace_limits[2][1])
    surface_pts = surface_pts[heightmap_valid_ind]
    color_pts = color_pts[heightmap_valid_ind]
    # Create orthographic top-down-view RGB-D heightmaps
    color_heightmap_r = np.zeros((heightmap_size[0], heightmap_size[1], 1), dtype=np.uint8)
    color_heightmap_g = np.zeros((heightmap_size[0], heightmap_size[1], 1), dtype=np.uint8)
    color_heightmap_b = np.zeros((heightmap_size[0], heightmap_size[1], 1), dtype=np.uint8)
    depth_heightmap = np.zeros(heightmap_size)
    # Robot xy -> heightmap pixel indices.
    heightmap_pix_x = np.floor((surface_pts[:, 0] - workspace_limits[0][0]) / heightmap_resolution).astype(int)
    heightmap_pix_y = np.floor((surface_pts[:, 1] - workspace_limits[1][0]) / heightmap_resolution).astype(int)
    color_heightmap_r[heightmap_pix_y, heightmap_pix_x] = color_pts[:, [0]]
    color_heightmap_g[heightmap_pix_y, heightmap_pix_x] = color_pts[:, [1]]
    color_heightmap_b[heightmap_pix_y, heightmap_pix_x] = color_pts[:, [2]]
    color_heightmap = np.concatenate((color_heightmap_r, color_heightmap_g, color_heightmap_b), axis=2)
    depth_heightmap[heightmap_pix_y, heightmap_pix_x] = surface_pts[:, 2]
    # Express heights relative to the workspace bottom; clamp negatives to 0.
    z_bottom = workspace_limits[2][0]
    depth_heightmap = depth_heightmap - z_bottom
    depth_heightmap[depth_heightmap < 0] = 0
    # depth_heightmap[depth_heightmap == -z_bottom] = np.nan
    return color_heightmap, depth_heightmap
def mkdir(path, clean=False):
    """Create directory `path` (including parents) if it does not exist.

    Args:
        path: directory path to create.
        clean: when True, remove any existing tree at `path` first.
    """
    if clean and os.path.exists(path):
        shutil.rmtree(path)
    # exist_ok avoids the TOCTOU race of the old exists()-then-makedirs check.
    os.makedirs(path, exist_ok=True)
def imresize(im, dsize, cfirst=False):
    """Resize an image to `dsize` (OpenCV (width, height) convention).

    When `cfirst` is True the image is channel-first (C, H, W) and is
    converted to channel-last for cv2 and back afterwards.
    """
    if not cfirst:
        return cv2.resize(im, dsize=dsize)
    resized = cv2.resize(im.transpose(1, 2, 0), dsize=dsize)
    return resized.transpose(2, 0, 1)
def imretype(im, dtype):
    """Convert an image between float-[0, 1] and integer representations.

    Args:
        im: array-like image; float input must lie in [0, 1], uint8/uint16
            input is normalized by its type maximum.
        dtype: target dtype name ('float', 'float16', 'float32', 'float64',
            'uint8' or 'uint16').

    Returns:
        np.ndarray in the requested dtype.

    Raises:
        NotImplementedError: on an unsupported source or target dtype.
    """
    im = np.array(im)
    # Normalize the source to float64 in [0, 1].
    # BUG FIX: `np.float` was a deprecated alias removed in NumPy 1.24;
    # use np.float64 (the same underlying type) instead.
    if im.dtype in ['float', 'float16', 'float32', 'float64']:
        im = im.astype(np.float64)
    elif im.dtype == 'uint8':
        im = im.astype(np.float64) / 255.
    elif im.dtype == 'uint16':
        im = im.astype(np.float64) / 65535.
    else:
        raise NotImplementedError('unsupported source dtype: {0}'.format(im.dtype))
    assert np.min(im) >= 0 and np.max(im) <= 1
    # Scale into the requested target dtype.
    if dtype in ['float', 'float16', 'float32', 'float64']:
        im = im.astype(dtype)
    elif dtype == 'uint8':
        im = (im * 255.).astype(dtype)
    elif dtype == 'uint16':
        im = (im * 65535.).astype(dtype)
    else:
        raise NotImplementedError('unsupported target dtype: {0}'.format(dtype))
    return im
def imwrite(path, obj):
    """Write an image, or a sequence of images, to `path` via imageio.

    A single array produces a still image; a sequence of arrays produces a
    multi-frame file (e.g. .gif), as decided by imageio from the extension.
    """
    # BUG FIX: `collections.Sequence` was removed in Python 3.10; the ABC
    # lives in collections.abc.
    from collections.abc import Sequence
    if not isinstance(obj, (Sequence, collections.UserList)):
        obj = [obj]
    writer = imageio.get_writer(path)
    for im in obj:
        # Normalize to uint8 and drop singleton dimensions for the writer.
        im = imretype(im, dtype='uint8').squeeze()
        if len(im.shape) == 3 and im.shape[0] == 3:
            # Channel-first (3, H, W) -> channel-last (H, W, 3).
            im = np.transpose(im, (1, 2, 0))
        writer.append_data(im)
    writer.close()
def flow2im(flow, max=None, dtype='float32', cfirst=False):
    """Render a 2-channel optical-flow field as an RGB image.

    Hue encodes direction, brightness encodes magnitude. `max` caps the
    magnitude used for normalization; when None the field's own maximum
    (floored at 1e-6) is used. With `cfirst` the result is (3, H, W).
    """
    flow = np.array(flow)
    if np.ndim(flow) == 3 and flow.shape[0] == 2:
        x, y = flow[0], flow[1]
    elif np.ndim(flow) == 3 and flow.shape[-1] == 2:
        x, y = flow[..., 0], flow[..., 1]
    else:
        raise NotImplementedError(
            'unsupported flow size: {0}'.format(flow.shape))
    magnitude, direction = cv2.cartToPolar(x, y)
    cap = max if max is not None else np.maximum(np.max(magnitude), 1e-6)
    hsv = np.zeros(list(magnitude.shape) + [3], dtype=np.uint8)
    hsv[..., 0] = direction * 90 / np.pi  # radians -> OpenCV hue range [0, 180)
    hsv[..., 1] = 255
    hsv[..., 2] = np.minimum(magnitude / cap, 1) * 255
    rgb = cv2.cvtColor(hsv, code=cv2.COLOR_HSV2RGB)
    rgb = imretype(rgb, dtype=dtype)
    return rgb.transpose(2, 0, 1) if cfirst else rgb
def draw_arrow(image, action, direction_num=8, heightmap_pixel_size=0.004):
    # image: [W, H, 3] (color image) or [W, H] (depth image)
    # Draw a 0.15 m push-action arrow (converted to pixels via
    # heightmap_pixel_size) from the action's start pixel along one of
    # `direction_num` evenly spaced directions.
    def put_in_bound(val, bound):
        # output: 0 <= val < bound
        val = min(max(0, val), bound - 1)
        return val
    img = image.copy()
    if isinstance(action, tuple):
        x_ini, y_ini, direction = action
    else:
        # dict-like action: key '2' is x, '1' is y, '0' is the direction index.
        x_ini, y_ini, direction = action['2'], action['1'], action['0']
    pushing_distance = 0.15
    # Direction index -> angle in radians over a full circle.
    angle = direction / direction_num * 2 * np.pi
    x_end = put_in_bound(int(x_ini + pushing_distance / heightmap_pixel_size * np.cos(angle)), image.shape[1])
    y_end = put_in_bound(int(y_ini + pushing_distance / heightmap_pixel_size * np.sin(angle)), image.shape[0])
    if img.shape[0] == 1:
        # gray img, white arrow
        # NOTE(review): this tests shape[0] == 1 (a leading singleton dim),
        # not ndim == 2 — confirm callers really pass gray images as [1, H, W].
        img = imretype(img[:, :, np.newaxis], 'uint8')
        cv2.arrowedLine(img=img, pt1=(x_ini, y_ini), pt2=(x_end, y_end), color=255, thickness=2, tipLength=0.2)
    elif img.shape[2] == 3:
        # rgb img, red arrow
        cv2.arrowedLine(img=img, pt1=(x_ini, y_ini), pt2=(x_end, y_end), color=(255, 0, 0), thickness=2, tipLength=0.2)
    return img
def multithreading_exec(num, q, fun, blocking=True):
    """
    Execute `fun` over every argument tuple queued in `q` using a pool of
    worker threads.

    :param num: number of worker threads to spawn
    :param q: queue.Queue of argument tuples, each applied as fun(*args)
    :param fun: callable executed once per queued tuple
    :param blocking: wait for all workers to finish (default True)
    """
    def drain_queue():
        # Each worker pulls work until the queue is exhausted.
        while True:
            try:
                args = q.get(block=False)
            except queue.Empty:
                return
            fun(*args)
            q.task_done()
    workers = []
    for _ in range(num):
        worker = threading.Thread(target=drain_queue)
        worker.start()
        workers.append(worker)
    if blocking:
        for worker in workers:
            if worker.is_alive():
                worker.join()
def html_visualize(web_path, data, ids, cols, others=None, title='visualization', threading_num=10):
    """
    Generate an HTML page with a table of per-(id, col) figures/text.

    :param web_path: (str) directory to save webpage. It will clear the old data!
    :param data: (dict of data).
        key: {id}_{col}.
        value: figure or text
            - figure: ndarray --> .png or [ndarrays,] --> .gif
            - text: str or [str,]
    :param ids: (list of str) name of each row
    :param cols: (list of str) name of each column
    :param others: (list of dict) other figures
        'name': str, name of the data, visualize using h2()
        'data': string or ndarray(image)
        'height': int, height of the image (default 256)
    :param title: (str) title of the webpage
    :param threading_num: number of threadings for imwrite (default 10)
    """
    others = [] if others is None else others  # avoid a mutable default argument
    figure_path = os.path.join(web_path, 'figures')
    mkdir(web_path, clean=True)
    mkdir(figure_path, clean=True)
    # Queue every figure, then write them concurrently.
    q = queue.Queue()
    for key, value in data.items():
        if isinstance(value, np.ndarray):
            q.put((os.path.join(figure_path, key + '.png'), value))
        elif isinstance(value, list) and isinstance(value[0], np.ndarray):
            # BUG FIX: was `if not isinstance(value, list) and ...`, which
            # never exported lists of frames as .gif and double-queued
            # single arrays (once as .png, once as .gif).
            q.put((os.path.join(figure_path, key + '.gif'), value))
    multithreading_exec(threading_num, q, imwrite)
    with dominate.document(title=title) as web:
        dominate.tags.h1(title)
        with dominate.tags.table(border=1, style='table-layout: fixed;'):
            # Header row with the column names.
            with dominate.tags.tr():
                with dominate.tags.td(style='word-wrap: break-word;', halign='center', align='center', width='64px'):
                    dominate.tags.p('id')
                for col in cols:
                    with dominate.tags.td(style='word-wrap: break-word;', halign='center', align='center', ):
                        dominate.tags.p(col)
            # One row per id; 'train*' rows get a distinct background colour.
            for id in ids:
                with dominate.tags.tr():
                    bgcolor = 'F1C073' if id.startswith('train') else 'C5F173'
                    with dominate.tags.td(style='word-wrap: break-word;', halign='center', align='center',
                                          bgcolor=bgcolor):
                        for part in id.split('_'):
                            dominate.tags.p(part)
                    for col in cols:
                        with dominate.tags.td(style='word-wrap: break-word;', halign='center', align='top'):
                            value = data.get(f'{id}_{col}', None)
                            if isinstance(value, str):
                                dominate.tags.p(value)
                            elif isinstance(value, list) and isinstance(value[0], str):
                                for v in value:
                                    dominate.tags.p(v)
                            else:
                                dominate.tags.img(style='height:128px',
                                                  src=os.path.join('figures', '{}_{}.png'.format(id, col)))
        # Extra figures/text rendered below the table.
        for idx, other in enumerate(others):
            dominate.tags.h2(other['name'])
            if isinstance(other['data'], str):
                dominate.tags.p(other['data'])
            else:
                imwrite(os.path.join(figure_path, '_{}_{}.png'.format(idx, other['name'])), other['data'])
                dominate.tags.img(style='height:{}px'.format(other.get('height', 256)),
                                  src=os.path.join('figures', '_{}_{}.png'.format(idx, other['name'])))
    with open(os.path.join(web_path, 'index.html'), 'w') as fp:
        fp.write(web.render())
def mask_visualization(mask):
# mask: numpy array, [B, K, W, H, D] or [B, W, H, D]
# Red, Green, Blue, Yellow, Purple
colors = [(255, 87, 89), | |
flag):
cnt += 1
flag, scattered_o, scattered_d, scattered_tm = scatter(m, ray(scattered_o, scattered_d, scattered_tm), p, normal, front_face, u, v)
res *= get_albedo(material_kind[m], material_extra[m], material_albedo[m], u, v, p)
t, p, front_face, normal, u, v, m = bvh_hit(ray(scattered_o, scattered_d), 0.001, float('inf'))
if (cnt > max_depth):
res = ti.Vector([0, 0, 0])
if (material_kind[m] != 3):
unit_direction = scattered_d.normalized()
t = 0.5 * (unit_direction[1] + 1.0)
res *= (1.0 - t) * ti.Vector([1.0, 1.0, 1.0]) + t * ti.Vector([0.5, 0.7, 1.0])
return res
# Filenames already uploaded; the position in this list is the texture id.
img_filename = []
# Staging field used to copy a freshly loaded image into `textures`.
img = ti.Vector.field(3, float, shape=(TEXTURE_MAXX, TEXTURE_MAXY))
def add_texture(filename):
    # Load `filename` into the global texture atlas and return its texture id.
    # Textures are de-duplicated by filename: a repeat load returns the
    # previously assigned id.
    @ti.kernel
    def cpy_img(xx:ti.i32, yy:ti.i32):
        # Copy the staging buffer into the next free texture slot.
        for i, j in ti.ndrange(xx, yy):
            textures[textures_cnt[None], i, j] = img[i, j]
    if img_filename.count(filename) > 0:
        return img_filename.index(filename)
    img_filename.append(filename)
    nimg = ti.imread(filename)
    img.from_numpy(nimg)
    # Record this texture's dimensions for later UV lookups.
    textures_size[textures_cnt[None]][0], textures_size[textures_cnt[None]][1] = nimg.shape[0], nimg.shape[1]
    cpy_img(nimg.shape[0], nimg.shape[1])
    textures_cnt[None] += 1
    return textures_cnt[None] - 1
# Parallel lists mapping .mtl material names to registered material ids.
mtl_id = []
mtl_name = []
def load_mtl(filename):
    # Parse a Wavefront .mtl file, registering each material (kind 6,
    # textured) with its ambient colour (Ka) and diffuse texture (map_Kd).
    fn = ""
    tx_nm = ""
    first_mtl = True
    albedo = ti.Vector([0.0, 0.0, 0.0])
    for line in open(filename, "r"):
        if line.startswith('#'):continue
        values = line.split()
        if (not values): continue
        if (values[0] == 'newmtl'):
            # A new material begins: flush the previous one (if any).
            if not first_mtl:
                mtl_name.append(fn)
                mtl_id.append(materials_add(6, albedo, add_texture(tx_nm)))
            else: first_mtl = False
            fn = values[1]
        elif values[0] == 'map_Kd':
            tx_nm = values[1]
        elif values[0] == 'Ka':
            Ka = list(map(float, values[1:]))
            albedo = ti.Vector([Ka[0], Ka[1], Ka[2]])
    # Flush the last material of the file.
    mtl_name.append(fn)
    mtl_id.append(materials_add(6, albedo, add_texture(tx_nm)))
def load_obj(filename, d, tx, ty, tz):
    # Load a Wavefront .obj mesh into the global triangle buffers.
    #   filename: path to the .obj file
    #   d: uniform scale applied to vertex positions
    #   tx, ty, tz: translation applied after scaling
    # Fallback material used until the file selects one via usemtl/usemat.
    mtl = materials_add(0, ti.Vector([0.75, 0.75, 0.75]), 1.0)
    # OBJ face indices are 1-based, hence the `- 1` offsets.
    st_tri_cnt = tri_pos_cnt[None] - 1
    st_uv_cnt = tri_uv_cnt[None] - 1
    st_norm_cnt = tri_norm_cnt[None] - 1
    for line in open(filename, "r"):
        if (line.startswith('#')): continue
        values = line.split()
        if (not values): continue
        if (values[0] == 'v'):
            # Vertex position (scaled, then translated).
            v = list(map(float, values[1:4]))
            tri_pos[tri_pos_cnt[None]] = ti.Vector([d * v[0] + tx, d * v[1] + ty, d * v[2] + tz])
            tri_pos_cnt[None] += 1
        elif (values[0] == 'vn'):
            # Vertex normal.
            v = list(map(float, values[1:4]))
            tri_norm[tri_norm_cnt[None]] = ti.Vector([v[0], v[1], v[2]])
            tri_norm_cnt[None] += 1
        elif (values[0] == 'vt'):
            # Texture coordinate.
            v = list(map(float, values[1:3]))
            tri_uv[tri_uv_cnt[None]] = ti.Vector([v[0], v[1]])
            tri_uv_cnt[None] += 1
        elif values[0] in ('usemtl', 'usemat'):
            mtl = mtl_id[mtl_name.index(values[1])]
        elif (values[0] == 'mtllib'):
            load_mtl(values[1])
        elif (values[0] == 'f'):
            # Face entries are v, v/vt or v/vt/vn; missing indices become -1.
            face = []
            uv = []
            norm = []
            for v in values[1:]:
                w = v.split('/')
                face.append(w[0])
                if (len(w) > 1): uv.append(w[1])
                if (len(w) > 2): norm.append(w[2])
            f = list(map(int, face))
            if (len(norm) == 0):
                if (len(uv) == 0):
                    world.add(3, tri_id(f[0] + st_tri_cnt, f[1] + st_tri_cnt, f[2] + st_tri_cnt, -1, -1, -1, -1, -1, -1, mtl))
                else:
                    uuvv = list(map(int, uv))
                    world.add(3, tri_id(f[0] + st_tri_cnt, f[1] + st_tri_cnt, f[2] + st_tri_cnt, -1, -1, -1, uuvv[0] + st_uv_cnt, uuvv[1] + st_uv_cnt, uuvv[2] + st_uv_cnt, mtl))
            else:
                nm = list(map(int, norm))
                uuvv = list(map(int, uv))
                world.add(3, tri_id(f[0] + st_tri_cnt, f[1] + st_tri_cnt, f[2] + st_tri_cnt, nm[0] + st_norm_cnt, nm[1] + st_norm_cnt, nm[2] + st_norm_cnt, uuvv[0] + st_uv_cnt, uuvv[1] + st_uv_cnt, uuvv[2] + st_uv_cnt, mtl))
def gen_objects():
    # Build the demo scene: three spheres, an emissive quad, a box of
    # axis-aligned wall/floor/ceiling quads, and the 'assets/bunny.obj' mesh.
    material_ground = materials_add(4, ti.Vector([0.2, 0.3, 0.1]), 1.0)
    material_left_wall = materials_add(0, ti.Vector([0.0, 0.6, 0.0]), 1.0)
    material_right_wall = materials_add(0, ti.Vector([0.6, 0.0, 0.0]), 1.0)
    material_center = materials_add(6, ti.Vector([1.0, 1.0, 1.0]), add_texture('earthmap.jpg'))
    material_left = materials_add(2, ti.Vector([1.0, 1.0, 1.0]), 1.5)
    material_right = materials_add(1, ti.Vector([0.6, 0.8, 0.8]), 0.2)
    material_light = materials_add(3, ti.Vector([10.0, 10.0, 10.0]), 1.0)
    # Three spheres (the centre one carries the earth texture).
    world.add(1, sphere(ti.Vector([0, -0.2, -1.5]), 0.3, material_center))
    world.add(1, sphere(ti.Vector([0.7, 0.0, -0.5]), 0.5, material_left))
    world.add(1, sphere(ti.Vector([-0.8, 0.2, -1.0]), 0.7, material_right))
    # Emissive quad just below the ceiling (y = 2.49), normals facing down.
    tri_pos[0] = ti.Vector([0.5, 2.49, -1.0])
    tri_pos[1] = ti.Vector([0.5, 2.49, 0.0])
    tri_pos[2] = ti.Vector([-0.5, 2.49, -1.0])
    tri_pos[3] = ti.Vector([-0.5, 2.49, 0.0])
    tri_norm[0] = ti.Vector([0, -1.0, 0])
    tri_norm[1] = ti.Vector([0, -1.0, 0])
    tri_norm[2] = ti.Vector([0, -1.0, 0])
    tri_norm[3] = ti.Vector([0, -1.0, 0])
    world.add(3, tri_id(0, 1, 2, 0, 1, 2, 0, 1, 2, material_light))
    world.add(3, tri_id(1, 2, 3, 1, 2, 3, 1, 2, 3, material_light))
    # Ceiling quad (y = 2.5), normals facing down.
    tri_pos[4] = ti.Vector([1.5, 2.5, -2])
    tri_pos[5] = ti.Vector([1.5, 2.5, 1])
    tri_pos[6] = ti.Vector([-1.5, 2.5, -2])
    tri_pos[7] = ti.Vector([-1.5, 2.5, 1])
    tri_norm[4] = ti.Vector([0, -1.0, 0])
    tri_norm[5] = ti.Vector([0, -1.0, 0])
    tri_norm[6] = ti.Vector([0, -1.0, 0])
    tri_norm[7] = ti.Vector([0, -1.0, 0])
    world.add(3, tri_id(4, 5, 6, 4, 5, 6, 4, 5, 6, material_ground))
    world.add(3, tri_id(5, 6, 7, 5, 6, 7, 5, 6, 7, material_ground))
    # Wall quad at z = 1, normals facing -z.
    tri_pos[8] = ti.Vector([1.5, -0.5, 1])
    tri_pos[9] = ti.Vector([1.5, 2.5, 1])
    tri_pos[10] = ti.Vector([-1.5, -0.5, 1])
    tri_pos[11] = ti.Vector([-1.5, 2.5, 1])
    tri_norm[8] = ti.Vector([0, 0, -1.0])
    tri_norm[9] = ti.Vector([0, 0, -1.0])
    tri_norm[10] = ti.Vector([0, 0, -1.0])
    tri_norm[11] = ti.Vector([0, 0, -1.0])
    world.add(3, tri_id(8, 9, 10, 8, 9, 10, 8, 9, 10, material_ground))
    world.add(3, tri_id(9, 10, 11, 9, 10, 11, 9, 10, 11, material_ground))
    # Floor quad (y = -0.5), normals facing up.
    tri_pos[12] = ti.Vector([1.5, -0.5, -2])
    tri_pos[13] = ti.Vector([1.5, -0.5, 1])
    tri_pos[14] = ti.Vector([-1.5, -0.5, -2])
    tri_pos[15] = ti.Vector([-1.5, -0.5, 1])
    tri_norm[12] = ti.Vector([0, 1.0, 0])
    tri_norm[13] = ti.Vector([0, 1.0, 0])
    tri_norm[14] = ti.Vector([0, 1.0, 0])
    tri_norm[15] = ti.Vector([0, 1.0, 0])
    world.add(3, tri_id(12, 13, 14, 12, 13, 14, 12, 13, 14, material_ground))
    world.add(3, tri_id(13, 14, 15, 13, 14, 15, 13, 14, 15, material_ground))
    # Side wall at x = 1.5, normals facing -x.
    tri_pos[16] = ti.Vector([1.5, 2.5, -2])
    tri_pos[17] = ti.Vector([1.5, 2.5, 1])
    tri_pos[18] = ti.Vector([1.5, -0.5, -2])
    tri_pos[19] = ti.Vector([1.5, -0.5, 1])
    tri_norm[16] = ti.Vector([-1.0, 0.0, 0])
    tri_norm[17] = ti.Vector([-1.0, 0.0, 0])
    tri_norm[18] = ti.Vector([-1.0, 0.0, 0])
    tri_norm[19] = ti.Vector([-1.0, 0.0, 0])
    world.add(3, tri_id(16, 17, 18, 16, 17, 18, 16, 17, 18, material_left_wall))
    world.add(3, tri_id(17, 18, 19, 17, 18, 19, 17, 18, 19, material_left_wall))
    # Side wall at x = -1.5, normals facing +x.
    tri_pos[20] = ti.Vector([-1.5, 2.5, -2])
    tri_pos[21] = ti.Vector([-1.5, 2.5, 1])
    tri_pos[22] = ti.Vector([-1.5, -0.5, -2])
    tri_pos[23] = ti.Vector([-1.5, -0.5, 1])
    tri_norm[20] = ti.Vector([1.0, 0.0, 0])
    tri_norm[21] = ti.Vector([1.0, 0.0, 0])
    tri_norm[22] = ti.Vector([1.0, 0.0, 0])
    tri_norm[23] = ti.Vector([1.0, 0.0, 0])
    world.add(3, tri_id(20, 21, 22, 20, 21, 22, 20, 21, 22, material_right_wall))
    world.add(3, tri_id(21, 22, 23, 21, 22, 23, 21, 22, 23, material_right_wall))
    # Slots 0-23 were filled manually above; mesh data starts at index 24.
    tri_pos_cnt[None], tri_norm_cnt[None], tri_uv_cnt[None] = 24, 24, 24
    load_obj('assets/bunny.obj', 0.1, 0.5, -0.5, -1.5)
    print(tri_pos_cnt[None])
    print("Generated Objects")
# Render one frame: per pixel, trace `samples_per_pixel` jittered camera rays,
# accumulate radiance into `radience`, and store the gamma-corrected (sqrt)
# running average over `cnt` frames in `pixels`.
@ti.kernel
def paint(cnt : int):
    for i, j in pixels:
        col = ti.Vector.zero(float, 3)
        for k in range(samples_per_pixel):
            # Jitter the sample inside the pixel for anti-aliasing.
            (u, v) = ((i + ti.random()) / image_width, (j + ti.random()) / image_height)
            r = cam.get_ray(u, v)
            col += ray_color(r, world)
        col /= samples_per_pixel
        radience[i, j] += col
        pixels[i, j] = ti.sqrt(radience[i, j] / ti.cast(cnt, float))
# --- Module-level renderer setup ---
gui = ti.GUI("Tiny Ray Tracer", res = (image_width, image_height))
# `pixels` holds the displayed frame; `radience` accumulates raw radiance
# across frames so the display converges as more samples arrive.
pixels = ti.Vector.field(3, dtype = float, shape = (image_width, image_height))
radience = ti.Vector.field(3, dtype = float, shape = (image_width, image_height))
gen_perlin_noise()
gen_objects()
# Snapshot of the object count before BVH construction — presumably the
# number of "pure" primitives as opposed to BVH nodes; confirm usage.
pur_objs_cnt[None] = objs_cnt[None]
bvh_tree.append(bvh_node(0.0, 1.0))
build_bvh_tree(0, world.objects)
bvh_init(0, -1)
print("Built BVH")
cam = camera()
# Frame counter used by paint() for the running average.
cnt = 0
is_recording = False
result_dir = "./output"
video_manager = ti.VideoManager(output_dir = result_dir, framerate = 20, automatic_build = False)
while gui.running:
if gui.get_event(ti.GUI.PRESS):
if gui.event.key == ti.GUI.LMB:
x, y = gui.get_cursor_pos()
lookfrom[None][0] = x * 4.0 - 2.0
lookfrom[None][1] = y * 2.0 - 0.5
print("Lookfrom change to ", lookfrom[None])
look_v = ti.Vector([lookfrom[None][0] - lookat[None][0], lookfrom[None][1] - lookat[None][1], lookfrom[None][2] - lookat[None][2]])
dist_to_focus[None] = ti.sqrt(look_v.dot(look_v))
cnt = 0
radience.fill(0)
cam.reset_view()
elif gui.event.key == ti.GUI.LEFT:
look_v_t = ti.Vector([look_v[0] * ti.cos(look_alpha) - look_v[2] * ti.sin(look_alpha), look_v[1], look_v[0] * ti.sin(look_alpha) + look_v[2] * ti.cos(look_alpha)])
lookfrom[None][0], lookfrom[None][1], lookfrom[None][2] = look_v_t[0] + lookat[None][0], look_v_t[1] + lookat[None][1], look_v_t[2] + lookat[None][2]
print("Lookfrom change to ", lookfrom[None])
look_v = ti.Vector([lookfrom[None][0] - lookat[None][0], lookfrom[None][1] - lookat[None][1], lookfrom[None][2] - lookat[None][2]])
dist_to_focus[None] = ti.sqrt(look_v.dot(look_v))
cnt = 0
radience.fill(0)
cam.reset_view()
elif gui.event.key == ti.GUI.RIGHT:
look_v_t = ti.Vector([look_v[0] * ti.cos(look_alpha) + look_v[2] * ti.sin(look_alpha), look_v[1], - look_v[0] * ti.sin(look_alpha) + look_v[2] * ti.cos(look_alpha)])
lookfrom[None][0], lookfrom[None][1], lookfrom[None][2] = look_v_t[0] + lookat[None][0], look_v_t[1] + lookat[None][1], look_v_t[2] + lookat[None][2]
print("Lookfrom change to ", lookfrom[None])
look_v = ti.Vector([lookfrom[None][0] - lookat[None][0], lookfrom[None][1] - lookat[None][1], lookfrom[None][2] - lookat[None][2]])
dist_to_focus[None] = ti.sqrt(look_v.dot(look_v))
cnt = 0
radience.fill(0)
cam.reset_view()
elif gui.event.key == ti.GUI.UP:
tmp_xz = ti.sqrt(look_v[0] * look_v[0] + look_v[2] * look_v[2])
tmp_xz_t = ti.cos(look_alpha) * tmp_xz - ti.sin(look_alpha) * look_v[1]
look_v_t = ti.Vector([look_v[0] * tmp_xz_t / tmp_xz, ti.sin(look_alpha) * tmp_xz + ti.cos(look_alpha) * look_v[1], look_v[2] * tmp_xz_t / tmp_xz])
lookfrom[None][0], lookfrom[None][1], lookfrom[None][2] = look_v_t[0] + lookat[None][0], look_v_t[1] + lookat[None][1], look_v_t[2] + lookat[None][2]
print("Lookfrom change to ", lookfrom[None])
look_v = ti.Vector([lookfrom[None][0] - lookat[None][0], lookfrom[None][1] - lookat[None][1], lookfrom[None][2] - lookat[None][2]])
dist_to_focus[None] = ti.sqrt(look_v.dot(look_v))
cnt = 0
radience.fill(0)
cam.reset_view()
elif gui.event.key == ti.GUI.DOWN:
tmp_xz = ti.sqrt(look_v[0] * look_v[0] + look_v[2] * look_v[2])
tmp_xz_t = ti.cos(look_alpha) * tmp_xz + ti.sin(look_alpha) * look_v[1]
look_v_t = ti.Vector([look_v[0] * tmp_xz_t / tmp_xz, -ti.sin(look_alpha) * tmp_xz + ti.cos(look_alpha) | |
data = ReturnData(a_return)
return data.species
return None
class ReturnFactory(object, metaclass=abc.ABCMeta):
    '''
    An Abstract Factory interface that declares a method to create a Return.

    NOTE: the metaclass is declared in the class header; the previous
    `__metaclass__ = abc.ABCMeta` attribute was the Python 2 spelling and is
    ignored by Python 3, so @abc.abstractmethod was not actually enforced.
    '''
    @abc.abstractmethod
    def create_return(self, request, licence, selected_activity) -> Return:
        '''
        Method to create a Return.

        :param: request is an incoming client request.
        :param: licence is a WildlifeLicence.
        :param: selected_activity is a ApplicationSelectedActivity.
        '''
        pass
@singleton
class ReturnGenerator(ReturnFactory):
    '''
    A specialised ReturnFactory to generate a Return of different variants ie.
    ReturnSheet, ReturnData, ReturnQuestion. Guarantees resulting products are
    compatible.
    '''
    def __init__(self):
        pass
    def create_return(self, request, licence, selected_activity) -> Return:
        '''
        Method to create a Return.
        NOTE: Will create multiple Return depending on Application Conditions
        and return the last compliancy Return created.
        :param: request is an incoming client request.
        :param: licence is a WildlifeLicence.
        :param: selected_activity is a ApplicationSelectedActivity.
        :return: the_return the last compliancy Return created.
        '''
        import six
        from wildlifecompliance.components.applications.models import (
            ApplicationCondition,
        )
        # Shorthands for processing statuses and recurrence patterns.
        FUTURE = Return.RETURN_PROCESSING_STATUS_FUTURE
        DRAFT = Return.RETURN_PROCESSING_STATUS_DRAFT
        DUE = Return.RETURN_PROCESSING_STATUS_DUE
        OVERDUE = Return.RETURN_PROCESSING_STATUS_OVERDUE
        DISCARD = Return.RETURN_PROCESSING_STATUS_DISCARDED
        WEEKLY = ApplicationCondition.APPLICATION_CONDITION_RECURRENCE_WEEKLY
        MONTHLY = ApplicationCondition.APPLICATION_CONDITION_RECURRENCE_MONTHLY
        YEARLY = ApplicationCondition.APPLICATION_CONDITION_RECURRENCE_YEARLY
        # Previous application's returns; repopulated below for amendments.
        prev_ret = []
        def do_create_return(condition, a_date):
            '''
            An internal function to create the first return.
            :param condition is an Application Condition with Return Type.
            :param a_date is the due_date expected for the created return.
            :return first_return set to Draft for immediate update.
            '''
            already_generated = False
            try:
                # NOTE: Must be unique application conditions on the selected
                # activity otherwise first return not created.
                first_return = Return.objects.get(
                    condition=condition, due_date=a_date)
                already_generated = True
            except Return.DoesNotExist:
                first_return = Return.objects.create(
                    application=selected_activity.application,
                    due_date=a_date,
                    processing_status=DRAFT,
                    licence=licence,
                    condition=condition,
                    return_type=condition.return_type,
                )
            # Make first return editable (draft) for applicant but cannot
            # submit until due. Establish species list for first return.
            first_return.save()
            returns_utils = ReturnSpeciesUtility(first_return)
            # raw_specie_names is a list of names defined manually by the
            # licensing officer at the time of propose/issuance.
            raw_specie_names = returns_utils.get_raw_species_list_for(
                selected_activity
            )
            if not already_generated:
                returns_utils.set_species_list(raw_specie_names)
            # When first return generated is for a renewed application, discard
            # previous returns which are draft, due or overdue.
            if first_return.is_renewed():
                prev_app = selected_activity.application.previous_application
                prev_ret = prev_app.returns_application.all()
                c = condition
                do_discard_return(condition, prev_ret)
                # Returns from the previous application matching this
                # condition (the candidates whose data may be carried over).
                discard_returns = [
                    r for r in prev_ret
                    if r.condition.licence_activity == c.licence_activity
                    and r.condition.licence_purpose == c.licence_purpose
                    and r.condition.return_type == c.return_type
                    and r.condition.condition_text == c.condition_text
                    # and r.processing_status in [DISCARD]
                ]
                # When first Return is a Running Sheet copy discarded data.
                # ie. retain stock total.
                if first_return.has_sheet:
                    c = first_return.condition
                    returns = [
                        r for r in discard_returns
                        if r.condition.condition_text == c.condition_text
                    ]
                    if returns:
                        util = ReturnSpeciesUtility(first_return)
                        util.copy_sheet_species(returns[0])
            # log action
            if not already_generated:
                first_return.log_user_action(
                    ReturnUserAction.ACTION_CREATE.format(
                        first_return.lodgement_number,
                        first_return.condition.condition,
                        first_return.condition.licence_activity.short_name,
                    ), request
                )
            return first_return
        def do_amend_return(condition, amend_return_list):
            '''
            An internal function to amend a previously created return.
            :param Condition is an Application Condition with Return Type.
            :return Return that was amended (the last matching DRAFT one),
                or None when no matching previous return is in Draft.
            '''
            amended = None
            previous_returns = [
                r for r in amend_return_list
                if r.condition.licence_activity == condition.licence_activity
                and r.condition.licence_purpose == condition.licence_purpose
                and r.condition.return_type == condition.return_type
                # and r.condition.due_date == condition.due_date
                and r.condition.condition_text == condition.condition_text
            ] # previous returns include Future ones.
            for previous in previous_returns:
                # Re-point each previous return at the amended application.
                previous.application = selected_activity.application
                previous.condition = condition
                previous.save()
                if (previous.processing_status == DRAFT):
                    amended = previous
            return amended
        def do_discard_return(condition, return_list):
            '''
            An internal function to discard a previously created return.
            :param Condition is an Application Condition with Return Type.
            :return True when at least one matching Return was discarded.
            '''
            discarded = False
            discardable = [
                r for r in return_list
                if r.condition.licence_activity == condition.licence_activity
                and r.condition.licence_purpose == condition.licence_purpose
                and r.condition.return_type == condition.return_type
                and r.condition.condition_text == condition.condition_text
                # and r.condition.due_date == condition.due_date
            ] # previous returns include Future ones.
            for r in discardable:
                discarded = True
                command = DiscardRequestCommand(request, condition, r)
                command.execute()
            return discarded
        def do_create_return_recurrence(condition, a_date):
            '''
            An internal function to create FUTURE Return.
            :param condition is an Application Condition with Return Type.
            :param a_date is the due_date expected for the created return.
            '''
            # Set the recurrences as future returns.
            # NOTE: species list will be determined and built when the
            # return is due.
            a_return = None
            if condition.recurrence:
                while a_date < licence_expiry:
                    # set the due date for recurrence period.
                    for x in range(condition.recurrence_schedule):
                        # Weekly
                        if condition.recurrence_pattern == WEEKLY:
                            a_date += timedelta(weeks=1)
                        # Monthly
                        elif condition.recurrence_pattern == MONTHLY:
                            a_date += timedelta(weeks=4)
                        # Yearly
                        elif condition.recurrence_pattern == YEARLY:
                            a_date += timedelta(days=365)
                        if a_date <= licence_expiry:
                            # Create the Return.
                            try:
                                a_return = Return.objects.get(
                                    condition=condition, due_date=a_date)
                            except Return.DoesNotExist:
                                a_return = Return.objects.create(
                                    application=selected_activity.application,
                                    due_date=a_date,
                                    processing_status=FUTURE,
                                    licence=licence,
                                    condition=condition,
                                    return_type=condition.return_type,
                                )
            return a_return
        '''
        Returns are generated at issuing; expiry_date may not be set yet.
        correct expiry is on the licence purpose.
        '''
        try:
            the_return = None
            licence_expiry = selected_activity.get_expiry_date()
            # Expiry may arrive as a 'YYYY-MM-DD' string; normalize to date.
            licence_expiry = datetime.datetime.strptime(
                licence_expiry, "%Y-%m-%d"
            ).date() if isinstance(
                licence_expiry, six.string_types
            ) else licence_expiry
            today = timezone.now().date()
            timedelta = datetime.timedelta
            excl_purpose = []
            if selected_activity.application.is_amendment():
                prev_app = selected_activity.application.previous_application
                prev_ret = prev_app.returns_application.exclude(
                    processing_status=DISCARD,
                )
                # discard previous returns for ApplicationConditions which have
                # been removed from the Amendment Application.
                act_id = selected_activity.licence_activity_id
                prev_conditions = [
                    r.condition for r in prev_ret.filter(
                        condition__licence_activity_id=act_id
                    )
                ]
                prev_conditions_ids = [
                    c.standard_condition_id for c in prev_conditions
                ]
                next_conditions = [
                    c for c in selected_activity.application.conditions.filter(
                        licence_activity_id=act_id
                    ).exclude(
                        return_type=None
                    )
                ]
                next_conditions_ids = [
                    c.standard_condition_id for c in next_conditions
                ]
                # Conditions present before but not after the amendment.
                discard = set(prev_conditions_ids) - set(next_conditions_ids)
                discardable = [
                    c for c in prev_conditions
                    if c.standard_condition_id in list(discard)
                ]
                for condition in discardable:
                    do_discard_return(condition, prev_ret)
            # create or amend Return for each Condition.
            for condition in selected_activity.get_condition_list():
                if condition.return_type and condition.due_date \
                        and condition.due_date >= today:
                    current_date = condition.due_date
                    the_return = None
                    if selected_activity.application.is_amendment():
                        the_return = do_amend_return(condition, prev_ret)
                    if not the_return:
                        the_return = do_create_return(condition, current_date)
                        do_create_return_recurrence(condition, current_date)
                    excl_purpose.append(condition.licence_purpose)
            # discard/delete previous Returns from removed Conditions.
            if selected_activity.application.is_amendment():
                la = selected_activity.licence_activity
                discardable = [  # a subset excluding amended Returns.
                    r for r in prev_ret
                    if r.condition.licence_purpose not in excl_purpose
                    and r.condition.licence_activity == la
                    and r.processing_status in [DUE, OVERDUE, DRAFT]
                ]
                for r in discardable:
                    command = DiscardRequestCommand(request, r.condition, r)
                    command.execute()
        except Exception as e:
            logger.error('{0} {1} - {2}'.format(
                'ReturnService.generate_return_request() ActivityID:',
                selected_activity,
                e
            ))
            raise
        return the_return
class ReturnETL(object):
    '''
    Execution context that holds a single ReturnETL strategy and delegates
    processing to it.
    '''
    strategy = None  # the composed strategy instance.

    def __init__(self, strategy):
        '''Store the strategy that `process` will delegate to.'''
        self.strategy = strategy

    def process(self):
        '''Run the configured strategy's algorithm.'''
        self.strategy.do_algorithm()
class ReturnETLStrategy(object, metaclass=abc.ABCMeta):
    '''
    An interface for ReturnETL strategies.

    NOTE: the metaclass is declared in the class header; the previous
    `__metaclass__ = abc.ABCMeta` attribute was the Python 2 spelling and is
    ignored by Python 3, so the interface was not actually enforced.
    '''
    # Default log file for ETL task output.
    LOGFILE = 'logs/etl_tasks.log'
    logger_title = 'ReturnService.ETL()'

    # BUG FIX: was @abc.abstractclassmethod (deprecated, and wrong here —
    # implementations such as CleanseReturnSheet define do_algorithm as an
    # instance method); use plain @abc.abstractmethod.
    @abc.abstractmethod
    def do_algorithm(self):
        '''
        Executes the routine for this strategy.
        '''
        pass
class CleanseReturnSheet(ReturnETLStrategy):
'''
A real-time ReturnETLStrategy to remove dirty Return running sheets.
NOTE: Output is not logged but printed. Apply pipe tee statement to log.
'''
return_ids = None # list of specific returns.
def __init__(self, real_time, returns=None):
self.return_ids = returns
self.real_time = real_time
def do_algorithm(self):
'''
Process each Return running sheet ensuring data integrity.
'''
import datetime
log = '{0} {1} {2}'.format(
datetime.datetime.now(), self.logger_title, 'Commencing cleansing.'
)
print(log)
returns = Return.objects.filter(
return_type__data_format=ReturnType.FORMAT_SHEET
)
if self.return_ids: # filter returns for specific ids.
returns = [r for r in returns if r.id in self.return_ids]
duplicate_cnt = 0
doa_cnt = 0
for r in returns:
utils = ReturnSpeciesUtility(r)
for specie in r.return_type.regulated_species.all():
name = utils.get_id_from_species_name(specie.species_name)
try:
tables = [t for t in r.returntable_set.filter(name=name)]
except AttributeError:
continue
first = tables[0]
'''
1. Remove any duplicate species existing in tables.
'''
for idx, t in enumerate(tables, start=0):
if idx == 0:
continue
log = '{0} ReturnID: {1} {2} ReturnTableID: {3}'.format(
self.logger_title,
t.ret_id,
'Deleted duplicate species table.',
t.id,
)
duplicate_cnt += 1
if self.real_time:
print(log)
t.delete()
'''
2. Default the Date of Activity to the Date of Entry.
'''
for r in first.returnrow_set.all():
updated = False
doa = datetime.datetime.fromtimestamp(
r.data['date'] / 1000).strftime('%d/%m/%Y')
try:
if r.data['doa'] == '':
r.data['doa'] = doa
updated = True
except KeyError:
r.data['doa'] = doa
updated = True
if updated:
log = '{0} ReturnID: {1} {2} ReturnRowID: {3}'.format(
self.logger_title,
first.ret_id,
'Added Date of Activity.',
r.id,
)
doa_cnt += 1
if self.real_time:
print(log)
r.save()
log | |
here are the same
as those passed to "__prepare__").
This class object is the one that will be referenced by the zero-
argument form of "super()". "__class__" is an implicit closure
reference created by the compiler if any methods in a class body refer
to either "__class__" or "super". This allows the zero argument form
of "super()" to correctly identify the class being defined based on
lexical scoping, while the class or instance that was used to make the
current call is identified based on the first argument passed to the
method.
**CPython implementation detail:** In CPython 3.6 and later, the
"__class__" cell is passed to the metaclass as a "__classcell__" entry
in the class namespace. If present, this must be propagated up to the
"type.__new__" call in order for the class to be initialised
correctly. Failing to do so will result in a "DeprecationWarning" in
Python 3.6, and a "RuntimeWarning" in the future.
When using the default metaclass "type", or any metaclass that
ultimately calls "type.__new__", the following additional
customisation steps are invoked after creating the class object:
* first, "type.__new__" collects all of the descriptors in the class
namespace that define a "__set_name__()" method;
* second, all of these "__set_name__" methods are called with the
class being defined and the assigned name of that particular
descriptor; and
* finally, the "__init_subclass__()" hook is called on the immediate
parent of the new class in its method resolution order.
After the class object is created, it is passed to the class
decorators included in the class definition (if any) and the resulting
object is bound in the local namespace as the defined class.
When a new class is created by "type.__new__", the object provided as
the namespace parameter is copied to a new ordered mapping and the
original object is discarded. The new copy is wrapped in a read-only
proxy, which becomes the "__dict__" attribute of the class object.
See also:
**PEP 3135** - New super
Describes the implicit "__class__" closure reference
Metaclass example
-----------------
The potential uses for metaclasses are boundless. Some ideas that have
been explored include logging, interface checking, automatic
delegation, automatic property creation, proxies, frameworks, and
automatic resource locking/synchronization.
Here is an example of a metaclass that uses an
"collections.OrderedDict" to remember the order that class variables
are defined:
class OrderedClass(type):
@classmethod
def __prepare__(metacls, name, bases, **kwds):
return collections.OrderedDict()
def __new__(cls, name, bases, namespace, **kwds):
result = type.__new__(cls, name, bases, dict(namespace))
result.members = tuple(namespace)
return result
class A(metaclass=OrderedClass):
def one(self): pass
def two(self): pass
def three(self): pass
def four(self): pass
>>> A.members
('__module__', 'one', 'two', 'three', 'four')
When the class definition for *A* gets executed, the process begins
with calling the metaclass's "__prepare__()" method which returns an
empty "collections.OrderedDict". That mapping records the methods and
attributes of *A* as they are defined within the body of the class
statement. Once those definitions are executed, the ordered dictionary
is fully populated and the metaclass's "__new__()" method gets
invoked. That method builds the new type and it saves the ordered
dictionary keys in an attribute called "members".
Customizing instance and subclass checks
========================================
The following methods are used to override the default behavior of the
"isinstance()" and "issubclass()" built-in functions.
In particular, the metaclass "abc.ABCMeta" implements these methods in
order to allow the addition of Abstract Base Classes (ABCs) as
"virtual base classes" to any class or type (including built-in
types), including other ABCs.
class.__instancecheck__(self, instance)
Return true if *instance* should be considered a (direct or
indirect) instance of *class*. If defined, called to implement
"isinstance(instance, class)".
class.__subclasscheck__(self, subclass)
Return true if *subclass* should be considered a (direct or
indirect) subclass of *class*. If defined, called to implement
"issubclass(subclass, class)".
Note that these methods are looked up on the type (metaclass) of a
class. They cannot be defined as class methods in the actual class.
This is consistent with the lookup of special methods that are called
on instances, only in this case the instance is itself a class.
See also:
**PEP 3119** - Introducing Abstract Base Classes
Includes the specification for customizing "isinstance()" and
"issubclass()" behavior through "__instancecheck__()" and
"__subclasscheck__()", with motivation for this functionality in
the context of adding Abstract Base Classes (see the "abc"
module) to the language.
Emulating callable objects
==========================
object.__call__(self[, args...])
Called when the instance is "called" as a function; if this method
is defined, "x(arg1, arg2, ...)" is a shorthand for
"x.__call__(arg1, arg2, ...)".
Emulating container types
=========================
The following methods can be defined to implement container objects.
Containers usually are sequences (such as lists or tuples) or mappings
(like dictionaries), but can represent other containers as well. The
first set of methods is used either to emulate a sequence or to
emulate a mapping; the difference is that for a sequence, the
allowable keys should be the integers *k* for which "0 <= k < N" where
*N* is the length of the sequence, or slice objects, which define a
range of items. It is also recommended that mappings provide the
methods "keys()", "values()", "items()", "get()", "clear()",
"setdefault()", "pop()", "popitem()", "copy()", and "update()"
behaving similar to those for Python's standard dictionary objects.
The "collections" module provides a "MutableMapping" abstract base
class to help create those methods from a base set of "__getitem__()",
"__setitem__()", "__delitem__()", and "keys()". Mutable sequences
should provide methods "append()", "count()", "index()", "extend()",
"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python
standard list objects. Finally, sequence types should implement
addition (meaning concatenation) and multiplication (meaning
repetition) by defining the methods "__add__()", "__radd__()",
"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described
below; they should not define other numerical operators. It is
recommended that both mappings and sequences implement the
"__contains__()" method to allow efficient use of the "in" operator;
for mappings, "in" should search the mapping's keys; for sequences, it
should search through the values. It is further recommended that both
mappings and sequences implement the "__iter__()" method to allow
efficient iteration through the container; for mappings, "__iter__()"
should be the same as "keys()"; for sequences, it should iterate
through the values.
object.__len__(self)
Called to implement the built-in function "len()". Should return
the length of the object, an integer ">=" 0. Also, an object that
doesn't define a "__bool__()" method and whose "__len__()" method
returns zero is considered to be false in a Boolean context.
**CPython implementation detail:** In CPython, the length is
required to be at most "sys.maxsize". If the length is larger than
"sys.maxsize" some features (such as "len()") may raise
"OverflowError". To prevent raising "OverflowError" by truth value
testing, an object must define a "__bool__()" method.
object.__length_hint__(self)
Called to implement "operator.length_hint()". Should return an
estimated length for the object (which may be greater or less than
the actual length). The length must be an integer ">=" 0. This
method is purely an optimization and is never required for
correctness.
New in version 3.4.
Note: Slicing is done exclusively with the following three methods.
A call like
a[1:2] = b
is translated to
a[slice(1, 2, None)] = b
and so forth. Missing slice items are always filled in with "None".
object.__getitem__(self, key)
Called to implement evaluation of "self[key]". For sequence types,
the accepted keys should be integers and slice objects. Note that
the special interpretation of negative indexes (if the class wishes
to emulate a sequence type) is up to the "__getitem__()" method. If
*key* is of an inappropriate type, "TypeError" may be raised; if of
a value outside the set of indexes for the sequence (after any
special interpretation of negative values), "IndexError" should be
raised. For mapping types, if *key* is missing (not in the
container), "KeyError" should be raised.
Note: "for" loops expect that an "IndexError" will be raised for
illegal indexes to allow proper detection of the end of the
sequence.
object.__missing__(self, key)
Called by "dict"."__getitem__()" to implement "self[key]" for dict
subclasses when key is not in the dictionary.
object.__setitem__(self, key, value)
Called to implement assignment to "self[key]". Same note as for
"__getitem__()". This should only be implemented for mappings if
the objects support changes to the values for keys, or if new keys
can be added, or for sequences if elements can be replaced. The
same exceptions should be raised for improper *key* values as for
the "__getitem__()" method.
object.__delitem__(self, key)
Called to implement deletion of "self[key]". Same note as for
"__getitem__()". This should only be | |
<gh_stars>10-100
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# from optimize.py module by <NAME>
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************
#
# Modified by <NAME> to support bounded minimization
"""
Downhill simplex optimizer.
"""
from __future__ import print_function
__all__ = ['simplex']
__docformat__ = "restructuredtext en"
__version__ = "0.7"
import numpy as np
def wrap_function(function, bounds):
    """Wrap *function* so that calls are counted and box bounds enforced.

    Returns (ncalls, wrapper) where ncalls is a one-element list holding the
    running call count.  When *bounds* = (lo, hi) is given, the wrapper
    returns np.inf for any point outside the box instead of evaluating.
    """
    ncalls = [0]
    if bounds is None:
        def function_wrapper(x):
            ncalls[0] += 1
            return function(x)
    else:
        lo, hi = (np.asarray(v) for v in bounds)

        def function_wrapper(x):
            ncalls[0] += 1
            out_of_bounds = np.any((x < lo) | (x > hi))
            return np.inf if out_of_bounds else function(x)
    return ncalls, function_wrapper
class Result(object):
    """
    Results from the fit.

    x : ndarray
        Best parameter set
    fx : float
        Best value
    iters : int
        Number of iterations
    calls : int
        Number of function calls
    status : boolean
        True if the fit completed successfully, false if terminated early
        because of too many iterations.
    """
    def __init__(self, x, fx, iters, calls, status):
        self.x = x
        self.fx = fx
        self.iters = iters
        self.calls = calls
        self.status = status

    def __str__(self):
        state = "Converged" if self.status else "Aborted"
        return ("%s with %g at %s after %d calls"
                % (state, self.fx, self.x, self.calls))
def dont_abort():
    """Default abort test for simplex(): never requests an early stop."""
    return False
def simplex(f, x0=None, bounds=None, radius=0.05,
            xtol=1e-4, ftol=1e-4, maxiter=None,
            update_handler=None, abort_test=dont_abort):
    """
    Minimize a function using the Nelder-Mead downhill simplex algorithm.

    This optimizer is also known as Amoeba (from Numerical Recipes) and
    the Nelder-Mead simplex algorithm.  This is not the simplex algorithm
    for solving constrained linear systems.

    Downhill simplex is a robust derivative free algorithm for finding
    minima.  It proceeds by choosing a set of points (the simplex) forming
    an n-dimensional triangle, and transforming that triangle so that the
    worst vertex is improved, either by stretching, shrinking or reflecting
    it about the center of the triangle.  This algorithm is not known for
    its speed, but for its simplicity and robustness, and is a good algorithm
    to start your problem with.

    *Parameters*:

        f : callable f(x,*args)
            The objective function to be minimized.
        x0 : ndarray
            Initial guess.
        bounds : (ndarray,ndarray) or None
            Bounds on the parameter values for the function.
        radius: float
            Size of the initial simplex.  For bounded parameters (those
            which have finite lower and upper bounds), radius is clipped
            to a value in (0,0.5] representing the portion of the
            range to use as the size of the initial simplex.

    *Returns*: Result (`park.simplex.Result`)

        x : ndarray
            Parameter that minimizes function.
        fx : float
            Value of function at minimum: ``fopt = func(xopt)``.
        iters : int
            Number of iterations performed.
        calls : int
            Number of function calls made.
        status : boolean
            True if the fit converged, false if terminated early because of
            too many iterations or an external abort request.

    *Other Parameters*:

        xtol : float
            Relative error in xopt acceptable for convergence.
        ftol : number
            Relative error in func(xopt) acceptable for convergence.
        maxiter : int=200*N
            Maximum number of iterations to perform.
        update_handler : callable
            Called after each iteration, as callback(k,n,xk,fxk),
            where k is the current iteration, n is the maximum
            iteration, xk is the simplex and fxk is the value of
            the simplex vertices.  xk[0],fxk[0] is the current best.
        abort_test : callable
            Called after each iteration, as callback(), to see if
            an external process has requested stop.

    *Notes*

        Uses a Nelder-Mead simplex algorithm to find the minimum of
        a function of one or more variables.
    """
    fcalls, func = wrap_function(f, bounds)
    # np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float) is the
    # documented equivalent and behaves identically here.
    x0 = np.asarray(x0, dtype=float).flatten()
    N = len(x0)
    rank = len(x0.shape)  # always 1 after flatten(); guard kept for safety
    if not -1 < rank < 2:
        raise ValueError("Initial guess must be a scalar or rank-1 sequence.")
    if maxiter is None:
        maxiter = N * 200
    # Standard Nelder-Mead coefficients: reflection (rho), expansion (chi),
    # contraction (psi) and shrink (sigma).
    rho = 1
    chi = 2
    psi = 0.5
    sigma = 0.5
    if rank == 0:
        sim = np.zeros((N + 1,), dtype=x0.dtype)
    else:
        sim = np.zeros((N + 1, N), dtype=x0.dtype)
    fsim = np.zeros((N + 1,), float)
    sim[0] = x0
    fsim[0] = func(x0)
    # Initial simplex has vertices at x0 and at x0 + j*radius for each
    # unit vector j.  Radius is a percentage change from the initial value,
    # or just the radius if the initial value is 0.  For bounded problems,
    # the radius is a percentage of the bounded range in dimension j.
    val = x0 * (1 + radius)
    val[val == 0] = radius
    if bounds is not None:
        radius = np.clip(radius, 0, 0.5)
        lo, hi = [np.asarray(v) for v in bounds]
        # Keep the initial simplex inside the bounds
        x0 = np.select([x0 < lo, x0 > hi], [lo, hi], x0)
        bounded = ~np.isinf(lo) & ~np.isinf(hi)
        val[bounded] = x0[bounded] + (hi[bounded] - lo[bounded]) * radius
        val = np.select([val < lo, val > hi], [lo, hi], val)
        # If the initial point was at or beyond an upper bound, then bounds
        # projection will put x0 and x0+j*radius at the same point.  We
        # need to detect these collisions and reverse the radius step
        # direction when such collisions occur.  The only time the collision
        # can occur at the lower bound is when upper and lower bound are
        # identical.  In that case, we are already done.
        collision = val == x0
        if np.any(collision):
            reverse = x0 * (1 - radius)
            reverse[reverse == 0] = -radius
            reverse[bounded] = x0[bounded] - \
                (hi[bounded] - lo[bounded]) * radius
            val[collision] = reverse[collision]
        # Make tolerance relative for bounded parameters
        tol = np.ones(x0.shape) * xtol
        tol[bounded] = (hi[bounded] - lo[bounded]) * xtol
        xtol = tol
    # Compute values at the simplex vertices
    for k in range(0, N):
        y = x0 + 0
        y[k] = val[k]
        sim[k + 1] = y
        fsim[k + 1] = func(y)
    # sort so sim[0,:] has the lowest function value
    ind = np.argsort(fsim)
    fsim = np.take(fsim, ind, 0)
    sim = np.take(sim, ind, 0)
    iterations = 1
    aborted = False
    while iterations < maxiter:
        # Converged when the simplex is both small (xtol) and flat (ftol).
        if np.all(abs(sim[1:] - sim[0]) <= xtol) \
                and max(abs(fsim[0] - fsim[1:])) <= ftol:
            break
        xbar = np.sum(sim[:-1], 0) / N
        # Reflect the worst vertex through the centroid of the others.
        xr = (1 + rho) * xbar - rho * sim[-1]
        fxr = func(xr)
        doshrink = 0
        if fxr < fsim[0]:
            # Reflection beat the current best: try expanding further.
            xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
            fxe = func(xe)
            if fxe < fxr:
                sim[-1] = xe
                fsim[-1] = fxe
            else:
                sim[-1] = xr
                fsim[-1] = fxr
        else:  # fsim[0] <= fxr
            if fxr < fsim[-2]:
                sim[-1] = xr
                fsim[-1] = fxr
            else:  # fxr >= fsim[-2]
                # Perform contraction
                if fxr < fsim[-1]:
                    xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
                    fxc = func(xc)
                    if fxc <= fxr:
                        sim[-1] = xc
                        fsim[-1] = fxc
                    else:
                        doshrink = 1
                else:
                    # Perform an inside contraction
                    xcc = (1 - psi) * xbar + psi * sim[-1]
                    fxcc = func(xcc)
                    if fxcc < fsim[-1]:
                        sim[-1] = xcc
                        fsim[-1] = fxcc
                    else:
                        doshrink = 1
            if doshrink:
                # Shrink every vertex toward the current best.
                for j in range(1, N + 1):
                    sim[j] = sim[0] + sigma * (sim[j] - sim[0])
                    fsim[j] = func(sim[j])
        ind = np.argsort(fsim)
        sim = np.take(sim, ind, 0)
        fsim = np.take(fsim, ind, 0)
        if update_handler is not None:
            update_handler(iterations, maxiter, sim, fsim)
        iterations += 1
        if abort_test():
            aborted = True
            break
    # BUG FIX: the success flag was inverted (status=0 on convergence,
    # status=1 on hitting maxiter), which made Result.__str__ report
    # "Aborted" for successful fits and "Converged" for failed ones.
    # An external abort is also reported as unsuccessful now.
    status = 1 if (iterations < maxiter and not aborted) else 0
    res = Result(sim[0], fsim[0], iterations, fcalls[0], status)
    # Offer a random non-worst vertex as a restart point for the caller.
    res.next_start = sim[np.random.randint(N)]
    return res
def main():
import time
def rosen(x): # The Rosenbrock function
return np.sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0, axis=0)
x0 = [0.8, 1.2, 0.7]
print("Nelder-Mead Simplex")
print("===================")
start = time.time()
x = simplex(rosen, x0)
print(x)
print("Time:", time.time() - start)
x0 = [0] * 3
print("Nelder-Mead Simplex")
print("===================")
print("starting at zero")
start = time.time()
x = simplex(rosen, x0)
print(x)
print("Time:", time.time() - start)
x0 = [0.8, 1.2, 0.7]
lo, hi = [0] * 3, [1] * 3
print("Bounded Nelder-Mead Simplex")
print("===========================")
start | |
<filename>pax/plugins/io/WaveformSimulator.py
"""
Plugins to interface with the integrated waveform simulator (FaX)
This is I/O stuff only: truth file writing, instruction reading, etc.
There is no physics here, all that is in pax.simulation.
Added by Qing: sorry, a little bit of physics (the S2 afterpulses) is
implemented here as well, because that is easier.
"""
import os
import csv
import numpy as np
import pandas
from pax import plugin, units, utils
try:
import ROOT
import root_pandas # noqa
have_root = True
except ImportError:
print("You don't have ROOT or root_pandas, root truth file output is disabled")
have_root = False
def uniform_circle_rv(radius, n_samples=None):
    """Sample points (x, y) uniformly from a disc of the given radius.

    Returns a single (x, y) pair when n_samples is None, otherwise two
    lists (xs, ys) of length n_samples.  Uses rejection sampling from the
    bounding square, drawing x then y on each attempt.
    """
    single = n_samples is None
    count = 1 if single else n_samples
    xs, ys = [], []
    for _ in range(count):
        # Keep drawing from the square until the point lands in the disc.
        while True:
            px = np.random.uniform(-radius, radius)
            py = np.random.uniform(-radius, radius)
            if px ** 2 + py ** 2 <= radius ** 2:
                break
        xs.append(px)
        ys.append(py)
    if single:
        return xs[0], ys[0]
    return xs, ys
class WaveformSimulator(plugin.InputPlugin):
    """Common input plugin for waveform simulator plugins. Do not use directly, won't work.
    Takes care of truth file writing as well.

    Subclasses must implement get_instructions_for_next_event(); everything
    else (truth bookkeeping, S1/S2 queuing, afterpulse generation, event
    assembly) is shared here.
    """
    def startup(self):
        """Initialize the truth-peak accumulator and grab the simulator."""
        self.all_truth_peaks = []
        self.simulator = self.processor.simulator
        # The simulator's internal config was already initialized in the core
    def shutdown(self):
        """Write all accumulated truth peaks to CSV (and ROOT, if available)."""
        self.log.debug("Write the truth peaks to %s" % self.config['truth_file_name'])
        output = pandas.DataFrame(self.all_truth_peaks)
        output.to_csv(self.config['truth_file_name']+".csv", index_label='fax_truth_peak_id')
        if have_root:
            output.to_root(self.config['truth_file_name']+".root", 'fax_truth')
    def store_true_peak(self, peak_type, g4_id, t, x, y, z, photon_times, electron_times=(), peak_top_fraction=0):
        """ Saves the truth information about a peak (s1 or s2)

        Arrival-time statistics are stored per signal type ('photon',
        'electron'); a type with no recorded times gets NaN placeholders so
        the truth dataframe keeps a uniform set of columns.
        """
        true_peak = {
            'instruction': self.current_instruction,
            'repetition': self.current_repetition,
            'event': self.current_event,
            'peak_type': peak_type,
            'g4_id': g4_id,
            'x': x, 'y': y, 'z': z,
            't_interaction': t,
            'top_fraction': peak_top_fraction,
        }
        for name, times in (('photon', photon_times), ('electron', electron_times)):
            if len(times) != 0:
                # Signal type present in this peak: store count and timing stats.
                true_peak.update({
                    ('n_%ss' % name): len(times),
                    ('t_mean_%ss' % name): np.mean(times),
                    ('t_first_%s' % name): np.min(times),
                    ('t_last_%s' % name): np.max(times),
                    ('t_sigma_%ss' % name): np.std(times),
                })
            else:
                # This signal type doesn't exist in this peak
                true_peak.update({
                    ('n_%ss' % name): float('nan'),
                    ('t_mean_%ss' % name): float('nan'),
                    ('t_first_%s' % name): float('nan'),
                    ('t_last_%s' % name): float('nan'),
                    ('t_sigma_%ss' % name): float('nan'),
                })
        self.truth_peaks.append(true_peak)
    def s2(self, electrons, g4_id=-1, t=0., x=0., y=0., z=0.):
        """Simulate an S2 from `electrons` drift electrons at (x, y, z), time t.

        Queues the scintillation photons on the simulator and records the
        truth peak; always returns None.
        """
        r = np.sqrt(x**2 + y**2)
        phi = np.arctan2(y, x)
        electron_times = self.simulator.s2_electrons(electrons_generated=electrons, t=t, z=z, r=r)
        if not len(electron_times):
            return None
        photon_times = self.simulator.s2_scintillation(electron_times, x, y)
        if not len(photon_times):
            return None
        # Compute the xy for the S2 using the radial distortion map
        dmap = self.simulator.rz_position_distortion_map
        if dmap:
            r += dmap.get_value(r, z, map_name='to_distorted_r')
            x = r * np.cos(phi)
            y = r * np.sin(phi)
        arriving_photon_times_top, arriving_photon_times_bottom = self.simulator.queue_signal(
            photon_times,
            x,
            y,
            # Generate S2 hitpattern "at the anode": cue for simulator to use S2 LCE map
            z=-self.config['gate_to_anode_distance'])
        # NOTE(review): divides by the total number of arriving photons --
        # assumes queue_signal returns at least one photon here; confirm.
        peak_top_fraction = float(len(arriving_photon_times_top)) / float(
            len(arriving_photon_times_bottom) + len(arriving_photon_times_top))
        self.store_true_peak(
            's2',
            g4_id,
            t,
            x,
            y,
            z,
            np.concatenate((
                arriving_photon_times_top,
                arriving_photon_times_bottom
            )
            ),
            electron_times,
            peak_top_fraction,
        )
        return None
    def s1(self, photons, recoil_type, g4_id=-1, t=0., x=0., y=0., z=0.):
        """
        :param photons: total # of photons generated in the S1
        :param recoil_type: 'ER' for electronic recoil, 'NR' for nuclear recoil
        :param t: Time at which the interaction occurs, i.e. offset for arrival times.
        Defaults to s1_default_recombination_time.
        :return: None (photons are queued on the simulator and the truth
        peak recorded; nothing is returned to the caller)
        """
        photon_times = self.simulator.s1_photons(photons, recoil_type, x, y, z, t)
        if not len(photon_times):
            return None
        arriving_photon_times_top, arriving_photon_times_bottom = self.simulator.queue_signal(
            photon_times,
            x=x,
            y=y,
            z=z
        )
        # NOTE(review): same zero-arrival caveat as in s2() -- confirm.
        peak_top_fraction = float(len(arriving_photon_times_top)) / float(
            len(arriving_photon_times_bottom) + len(arriving_photon_times_top))
        self.store_true_peak(
            's1',
            g4_id,
            t,
            x,
            y,
            z,
            np.concatenate((
                arriving_photon_times_top,
                arriving_photon_times_bottom
            )),
            peak_top_fraction=peak_top_fraction,
        )
        return None
    def s2_after_pulses(self, g4_id=-1):
        """
        :simulate the s2 after pulses
        :the after pulses are assumed to be uniformly distributed in X-Y,
        :so are not related to where the photon is generated
        :We simplify the generation, assuming S2-after pulses
        :will not further generate secondary s2 after pulses
        """
        photon_detection_times = []
        # join all the photon detection times in channels
        for channel_id, single_channel_photon_detection_times \
                in self.simulator.arrival_times_per_channel.items():
            photon_detection_times.extend(
                np.array(single_channel_photon_detection_times)
            )
        # generate the s2 after pulses for each type
        s2_ap_electron_times = []
        for s2_ap_data in self.config['s2_afterpulse_types'].values():
            # calculate how many s2 after pulses (binomial per detected photon)
            num_s2_afterpulses = np.random.binomial(
                n=len(photon_detection_times),
                p=s2_ap_data['p']
            )
            if num_s2_afterpulses == 0:
                continue
            # Find the time delay of the after pulses
            dist_kwargs = s2_ap_data['time_parameters']
            dist_kwargs['size'] = num_s2_afterpulses
            delay = getattr(np.random, s2_ap_data['time_distribution'])(**dist_kwargs)
            # Delete afterpulses coming after the maximum delay time (if this option has been enabled)
            delay = delay[delay < self.config.get('max_s2_afterpulse_delay', float('inf'))]
            # Simulate the electron loss: survival probability decays
            # exponentially with the drift delay.
            rvs_uniform = np.random.uniform(low=0, high=1., size=len(delay))
            delay_after_drift = []
            for dt, rv in zip(delay, rvs_uniform):
                surviving_prob = np.exp(-dt/self.config['electron_lifetime_liquid'])
                if rv < surviving_prob:
                    delay_after_drift.append(dt)
            num_s2_afterpulses = len(delay_after_drift)
            # Choose the original photons that generated the S2 afterpulse.
            # In fact it is not a detected photon, but an undetected one, that
            # generated the afterpulse... but only the detected photon times
            # are still known at this stage.
            # NOTE(review): np.random.choice(..., replace=False) raises if
            # num_s2_afterpulses > len(photon_detection_times) -- confirm this
            # cannot happen for the configured afterpulse probabilities.
            original_photon_times = np.random.choice(photon_detection_times,
                                                     size=num_s2_afterpulses,
                                                     replace=False)
            s2_ap_electron_times.extend(original_photon_times + delay_after_drift)
        # generate the s2 photons of each s2 pulses one by one
        # the X-Y of after pulse is randomized, Z is set to 0
        # randomize an array of X-Y (uniform in area over the TPC disc)
        rsquare = np.random.uniform(
            0,
            np.power(self.config['tpc_radius'], 2.),
            len(s2_ap_electron_times)
        )
        theta = np.random.uniform(0, 2.*np.pi, len(s2_ap_electron_times))
        X = np.sqrt(rsquare)*np.cos(theta)
        Y = np.sqrt(rsquare)*np.sin(theta)
        for electron_id, s2_ap_electron_time \
                in enumerate(s2_ap_electron_times):
            s2_ap_photon_times = self.simulator.s2_scintillation(
                [s2_ap_electron_time],
                X[electron_id],
                Y[electron_id]
            )
            # queue the photons caused by the s2 after pulses
            arriving_photon_times_top, arriving_photon_times_bottom = self.simulator.queue_signal(
                s2_ap_photon_times,
                X[electron_id],
                Y[electron_id],
                -self.config['gate_to_anode_distance']
            )
            peak_top_fraction = float(len(arriving_photon_times_top)) / float(
                len(arriving_photon_times_bottom) + len(arriving_photon_times_top))
            if self.config.get("output_PI_truth_info", False):
                self.store_true_peak(
                    'photoionization_afterpulse',
                    g4_id,
                    t=s2_ap_electron_time,
                    x=X[electron_id],
                    y=Y[electron_id],
                    z=-self.config['gate_to_anode_distance'],
                    photon_times=np.concatenate((
                        arriving_photon_times_top,
                        arriving_photon_times_bottom
                    )),
                    peak_top_fraction=peak_top_fraction,
                )
    def get_instructions_for_next_event(self):
        """Yield one instruction list per event. Subclasses must override."""
        raise NotImplementedError()
    def simulate_single_event(self, instructions):
        """Simulate all depositions in `instructions`, then afterpulses,
        and assemble one pax event. Truth peaks are offset by event_padding
        and appended to the global accumulator."""
        self.truth_peaks = []
        for q in instructions:
            self.log.debug("Simulating %s photons and %s electrons at %s cm z, at t=%s ns" % (
                q['s1_photons'], q['s2_electrons'], q['z'], q['t']))
            # Should we choose x and y randomly?
            if q['x'] == 'random':
                x, y = uniform_circle_rv(self.config['tpc_radius'])
            else:
                x = float(q['x'])
                y = float(q['y'])
            if q['z'] == 'random':
                z = - np.random.uniform(0, self.config['tpc_length'])
            else:
                # NOTE(review): only the explicit-z branch multiplies by
                # units.cm; presumably tpc_length is already in these units
                # for the random branch -- confirm.
                z = float(q['z']) * units.cm
            if int(q['s1_photons']):
                self.s1(photons=int(q['s1_photons']),
                        recoil_type=q['recoil_type'],
                        g4_id=q['g4_id'],
                        t=float(q['t']), x=x, y=y, z=z)
            if int(q['s2_electrons']):
                self.s2(electrons=int(q['s2_electrons']),
                        g4_id=q['g4_id'],
                        t=float(q['t']), x=x, y=y, z=z)
        # Based on the generated photon timing
        # generate the after pulse
        # currently make it simple, assuming s2 after pulses
        # will not generate further s2 after pulses.
        # If there were no instructions, there is no g4_id, but there will also be no photons,
        # so running this is pointless.
        if len(instructions):
            self.s2_after_pulses(g4_id=q['g4_id'])
        event = self.simulator.make_pax_event()
        if hasattr(self, 'dataset_name'):
            event.dataset_name = self.dataset_name
        event.event_number = self.current_event
        # Add start time offset to all peak start times in the truth file
        # Can't be done at the time of peak creation, it is only known now...
        # TODO: That's no longer true! so fix it
        for p in self.truth_peaks:
            for key in p.keys():
                # Only shift time fields (t_*), not their spreads (t_sigma_*).
                if key[:2] == 't_' and key[2:7] != 'sigma':
                    if p[key] == '':
                        continue
                    p[key] += self.config['event_padding']
        self.all_truth_peaks.extend(self.truth_peaks)
        return event
    def get_events(self):
        """Generator over simulated events: each instruction is repeated
        event_repetitions times, with sequential event numbers."""
        for instruction_number, instructions in enumerate(self.get_instructions_for_next_event()):
            self.current_instruction = instruction_number
            for repetition_i in range(self.config['event_repetitions']):
                self.current_repetition = repetition_i
                self.current_event = instruction_number * self.config['event_repetitions'] + repetition_i
                self.log.debug('Instruction %s, iteration %s, event number %s' % (instruction_number,
                                                                                  repetition_i, self.current_event))
                yield self.simulate_single_event(instructions)
class WaveformSimulatorFromCSV(WaveformSimulator):
    """Simulate waveforms from a csv file with instructions, see:
    http://xenon1t.github.io/pax/simulator.html#instruction-file-format
    """
    def startup(self):
        """
        The startup routine of the WaveformSimulatorFromCSV plugin

        Reads the whole instruction CSV up front (so the number of events is
        known) and groups consecutive rows sharing an 'instruction' number
        into one instruction list per event.
        """
        # Open the instructions file
        filename = self.config['input_name']
        self.dataset_name = os.path.basename(filename)
        self.instructions_file = open(utils.data_file_name(filename), 'r')
        # Slurp the entire instructions file, so we know the number of events
        self.instruction_reader = csv.DictReader(self.instructions_file)
        self.instructions = []
        current_number = 0
        current_group = []
        for row in self.instruction_reader:
            # csv input carries no GEANT4 id; use the fake value -1
            row['g4_id'] = -1
            # Translate 'depth' into a (negative) z coordinate
            row['z'] = 'random' if row['depth'] == 'random' else -1 * float(row['depth'])
            del row['depth']
            if int(row['instruction']) == current_number:
                # Deposition belongs to the instruction currently being built
                current_group.append(row)
            else:
                # A new instruction number: flush the finished group
                if current_group:
                    self.instructions.append(current_group)
                current_number = int(row['instruction'])
                current_group = [row]
        # For the final instruction
        self.instructions.append(current_group)
        self.number_of_events = len(self.instructions) * self.config['event_repetitions']
        WaveformSimulator.startup(self)
    def shutdown(self):
        """Close the instruction file, then run the shared shutdown."""
        self.instructions_file.close()
        WaveformSimulator.shutdown(self)
    def get_instructions_for_next_event(self):
        """Yield the pre-parsed instruction lists in file order."""
        yield from self.instructions
class WaveformSimulatorFromMC(WaveformSimulator):
"""Simulate waveforms from GEANT4 ROOT file with generated
S1, S2 information (e.g. from NEST, | |
0.06
3 1.47e+06 255.28 | 247.30 0.0 1000 0 | 0.10 0.15 131.86 0.07
3 1.49e+06 255.28 | 246.46 0.0 1000 0 | 0.10 0.13 132.77 0.06
3 1.50e+06 255.28 | 235.29 0.0 1000 0 | 0.10 0.13 133.17 0.08
3 1.52e+06 255.28 | 92.90 0.0 1000 0 | 0.08 0.15 140.81 0.08
3 1.54e+06 255.28 | -8.49 0.0 1000 0 | 0.02 0.17 144.82 0.10
3 1.56e+06 255.28 | 18.62 0.0 1000 0 | 0.01 0.18 159.15 0.10
3 1.58e+06 255.28 | 12.88 0.0 1000 0 | 0.00 0.20 161.08 0.08
3 1.59e+06 255.28 | 249.60 0.0 1000 0 | 0.02 0.19 160.45 0.07
3 1.61e+06 255.28 | 245.90 0.0 1000 0 | 0.09 0.16 158.35 0.06
3 1.63e+06 255.28 | 253.69 0.0 1000 0 | 0.10 0.15 157.53 0.06
3 1.65e+06 256.24 |
3 1.65e+06 256.24 | 256.24 1.8 1000 0 | 0.10 0.17 154.70 0.06
3 1.66e+06 257.26 |
3 1.66e+06 257.26 | 257.26 1.0 1000 0 | 0.10 0.17 152.74 0.06
3 1.68e+06 261.95 |
3 1.68e+06 261.95 | 261.95 2.2 1000 0 | 0.10 0.16 149.32 0.06
3 1.70e+06 261.95 | 259.62 2.4 1000 0 | 0.10 0.17 150.72 0.06
3 1.71e+06 261.95 | 248.64 0.0 1000 0 | 0.10 0.15 148.57 0.06
3 1.73e+06 261.95 | 249.29 0.0 1000 0 | 0.10 0.13 146.17 0.07
3 1.74e+06 261.95 | 249.37 0.0 1000 0 | 0.09 0.15 147.29 0.06
3 1.76e+06 261.95 | 245.88 0.0 1000 0 | 0.10 0.16 145.84 0.07
3 1.78e+06 261.95 | 261.27 0.0 1000 0 | 0.10 0.16 146.70 0.07
3 1.79e+06 261.95 | 259.66 0.0 1000 0 | 0.05 0.16 150.79 0.06
3 1.81e+06 261.95 | 260.88 0.0 1000 0 | 0.10 0.15 146.73 0.06
3 1.82e+06 261.95 | 242.13 0.0 1000 0 | 0.11 0.17 145.80 0.07
3 1.84e+06 261.95 | 257.71 0.0 1000 0 | 0.10 0.18 147.04 0.06
3 1.86e+06 261.95 | 260.06 0.0 1000 0 | 0.10 0.14 146.52 0.07
3 1.87e+06 261.95 | 245.67 0.0 1000 0 | 0.11 0.14 146.18 0.07
3 1.89e+06 261.95 | 237.52 0.0 1000 0 | 0.10 0.14 148.21 0.08
3 1.90e+06 261.95 | 259.79 0.0 1000 0 | 0.10 0.14 149.33 0.06
3 1.92e+06 261.95 | 258.17 0.0 1000 0 | 0.11 0.14 148.37 0.06
3 1.93e+06 261.95 | 253.84 0.0 1000 0 | 0.11 0.14 146.93 0.06
3 1.94e+06 261.95 | 254.08 0.0 1000 0 | 0.10 0.18 146.73 0.06
3 1.96e+06 261.95 | 251.79 0.0 1000 0 | 0.11 0.15 143.88 0.06
3 1.97e+06 261.95 | 253.61 0.0 1000 0 | 0.11 0.15 144.74 0.06
3 1.99e+06 261.95 | 261.72 0.0 1000 0 | 0.11 0.14 143.76 0.07
3 2.00e+06 263.24 |
3 2.00e+06 263.24 | 263.24 0.5 1000 0 | 0.09 0.16 147.80 0.07
3 2.01e+06 263.24 | 249.24 0.0 1000 0 | 0.10 0.17 150.97 0.07
3 2.03e+06 270.97 |
3 2.03e+06 270.97 | 270.97 0.4 1000 0 | 0.11 0.14 149.13 0.06
3 2.04e+06 272.27 |
3 2.04e+06 272.27 | 272.27 0.4 1000 0 | 0.11 0.15 147.54 0.06
3 2.06e+06 279.06 |
3 2.06e+06 279.06 | 279.06 1.4 1000 0 | 0.11 0.15 146.70 0.06
3 2.07e+06 279.06 | 262.20 0.0 1000 0 | 0.11 0.15 146.47 0.06
3 2.08e+06 279.06 | 92.01 0.0 1000 0 | 0.10 0.16 145.97 0.08
3 2.10e+06 279.06 | 45.48 0.0 1000 0 | 0.04 0.16 150.03 0.08
3 2.11e+06 279.06 | 266.86 0.0 1000 0 | 0.05 0.15 149.27 0.06
3 2.12e+06 279.06 | 259.74 0.0 1000 0 | 0.10 0.17 148.94 0.06
3 2.14e+06 279.06 | 277.93 0.0 1000 0 | 0.11 0.17 148.45 0.06
3 2.15e+06 279.06 | 274.22 0.0 1000 0 | 0.11 0.17 147.41 0.06
3 2.17e+06 279.06 | 229.47 0.0 1000 0 | 0.11 0.13 149.58 0.08
3 2.18e+06 279.06 | 268.43 0.0 1000 0 | 0.11 0.15 148.77 0.06
3 2.19e+06 279.06 | 270.04 0.0 1000 0 | 0.11 0.15 147.16 0.06
3 2.21e+06 279.06 | 258.05 0.0 1000 0 | 0.10 0.14 147.61 0.07
3 2.22e+06 279.06 | 261.98 0.0 1000 0 | 0.11 0.17 150.39 0.07
3 2.24e+06 279.06 | 268.90 0.0 1000 0 | 0.11 0.19 149.27 0.07
3 2.25e+06 279.06 | 277.89 0.0 1000 0 | 0.11 0.17 150.75 0.06
3 2.26e+06 279.06 | 277.51 0.0 1000 0 | 0.11 0.15 149.34 0.06
3 2.28e+06 279.06 | 276.45 0.0 1000 0 | 0.11 0.16 148.45 0.06
3 2.29e+06 279.06 | 276.91 0.0 1000 0 | 0.11 0.16 146.36 0.06
3 2.31e+06 279.06 | 273.43 0.0 1000 0 | 0.11 0.15 147.01 0.06
3 2.32e+06 279.06 | 264.30 0.0 1000 0 | 0.10 0.17 147.23 0.06
3 2.33e+06 279.06 | 272.07 0.0 1000 0 | 0.10 0.15 148.60 0.06
3 2.35e+06 279.06 | 267.41 0.0 1000 0 | 0.11 0.15 148.22 0.06
3 2.36e+06 279.06 | 273.78 0.0 1000 0 | 0.11 0.19 148.15 0.06
3 2.38e+06 279.06 | 274.00 0.0 1000 0 | 0.10 0.15 148.18 0.06
3 2.39e+06 279.06 | 264.71 0.0 1000 0 | 0.11 0.16 149.14 0.07
3 2.40e+06 279.06 | 266.69 0.0 1000 0 | 0.06 0.16 151.73 0.07
3 2.42e+06 279.51 |
3 2.42e+06 279.51 | 279.51 1.6 1000 0 | 0.09 0.16 154.09 0.06
3 2.43e+06 280.29 |
3 2.43e+06 280.29 | 280.29 0.8 1000 0 | 0.11 0.17 153.12 0.06
3 2.45e+06 280.29 | 262.86 0.0 1000 0 | 0.11 0.17 153.20 0.06
3 2.46e+06 280.29 | 273.96 0.0 1000 0 | 0.10 0.18 152.18 0.06
3 2.47e+06 281.82 |
3 2.47e+06 281.82 | 281.82 0.2 1000 0 | 0.11 0.17 153.48 0.06
3 2.49e+06 281.82 | 263.43 0.0 1000 0 | 0.12 0.16 151.74 0.06
3 2.50e+06 281.82 | 260.68 0.0 1000 0 | 0.10 0.15 154.54 0.07
3 2.52e+06 281.82 | 268.88 0.0 1000 0 | 0.10 0.16 154.29 0.06
3 2.53e+06 281.82 | 267.59 0.0 1000 0 | 0.11 0.15 153.52 0.06
3 2.54e+06 281.82 | 278.41 0.0 1000 0 | 0.11 0.13 153.54 0.06
3 2.56e+06 281.82 | 272.29 0.0 1000 0 | 0.11 0.16 152.92 0.06
3 2.57e+06 281.82 | 267.83 0.0 1000 0 | 0.11 0.17 152.42 0.06
3 2.59e+06 281.82 | 263.96 0.0 1000 0 | 0.11 0.17 154.01 0.06
3 2.60e+06 281.82 | 280.84 0.0 1000 0 | 0.11 0.16 153.49 0.06
3 2.61e+06 287.28 |
3 2.61e+06 287.28 | 287.28 0.2 1000 0 | 0.11 0.15 152.73 0.06
3 3.50e+06 287.28 | 38.06 0.0 1000 0 | -0.01 0.27 230.10 0.10
3 3.51e+06 287.28 | 13.15 0.0 1000 0 | -0.00 0.23 230.06 0.08
3 3.52e+06 287.28 | 43.99 0.0 1000 0 | 0.02 0.23 231.44 0.10
3 3.54e+06 287.28 | -0.56 0.0 1000 0 | 0.01 0.25 238.56 0.13
3 3.55e+06 287.28 | 46.62 0.0 1000 0 | 0.04 0.22 234.65 0.07
3 3.57e+06 287.28 | 31.43 0.0 1000 0 | 0.01 0.26 236.88 0.10
3 3.58e+06 287.28 | 105.30 0.0 1000 0 | 0.04 0.23 231.16 0.06
3 3.59e+06 287.28 | 258.81 0.0 1000 0 | 0.00 0.23 224.03 0.06
3 3.61e+06 287.28 | 256.95 0.0 1000 0 | 0.10 0.25 215.33 0.06
3 3.62e+06 287.28 | 253.97 0.0 1000 0 | 0.10 0.21 209.44 0.06
3 3.64e+06 287.28 | 265.32 0.0 1000 0 | 0.11 0.22 204.07 0.06
3 3.65e+06 287.28 | 269.82 0.0 1000 0 | 0.10 0.20 197.73 0.06
3 3.66e+06 287.28 | 265.63 0.0 1000 0 | 0.11 0.22 192.48 0.06
3 3.68e+06 287.28 | 270.77 0.0 1000 0 | 0.11 0.25 189.75 0.06
3 3.69e+06 287.28 | 256.46 0.0 1000 0 | 0.10 0.19 187.02 0.06
3 3.71e+06 287.28 | 273.00 0.0 1000 0 | 0.10 0.19 182.82 0.06
| UsedTime: 52955 | SavedDir: ./Swimmer-v3_ReSAC_3
| Learner: Save in ./Swimmer-v3_ReSAC_3
"""
elif env_name == 'Ant-v3':
from elegantrl.envs.CustomGymEnv import AntEnv
env_func = AntEnv
env_args = {
'env_num': 1,
'env_name': 'Ant-v3',
'max_step': 1000,
'state_dim': 27, # original MuJoCo Ant state_dim is 111
'action_dim': 8,
'if_discrete': False,
'target_return': 6000.0,
}
args = Arguments(agent_class, env_func=env_func, env_args=env_args)
args.reward_scale = 2 ** -4
args.num_layer = 3
args.net_dim = 2 ** 8
args.batch_size = int(args.net_dim * 2)
args.worker_num = 2
args.target_step = args.max_step
if gpu_id == 1:
args.repeat_times = 2 ** -1
if gpu_id == 2:
args.repeat_times = 2 ** -0
args.reward_scale = 2 ** -4
args.learning_rate = 2 ** -15
args.clip_grad_norm = 1.0
args.gamma = 0.985
args.if_act_target = False
args.explore_noise_std = 0.1 # for DPG
| |
if not hel_config is None:
raise CurrentImplementationError(
self.name() + " does not support helicity assignment." )
if Q is None:
raise CurrentImplementationError(
self.name() + " requires the total mapping momentum Q." )
# Retrieve alpha_s and mu_r
model_param_dict = self.model.get('parameter_dict')
alpha_s = model_param_dict['aS']
mu_r = model_param_dict['MU_R']
# Include the counterterm only in a part of the phase space
children = self.get_sorted_children(current, self.model)
parent = leg_numbers_map.inv[frozenset(children)]
pC = sum(higher_PS_point[child] for child in children)
qC = lower_PS_point[parent]
if self.is_cut(Q=Q, pC=pC):
return utils.SubtractionCurrentResult.zero(
current=current, hel_config=hel_config)
# Evaluate collinear subtracted kernel
zs, kTs = self.variables(higher_PS_point, lower_PS_point[parent], children, Q=Q)
evaluation = self.evaluate_kernel(zs, kTs, parent)
# Find all colored leg numbers except for the parent in the reduced process
all_colored_parton_numbers = []
for leg in reduced_process.get('legs'):
if self.model.get_particle(leg.get('id')).get('color') == 1:
continue
all_colored_parton_numbers.append(leg.get('number'))
color_correlation_index = 1
p0 = higher_PS_point[children[0]]
p1 = higher_PS_point[children[1]]
# Loop over the colored parton number pairs (parent, j)
# and add the corresponding contributions to this current
for j in all_colored_parton_numbers:
# Write the eikonal for that pair
if j == parent:
continue
# pj = higher_PS_point[j]
# pj = sum(higher_PS_point[child] for child in leg_numbers_map[j])
qj = lower_PS_point[j]
# eik0 = -mod_eikonal(pj, p1, p0)
# eik1 = -mod_eikonal(pj, p0, p1)
eik0 = -mod_eikonal(qj, qC, p0)
eik1 = -mod_eikonal(qj, qC, p1)
evaluation['color_correlations'].append(((parent, j),))
evaluation['values'][(0, color_correlation_index)] = {'finite': eik0 + eik1}
color_correlation_index += 1
# Add the normalization factors
pC2 = pC.square()
norm = 8. * math.pi * alpha_s / pC2
norm *= self.factor(Q=Q, pC=pC, qC=qC)
for k in evaluation['values']:
evaluation['values'][k]['finite'] *= norm
# Construct and return result
result = utils.SubtractionCurrentResult()
result.add_result(
evaluation,
hel_config=hel_config,
squared_orders=tuple(sorted(current.get('squared_orders').items())))
return result
#=========================================================================================
# NLO initial-collinear currents, containing the soft limits
#=========================================================================================
class QCD_initial_collinear_0_gq(currents.QCDLocalCollinearCurrent):
    """gq collinear ISR tree-level current.

    q(initial) > g(initial_after_emission) q(final)
    """

    variables = staticmethod(currents.Q_initial_coll_variables)

    @classmethod
    def does_implement_this_current(cls, current, model):
        """Return the initialization variables if this class implements
        the given current, None otherwise."""
        # General checks common to NLO QCD collinear tree-level currents
        init_vars = cls.common_does_implement_this_current(current, 2, 0)
        if init_vars is None:
            return None
        ss = current.get('singular_structure')
        # Exactly two legs: one initial-state quark, one final-state quark
        if len(ss.legs) != 2:
            return None
        initial_quarks = sum(
            1 for leg in ss.legs
            if cls.is_quark(leg, model) and cls.is_initial(leg))
        if initial_quarks != 1:
            return None
        final_quarks = sum(
            1 for leg in ss.legs
            if cls.is_quark(leg, model) and not cls.is_initial(leg))
        if final_quarks != 1:
            return None
        # The current is valid
        return init_vars

    @classmethod
    def get_sorted_children(cls, current, model):
        """Return the children numbers as a tuple, initial-state legs first."""
        legs = current.get('singular_structure').legs
        initial_children = [leg.n for leg in legs if leg.state == leg.INITIAL]
        final_children = [leg.n for leg in legs if leg.state == leg.FINAL]
        return tuple(initial_children + final_children)

    def evaluate_kernel(self, xs, kTs, parent):
        """Evaluate the q > g q initial-state collinear splitting kernel,
        including the spin-correlated (kT) part."""
        x = xs[0]
        kT = kTs[0]
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations' : [None, ((parent, (kT, )), ), ],
            'color_correlations' : [None],
            'values' : {(0, 0): {'finite': None},
                        (1, 0): {'finite': None}, }
        })
        # The factor 'x' that should be part of the initial-state crossing
        # factor cancels against the extra prefactor 1/x in the collinear
        # factorization formula (see Eq. (8) of arXiv:0903.1218v2).  The -1
        # and the color-averaging ratio between the real ME initial state
        # (quark) and the reduced Born ME one (gluon) make up the factor.
        crossing_factor = -1. * ((self.NC**2 - 1) / float(self.NC))
        z = 1. / x
        # Re-use the Altarelli-Parisi P_q\bar{q} final-state kernel.
        # (0, 0) implements the g_{\mu\nu} part of the splitting kernel;
        # the extra longitudinal terms implicit in the 'None' spin
        # correlation (from \sum_\lambda eps^\mu eps^{*\nu} = g^{\mu\nu}
        # + longitudinal) vanish by Ward identities.
        norm = crossing_factor * self.TR
        evaluation['values'][(0, 0)]['finite'] = norm
        evaluation['values'][(1, 0)]['finite'] = norm * 4. * z * (1.-z) / kT.square()
        return evaluation
class QCD_initial_collinear_0_qq(currents.QCDLocalCollinearCurrent):
    """qq collinear ISR tree-level current.

    g(initial) > q(initial_after_emission) qx(final).
    """

    variables = staticmethod(currents.Q_initial_coll_variables)

    @classmethod
    def does_implement_this_current(cls, current, model):
        """Return the initialization variables if this class implements
        the given current, None otherwise."""
        # General checks common to NLO QCD collinear tree-level currents
        init_vars = cls.common_does_implement_this_current(current, 2, 0)
        if init_vars is None:
            return None
        ss = current.get('singular_structure')
        # Exactly two legs: one initial-state gluon, one final-state quark
        if len(ss.legs) != 2:
            return None
        initial_gluons = sum(
            1 for leg in ss.legs
            if cls.is_gluon(leg, model) and cls.is_initial(leg))
        if initial_gluons != 1:
            return None
        final_quarks = sum(
            1 for leg in ss.legs
            if cls.is_quark(leg, model) and not cls.is_initial(leg))
        if final_quarks != 1:
            return None
        # The current is valid
        return init_vars

    @classmethod
    def get_sorted_children(cls, current, model):
        """Return the children numbers as a tuple, initial-state legs first."""
        legs = current.get('singular_structure').legs
        initial_children = [leg.n for leg in legs if leg.state == leg.INITIAL]
        final_children = [leg.n for leg in legs if leg.state == leg.FINAL]
        return tuple(initial_children + final_children)

    def evaluate_kernel(self, xs, kTs, parent):
        """Evaluate the g > q qx initial-state collinear splitting kernel."""
        x = xs[0]
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations' : [None],
            'color_correlations' : [None],
            'values' : {(0, 0): {'finite': None}}
        })
        # The factor 'x' that should be part of the initial-state crossing
        # factor cancels against the extra prefactor 1/x in the collinear
        # factorization formula (see Eq. (8) of arXiv:0903.1218v2).  The
        # remaining ratio corrects the color averaging between the real ME
        # initial state (gluon) and the reduced Born ME one (quark).
        crossing_factor = self.NC / float(self.NC**2 - 1)
        z = 1. / x
        # Altarelli-Parisi P_gq final-state kernel without the soft
        # subtraction term 2./z, since the gluon is in the initial state.
        evaluation['values'][(0, 0)]['finite'] = (
            crossing_factor * self.CF * (1. + (1.-z)**2) / z)
        return evaluation
class QCD_initial_collinear_0_qg(currents.QCDLocalCollinearCurrent):
"""qg collinear ISR tree-level current.
q(initial) > q(initial_after_emission) g(final)
"""
variables = staticmethod(currents.Q_initial_coll_variables)
@classmethod
def does_implement_this_current(cls, current, model):
# Check the general properties common to NLO QCD collinear tree-level currents
init_vars = cls.common_does_implement_this_current(current, 2, 0)
if init_vars is None: return None
# Retrieve singular structure
ss = current.get('singular_structure')
if len(ss.legs) != 2: return None
n_initial_state_quarks = 0
for leg in ss.legs:
if cls.is_quark(leg, model) and cls.is_initial(leg):
n_initial_state_quarks += 1
if n_initial_state_quarks != 1: return None
n_final_state_gluon = 0
for leg in ss.legs:
if cls.is_gluon(leg, model) and not cls.is_initial(leg):
n_final_state_gluon += 1
if n_final_state_gluon != 1: return None
# The current is valid
return init_vars
@classmethod
def get_sorted_children(cls, current, model):
legs = current.get('singular_structure').legs
# Always put the initial state child first
children_numbers = [leg.n for leg in legs if leg.state == leg.INITIAL]
# Then the final state ones
children_numbers.extend([leg.n for leg in legs if leg.state == leg.FINAL])
return tuple(children_numbers)
def evaluate_kernel(self, xs, kTs, parent):
# Retrieve the collinear variable x
x = xs[0]
# Instantiate the structure of the result
evaluation = utils.SubtractionCurrentEvaluation({
'spin_correlations' : [None],
'color_correlations' : [None],
'values' : {(0, 0): {'finite': None}}
})
# The factor 'x' that should be part of the initial_state_crossing_factor cancels
# against the extra prefactor 1/x in the collinear factorization formula
# (see Eq. (8) of NNLO compatible NLO scheme publication arXiv:0903.1218v2)
initial_state_crossing_factor = 1.
# Correct for the ratio of color-averaging factor between the real ME
# initial state flavor (quark) and the one of the reduced Born ME (quark)
initial_state_crossing_factor *= 1.
z = 1./x
# We re-use here the Altarelli-Parisi Kernel of the P_qg final state kernel, including
# its soft subtraction
# We must subtract the soft-collinear (CxS *not* SxC) from this contribution:
# P_qg = self.CF * ( (1.+z**2)/(1.-z) )
# CxS(P_qg) = self.CF * ( 2 / (x - 1) ) = self.CF * ( 2 z / (1 - z) )
# P_qg-CxS(P_qg) = self.CF * (1 + z**2 - 2*z) / (1 - z) = self.CF * ( 1 - z)
norm = | |
(M[0x1BBC] == 0x0B) ], "LD (IY+0CH),L"],
[ [ A(0x0B), IY(0x1BB0) ], [ 0xFD, 0x77, 0x0C ], 19, [ (PC == 0x3), (M[0x1BBC] == 0x0B) ], "LD (IY+0CH),A"],
[ [ BC(0xCAFE) ], [ 0xED, 0x43, 0xBC, 0x1B ], 22, [ (PC == 0x4), (M[0x1BBC] == 0xFE), (M[0x1BBD] == 0xCA) ], "LD (1BBCH),BC" ],
[ [ DE(0xCAFE) ], [ 0xED, 0x53, 0xBC, 0x1B ], 22, [ (PC == 0x4), (M[0x1BBC] == 0xFE), (M[0x1BBD] == 0xCA) ], "LD (1BBCH),DE" ],
[ [ SP(0xCAFE) ], [ 0xED, 0x73, 0xBC, 0x1B ], 22, [ (PC == 0x4), (M[0x1BBC] == 0xFE), (M[0x1BBD] == 0xCA) ], "LD (1BBCH),SP" ],
[ [ IX(0xCAFE) ], [ 0xDD, 0x22, 0xBC, 0x1B ], 22, [ (PC == 0x4), (M[0x1BBC] == 0xFE), (M[0x1BBD] == 0xCA) ], "LD (1BBCH),IX" ],
[ [ IY(0xCAFE) ], [ 0xFD, 0x22, 0xBC, 0x1B ], 22, [ (PC == 0x4), (M[0x1BBC] == 0xFE), (M[0x1BBD] == 0xCA) ], "LD (1BBCH),IY" ],
]
for (pre, instructions, t_cycles, post, name) in tests:
self.execute_instructions(pre, instructions, t_cycles, post, name)
    def test_pop(self):
        """POP rr: verify PC advance, SP incremented by 2, and the popped
        16-bit value read little-endian from memory at SP (low byte at the
        lower address).  DD/FD-prefixed forms cover IX/IY."""
        # actions taken first, instructions to execute, t-cycles to run for, expected conditions post, name
        tests = [
            [ [ M(0x1BBC, 0xFE), M(0x1BBD, 0xCA), SP(0x1BBC) ], [ 0xC1, ], 10, [ (PC == 0x01), (SP == 0x1BBE), (BC == 0xCAFE) ], "POP BC" ],
            [ [ M(0x1BBC, 0xFE), M(0x1BBD, 0xCA), SP(0x1BBC) ], [ 0xD1, ], 10, [ (PC == 0x01), (SP == 0x1BBE), (DE == 0xCAFE) ], "POP DE" ],
            [ [ M(0x1BBC, 0xFE), M(0x1BBD, 0xCA), SP(0x1BBC) ], [ 0xE1, ], 10, [ (PC == 0x01), (SP == 0x1BBE), (HL == 0xCAFE) ], "POP HL" ],
            [ [ M(0x1BBC, 0xFE), M(0x1BBD, 0xCA), SP(0x1BBC) ], [ 0xF1, ], 10, [ (PC == 0x01), (SP == 0x1BBE), (AF == 0xCAFE) ], "POP AF" ],
            [ [ M(0x1BBC, 0xFE), M(0x1BBD, 0xCA), SP(0x1BBC) ], [ 0xDD, 0xE1, ], 14, [ (PC == 0x02), (SP == 0x1BBE), (IX == 0xCAFE) ], "POP IX" ],
            [ [ M(0x1BBC, 0xFE), M(0x1BBD, 0xCA), SP(0x1BBC) ], [ 0xFD, 0xE1, ], 14, [ (PC == 0x02), (SP == 0x1BBE), (IY == 0xCAFE) ], "POP IY" ],
            ]
        for (pre, instructions, t_cycles, post, name) in tests:
            self.execute_instructions(pre, instructions, t_cycles, post, name)
    def test_push(self):
        """PUSH rr: verify PC advance, SP decremented by 2, and the register
        pair written little-endian below the old SP (low byte at the lower
        address).  DD/FD-prefixed forms cover IX/IY."""
        # actions taken first, instructions to execute, t-cycles to run for, expected conditions post, name
        tests = [
            [ [ AF(0xCAFE), SP(0x1BBC) ], [ 0xF5, ], 10, [ (PC == 0x01), (SP == 0x1BBA), (M[0x1BBA] == 0xFE), (M[0x1BBB] == 0xCA) ], "PUSH AF" ],
            [ [ BC(0xCAFE), SP(0x1BBC) ], [ 0xC5, ], 10, [ (PC == 0x01), (SP == 0x1BBA), (M[0x1BBA] == 0xFE), (M[0x1BBB] == 0xCA) ], "PUSH BC" ],
            [ [ DE(0xCAFE), SP(0x1BBC) ], [ 0xD5, ], 10, [ (PC == 0x01), (SP == 0x1BBA), (M[0x1BBA] == 0xFE), (M[0x1BBB] == 0xCA) ], "PUSH DE" ],
            [ [ HL(0xCAFE), SP(0x1BBC) ], [ 0xE5, ], 10, [ (PC == 0x01), (SP == 0x1BBA), (M[0x1BBA] == 0xFE), (M[0x1BBB] == 0xCA) ], "PUSH HL" ],
            [ [ IX(0xCAFE), SP(0x1BBC) ], [ 0xDD, 0xE5, ], 14, [ (PC == 0x02), (SP == 0x1BBA), (M[0x1BBA] == 0xFE), (M[0x1BBB] == 0xCA) ], "PUSH IX" ],
            [ [ IY(0xCAFE), SP(0x1BBC) ], [ 0xFD, 0xE5, ], 14, [ (PC == 0x02), (SP == 0x1BBA), (M[0x1BBA] == 0xFE), (M[0x1BBB] == 0xCA) ], "PUSH IY" ],
            ]
        for (pre, instructions, t_cycles, post, name) in tests:
            self.execute_instructions(pre, instructions, t_cycles, post, name)
    def test_ex(self):
        """Exchange instructions: EX AF,AF' (swaps with the shadow AF —
        the 'ex' marker in pre/post switches register banks), EX DE,HL,
        and EX (SP),HL/IX/IY which swap a register pair with the 16-bit
        word on top of the stack."""
        # actions taken first, instructions to execute, t-cycles to run for, expected conditions post, name
        tests = [
            [ [ AF(0xA), ex, AF(0xB) ], [ 0x08 ], 4, [ (PC == 0x01), (AF == 0xA), ex, (AF == 0xB) ], "EX AF,AF'" ],
            [ [ DE(0xA), HL(0xB)], [ 0xEB ], 4, [ (PC == 0x01), (DE == 0xB), (HL == 0xA) ], "EX DE,HL" ],
            [ [ HL(0xCAFE), SP(0x1BBC), M(0x1BBC, 0x37), M(0x1BBD, 0x13),], [ 0xE3 ], 19, [ (PC == 0x01), (HL == 0x1337), (SP == 0x1BBC), (M[0x1BBC] == 0xFE), (M[0x1BBD] == 0xCA) ], "EX (SP),HL" ],
            [ [ IX(0xCAFE), SP(0x1BBC), M(0x1BBC, 0x37), M(0x1BBD, 0x13),], [ 0xDD, 0xE3 ], 23, [ (PC==0x02), (IX==0x1337), (SP==0x1BBC), (M[0x1BBC]==0xFE), (M[0x1BBD]==0xCA) ], "EX (SP),IX" ],
            [ [ IY(0xCAFE), SP(0x1BBC), M(0x1BBC, 0x37), M(0x1BBD, 0x13),], [ 0xFD, 0xE3 ], 23, [ (PC==0x02), (IY==0x1337), (SP==0x1BBC), (M[0x1BBC]==0xFE), (M[0x1BBD]==0xCA) ], "EX (SP),IY" ],
            ]
        for (pre, instructions, t_cycles, post, name) in tests:
            self.execute_instructions(pre, instructions, t_cycles, post, name)
    def test_exx(self):
        """EXX (0xD9): BC, DE and HL are swapped with their shadow set.
        The 'exx' marker in pre/post switches which bank the surrounding
        assignments/assertions refer to."""
        # actions taken first, instructions to execute, t-cycles to run for, expected conditions post, name
        (pre, instructions, t_cycles, post, name) = [
            [ BC(0xCAFE), DE(0x1BBC), HL(0xDEAD), exx, BC(0x1337), DE(0x8080), HL(0xF00F) ],
            [ 0xD9 ], 4,
            [ (BC == 0xCAFE), (DE == 0x1BBC), (HL == 0xDEAD), exx, (BC == 0x1337), (DE == 0x8080), (HL == 0xF00F) ],
            "EXX"
            ]
        self.execute_instructions(pre, instructions, t_cycles, post, name)
    def test_ldi(self):
        """LDI (ED A0): copy (HL) to (DE), increment HL and DE, decrement
        BC.  The cases vary A and the remaining count to pin down the exact
        flag byte F after the operation (undocumented flag bits depend on
        A + the copied byte)."""
        # actions taken first, instructions to execute, t-cycles to run for, expected conditions post, name
        tests = [
            [ [ HL(0x1BBC), DE(0x2BBC), BC(0x02), A(0x00), M(0x1BBC, 0x2B) ], [ 0xED, 0xA0 ], 16, [ (PC==0x02),(HL==0x1BBD),(DE==0x2BBD),(BC==0x1),(M[0x2BBC]==0x2B), (F==0x2C) ], "LDI (nz, A==0x00)" ],
            [ [ HL(0x1BBC), DE(0x2BBC), BC(0x02), A(0x08), M(0x1BBC, 0x2B) ], [ 0xED, 0xA0 ], 16, [ (PC==0x02),(HL==0x1BBD),(DE==0x2BBD),(BC==0x1),(M[0x2BBC]==0x2B), (F==0x24) ], "LDI (nz, A==0x08)" ],
            [ [ HL(0x1BBC), DE(0x2BBC), BC(0x02), A(0x20), M(0x1BBC, 0x2B) ], [ 0xED, 0xA0 ], 16, [ (PC==0x02),(HL==0x1BBD),(DE==0x2BBD),(BC==0x1),(M[0x2BBC]==0x2B), (F==0x0C) ], "LDI (nz, A==0x20)" ],
            [ [ HL(0x1BBC), DE(0x2BBC), BC(0x01), A(0x00), M(0x1BBC, 0x2B) ], [ 0xED, 0xA0 ], 16, [ (PC==0x02),(HL==0x1BBD),(DE==0x2BBD),(BC==0x0),(M[0x2BBC]==0x2B), (F==0x28) ], "LDI (z, A==0z00)" ],
            ]
        for (pre, instructions, t_cycles, post, name) in tests:
            self.execute_instructions(pre, instructions, t_cycles, post, name)
    def test_ldir(self):
        """LDIR (ED B0): repeating LDI.  While BC != 0 after the copy, PC is
        rewound to re-execute the instruction (21 t-cycles per repeat, PC
        still 0); on the final copy BC reaches 0, P/V is cleared and PC moves
        past the opcode (16 t-cycles).  The last case runs a full 2-byte
        loop to completion."""
        # actions taken first, instructions to execute, t-cycles to run for, expected conditions post, name
        tests = [
            [ [ HL(0x1BBC), DE(0x2BBC), BC(0x2), M(0x1BBC, 0xB), F("V",1) ], [ 0xED, 0xB0 ], 21, [ (PC==0x00), (HL==0x1BBD), (DE==0x2BBD), (BC==0x1), (M[0x2BBC]==0xB), (F["V"]==1) ], "LDIR (count non-zero)" ],
            [ [ HL(0x1BBC), DE(0x2BBC), BC(0x1), M(0x1BBC, 0xB), F("V",1) ], [ 0xED, 0xB0 ], 16, [ (PC==0x02), (HL==0x1BBD), (DE==0x2BBD), (BC==0x0), (M[0x2BBC]==0xB), (F["V"]==0) ], "LDIR (count zero)" ],
            [ [ HL(0x1BBC), DE(0x2BBC), BC(0x2), M(0x1BBC, 0xB), M(0x1BBD, 0xC), F("V",1) ], [ 0xED, 0xB0 ], 37, [ (PC==0x02), (HL==0x1BBE), (DE==0x2BBE), (BC==0x0), (M[0x2BBC]==0xB), (M[0x2BBD]==0xC), (F["V"]==0) ], "LDIR (loop)" ],
            ]
        for (pre, instructions, t_cycles, post, name) in tests:
            self.execute_instructions(pre, instructions, t_cycles, post, name)
def test_ldd(self):
# actions taken first, instructions to execute, t-cycles to run for, expected conditions post, name
tests = [
[ [ HL(0x1BBC), DE(0x2BBC), BC(0x2), M(0x1BBC, 0xB), F("V",1) ], [ 0xED, 0xA8 ], 16, [ (PC==0x02), (HL==0x1BBB), (DE==0x2BBB), (BC==0x1), (M[0x2BBC]==0xB), (F["V"]==1) ], "LDI" ],
[ [ HL(0x1BBC), DE(0x2BBC), BC(0x1), M(0x1BBC, 0xB), F("V",1) ], [ 0xED, 0xA8 ], 16, [ (PC==0x02), (HL==0x1BBB), (DE==0x2BBB), (BC==0x0), (M[0x2BBC]==0xB), (F["V"]==0) ], "LDI" ],
]
for (pre, instructions, t_cycles, post, name) in tests:
self.execute_instructions(pre, instructions, t_cycles, post, name)
def test_lddr(self):
# actions taken first, instructions to execute, t-cycles to run for, expected conditions post, name
tests = [
[ [ HL(0x1BBC), DE(0x2BBC), BC(0x2), M(0x1BBC, 0xB), F("V",1) ], [ 0xED, 0xB8 ], 21, [ (PC==0x00), (HL==0x1BBB), (DE==0x2BBB), (BC==0x1), (M[0x2BBC]==0xB), (F["V"]==1) ], "LDIR (count non-zero)" ],
[ [ HL(0x1BBC), DE(0x2BBC), BC(0x1), M(0x1BBC, 0xB), F("V",1) ], [ 0xED, 0xB8 ], 16, [ (PC==0x02), (HL==0x1BBB), (DE==0x2BBB), (BC==0x0), (M[0x2BBC]==0xB), (F["V"]==0) ], "LDIR (count zero)" ],
[ [ HL(0x1BBD), DE(0x2BBD), BC(0x2), M(0x1BBC, 0xB), M(0x1BBD, 0xC), F("V",1) ], [ 0xED, 0xB8 ], 37, [ (PC==0x02), (HL==0x1BBB), (DE==0x2BBB), (BC==0x0), (M[0x2BBC]==0xB), (M[0x2BBD]==0xC), (F["V"]==0) ], "LDIR (loop)" ],
]
for (pre, instructions, t_cycles, post, name) in tests:
self.execute_instructions(pre, instructions, t_cycles, post, name)
def test_cpi(self):
# actions taken first, instructions to execute, t-cycles to run for, expected conditions post, name
tests = [
[ [ HL(0x1BBC), BC(0x2), M(0x1BBC, 0xFE), A(0x00) ], [ 0xED, 0xA1 ], 16, [ (PC==0x02), (HL==0x1BBD), (BC==0x1), (F==0x06) ], "CPI (ne)" ],
[ [ HL(0x1BBC), BC(0x2), M(0x1BBC, 0xFE), A(0xFE) ], [ 0xED, 0xA1 ], 16, [ (PC==0x02), (HL==0x1BBD), (BC==0x1), (F==0x46) ], "CPI (eq)" ],
[ [ HL(0x1BBC), BC(0x1), M(0x1BBC, 0xFE), A(0x00) ], [ 0xED, | |
"bris",
"koto",
"ursa",
"plop",
"defi",
"tans",
"seis",
"grog",
"mell",
"runt",
"cede",
"teem",
"bonk",
"duos",
"suet",
"lich",
"inky",
"hewn",
"alga",
"vole",
"ands",
"syne",
"puna",
"flan",
"axed",
"jags",
"mumm",
"quip",
"shod",
"dels",
"iffy",
"kiva",
"saki",
"wove",
"bide",
"bevy",
"nori",
"flit",
"wast",
"doms",
"dour",
"kegs",
"nosy",
"soli",
"keir",
"alls",
"dabs",
"posy",
"rube",
"carn",
"erst",
"lehr",
"ghee",
"raws",
"vend",
"bani",
"anta",
"abut",
"eons",
"luff",
"teat",
"dais",
"mocs",
"pith",
"eaux",
"gaff",
"biro",
"mola",
"chug",
"echt",
"laud",
"buts",
"goer",
"peed",
"sulu",
"psst",
"bods",
"dere",
"peal",
"baas",
"udon",
"miri",
"molt",
"coir",
"cors",
"atma",
"kook",
"nobs",
"fons",
"vats",
"trod",
"coff",
"wans",
"ordo",
"nape",
"hols",
"leva",
"weft",
"phon",
"teds",
"lase",
"goos",
"gins",
"reek",
"wets",
"kier",
"kame",
"kens",
"perp",
"pomo",
"lipa",
"weet",
"supe",
"hums",
"vacs",
"bung",
"pika",
"rapt",
"gobo",
"otic",
"leet",
"pees",
"rasp",
"doze",
"lier",
"wonk",
"tats",
"raku",
"shul",
"dint",
"puls",
"fice",
"ells",
"gape",
"fems",
"opes",
"hwan",
"blam",
"dopa",
"gane",
"sone",
"wows",
"dure",
"tali",
"vasa",
"sers",
"moly",
"bole",
"craw",
"tuft",
"toft",
"kudo",
"tams",
"dele",
"flam",
"nibs",
"ails",
"gorp",
"buna",
"soba",
"cued",
"slob",
"jefe",
"ansa",
"glum",
"sabe",
"kayo",
"doge",
"hems",
"kaon",
"egal",
"serf",
"boxy",
"cigs",
"luma",
"woks",
"pois",
"hued",
"tela",
"hoar",
"jiao",
"blat",
"noma",
"jamb",
"rads",
"whys",
"fens",
"cads",
"lave",
"abed",
"chay",
"cees",
"shat",
"dows",
"girt",
"forb",
"ebon",
"fash",
"ruts",
"ulan",
"arvo",
"prow",
"asci",
"suss",
"scad",
"dace",
"wags",
"maim",
"sati",
"spaz",
"fugu",
"fava",
"teel",
"kudu",
"aper",
"tush",
"obis",
"espy",
"rums",
"jeon",
"deke",
"tipi",
"curs",
"zits",
"chon",
"bint",
"caca",
"karn",
"gunk",
"perv",
"vies",
"roti",
"nisi",
"tapa",
"lope",
"errs",
"roup",
"azan",
"faze",
"ajar",
"toph",
"fobs",
"irks",
"taus",
"nosh",
"diol",
"boor",
"bigs",
"byes",
"alts",
"rias",
"kora",
"gids",
"agha",
"ires",
"lout",
"tref",
"wark",
"gude",
"pixy",
"sall",
"sika",
"mete",
"lief",
"wads",
"swop",
"togs",
"flic",
"pams",
"tels",
"anis",
"jibe",
"chis",
"eger",
"goby",
"coni",
"haws",
"weel",
"gobs",
"raze",
"hasp",
"wain",
"ayah",
"lins",
"paik",
"durr",
"lari",
"biog",
"pyre",
"rais",
"illy",
"fado",
"apse",
"alef",
"cods",
"souk",
"dors",
"merk",
"tope",
"pean",
"lorn",
"nala",
"nock",
"tows",
"dewy",
"agin",
"weal",
"koan",
"flir",
"puce",
"peon",
"epee",
"stet",
"tods",
"tars",
"amyl",
"wuss",
"bort",
"pili",
"teed",
"leys",
"mabe",
"yobs",
"dibs",
"delt",
"emus",
"tiro",
"kaka",
"yurt",
"caws",
"kolo",
"lade",
"bubu",
"wend",
"mors",
"rhus",
"daps",
"burd",
"wiry",
"lory",
"birk",
"snog",
"tuts",
"exes",
"olio",
"tule",
"glia",
"putz",
"kune",
"sunn",
"dray",
"wiss",
"haft",
"gits",
"pone",
"hade",
"iglu",
"fogs",
"sook",
"fano",
"oleo",
"mumu",
"migs",
"paca",
"kaki",
"haku",
"oles",
"hame",
"kist",
"daws",
"axil",
"mosk",
"naan",
"yoni",
"luvs",
"weka",
"gook",
"loth",
"trow",
"tyee",
"eave",
"sibs",
"blab",
"gyre",
"stoa",
"kuru",
"grat",
"frit",
"fane",
"anes",
"achy",
"arum",
"pase",
"abri",
"cays",
"dyad",
"sags",
"lath",
"woos",
"mure",
"fuze",
"gild",
"amia",
"titi",
"kaas",
"wino",
"bine",
"wyle",
"gadi",
"lank",
"olla",
"cere",
"agon",
"apos",
"skat",
"aced",
"whir",
"moxa",
"juba",
"merl",
"burs",
"plod",
"aahs",
"aals",
"abas",
"abos",
"abye",
"abys",
"adit",
"ados",
"adze",
"aery",
"agas",
"agio",
"agly",
"agma",
"agog",
"ague",
"ahed",
"ahis",
"ains",
"airn",
"airt",
"aits",
"ajee",
"akee",
"alae",
"alar",
"albs",
"alee",
"alif",
"alit",
"alky",
"alme",
"alow",
"amah",
"amas",
"ambo",
"amus",
"anga",
"anoa",
"aped",
"apod",
"arak",
"arbs",
"arfs",
"aril",
"arks",
"asea",
"ashy",
"atap",
"auks",
"avos",
"avow",
"awee",
"awes",
"awls",
"awns",
"awny",
"axal",
"ayin",
"azon",
"bads",
"baff",
"bals",
"bams",
"baps",
"bawd",
"bawl",
"bedu",
"bels",
"bema",
"beys",
"bhut",
"bilk",
"bima",
"birl",
"bise",
"bisk",
"bitt",
"bize",
"blae",
"blaw",
"bleb",
"blet",
"blin",
"blub",
"boff",
"bogy",
"bops",
"bosk",
"bota",
"boyo",
"braw",
"broo",
"brrr",
"brux",
"bubo",
"bubs",
"buhr",
"bumf",
"bura",
"burb",
"busk",
"byre",
"byrl",
"cadi",
"caff",
"cagy",
"caid",
"caky",
"calk",
"calo",
"calx",
"caph",
"cark",
"caul",
"cavy",
"ceca",
"cepe",
"ceps",
"cero",
"cete",
"chaw",
"chid",
"cion",
"cire",
"cist",
"clag",
"clew",
"clod",
"clon",
"clop",
"cloy",
"cobs",
"coft",
"coif",
"coky",
"coly",
"conk",
"cony",
"coof",
"corf",
"corm",
"coss",
"cowy",
"coxa",
"coys",
"crus",
"cuds",
"cuif",
"cuke",
"culm",
"curf",
"curn",
"cusk",
"cwms",
"cyma",
"cyme",
"daff",
"dago",
"dags",
"dahs",
"daks",
"dals",
"darb",
"daub",
"daut",
"dawk",
"dawt",
"deil",
"delf",
"deme",
"demy",
"dews",
"dexy",
"deys",
"dhak",
"dhal",
"dhow",
"didy",
"diel",
"difs",
"dins",
"dipt",
"dirl",
"dite",
"dits",
"ditz",
"djin",
"doat",
"doby",
"doff",
"dogy",
"dols",
"dolt",
"dopy",
"dorp",
"dote",
"doum",
"doux",
"doxy",
"dozy",
"drat",
"dreg",
"drek",
"drib",
"drub",
"drys",
"duad",
"duci",
"dugs",
"duit",
"dunt",
"dups",
"durn",
"eath",
"ebbs",
"eche",
"ecus",
"eddo",
"edhs",
"eely",
"eery",
"effs",
"efts",
"egad",
"eggy",
"egis",
"eked",
"ekes",
"elds",
"elhi",
"elmy",
"emes",
"emeu",
"emic",
"emyd",
"engs",
"enol",
"enow",
"enuf",
"epha",
"ergs",
"erne",
"erns",
"eses",
"esne",
"etas",
"eths",
"etic",
"etui",
"ewer",
"exed",
"eyas",
"eyen",
"eyer",
"eyne",
"eyra",
"eyry",
"falx",
"fard",
"farl",
"faun",
"fays",
"feal",
"feck",
"feeb",
"fehs",
"feme",
"feod",
"fets",
"feus",
"fiar",
"fibs",
"fids",
"fief",
"firn",
"fisc",
"fixt",
"flab",
"fley",
"floc",
"floe",
"flub",
"flus",
"fogy",
"fohn",
"foin",
"fops",
"foys",
"fozy",
"frae",
"frap",
"frig",
"friz",
"froe",
"frow",
"frug",
"fubs",
"fuci",
"fuds",
"fugs",
"fumy",
"funs",
"futz",
"fyce",
"fyke",
"gabs",
"gads",
"gaed",
"gaen",
"gaes",
"gamb",
"gamp",
"gamy",
"gapy",
"gars",
"gaud",
"gaum",
"gaun",
"gaur",
"gawp",
"geck",
"geds",
"geed",
"gelt",
"genu",
"geta",
"geum",
"ghat",
"ghis",
"gibe",
"gibs",
"gied",
"gien",
"gink",
"gips",
"gird",
"girn",
"gled",
"gleg",
"gley",
"glim",
"glom",
"glop",
"gnar",
"gnaw",
"goas",
"goop",
"gorm",
"gowd",
"gowk",
"goys",
"grig",
"grot",
"grue",
"grum",
"guck",
"guff",
"guls",
"guvs",
"gybe",
"gyps",
"gyri",
"gyve",
"haaf",
"habu",
"hadj",
"haed",
"haem",
"haen",
"haes",
"haet",
"hags",
"hahs",
"haik",
"halm",
"hant",
"harl",
"hebe",
"hehs",
"hent",
"herl",
"hern",
"hest",
"heth",
"hets",
"hews",
"hied",
"hies",
"hila",
"hili",
"hims",
"hins",
"hisn",
"hods",
"hoed",
"hoer",
"holk",
"holp",
"homy",
"howf",
"howk",
"hoys",
"huic",
"hunh",
"hyla",
"hyps",
"hyte",
"iamb",
"ichs",
"idyl",
"iggs",
"ikat",
"ilea",
"ilka",
"ilks",
"imid",
"immy",
"impi",
"inby",
"inia",
"inly",
"inro",
"ired",
"irid",
"isba",
"iwis",
"ixia",
"izar",
"jagg",
"jape",
"jarl",
"jato",
"jauk",
"jaup",
"jeed",
"jeer",
"jees",
"jehu",
"jete",
"jibb",
"jibs",
"jiff",
"jilt",
"jimp",
"jink",
"jins",
"jism",
"jivy",
"jogs",
"joky",
"jole",
"jouk",
"jowl",
"jows",
"jube",
"juco",
"juga",
"juku",
"jupe",
"juts",
"kabs",
"kadi",
"kaes",
"kafs",
"kagu",
"kaif",
"kail",
"kapa",
"kaph",
"kbar",
"keas",
"keef",
"keek",
"keet",
"kefs",
"kelt",
"kepi",
"keps",
"kerf",
"khaf",
"khat",
"khet",
"khis",
"kibe",
"kief",
"kifs",
"kike",
"kins",
"kips",
"kirn",
"kirs",
"kith",
"knap",
"knar",
"knop",
"knur",
"koas",
"kobo",
"kobs",
"koel",
"kois",
"konk",
"koph",
"kops",
"kues",
"kufi",
"kvas",
"kyak",
"kyar",
"kyes",
"kyte",
"laic",
"laky",
"lall",
"lams",
"lati",
"latu",
"lavs",
"laze",
"leke",
"leks",
"leku",
"lept",
"leud",
"levo",
"lilt",
"limn",
"limy",
"liny",
"lipe",
"liri",
"lits",
"litu",
"lobs",
"logy",
"loid",
"loll",
"loof",
"lops",
"lota",
"lour",
"lown",
"lude",
"lues",
"lums",
"lunk",
"luny",
"lwei",
"lych",
"lyes",
"lyse",
"malm",
"maut",
"mawn",
"maws",
"mazy",
"meed",
"mels",
"meou",
"mewl",
"meze",
"mhos",
"miff",
"migg",
"mirk",
"mirs",
"miry",
"mity",
"mixt",
"moas",
"mogs",
"moil",
"moke",
"mols",
"mome",
"momi",
"mool",
"mope",
"mopy",
"moue",
"mown",
"mows",
"mozo",
"mugg",
"mump",
"muns",
"murk",
"murr",
"muts",
"mycs",
"myna",
"nabe",
"naff",
"naif",
"nans",
"naoi",
"naos",
"nard",
"nark",
"neap",
"nebs",
"neep",
"negs",
"neif",
"neuk",
"neum",
"nevi",
"nide",
"nidi",
"nill",
"nipa",
"nixe",
"nixy",
"nodi",
"nogg",
"nogs",
"noil",
"nows",
"nowt",
"nubs",
"nurd",
"nurl",
"oafs",
"oaky",
"oast",
"obas",
"obes",
"obia",
"obol",
"ocas",
"odah",
"odas",
"odea",
"odic",
"odyl",
"ofay",
"ogam",
"ogee",
"ohed",
"ohia",
"okas",
"okeh",
"okes",
"oldy",
"olea",
"onos",
"oohs",
"oots",
"oozy",
"opah",
"orad",
"orby",
"orle",
"orra",
"orts",
"orzo",
"osar",
"ossa",
"ouds",
"ouph",
"ouzo",
"owse",
"oxes",
"oxid",
"oxim",
"oyer",
"oyes",
"pacy",
"palp",
"paly",
"paps",
"pash",
"paty",
"pawl",
"peag",
"pech",
"peen",
"pehs",
"pein",
"peke",
"pelf",
"pepo",
"pfft",
"pfui",
"phis",
"phiz",
"phut",
"pial",
"pian",
"pice",
"piki",
"pily",
"piny",
"pipy",
"pirn",
"pish",
"pleb",
"plew",
"plie",
"pock",
"pogy",
"poky",
"pome",
"pood",
"poos",
"pouf",
"poxy",
"prao",
"prau",
"pree",
"prex",
"prig",
"proa",
"psis",
"ptui",
"puds",
"pule",
"puli",
"pung",
"pupu",
"purs",
"pyas",
"pyes",
"pyic",
"pyin",
"qadi",
"qaid",
"qats",
"qoph",
"quag",
"quey",
"ragg",
"ragi",
"raki",
"rale",
"rase",
"rato",
"reck",
"reft",
"rems",
"repp",
"resh",
"rets",
"rhos",
"rids",
"rifs",
"rile",
"rill",
"rimy",
"rins",
"rocs",
"roes",
"roil",
"ropy",
"rotl",
"roue",
"rued",
"ruer",
"rues",
"ruga",
"ruly",
"ryas",
"ryes",
"ryke",
"rynd",
"ryot",
"sabs",
"sadi",
"sagy",
"salp",
"sals",
"sard",
"scag",
"scow",
"scry",
"scup",
"scut",
"seel",
"sego",
"segs",
"seif",
"sels",
"seme",
"sene",
"sere",
"sews",
"sext",
"shmo",
"shog",
"shwa",
"sial",
"sibb",
"sice",
"sidh",
"sike",
"sild",
"sipe",
"sizy",
"skag",
"skas",
"skee",
"skeg",
"skep",
"skua",
"sloe",
"slub",
"slue",
"smew",
"snaw",
"sned",
"snib",
"snit",
"snye",
"soja",
"soke",
"soms",
"sorb",
"sord",
"sori",
"sorn",
"soth",
"sots",
"soys",
"spae",
"spic",
"spik",
"spiv",
"spue",
"sris",
"staw",
"stey",
"stob",
"stot",
"stum",
"stye",
"suba",
"sudd",
"suer",
"sugh",
"suks",
"sulk",
"sups",
"suqs",
"surd",
"swob",
"swum",
"sybo",
"syce",
"syke",
"syli",
"syph",
"tace",
"tads",
"tael",
"tahr",
"tamp",
"tavs",
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap, QImage, QPainter, QColor, QPen, QPalette, QFont
from PyQt5.QtWidgets import QLabel, QWidget, QApplication
from ..infer.inference_wrapper_single_line import InferenceWrapperSingleLine
from ..infer.inference_wrapper_two_lines import InferenceWrapperTwoLines
from ..visualizer.visualizer_image_processing import process_images
# Program which demonstrates the effectiveness or ineffectiveness of a lane detection model
# by displaying an image and highlighting the areas in which it predicts there are road lines
# Created by brendon-ai, September 2017
# Font used in varying sizes throughout the user interface
UI_FONT_NAME = 'Source Sans Pro'
# Scaling factor for the input image, which defines the size of the window
SCALING_FACTOR = 3
# The distance from the center to the edge of the green squares placed along the road line
MARKER_RADIUS = 2
# The opacity of the heat map displayed on the two bottom images
HEAT_MAP_OPACITY = 0.7
# Height of the line graph section of the UI
LINE_GRAPH_HEIGHT = 300
# Height of the border section above and below the guide lines on the line graph
LINE_GRAPH_BORDER_HEIGHT = 20
# Size (width and height) and font size of the labels on the horizontal edge of the line graph
LINE_GRAPH_LABEL_SIZE = 40
LINE_GRAPH_LABELS_FONT_SIZE = 12
# Height and font size of the legend labels below the line graph
LINE_GRAPH_LEGEND_HEIGHT = 30
LINE_GRAPH_LEGEND_FONT_SIZE = 12
# The absolute value of the unscaled number at which the
# positive and negative line graph guide lines are drawn
LINE_GRAPH_GUIDE_LINE_ABSOLUTE_VALUE = 1
# The height, text contents, and font of the labels
# that identify the heat maps in the user interface
HEAT_MAP_LABELS_HEIGHT = 50
HEAT_MAP_LABELS_TEXT = ['Left line heat map', 'Right line heat map']
HEAT_MAP_LABELS_FONT_SIZE = 16
# Labels for each of the elements of an image data tuple
IMAGE_DATA_LABELS = ['File name', 'Steering angle']
# The descriptions, multipliers, and corresponding colors
# of the lines that will be drawn on the line graph
LINE_DATA = [('Steering angle', 5, 'yellow', 'fifths of motor rotations'),
('Proportional error', 0.01, 'cyan', 'hundreds of pixels'),
('Derivative error', 1, 'magenta', 'slope of line')]
# The value of the light gray filler color, from 0 to 255
LIGHT_GRAY_COLOR = 192
# Main PyQt5 QWidget class
class Visualizer(QWidget):
# List of NumPy images
image_list = None
# List of image file names, steering angles, and errors
image_data = []
# The image we are currently on
image_index = 0
# The label that displays the current image
image_box = None
# List of lists of points to be drawn on the line graph
# One line for every color in the list of colors
line_point_lists = [[] for _ in range(len(LINE_DATA))]
# Vertical center of the line graph
line_graph_center = None
# Multiplier to convert a steering angle into pixels from the vertical center of the line graph
line_graph_multiplier = None
# The right bound of the line graph
line_graph_right_bound = None
# Fonts for the line graph and heat map labels
heat_map_labels_font = None
line_graph_labels_font = None
line_graph_legend_font = None
# Call various initialization functions
def __init__(self, ):
    """Validate command line arguments, run inference over every image in the
    given folder, and build the Qt visualizer window.

    Expected sys.argv layout (5 entries including the program name):
    <in simulation (y or n)> <image folder> <right line trained model>
    <left line model (outside simulation) OR stop sign model (in simulation)>.
    Exits via sys.exit() when the argument count is wrong.
    """
    # Call the superclass initializer
    super(Visualizer, self).__init__()
    # Check that the number of command line arguments is correct
    if len(sys.argv) != 5:
        print(
            'Usage:',
            sys.argv[0],
            '<in simulation (y or n)>',
            '<image folder>',
            '<right line trained model>',
            '<left line trained model (if not in simulation)>',
            '<stop sign detection model (if in simulation)'
        )
        sys.exit()
    # Check if we are using images from the simulation or not
    in_simulation = sys.argv[1].lower() == 'y'
    # Create one inference wrapper if we are in the simulation, or two otherwise
    # NOTE(review): -45 and 1.15 look like steering calibration constants for the
    # single-line wrapper — confirm their meaning in InferenceWrapperSingleLine
    inference_and_steering_wrapper = \
        InferenceWrapperSingleLine(sys.argv[3], -45, 1.15, True) if in_simulation \
        else InferenceWrapperTwoLines([sys.argv[4], sys.argv[3]])
    # Pass on the path of the stop sign detection model if we are in the simulation
    stop_sign_model_path = sys.argv[4] if in_simulation else None
    # Load and perform inference on the images
    image_folder = os.path.expanduser(sys.argv[2])
    self.image_list, self.image_data = process_images(
        image_folder=image_folder,
        inference_and_steering_wrapper=inference_and_steering_wrapper,
        marker_radius=MARKER_RADIUS,
        heat_map_opacity=HEAT_MAP_OPACITY,
        light_gray_color=LIGHT_GRAY_COLOR,
        stop_sign_model_path=stop_sign_model_path
    )
    # Set the global image height and width variables
    # (assumes image_list is non-empty and images are NumPy arrays — TODO confirm)
    image_height, image_width = self.image_list[0].shape[:2]
    # Calculate the height of one vertical half of the line graph ignoring the border
    half_graph_height_minus_border = (
        LINE_GRAPH_HEIGHT / 2) - LINE_GRAPH_BORDER_HEIGHT
    # Use that, divided by the predefined guide line steering angle, to calculate the line graph multiplier
    self.line_graph_multiplier = int(half_graph_height_minus_border /
                                     LINE_GRAPH_GUIDE_LINE_ABSOLUTE_VALUE)
    # Generate the fonts for the line graph and heat map
    self.heat_map_labels_font, self.line_graph_labels_font, self.line_graph_legend_font = [
        QFont(UI_FONT_NAME, font_size)
        for font_size in [
            HEAT_MAP_LABELS_FONT_SIZE, LINE_GRAPH_LABELS_FONT_SIZE,
            LINE_GRAPH_LEGEND_FONT_SIZE
        ]
    ]
    # Set up the UI
    self.init_ui(image_height, image_width)
# Initialize the user interface
def init_ui(self, image_height, image_width):
    """Lay out the fixed-size window: image box on top, heat map labels,
    line graph area, then the legend; finally show the window and display
    the first image.

    Args:
        image_height: height in pixels of one source image.
        image_width: width in pixels of one source image.
    """
    # The size of the image box is that of the original image times SCALING_FACTOR
    image_box_width = image_width * SCALING_FACTOR
    image_box_height = image_height * SCALING_FACTOR
    # Calculate the center of the line graph using the height of the image
    # plus the height of the line graph label as an upper limit of the graph
    self.line_graph_center = image_box_height + \
        HEAT_MAP_LABELS_HEIGHT + (LINE_GRAPH_HEIGHT // 2)
    # To calculate the window size, use the image box size plus the predefined height that will
    # be occupied by the line graph, corresponding legend, and the label below the heat maps
    window_width = image_box_width
    window_height = image_box_height + HEAT_MAP_LABELS_HEIGHT + \
        LINE_GRAPH_HEIGHT + LINE_GRAPH_LEGEND_HEIGHT
    # Set the size, position, title, and color scheme of the window
    self.setFixedSize(window_width, window_height)
    self.move(0, 0)
    self.setWindowTitle('Autonomous Driving System Visualizer')
    # Calculate the right bound of the line graph, by offsetting it a certain amount from the right edge
    self.line_graph_right_bound = self.width() - LINE_GRAPH_LABEL_SIZE
    # Use black text on a light gray background
    # (note: this palette applies to the whole window)
    palette = QPalette()
    palette.setColor(QPalette.Foreground, Qt.black)
    palette.setColor(QPalette.Background, Qt.lightGray)
    self.setPalette(palette)
    # Initialize the image box that holds the video frames
    self.image_box = QLabel(self)
    self.image_box.setAlignment(Qt.AlignCenter)
    self.image_box.setFixedSize(image_box_width, image_box_height)
    self.image_box.move(0, 0)
    # Create labels below the image box that identify the two heat maps
    self.create_heat_map_labels(image_box_width, image_box_height)
    # Create numerical labels next to the line graph
    self.create_line_graph_labels()
    # Create a legend below the line graph
    self.create_line_graph_legend(image_box_width, image_box_height)
    # Make the window exist
    self.show()
    # Display the initial image
    self.update_display(1)
# Create labels below the heat maps in the image box that identify their function
def create_heat_map_labels(self, image_box_width, image_box_height):
    """Create the captions under the image box that identify the heat maps.

    Args:
        image_box_width: width of the main image box in pixels; each label
            takes half of it.
        image_box_height: height of the main image box; since the box sits
            at Y position zero, this is also the labels' Y position.
    """
    # The label width is loop-invariant, so compute it once instead of on
    # every iteration as the original did
    label_width = int(round(image_box_width / 2))
    y_position = image_box_height
    # One label per caption text, placed side by side left to right
    for i, label_text in enumerate(HEAT_MAP_LABELS_TEXT):
        # Get the X position by multiplying the width by the index
        x_position = label_width * i
        # Create and format the label
        heat_map_label = QLabel(self)
        heat_map_label.setFont(self.heat_map_labels_font)
        heat_map_label.setAlignment(Qt.AlignCenter)
        heat_map_label.move(x_position, y_position)
        heat_map_label.setFixedSize(label_width, HEAT_MAP_LABELS_HEIGHT)
        heat_map_label.setText(label_text)
# Create labels on the line graph for the steering angles at which guide lines are drawn
def create_line_graph_labels(self):
    """Place a numeric label beside the line graph for each of the three
    guide-line levels (negative guide value, zero, positive guide value)."""
    guide_values = (-LINE_GRAPH_GUIDE_LINE_ABSOLUTE_VALUE, 0,
                    LINE_GRAPH_GUIDE_LINE_ABSOLUTE_VALUE)
    half_label = LINE_GRAPH_LABEL_SIZE // 2
    for guide_value in guide_values:
        # Vertical center of the label on the graph for this value
        center_y = self.get_line_graph_y_position(guide_value)
        # move() positions the top left corner, so shift up by half the label size
        top_y = center_y - half_label
        # Build and format the label widget
        graph_label = QLabel(self)
        graph_label.setFont(self.line_graph_labels_font)
        graph_label.move(self.line_graph_right_bound, top_y)
        graph_label.setAlignment(Qt.AlignCenter)
        graph_label.setFixedSize(LINE_GRAPH_LABEL_SIZE, LINE_GRAPH_LABEL_SIZE)
        graph_label.setText(str(guide_value))
# Create a legend below the line graph describing the various lines
def create_line_graph_legend(self, image_box_width, image_box_height):
# Create and configure a label extending to the left, right, and bottom edges of the screen,
# and with its top edge aligned with the bottom of the bar graph, including the border area
line_graph_legend = QLabel(self)
line_graph_legend.setAlignment(Qt.AlignCenter)
line_graph_legend.setFont(self.line_graph_legend_font)
line_graph_legend.setFixedSize(image_box_width,
LINE_GRAPH_LEGEND_HEIGHT)
legend_top_edge = image_box_height + HEAT_MAP_LABELS_HEIGHT + LINE_GRAPH_HEIGHT
line_graph_legend.move(0, legend_top_edge)
# Create a description string that describes all of the lines
line_descriptions = ''
# Iterate over the configuration data for each of the lines
for line_name, _, color_name, unit_description in LINE_DATA:
# Format a string that describes the line, using inline color notation
line_description = '<font color=\'' + color_name + \
'\'>■</font>: ' + line_name | |
# Repository: benkiel/batchRoboFontExtension
import os
import tempfile
import shutil
from AppKit import *
import string
import re
import time
from vanilla import *
from defconAppKit.windows.baseWindow import BaseWindowController
from fontTools.ttLib import TTFont
from fontTools.pens.boundsPen import BoundsPen
from compositor import Font as CompositorFont
from lib.settings import shouldAddPointsInSplineConversionLibKey
from lib.scripting.codeEditor import CodeEditor
from mojo.roboFont import RFont
from mojo.extensions import getExtensionDefault, setExtensionDefault
from mojo.compile import autohint as OTFAutohint
from .woffBuilder import WOFFBuilder
from .woff2Builder import WOFF2Builder
from .eotBuilder import EOTBuilder
from .svgBuilder import SVGBuilder
from .autohint import TTFAutohint, defaultOptions, TTFAutoHintGroup
from .htmlWriter import HTMLWriter, CSSWriter
from batchTools import Report, settingsIdentifier, buildTree
# SVG generation is only available when the optional ufo2svg package is
# installed; catch only ImportError so real bugs inside ufo2svg still surface
# (the original bare "except:" silently swallowed everything).
hasUfo2svg = True
try:
    import ufo2svg
except ImportError:
    hasUfo2svg = False
def getFontBounds(font):
    """Return the union bounding box (xMin, yMin, xMax, yMax) of all glyph
    outlines in *font* (a fontTools TTFont)."""
    glyph_set = font.getGlyphSet()
    bounds_pen = BoundsPen(glyph_set)
    # Drawing every glyph through one pen accumulates the combined bounds
    for glyph_name in glyph_set.keys():
        glyph_set[glyph_name].draw(bounds_pen)
    return bounds_pen.bounds
def fixMetrics(font):
    """Rewrite the OS/2 win metrics and hhea ascent/descent of *font* so
    they match the actual outline bounds."""
    bounds = getFontBounds(font)
    min_y, max_y = bounds[1], bounds[3]
    os2_table = font["OS/2"]
    os2_table.usWinDescent = abs(min_y)
    os2_table.usWinAscent = abs(max_y)
    hhea_table = font["hhea"]
    hhea_table.descent = min_y
    hhea_table.ascent = max_y
# font.info attributes preserved when stripping a font for binary round-trips;
# every other info attribute is cleared before generating (see convertToTTF/convertToOTF)
defaultFontInfoAttributes = ["familyName", "styleName", "descender", "xHeight", "ascender", "capHeight", "unitsPerEm"]
def convertToTTF(otfPath, dest, report):
    """Convert a binary OTF at *otfPath* into an autohinted TTF at *dest*.

    The OTF is round-tripped through RoboFont to get quadratic outlines,
    layout/metadata tables are copied back from the original binary, the
    metrics are fixed to the real outline bounds, and ttfautohint is run.
    Progress and tool output are written to *report*.
    """
    # mkstemp returns an OPEN file descriptor; the original discarded it
    # ([1]) which leaks a descriptor per call — close it explicitly.
    fd, temp = tempfile.mkstemp(suffix=".ttf")
    os.close(fd)
    fd, tempDest = tempfile.mkstemp(suffix=".ttf")
    os.close(fd)
    font = RFont(otfPath, document=False, showInterface=False)
    font.lib[shouldAddPointsInSplineConversionLibKey] = 1
    # Kerning and most font info are cleared here because the authoritative
    # data is copied over from the binary's own tables below
    font.kerning.clear()
    for attr in font.info.asDict().keys():
        if attr not in defaultFontInfoAttributes:
            setattr(font.info, attr, None)
    result = font.generate(path=temp, format="ttf", decompose=False, checkOutlines=False, autohint=False, releaseMode=True, glyphOrder=font.glyphOrder)
    if not font.hasInterface():
        font.close()
    report.write(result)
    sourceFont = TTFont(temp)
    sourceFontWithTables = TTFont(otfPath)
    # Carry over layout/naming tables from the original binary when present
    for table in ["loca", "OS/2", "cmap", "name", "GSUB", "GPOS", "GDEF", "kern"]:
        if table in sourceFontWithTables:
            sourceFont[table] = sourceFontWithTables[table]
    fixMetrics(sourceFont)
    sourceFont.save(tempDest)
    sourceFont.close()
    del sourceFont
    sourceFontWithTables.close()
    del sourceFontWithTables
    autohintOptions = getExtensionDefault(settingsIdentifier, defaultOptions)
    result = TTFAutohint(tempDest, dest, autohintOptions)
    report.writeItems(result)
    os.remove(temp)
    os.remove(tempDest)
def convertToOTF(ttfPath, dest, report):
    """Convert a binary TTF at *ttfPath* into an autohinted OTF at *dest*.

    Mirrors convertToTTF: round-trip through RoboFont, copy layout/metadata
    tables back from the original binary, then run the OTF autohinter.
    """
    # mkstemp returns an OPEN file descriptor; the original discarded it
    # ([1]) which leaks a descriptor per call — close it explicitly.
    fd, temp = tempfile.mkstemp(suffix=".otf")
    os.close(fd)
    font = RFont(ttfPath, document=False, showInterface=False)
    # Kerning and most font info are cleared; the authoritative data is
    # copied over from the binary's own tables below
    font.kerning.clear()
    for attr in font.info.asDict().keys():
        if attr not in defaultFontInfoAttributes:
            setattr(font.info, attr, None)
    result = font.generate(path=temp, format="otf", decompose=False, checkOutlines=False, autohint=False, releaseMode=True, glyphOrder=font.glyphOrder)
    if not font.hasInterface():
        font.close()
    report.write(result)
    sourceFont = TTFont(temp)
    sourceFontWithTables = TTFont(ttfPath)
    # Carry over layout/naming tables from the original binary when present
    for table in ["loca", "OS/2", "cmap", "name", "GSUB", "GPOS", "GDEF", "kern"]:
        if table in sourceFontWithTables:
            sourceFont[table] = sourceFontWithTables[table]
    sourceFont.save(dest)
    result = OTFAutohint(dest)
    report.writeItems(result)
    os.remove(temp)
def generateTTF(ufoPath, dest, report):
    """Generate a TTF from the UFO (or other importable source) at *ufoPath*,
    run ttfautohint on it, and write the hinted result to *dest*."""
    # mkstemp returns an OPEN file descriptor; the original discarded it
    # ([1]) which leaks a descriptor per call — close it explicitly.
    fd, tempDest = tempfile.mkstemp(suffix=".ttf")
    os.close(fd)
    font = RFont(ufoPath, document=False, showInterface=False)
    # presumably tells the cubic->quadratic conversion to insert extra
    # points — confirm against RoboFont's lib key documentation
    font.lib[shouldAddPointsInSplineConversionLibKey] = 1
    result = font.generate(path=tempDest, format="ttf", decompose=False, checkOutlines=True, autohint=False, releaseMode=True, glyphOrder=font.glyphOrder)
    if not font.hasInterface():
        font.close()
    report.write(result)
    autohintOptions = getExtensionDefault(settingsIdentifier, defaultOptions)
    result = TTFAutohint(tempDest, dest, autohintOptions)
    report.writeItems(result)
    os.remove(tempDest)
def generateOTF(ufoPath, dest, report):
    """Generate an autohinted OTF at *dest* from the UFO (or other
    importable source) at *ufoPath*, logging tool output to *report*."""
    source = RFont(ufoPath, document=False, showInterface=False)
    outcome = source.generate(path=dest, format="otf", decompose=False, checkOutlines=True, autohint=False, releaseMode=True, glyphOrder=source.glyphOrder)
    if not source.hasInterface():
        source.close()
    report.write(outcome)
    # Autohint the generated binary in place and log the hinter's messages
    hint_result = OTFAutohint(dest)
    report.writeItems(hint_result)
def convertToWoff(ttfPath, dest):
    # Thin wrapper: delegate WOFF packaging of the binary at ttfPath to WOFFBuilder.
    WOFFBuilder(ttfPath, dest)
def convertToWoff2(ttfPath, dest):
    # Thin wrapper: delegate WOFF2 packaging of the binary at ttfPath to WOFF2Builder.
    WOFF2Builder(ttfPath, dest)
def convertToEot(ttfPath, dest):
    # Thin wrapper: delegate EOT packaging of the binary at ttfPath to EOTBuilder.
    EOTBuilder(ttfPath, dest)
def convertToSVG(ttfPath, dest):
    # Thin wrapper: delegate SVG font generation to SVGBuilder (requires ufo2svg).
    SVGBuilder(ttfPath, dest)
# Default sample text shown in the HTML preview: a-z, A-Z, 0-9
htmlPreviewDefault = string.ascii_letters + string.digits
class TTHAutoHintSettings(BaseWindowController):
    """Settings sheet with two tabs: ttfautohint options and the HTML/CSS
    sources used for the generated web preview page."""

    # Extension-defaults key under which the autohint settings dict is stored
    identifier = "%s.%s" % (settingsIdentifier, "autohintSettings")

    def __init__(self, parentWindow):
        """Build the sheet attached to *parentWindow*, pre-filled from the
        stored extension defaults, and open it."""
        data = getExtensionDefault(self.identifier, dict())
        self.w = Sheet((470, 580), parentWindow=parentWindow)
        self.w.tabs = Tabs((10, 10, -10, -40), ["TTF AutoHint", "HTML Preview"])
        self.w.tabs[0].settings = self.settings = TTFAutoHintGroup((0, 0, -0, -0))
        self.settings.set(data)
        y = 10
        self.w.tabs[1].htmlText = TextBox((10, y, 100, 22), "HTML preview:")
        y += 30
        self.w.tabs[1].html = self.html = CodeEditor((10, y, -10, 250), getExtensionDefault("%s.htmlPreview" % settingsIdentifier, htmlPreviewDefault), lexer="html")
        self.html.showLineNumbers(False)
        y += 260
        self.w.tabs[1].globalCssText = TextBox((10, y, 100, 22), "CSS Style:")
        y += 30
        self.w.tabs[1].globalCss = self.globalCss = CodeEditor((10, y, -10, -10), getExtensionDefault("%s.globalCSSPreview" % settingsIdentifier, ""), lexer="css")
        self.globalCss.showLineNumbers(False)
        self.w.saveButton = Button((-100, -30, -10, 20), "Save settings", callback=self.saveCallback, sizeStyle="small")
        self.w.setDefaultButton(self.w.saveButton)
        self.w.closeButton = Button((-190, -30, -110, 20), "Cancel", callback=self.closeCallback, sizeStyle="small")
        # Bind both cmd-. and the escape key to the cancel button
        self.w.closeButton.bind(".", ["command"])
        self.w.closeButton.bind(chr(27), [])
        self.w.resetButton = Button((-280, -30, -200, 20), "Reset", callback=self.resetCallback, sizeStyle="small")
        self.w.open()

    def resetCallback(self, sender):
        """Restore factory defaults for whichever tab is currently visible."""
        if self.w.tabs.get() == 0:
            self.settings.set(None)
        else:
            setExtensionDefault("%s.htmlPreview" % settingsIdentifier, htmlPreviewDefault)
            self.html.set(htmlPreviewDefault)
            setExtensionDefault("%s.globalCSSPreview" % settingsIdentifier, "")
            self.globalCss.set("")

    def saveCallback(self, sender):
        """Persist all tab values to the extension defaults, then close."""
        data = self.settings.get()
        setExtensionDefault(self.identifier, data)
        setExtensionDefault("%s.htmlPreview" % settingsIdentifier, self.html.get())
        setExtensionDefault("%s.globalCSSPreview" % settingsIdentifier, self.globalCss.get())
        self.closeCallback(sender)

    def closeCallback(self, sender):
        """Dismiss the sheet without further changes."""
        self.w.close()
# Matches a "%" that is NOT part of the %(familyName)s / %(styleName)s placeholders
_percentageRe = re.compile(r"%(?!\((familyName|styleName)\)s)")
class BatchRadioGroup(RadioGroup):
    """RadioGroup variant whose initial selection can be given at construction."""

    def __init__(self, posSize, titles, value=0, isVertical=True, callback=None, sizeStyle='regular'):
        # vanilla's RadioGroup takes no initial value; forward the rest of the
        # arguments and apply the selection afterwards
        super(BatchRadioGroup, self).__init__(posSize, titles, isVertical=isVertical, callback=callback, sizeStyle=sizeStyle)
        self.set(value)
# Indices into the OTF/TTF radio group selecting the woff/woff2 source format
WOFF_OTF_FORMAT = 0
WOFF_TTF_FORMAT = 1
class WebFormats(Group):
webSettings = ["Save OTF", "Save TTF", "Save Woff", "Save Woff2", "Save EOT", "Save SVG"]
def __init__(self, posSize, controller):
    """Build the web-formats pane: a checkbox per output format, OTF/TTF
    source radios for woff/woff2, the hinting/HTML options, the suffix
    field, and the Generate button. Values are pre-filled from and saved
    back to the extension defaults.
    """
    super(WebFormats, self).__init__(posSize)
    self.controller = controller
    y = 10
    for setting in self.webSettings:
        # "Save OTF" -> "save_otf": attribute name and defaults key suffix
        key = setting.replace(" ", "_").lower()
        checkBox = CheckBox((10, y, -10, 22), setting,
            value=getExtensionDefault("%s.%s" % (settingsIdentifier, key), True),
            callback=self.saveDefaults)
        setattr(self, key, checkBox)
        if "Woff" in setting:
            # NOTE(review): the default True is used as a radio index here,
            # selecting item 1 ("TTF") — presumably intentional; confirm.
            formatOption = BatchRadioGroup((120, y, 85, 22),
                ["OTF", "TTF"],
                value=getExtensionDefault("%s.format_%s" % (settingsIdentifier, key), True),
                callback=self.saveDefaults,
                isVertical=False,
                sizeStyle="mini")
            setattr(self, "%s_format" % key, formatOption)
        y += 30
    # SVG output requires the optional ufo2svg package
    self.save_svg.enable(hasUfo2svg)
    y += 5
    self.preserveTTFhints = CheckBox((10, y, -10, 18), "Preserve TTF hints",
        value=getExtensionDefault("%s.preserveTTFhints" % settingsIdentifier, False),
        sizeStyle="small")
    y += 30
    self.generateHTML = CheckBox((10, y, -10, 18), "Generate HTML",
        value=getExtensionDefault("%s.generateHTML" % settingsIdentifier, True),
        sizeStyle="small")
    y += 30
    middle = 45
    self.suffixText = TextBox((10, y + 2, middle, 22), "Suffix:", alignment="right")
    self.webSuffix = EditText((middle + 10, y, 100, 22),
        getExtensionDefault("%s.webSuffix" % settingsIdentifier, "_web"),
        callback=self.saveDefaults)
    y += 30
    self.convert = Button((-100, -30, -10, 22), "Generate", callback=self.convertCallback)
    self.settings = ImageButton((-130, -28, 20, 20), bordered=False, imageNamed=NSImageNameSmartBadgeTemplate, callback=self.settingsCallback)
    # Total vertical space consumed — presumably read by the containing
    # window for layout; confirm against the caller
    self.height = y
def saveDefaults(self, sender):
    """Write every checkbox, radio group, and text field value of this pane
    back to the extension defaults."""
    prefix = settingsIdentifier
    for setting in self.webSettings:
        key = setting.replace(" ", "_").lower()
        setExtensionDefault("%s.%s" % (prefix, key), getattr(self, key).get())
        # The woff/woff2 rows carry an extra OTF/TTF source-format radio group
        if "Woff" in setting:
            format_value = getattr(self, "%s_format" % key).get()
            setExtensionDefault("%s.format_%s" % (prefix, key), format_value)
    for key in ("webSuffix", "preserveTTFhints", "generateHTML"):
        setExtensionDefault("%s.%s" % (prefix, key), getattr(self, key).get())
# convert
def _getTempTTF(self, path, report=None, preserveTTFhints=False):
    """Return the path of a temporary TTF built from *path*, creating it on
    first use and caching it on the instance (self._tempTTFPath) so that
    multiple output formats reuse a single conversion.
    """
    if not hasattr(self, "_tempTTFPath"):
        _, ext = os.path.splitext(path)
        ext = ext.lower()
        # mkstemp returns an OPEN file descriptor; the original discarded
        # it ([1]) which leaks a descriptor — close it explicitly.
        fd, self._tempTTFPath = tempfile.mkstemp(suffix=".ttf")
        os.close(fd)
        if ext == ".otf":
            report.write("Source is binary a OTF file. Convert to TTF.")
            report.indent()
            convertToTTF(path, self._tempTTFPath, report)
            report.dedent()
        elif ext == ".ttf":
            report.write("Source is binary a TTF file.")
            shutil.copyfile(path, self._tempTTFPath)
            if not preserveTTFhints:
                report.write("Auto hint the existing TTF file.")
                report.indent()
                fd, tempDest = tempfile.mkstemp(suffix=".ttf")
                os.close(fd)
                autohintOptions = getExtensionDefault(settingsIdentifier, defaultOptions)
                result = TTFAutohint(self._tempTTFPath, tempDest, autohintOptions)
                report.writeItems(result)
                # Swap the cached path over to the freshly hinted file
                os.remove(self._tempTTFPath)
                self._tempTTFPath = tempDest
                report.dedent()
        else:
            if ext == ".ufo":
                report.write("Source is a UFO file. Generate TTF.")
            else:
                report.write("Source is a %s file. Import the file. Generate TTF." % (ext[1:]))
            report.indent()
            generateTTF(path, self._tempTTFPath, report)
            report.dedent()
    return self._tempTTFPath
def _getTempOTF(self, path, report=None, preserveTTFhints=False):
    """Return the path of a temporary OTF built from *path*, creating it on
    first use and caching it on the instance (self._tempOTFPath) so that
    multiple output formats reuse a single conversion.
    """
    if not hasattr(self, "_tempOTFPath"):
        _, ext = os.path.splitext(path)
        ext = ext.lower()
        # mkstemp returns an OPEN file descriptor; the original discarded
        # it ([1]) which leaks a descriptor — close it explicitly.
        fd, self._tempOTFPath = tempfile.mkstemp(suffix=".otf")
        os.close(fd)
        if ext == ".otf":
            report.write("Source is binary a OTF file.")
            shutil.copyfile(path, self._tempOTFPath)
            # NOTE(review): the TTF-hints flag also gates OTF autohinting
            # here — confirm that is intentional
            if not preserveTTFhints:
                report.write("Auto hint the existing OTF file.")
                report.indent()
                result = OTFAutohint(self._tempOTFPath)
                report.writeItems(result)
                report.dedent()
        elif ext == ".ttf":
            report.write("Source is binary a TTF file. Convert to OTF.")
            report.indent()
            convertToOTF(path, self._tempOTFPath, report)
            report.dedent()
        else:
            if ext == ".ufo":
                report.write("Source is a UFO file. Generate OTF.")
            else:
                report.write("Source is a %s file. Import the file. Generate OTF." % (ext[1:]))
            report.indent()
            generateOTF(path, self._tempOTFPath, report)
            report.dedent()
    return self._tempOTFPath
def _removeTempFiles(self):
    """Delete any cached temporary TTF/OTF conversion files and drop the
    cache attributes so the next conversion starts fresh."""
    for attribute in ("_tempTTFPath", "_tempOTFPath"):
        if hasattr(self, attribute):
            cached_path = getattr(self, attribute)
            if os.path.exists(cached_path):
                os.remove(cached_path)
            delattr(self, attribute)
def _convertPath(self, path, destDir, saveOTF=True, saveTTF=True, saveWOFF=True, saveWOFFFormat=WOFF_TTF_FORMAT, saveWOFF2=True, saveWOFF2Format=WOFF_TTF_FORMAT, saveEOT=True, saveSVG=False, suffix="", report=None, preserveTTFhints=False):
fileName = os.path.basename(path)
fileName, ext = os.path.splitext(fileName)
ext = ext.lower()
if ext in [".ttf", ".otf"]:
font = CompositorFont(path)
else:
font = RFont(path, document=False, showInterface=False)
familyName = font.info.familyName
styleName = font.info.styleName
if not self.controller.keepFileNames():
fileName = "%s-%s" % (familyName, styleName)
fileName += suffix
fileName = fileName.replace(" ", "_")
if self.controller.exportInFolders():
fontDir = os.path.join(destDir, familyName.replace(" ", ""), styleName.replace(" ", ""))
else:
fontDir = destDir
otfPath = os.path.join(fontDir, fileName + ".otf")
ttfPath = os.path.join(fontDir, fileName + ".ttf")
woffPath = os.path.join(fontDir, fileName + ".woff")
woff2Path = os.path.join(fontDir, fileName + ".woff2")
eotPath = os.path.join(fontDir, fileName + ".eot")
svgPath = os.path.join(fontDir, fileName + ".svg")
# save otf
if saveOTF:
report.writeTitle("Build OTF", "'")
report.indent()
report.write("path: %s" % otfPath)
buildTree(fontDir)
temp = self._getTempOTF(path, report=report, preserveTTFhints=preserveTTFhints)
shutil.copyfile(temp, otfPath)
report.dedent()
report.newLine()
# save ttf
if saveTTF:
report.writeTitle("Build TTF", "'")
report.indent()
report.write("path: %s" % ttfPath)
buildTree(fontDir)
temp = self._getTempTTF(path, report=report, preserveTTFhints=preserveTTFhints)
shutil.copyfile(temp, ttfPath)
report.dedent()
report.newLine()
# convert to woff
if saveWOFF:
if saveWOFFFormat == WOFF_TTF_FORMAT:
func = self._getTempTTF
reportFormat = "TTF"
elif saveWOFFFormat == WOFF_OTF_FORMAT:
func = self._getTempOTF
reportFormat = "OTF"
report.writeTitle("Build WOFF (%s)" % reportFormat, "'")
report.indent()
report.write("path: %s" % woffPath)
buildTree(fontDir)
temp = func(path, report=report, preserveTTFhints=preserveTTFhints)
convertToWoff(temp, woffPath)
report.dedent()
report.newLine()
# convert to woff2
if saveWOFF2:
if saveWOFFFormat == WOFF_TTF_FORMAT:
func = self._getTempTTF
reportFormat = "TTF"
elif saveWOFFFormat == WOFF_OTF_FORMAT:
func = self._getTempOTF
reportFormat = "OTF"
report.writeTitle("Build WOFF2 (%s)" % reportFormat, "'")
report.indent()
report.write("path: %s" % woff2Path)
buildTree(fontDir)
temp = func(path, report=report, preserveTTFhints=preserveTTFhints)
convertToWoff2(temp, woff2Path)
report.dedent()
report.newLine()
# convert to eot
if saveEOT:
report.writeTitle("Build EOT", "'")
report.indent()
report.write("path: %s" % eotPath)
buildTree(fontDir)
temp = self._getTempTTF(path, report=report, preserveTTFhints=preserveTTFhints)
convertToEot(temp, eotPath)
report.dedent()
report.newLine()
# convert to svg
if saveSVG:
report.writeTitle("Build SVG", "'")
report.indent()
report.write("path: | |
info was a hack anyway :-(
args = 0
for arg in arguments:
params.append(
code.Parameter(
"arg_"+str(args),
code.ObjectType(arg.info.name)
)
)
args += 1
# turn matchers into list of conditions
[condition, suffix] = self.transform_matchers_into_condition(matchers, arguments)
name += suffix
# construct body
body = code.WhileDo(code.NotEquals(code.SimpleVariable("iter"), Null())) \
.contains(
code.IfStatement(condition, [
code.IfStatement(code.Equals(code.SimpleVariable("prev"),
Null()),
[ code.Assign(Deref(code.SimpleVariable("list")),
code.ObjectProperty("iter", "next")) ],
[ code.Assign(code.ObjectProperty("prev", "next"),
code.ObjectProperty("iter", "next")) ]
),
code.FunctionCall("free_"+type_name,
[ code.SimpleVariable("iter") ]),
code.Inc(code.SimpleVariable("removed"))
]),
code.Assign(code.SimpleVariable("prev"),
code.SimpleVariable("iter")),
code.Assign(code.SimpleVariable("iter"),
code.ObjectProperty("iter", "next"))
)
# provide a prototype
self.stack[0].find("lists").select("def").append(
code.Prototype(name, type=code.IntegerType(), params=params)
)
# create function and return it
return (self.stack[0].find("lists").select("dec").append(
code.Function(name, type=code.IntegerType(), params=params)
.contains(code.Assign(code.VariableDecl("removed", code.IntegerType()),
code.IntegerLiteral(0)),
code.Assign(code.VariableDecl("iter", RefType(type.type)),
Deref(code.SimpleVariable("list"))),
code.Assign(code.VariableDecl("prev", RefType(type.type)),
Null()),
body,
code.Return(code.SimpleVariable("removed"))
)
), True)
def transform_matchers_into_condition(self, matchers, arguments):
    """Combine per-element matchers into one boolean AST condition.

    For each list element position: a matcher of None consumes the next
    caller argument and compares it for equality (via the generated
    equal_<type> helper); a comparison matcher (<, >, <=, >=, ==, !=)
    compares the element against the matcher's expression; code.Anything
    matchers and "*" comparisons add no condition at all.

    Returns:
        (condition, suffix): a single boolean AST expression (all parts
        ANDed together; BooleanLiteral(True) when nothing matched), and
        the string to append to the generated function's name.
    """
    suffix = ""
    conditions = []
    args = 0
    for idx, matcher in enumerate(matchers):
        if matcher == None:
            # No matcher: compare this element against the next caller argument
            arg_type = arguments[args].info.name
            suffix += "_match_arg_" + str(args)
            conditions.append(
                code.FunctionCall("equal_"+arg_type, type=code.BooleanType(),
                    arguments=[
                        code.SimpleVariable("arg_"+str(args)),
                        code.ObjectProperty("iter", "elem_" + str(idx)),
                    ]
                )
            )
            args += 1
        elif not isinstance(matcher, code.Anything) and matcher.comp.operator != "*":
            # Comparison matcher: map its operator string onto the AST node class
            suffix += "_match_" + matcher.as_label()
            conditions.append( {
                "<" : code.LT,
                ">" : code.GT,
                "<=" : code.LTEQ,
                ">=" : code.GTEQ,
                "==" : code.Equals,
                "!=" : code.NotEquals
            }[matcher.comp.operator](code.ObjectProperty("iter", "elem_"+str(idx)),
                matcher.expression)
            )
    # join conditions together with AND (TODO: should have factory method)
    if len(conditions) < 1:
        conditions = code.BooleanLiteral(True)
    elif len(conditions) < 2:
        conditions = conditions.pop(0)
    else:
        test = code.And(conditions.pop(0), conditions.pop(0))
        while len(conditions) > 0:
            test = code.And(test, conditions.pop(0))
        conditions = test
    return (conditions, suffix)
class Generic(Platform):
    """Fallback platform that maps abstract AST type names onto plain C types."""

    # Abstract AST type name -> C type keyword
    _C_TYPES = {
        "ByteType": "char",
        "BooleanType": "int",
        "IntegerType": "int",
        "FloatType": "float",
        "LongType": "long",
    }

    def type(self, type):
        """Return the C spelling for *type* (raises KeyError when unmapped)."""
        return self._C_TYPES[str(type)]
class Dumper(language.Dumper):
"""
Visitor for CodeCanvas-based ASTs producing actual C code.
"""
def __init__(self, platform=None):
    """Create a C dumper; *platform* defaults to the Generic C platform."""
    super(Dumper, self).__init__()
    resolved = Generic() if platform is None else platform
    assert isinstance(resolved, Platform)
    self.platform = resolved
@stacked
def visit_Constant(self, constant):
    """Render a constant as a C #define directive."""
    return "#define {} {}".format(constant.id.accept(self), constant.value.accept(self))
@stacked
def visit_Function(self, function):
    """Render a complete C function definition including its body."""
    # An empty parameter list is spelled "void" in C
    params = ", ".join(p.accept(self) for p in function.params) if len(function.params) else "void"
    body = "\n".join(child.accept(self) for child in function)
    return "{} {}({}) {{\n{}\n}}".format(function.type.accept(self), function.name, params, body)
@stacked
def visit_Prototype(self, function):
    """Render a C function prototype (declaration only, no body)."""
    # An empty parameter list is spelled "void" in C
    params = ", ".join(p.accept(self) for p in function.params) if len(function.params) else "void"
    return "{} {}({});".format(function.type.accept(self), function.name, params)
@stacked
def visit_Parameter(self, param):
    """Emit a parameter declaration: '<type> <name>'."""
    return "{} {}".format(param.type.accept(self), param.id.accept(self))
# Statements
@stacked
def visit_Print(self, printed):
    """Emit a printf statement around the (already rendered) string argument."""
    return "printf({});".format(printed.string.accept(self))
@stacked
def visit_Import(self, importer):
    """Emit an #include: .c files quoted as-is, <...> kept, anything else quoted with .h appended."""
    target = importer.imported
    if target.endswith(".c"):
        target = '"' + target + '"'
    elif not target.startswith("<"):
        target = '"' + target + '.h"'
    return "#include " + target
@stacked
def visit_IfStatement(self, cond):
    """Emit if(...){...} with an optional else clause."""
    true_part = "\n".join([stmt.accept(self) for stmt in cond.true_clause])
    rendered = "if(" + cond.expression.accept(self) + ")" + "{" + true_part + "}"
    if len(cond.false_clause) > 0:
        false_part = "\n".join([stmt.accept(self) for stmt in cond.false_clause])
        rendered += "else {" + false_part + "}"
    return rendered
@stacked
def visit_ShiftLeft(self, exp):
    """Emit a left-shift expression.

    Bug fix: the original emitted ">>" (C right shift) for a node named
    ShiftLeft; "<<" is the left-shift operator. The stray trailing Python
    semicolon was also dropped.
    """
    return exp.var.accept(self) + " << " + str(exp.amount)
@stacked
def visit_Assign(self, stmt):
    """Emit '<lhs> = <rhs>;'."""
    return "{} = {};".format(stmt.operand.accept(self), stmt.expression.accept(self))
@stacked
def visit_Add(self, stmt):
    """Emit '<lhs> += <rhs>;'."""
    return "{} += {};".format(stmt.operand.accept(self), stmt.expression.accept(self))
@stacked
def visit_Sub(self, stmt):
    """Emit '<lhs> -= <rhs>;'."""
    return "{} -= {};".format(stmt.operand.accept(self), stmt.expression.accept(self))
@stacked
def visit_Object(self, obj):
    """An object renders as its bare name."""
    return obj.name
@stacked
def visit_Inc(self, stmt):
    """Emit a postfix increment statement."""
    return stmt.operand.accept(self) + "++;"
@stacked
def visit_Dec(self, stmt):
    """Emit a postfix decrement statement."""
    return stmt.operand.accept(self) + "--;"
def _infix(self, node, symbol):
    """Helper: render a parenthesized binary arithmetic expression."""
    return "(" + node.left.accept(self) + " " + symbol + " " + node.right.accept(self) + ")"
@stacked
def visit_Plus(self, stmt):
    """Emit a parenthesized addition."""
    return self._infix(stmt, "+")
@stacked
def visit_Minus(self, stmt):
    """Emit a parenthesized subtraction."""
    return self._infix(stmt, "-")
@stacked
def visit_Mult(self, stmt):
    """Emit a parenthesized multiplication."""
    return self._infix(stmt, "*")
@stacked
def visit_Div(self, stmt):
    """Emit a parenthesized division."""
    return self._infix(stmt, "/")
# Types
@stacked
def visit_NamedType(self, type):
    """Resolve a named type through the platform's type mapping."""
    return self.platform.type(type.name)
@stacked
def visit_VoidType(self, type):
    """void maps directly to the C keyword."""
    return "void"
@stacked
def visit_FloatType(self, type):
    """Delegate to the platform's C type mapping."""
    return self.platform.type(type)
@stacked
def visit_IntegerType(self, type):
    """Delegate to the platform's C type mapping."""
    return self.platform.type(type)
@stacked
def visit_LongType(self, type):
    """Delegate to the platform's C type mapping."""
    return self.platform.type(type)
@stacked
def visit_BooleanType(self, type):
    """Delegate to the platform's C type mapping."""
    return self.platform.type(type)
@stacked
def visit_ByteType(self, type):
    """Delegate to the platform's C type mapping."""
    return self.platform.type(type)
@stacked
def visit_ManyType(self, type):
    """A 'many' type renders as a pointer to its element type."""
    return type.type.accept(self) + "*"
@stacked
def visit_AmountType(self, type):
    """A sized type renders as its element type; the size appears at the declaration site."""
    return type.type.accept(self)
@stacked
def visit_ObjectType(self, type):
    """Emit a pointer to the singularized struct typedef (e.g. 'nodes' -> 'node_t*')."""
    base = type.name[:-1] if type.name.endswith("s") else type.name
    return base + "_t*"
@stacked
def visit_StructuredType(self, struct):
    """Emit 'typedef struct X_t {...} X_t;' with a trailing 's' stripped from the name."""
    name = struct.name.accept(self)
    if name.endswith("s"):
        name = name[:-1]  # singularize the type name
    typedef_name = name + "_t"
    members = "\n".join([prop.accept(self) for prop in struct])
    return "typedef struct " + typedef_name + " {\n" + members + "\n} " + typedef_name + ";"
@stacked
def visit_UnionType(self, struct):
    """Emit an anonymous union with its members on one line."""
    members = " ".join([prop.accept(self) for prop in struct])
    return "union { " + members + "}"
@stacked
def visit_Property(self, prop):
    """Emit a member declaration; AmountType members get an array suffix."""
    array_suffix = "[" + str(prop.type.size) + "]" if isinstance(prop.type, code.AmountType) else ""
    return prop.type.accept(self) + " " + prop.name.accept(self) + array_suffix + ";"
# Fragments
@stacked
def visit_ByteLiteral(self, literal):
    """Render a byte as two-digit lowercase hex, e.g. 0x0a."""
    return "0x%02x" % literal.value
@stacked
def visit_IntegerLiteral(self, literal):
    """Render an integer literal verbatim."""
    return str(literal.value)
@stacked
def visit_FloatLiteral(self, literal):
    """Render a float literal via Python's default str() formatting."""
    return str(literal.value)
@stacked
def visit_StringLiteral(self, string):
    # Newlines are escaped so the C literal stays on one source line.
    return '"' + string.data.replace("\n", '\\n') + '"'
@stacked
def visit_BooleanLiteral(self, bool):
    # Relies on TRUE/FALSE macros being provided by an included header.
    return "TRUE" if bool.value else "FALSE"
@stacked
def visit_Identifier(self, id):
    """An identifier renders as its name."""
    return id.name
@stacked
def visit_ListLiteral(self, literal):
    """Emit a list as varargs: the element count first, then the items; NULL when empty."""
    items = literal.children
    if not items:
        return "NULL"
    rendered = ", ".join([item.accept(self) for item in items])
    return str(len(items)) + ", " + rendered
@stacked
def visit_ObjectProperty(self, prop):
    """Pointer member access: obj->prop."""
    return "{}->{}".format(prop.obj.accept(self), prop.prop.accept(self))
@stacked
def visit_StructProperty(self, prop):
    """Value member access: obj.prop."""
    return "{}.{}".format(prop.obj.accept(self), prop.prop.accept(self))
@stacked
def visit_Comment(self, comment):
    """Single-line comments use //, multi-line ones a /* */ block."""
    text = comment.comment
    if "\n" not in text:
        return "// " + text
    return "/*\n " + "\n ".join(text.split("\n")) + "\n*/"
# Loops
@stacked
def visit_WhileDo(self, loop):
    """Emit a pre-test while loop around the rendered children."""
    return "while({}) {{\n{}\n}}".format(loop.condition.accept(self),
                                         self.visit_children(loop))
@stacked
def visit_RepeatUntil(self, loop):
    """Emit a post-test do/while loop; the until-condition is negated."""
    return "do {{\n{}\n}} while(!({}));".format(self.visit_children(loop),
                                                loop.condition.accept(self))
# Calls
@stacked
def visit_FunctionCall(self, call):
    """Emit a call; add ';' when it is a statement (void-typed, or a direct child of a Function)."""
    rendered_args = ", ".join([arg.accept(self) for arg in call.arguments])
    is_statement = isinstance(call.type, code.VoidType) \
        or isinstance(self.stack[-2], code.Function)
    return call.function.name + "(" + rendered_args + ")" + (";" if is_statement else "")
@stacked
def visit_SimpleVariable(self, var):
    """A plain variable renders as its identifier."""
    return var.id.accept(self)
@stacked
def visit_ListVariable(self, var):
    """An indexed variable renders as 'name[index]'."""
    return "{}[{}]".format(var.id.accept(self), str(var.index))
# Expressions
def _infix_expr(self, op, symbol):
    """Helper: render a parenthesized binary boolean/arithmetic expression."""
    return "(" + op.left.accept(self) + " " + symbol + " " + op.right.accept(self) + ")"
@stacked
def visit_And(self, op):
    """Logical conjunction."""
    return self._infix_expr(op, "&&")
@stacked
def visit_Or(self, op):
    """Logical disjunction."""
    return self._infix_expr(op, "||")
@stacked
def visit_Equals(self, op):
    """Equality comparison."""
    return self._infix_expr(op, "==")
@stacked
def visit_NotEquals(self, op):
    """Inequality comparison."""
    return self._infix_expr(op, "!=")
@stacked
def visit_LT(self, op):
    """Less-than comparison."""
    return self._infix_expr(op, "<")
@stacked
def visit_LTEQ(self, op):
    """Less-or-equal comparison."""
    return self._infix_expr(op, "<=")
@stacked
def visit_GT(self, op):
    """Greater-than comparison."""
    return self._infix_expr(op, ">")
@stacked
def visit_GTEQ(self, op):
    """Greater-or-equal comparison."""
    return self._infix_expr(op, ">=")
@stacked
def visit_Modulo(self, op):
    """Modulo operation."""
    return self._infix_expr(op, "%")
@stacked
def visit_Return(self, op):
    """Emit 'return;' or 'return <expr>;'."""
    if op.expression is None:
        return "return;"
    return "return " + op.expression.accept(self) + ";"
@stacked
def visit_Not(self, op):
    """Logical negation (no added parentheses, matching the original emitter)."""
    return "!" + op.operand.accept(self)
# C-specific extensions
@stacked
def visit_RefType(self, ref):
    """A reference renders as a pointer to the referenced type."""
    return ref.type.accept(self) + "*"
@stacked
def visit_VariableDecl(self, decl):
    """Emit a variable declaration.

    AmountTypes are declared as arrays ('type name[N];'), except inside an
    Assign where a pointer is declared instead; inside an Assign the
    trailing ';' is omitted (the Assign statement adds it).
    """
    inside_assign = isinstance(self.stack[-2], code.Assign)
    type_suffix = ""
    name_suffix = ""
    if isinstance(decl.type, code.AmountType):
        if inside_assign:
            type_suffix = "*"
        else:
            name_suffix = "[" + str(decl.type.size) + "]"
    terminator = "" if inside_assign else ";"
    return decl.type.accept(self) + type_suffix + " " + decl.name + name_suffix + terminator
# a bit specific, but for now it seems the only real possibility
@stacked
def visit_Deref(self, ref):
    """Pointer dereference."""
    return "*" + ref.pointer.accept(self)
@stacked
def visit_Cast(self, cast):
    """C-style cast: (type)expr."""
    return "({}){}".format(cast.to.accept(self), cast.expression.accept(self))
@stacked
def visit_Null(self, null):
return | |
# coding: utf8
import abc
from os import path
import numpy as np
import pandas as pd
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
from clinicadl.utils.inputs import FILENAME_TYPE, MASK_PATTERN
#################################
# Datasets loaders
#################################
class CapsDataset(Dataset):
"""Abstract class for all derived CapsDatasets."""
def __init__(
    self,
    caps_directory,
    data_df,
    preprocessing,
    transformations,
    label_presence,
    label=None,
    label_code=None,
    augmentation_transformations=None,
    multi_cohort=False,
):
    """Base initializer shared by all CapsDatasets.

    Args:
        caps_directory (str): path to the CAPS folder (or, when multi_cohort,
            to a TSV linking cohort names and paths).
        data_df (pd.DataFrame): subject/session list; must contain
            participant_id, session_id and cohort columns.
        preprocessing (str): key selecting the file layout inside the CAPS.
        transformations (callable or None): applied in both train and eval.
        label_presence (bool): whether a label column must be read.
        label (str, optional): name of the label column in data_df.
        label_code (dict, optional): maps label values to class indices.
        augmentation_transformations (callable, optional): train-mode only.
        multi_cohort (bool): whether caps_directory is a multi-cohort TSV.

    Raises:
        ValueError: when the child class did not define elem_index / mode.
        Exception: when mandatory columns are missing from data_df.
    """
    self.caps_directory = caps_directory
    self.caps_dict = self.create_caps_dict(caps_directory, multi_cohort)
    self.transformations = transformations
    self.augmentation_transformations = augmentation_transformations
    self.eval_mode = False  # toggled by train()/eval()
    self.label_presence = label_presence
    self.label = label
    self.label_code = label_code
    self.preprocessing = preprocessing
    # Child classes must have defined these before calling super().__init__.
    if not hasattr(self, "elem_index"):
        raise ValueError(
            "Child class of CapsDataset must set elem_index attribute."
        )
    if not hasattr(self, "mode"):
        raise ValueError("Child class of CapsDataset must set mode attribute.")
    # Check the format of the tsv file here
    self.df = data_df
    mandatory_col = {"participant_id", "session_id", "cohort"}
    if self.label_presence and self.label is not None:
        mandatory_col.add(self.label)
    if not mandatory_col.issubset(set(self.df.columns.values)):
        raise Exception(
            "the data file is not in the correct format."
            "Columns should include %s" % mandatory_col
        )
    self.elem_per_image = self.num_elem_per_image()
    # Loading item 0 once fixes the tensor size reported for the dataset.
    self.size = self[0]["image"].size()
@property
@abc.abstractmethod
def elem_index(self):
    """Index of the element extracted from each image (None = iterate over all)."""
    pass
def label_fn(self, target):
    """Map a raw target value to the label consumed by the criterion.

    Args:
        target (str or float or int): raw value read from the data frame.

    Returns:
        None for reconstruction, a float32 array for regression, or an
        integer class index for classification.
    """
    if self.label is None:
        # Reconstruction: the criterion needs no label.
        return None
    if self.label_code is None:
        # Regression: wrap the raw target as float32.
        return np.float32([target])
    # Classification: look the target up in the label code.
    return self.label_code[target]
def __len__(self):
    """Total samples: rows in the data frame times elements per image."""
    return self.elem_per_image * len(self.df)
@staticmethod
def create_caps_dict(caps_directory, multi_cohort):
    """Build the cohort-name -> CAPS-path mapping.

    Single-cohort: validates caps_directory and maps it under "single".
    Multi-cohort: caps_directory must be a TSV with cohort/path columns;
    every listed path is validated.

    Raises:
        ValueError: when multi_cohort is set but the path is not a TSV file.
    """
    from clinica.utils.inputs import check_caps_folder
    if not multi_cohort:
        check_caps_folder(caps_directory)
        return {"single": caps_directory}
    if not caps_directory.endswith(".tsv"):
        raise ValueError(
            "If multi_cohort is given, the caps_dir argument should be a path to a TSV file."
        )
    caps_df = pd.read_csv(caps_directory, sep="\t")
    check_multi_cohort_tsv(caps_df, "CAPS")
    caps_dict = dict()
    for idx in range(len(caps_df)):
        cohort = caps_df.loc[idx, "cohort"]
        caps_path = caps_df.loc[idx, "path"]
        check_caps_folder(caps_path)
        caps_dict[cohort] = caps_path
    return caps_dict
def _get_path(self, participant, session, cohort, mode="image"):
    """
    Gets the path to the tensor version of the image (*.pt).

    Args:
        participant (str): ID of the participant.
        session (str): ID of the session.
        cohort (str): Name of the cohort.
        mode (str): Type of mode used (image, patch, slice or roi).
    Returns:
        image_path (str): path to the image
    Raises:
        ValueError: when the cohort is not in caps_dict.
        NotImplementedError: for an unknown preprocessing.
    """
    if cohort not in self.caps_dict.keys():
        raise ValueError(
            "Cohort names in labels and CAPS definitions do not match."
        )
    # preprocessing -> (CAPS subfolder, FILENAME_TYPE key). All of these share
    # the same deeplearning_prepare_data layout; only shepplogan differs.
    tensor_layout = {
        "t1-linear": ("t1_linear", "cropped"),
        "t1-linear-downsampled": ("t1_linear", "downsampled"),
        "t1-extensive": ("t1_extensive", "skull_stripped"),
        "t1-volume": ("custom", "gm_maps"),
    }
    if self.preprocessing in tensor_layout:
        subfolder, filename_key = tensor_layout[self.preprocessing]
        return path.join(
            self.caps_dict[cohort],
            "subjects",
            participant,
            session,
            "deeplearning_prepare_data",
            "%s_based" % mode,
            subfolder,
            participant + "_" + session + FILENAME_TYPE[filename_key] + ".pt",
        )
    if self.preprocessing == "shepplogan":
        # Synthetic Shepp-Logan data lives flat under subjects/.
        return path.join(
            self.caps_dict[cohort],
            "subjects",
            "%s_%s%s.pt" % (participant, session, FILENAME_TYPE["shepplogan"]),
        )
    raise NotImplementedError(
        "The path to preprocessing %s is not implemented" % self.preprocessing
    )
def _get_meta_data(self, idx):
    """
    Gets all meta data necessary to compute the path with _get_path.

    Args:
        idx (int): row number of the meta-data contained in self.df
    Returns:
        participant (str), session (str), cohort (str), elem_index (int),
        label (value from label_fn, or -1 when labels are absent).
    """
    row = idx // self.elem_per_image
    participant = self.df.loc[row, "participant_id"]
    session = self.df.loc[row, "session_id"]
    cohort = self.df.loc[row, "cohort"]
    # A fixed elem_index selects the same part for every image; otherwise
    # the remainder walks over all parts of one image.
    if self.elem_index is None:
        elem_idx = idx % self.elem_per_image
    else:
        elem_idx = self.elem_index
    if self.label_presence and self.label is not None:
        label = self.label_fn(self.df.loc[row, self.label])
    else:
        label = -1
    return participant, session, cohort, elem_idx, label
def _get_full_image(self):
    """
    Loads an example of the full image corresponding to the dataset.
    Useful to compute the number of elements if mode != image.

    Returns:
        image (torch.Tensor): tensor of the full image.
    """
    import nibabel as nib
    from clinicadl.generate.generate_utils import find_image_path as get_nii_path
    participant_id = self.df.loc[0, "participant_id"]
    session_id = self.df.loc[0, "session_id"]
    cohort = self.df.loc[0, "cohort"]
    try:
        image_path = self._get_path(
            participant_id, session_id, cohort, mode="image"
        )
        image = torch.load(image_path)
    except FileNotFoundError:
        # Tensor not extracted yet: fall back to the raw NIfTI volume.
        image_path = get_nii_path(
            self.caps_dict,
            participant_id,
            session_id,
            cohort=cohort,
            preprocessing=self.preprocessing,
        )
        image_nii = nib.load(image_path)
        image_np = image_nii.get_fdata()
        # Bug fix: ToTensor was referenced unqualified, but only
        # `torchvision.transforms as transforms` is imported at module level,
        # so this branch raised NameError instead of converting the array.
        image = transforms.ToTensor()(image_np)
    return image
@abc.abstractmethod
def __getitem__(self, idx):
    """
    Gets the sample containing all the information needed for training and testing tasks.

    Must be implemented by each mode-specific child class.

    Args:
        idx (int): row number of the meta-data contained in self.df
    Returns:
        Dict[str, Any]: dictionary with following items:
            - "image" (torch.Tensor): the input given to the model,
            - "label" (int or float): the label used in criterion,
            - "participant_id" (str): ID of the participant,
            - "session_id" (str): ID of the session,
            - f"{self.mode}_id" (int): number of the element,
            - "image_path": path to the image loaded in CAPS.
    """
    pass
@abc.abstractmethod
def num_elem_per_image(self):
    """Computes the number of elements per image based on the full image.

    Called once by __init__ to size the dataset (see __len__).
    """
    pass
def eval(self):
    """Put the dataset on evaluation mode (data augmentation is not performed)."""
    self.eval_mode = True
    return self  # returned for chaining
def train(self):
    """Put the dataset on training mode (data augmentation is performed)."""
    self.eval_mode = False
    return self  # returned for chaining
class CapsDatasetImage(CapsDataset):
"""Dataset of MRI organized in a CAPS folder."""
def __init__(
    self,
    caps_directory,
    data_file,
    preprocessing="t1-linear",
    train_transformations=None,
    label_presence=True,
    label=None,
    label_code=None,
    all_transformations=None,
    multi_cohort=False,
):
    """
    Args:
        caps_directory (string): Directory of all the images.
        data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
        preprocessing (string): Defines the path to the data in CAPS.
        train_transformations (callable, optional): Optional transform to be applied only on training mode.
        label_presence (bool): If True the diagnosis will be extracted from the given DataFrame.
        label (str): Name of the column in data_df containing the label.
        label_code (Dict[str, int]): label code that links the output node number to label value.
        all_transformations (callable, options): Optional transform to be applied during training and evaluation.
        multi_cohort (bool): If True caps_directory is the path to a TSV file linking cohort names and paths.
    """
    # mode must be set before the base initializer, which validates it.
    self.mode = "image"
    super().__init__(
        caps_directory,
        data_file,
        preprocessing,
        augmentation_transformations=train_transformations,
        label_presence=label_presence,
        label=label,
        label_code=label_code,
        transformations=all_transformations,
        multi_cohort=multi_cohort,
    )
@property
def elem_index(self):
    """Full-image mode has a single element per image, so no element index is needed."""
    return None
def __getitem__(self, idx):
    """Load one full image and its metadata (see CapsDataset.__getitem__)."""
    participant, session, cohort, _, label = self._get_meta_data(idx)
    image_path = self._get_path(participant, session, cohort, "image")
    image = torch.load(image_path)
    if self.transformations:
        image = self.transformations(image)
    # Augmentation is applied during training only.
    if self.augmentation_transformations and not self.eval_mode:
        image = self.augmentation_transformations(image)
    return {
        "image": image,
        "label": label,
        "participant_id": participant,
        "session_id": session,
        "image_id": 0,  # a single element per image in this mode
        "image_path": image_path,
    }
def num_elem_per_image(self):
    """Image mode: exactly one element per image."""
    return 1
class CapsDatasetPatch(CapsDataset):
def __init__(
self,
caps_directory,
data_file,
patch_size,
stride_size,
train_transformations=None,
prepare_dl=False,
patch_index=None,
preprocessing="t1-linear",
label_presence=True,
label=None,
label_code=None,
all_transformations=None,
multi_cohort=False,
):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
train_transformations (callable, optional): Optional transform to be applied only on training mode.
prepare_dl (bool): If true pre-extracted patches will be loaded.
patch_index (int, optional): If a value is given the same patch location will be extracted for each image.
else the dataset will load all the patches possible for one image.
patch_size (int): size of the regular cubic patch.
stride_size (int): length between the centers of two patches.
label_presence (bool): If True the diagnosis will be extracted from the given DataFrame.
label (str): Name of the column in data_df containing the label.
label_code (Dict[str, int]): label code that links the output node number to label value.
all_transformations (callable, options): Optional transform to be applied during training and evaluation.
multi_cohort (bool): If True caps_directory is the path to a TSV file linking cohort names and paths.
"""
if preprocessing == "shepplogan":
raise ValueError(
"Patch mode is not available for preprocessing %s" % preprocessing
)
self.patch_size = patch_size
self.stride_size = stride_size
self.patch_index | |
self.mem['KLAMPO9']['V'] == 1: self.add_list_signal('Reactor trip')
if self.mem['KLAMPO6']['V'] == 1: self.add_list_signal('SI valve open')
if self.mem['KLAMPO4']['V'] == 1: self.add_list_signal('Containment ISO')
if self.mem['KLAMPO2']['V'] == 1: self.add_list_signal('Feedwater ISO')
if self.mem['KLAMPO3']['V'] == 1: self.add_list_signal('Main steam line ISO')
if self.mem['KLAMPO134']['V'] == 1: self.add_list_signal('Aux feed pump 1 start')
if self.mem['KLAMPO135']['V'] == 1: self.add_list_signal('Aux feed pump 2 start')
if self.mem['KLAMPO136']['V'] == 1: self.add_list_signal('Aux feed pump 3 start')
if self.mem['KLAMPO70']['V'] == 1: self.add_list_signal('Charging pump 2 start')
if self.mem['KLAMPO69']['V'] == 1: self.add_list_signal('Charging pump 3 start')
if self.mem['KLAMPO124']['V'] == 0: self.add_list_signal('RCP 1 stop')
if self.mem['KLAMPO125']['V'] == 0: self.add_list_signal('RCP 2 stop')
if self.mem['KLAMPO126']['V'] == 0: self.add_list_signal('RCP 3 stop')
pass
def add_list_signal(self, content):
    """Append a timestamped signal entry to the Auto_list widget, skipping duplicates."""
    already_listed = self.ui.Auto_list.findItems('{}'.format(content), QtCore.Qt.MatchContains)
    if not already_listed:
        self.ui.Auto_list.addItem('{} {}'.format(self.Call_CNS_time[0], content))
# ======================================================================================================================
# alarm function of operation state_by sb
def operation_state(self):
    """Color the three mode buttons for the latest operation mode (1=abnormal, 2=emergency, else normal)."""
    modes = self.strategy_selection_mem['operation_mode']
    if modes == []:
        return
    # (pushButton_8, pushButton_9, pushButton_10) colors per mode.
    palette = {
        1: ('gray', 'yellow', 'gray'),  # abnormal
        2: ('gray', 'gray', 'red'),     # emergency
    }
    c8, c9, c10 = palette.get(modes[-1], ('green', 'gray', 'gray'))  # default: normal
    self.ui.pushButton_8.setStyleSheet(self.back_color[c8])
    self.ui.pushButton_9.setStyleSheet(self.back_color[c9])
    self.ui.pushButton_10.setStyleSheet(self.back_color[c10])
# ======================================================================================================================
# history function of strategy selection_by sb
def history_ss(self):
    """Log the first occurrence of each strategy transition to the history list widget."""
    strategies = self.strategy_selection_mem['strategy']
    if strategies == []:
        return
    # strategy value -> (one-shot trigger key, log text).
    log_table = {
        'NA': ('no_on/off', 'Normal Operation\tAutonomous control by RL'),
        'NM': ('no_on/off', 'Normal Operation\tManual control'),
        'AA_2301': ('ab_on/off', 'Abnormal Operation\tAutonomous control by LSTM'),
        'EA': ('em_on/off', 'Emergency Operation\tAutonomous control by LSTM'),
    }
    entry = log_table.get(strategies[-1])
    if entry is None:
        return
    trigger_key, text = entry
    # Each transition is logged once; the trigger flag suppresses repeats.
    if not self.st_triger[trigger_key]:
        self.ui.listWidget.addItem('{}\t{}'.format(self.Call_CNS_time[0], text))
        self.st_triger[trigger_key] = True
# ======================= Monitoring DIS ===============================================================================
def run_TSMS(self):
    """Reset the monitoring panel at scenario start and run LCO monitoring while in emergency mode."""
    if self.mem['KCNTOMS']['V'] < 4:
        # Simulation (re)started: clear previous monitoring results.
        self.ui.Performace_Mn.clear()
        self.TSMS_State = {}
    modes = self.strategy_selection_mem['operation_mode']
    if modes != [] and modes[-1] == 2:
        self.Monitoring()
def Calculator_SDM(self):
    """Compute the current shutdown margin (SDM) for the LCO 3.1.1 check.

    Interpolates the power defect between BOL and EOL anchors at the current
    burnup, adds the void content, subtracts inoperable/abnormal rod worth
    from the total rod worth, and returns the remaining margin.
    """
    # Plant parameters; comments 'H'/'T' kept from the original source
    # (presumably hardcoded design value vs. tunable input — TODO confirm).
    self.init_para = {
        'HFP': 100,  # H
        'ReatorPower': 90,  # T
        'BoronConcentration': 1318,  # T
        'Burnup': 4000,  # T
        'Burnup_BOL': 150,  # H
        'Burnup_EOL': 18850,  # H
        'TotalPowerDefect_BOL': 1780,  # H
        'TotalPowerDefect_EOL': 3500,  # H
        'VoidCondtent': 50,  # H
        'TotalRodWorth': 5790,  # H
        'WorstStuckRodWorth': 1080,  # H
        'InoperableRodNumber': 1,  # T
        'BankWorth_D': 480,  # H
        'BankWorth_C': 1370,  # H
        'BankWorth_B': 1810,  # H
        'BankWorth_A': 760,  # H
        'AbnormalRodName': 'C',  # T
        'AbnormalRodNumber': 1,  # T
        'ShutdownMarginValue': 1770,  # H
    }
    # 1. BOL: power defect to bring the current power down to 0%.
    ReactorPower = self.mem['QPROLD']['V'] * 100
    PowerDefect_BOL = self.init_para['TotalPowerDefect_BOL'] * ReactorPower / self.init_para['HFP']
    # 2. EOL: power defect to bring the current power down to 0%.
    PowerDefect_EOL = self.init_para['TotalPowerDefect_EOL'] * ReactorPower / self.init_para['HFP']
    # 3. Interpolate the power defect at the current burnup.
    A = self.init_para['Burnup_EOL'] - self.init_para['Burnup_BOL']
    B = PowerDefect_EOL - PowerDefect_BOL
    # NOTE(review): C uses Burnup - Burnup_EOL while the offset added below is
    # the BOL anchor; a linear interpolation from BOL would normally use
    # Burnup - Burnup_BOL — confirm intended formula.
    C = self.init_para['Burnup'] - self.init_para['Burnup_EOL']
    PowerDefect_Burnup = B * C / A + PowerDefect_BOL
    # 4. Total reactivity defect (power defect + void content).
    PowerDefect_Final = PowerDefect_Burnup + self.init_para['VoidCondtent']
    # 5. Worth of inoperable rods.
    InoperableRodWorth = self.init_para['InoperableRodNumber'] * self.init_para['WorstStuckRodWorth']
    # 6. Worth of abnormal rods (bank worth split over 8 rods per bank).
    AbnormalRodWorth = self.init_para['BankWorth_{}'.format(
        self.init_para['AbnormalRodName'])] / 8 * self.init_para['AbnormalRodNumber']
    # 7. Combined inoperable + abnormal rod worth.
    InoperableAbnormal_RodWorth = InoperableRodWorth + AbnormalRodWorth
    # 8. Shutdown margin at the current power.
    ShutdownMargin = self.init_para['TotalRodWorth'] - InoperableAbnormal_RodWorth - PowerDefect_Final
    return ShutdownMargin
def Monitoring(self):
    """Check the emergency-operation LCOs and log one Dissatisfaction entry per violated LCO."""
    # LCO 3.4.4: two or more RCPs stopped.
    if [self.mem['KLAMPO124']['V'], self.mem['KLAMPO125']['V'], self.mem['KLAMPO126']['V']].count(0) >= 2:
        if not 'LCO 3.4.4' in self.TSMS_State.keys():
            self.TSMS_State['LCO 3.4.4'] = {'Start_time': self.Call_CNS_time[1],
                                            'End_time': self.Call_CNS_time[1]+24000}
            end_time = self.calculate_time(self.Call_CNS_time[1]+24000)
            self.ui.Performace_Mn.addItem('{}->{}\tLCO 3.4.4\tDissatisfaction'.format(self.Call_CNS_time[0],
                                                                                      end_time))
    # LCO 3.4.1: pressure AND cold-leg temperature must both be in band.
    # Bug fix: the original wrote "not A and B", which (by Python precedence)
    # only fired when pressure was out of band while temperature was in band;
    # the violation condition is "not (A and B)" — cf. the matching success
    # check in TSMS_LCO_info.
    if not (154.7 < self.mem['ZINST65']['V'] < 161.6 and 286.7 < self.mem['UCOLEG1']['V'] < 293.3):
        if not 'LCO 3.4.1' in self.TSMS_State.keys():
            self.TSMS_State['LCO 3.4.1'] = {'Start_time': self.Call_CNS_time[1],
                                            'End_time': self.Call_CNS_time[1]+7200}
            end_time = self.calculate_time(self.Call_CNS_time[1]+7200)
            self.ui.Performace_Mn.addItem('{}->{}\tLCO 3.4.1\tDissatisfaction'.format(self.Call_CNS_time[0],
                                                                                      end_time))
    # LCO 3.4.3: the P-T point must be inside the allowed SVM region (class 1).
    if self.predict_SVM(self.mem['UCOLEG1']['V'], self.mem['ZINST65']['V']) != 1:
        if not 'LCO 3.4.3' in self.TSMS_State.keys():
            self.TSMS_State['LCO 3.4.3'] = {'Start_time': self.Call_CNS_time[1],
                                            'End_time': self.Call_CNS_time[1]+1800}
            end_time = self.calculate_time(self.Call_CNS_time[1]+1800)
            self.ui.Performace_Mn.addItem('{}->{}\tLCO 3.4.3\tDissatisfaction'.format(self.Call_CNS_time[0],
                                                                                      end_time))
    # LCO 3.1.1: shutdown margin below the limit.
    current_SDM = self.Calculator_SDM()
    if current_SDM < 1770:
        if not 'LCO 3.1.1' in self.TSMS_State.keys():
            self.TSMS_State['LCO 3.1.1'] = {'Start_time': self.Call_CNS_time[1],
                                            'End_time': self.Call_CNS_time[1] + 900}
            end_time = self.calculate_time(self.Call_CNS_time[1] + 900)
            self.ui.Performace_Mn.addItem('{}->{}\tLCO 3.1.1\tDissatisfaction'.format(self.Call_CNS_time[0],
                                                                                      end_time))
def Monitoring_Operation_Mode(self):
    """Derive the technical-specification operation mode (1-6) from reactivity, power and cold-leg temperature."""
    reactivity = self.mem['CRETIV']['V']
    if reactivity >= 0:
        # Critical core: split on power level (5%).
        if self.mem['ZINST1']['V'] > 5:
            mode = 1
        elif self.mem['ZINST1']['V'] <= 5:
            mode = 2
    elif reactivity < 0:
        # Subcritical core: split on cold-leg temperature.
        cold_leg = self.mem['UCOLEG1']['V']
        if cold_leg >= 177:
            mode = 3
        elif 93 < cold_leg < 177:
            mode = 4
        elif cold_leg <= 93:
            mode = 5
        else:
            mode = 6  # unreachable for ordinary float values; kept for parity
    return mode
def TSMS_LCO_info(self, item):
    """Show a QMessageBox with status details for the LCO entry clicked in the monitoring list.

    Args:
        item: list-widget item whose text is '<start>-><end>\\t<LCO name>\\t<state>'.
    Each branch rebuilds the dialog text: current mode, follow-up action,
    start/now/end times, and the Action Fail/Success/Ongoing verdict.
    """
    LCO_name = item.text().split('\t')[1]
    if LCO_name == 'LCO 3.4.4':
        # Follow-up is entering Mode 3 before End_time.
        currnet_mode = self.Monitoring_Operation_Mode()
        cont = '[{}] 현재 운전 모드 : [Mode-{}]\n'.format(LCO_name, currnet_mode)
        cont += '=' * 50 + '\n'
        cont += 'Follow up action : Enter Mode 3\n'
        cont += '=' * 50 + '\n'
        cont += '시작 시간\t:\t현재 시간\t:\t종료 시간\n'
        cont += '{}\t:\t{}\t:\t{}\n'.format(self.calculate_time(self.TSMS_State[LCO_name]['Start_time']),
                                            self.calculate_time(self.Call_CNS_time[1]),
                                            self.calculate_time(self.TSMS_State[LCO_name]['End_time']))
        cont += '=' * 50 + '\n'
        if currnet_mode == 3:
            if self.TSMS_State[LCO_name]['End_time'] <= self.Call_CNS_time[1]:
                cont += '현재 운전 상태 : Action Fail\n'
            else:
                cont += '현재 운전 상태 : Action Success\n'
        elif currnet_mode == 1 or currnet_mode == 2:
            if self.TSMS_State[LCO_name]['End_time'] <= self.Call_CNS_time[1]:
                cont += '현재 운전 상태 : Action Fail\n'
            else:
                cont += '현재 운전 상태 : Action Ongoing\n'
        cont += '=' * 50 + '\n'
        QMessageBox.information(self, "LCO 정보", cont)
    elif LCO_name == 'LCO 3.4.1':
        # Follow-up is restoring RCS pressure and cold-leg temperature bands.
        currnet_mode = self.Monitoring_Operation_Mode()
        cont = '[{}] 현재 운전 모드 : [Mode-{}]\n'.format(LCO_name, currnet_mode)
        cont += '=' * 50 + '\n'
        cont += 'Follow up action :\n'
        cont += ' - 154.7 < RCS Pressure < 161.6 [kg/cm²]\n'
        cont += ' - 286.7 < RCS Cold-leg Temp < 293.3 [℃]\n'
        cont += '=' * 50 + '\n'
        cont += '시작 시간\t:\t현재 시간\t:\t종료 시간\n'
        cont += '{}\t:\t{}\t:\t{}\n'.format(self.calculate_time(self.TSMS_State[LCO_name]['Start_time']),
                                            self.calculate_time(self.Call_CNS_time[1]),
                                            self.calculate_time(self.TSMS_State[LCO_name]['End_time']))
        cont += '=' * 50 + '\n'
        if 154.7 < self.mem['ZINST65']['V'] < 161.6 and 286.7 < self.mem['UCOLEG1']['V'] < 293.3:
            if self.TSMS_State[LCO_name]['End_time'] <= self.Call_CNS_time[1]:
                cont += '현재 운전 상태 : Action Fail\n'
            else:
                cont += '현재 운전 상태 : Action Success\n'
        else:
            if self.TSMS_State[LCO_name]['End_time'] <= self.Call_CNS_time[1]:
                cont += '현재 운전 상태 : Action Fail\n'
            else:
                cont += '현재 운전 상태 : Action Ongoing\n'
        cont += '=' * 50 + '\n'
        QMessageBox.information(self, "LCO 정보", cont)
    elif LCO_name == 'LCO 3.4.3':
        # Follow-up is re-entering the allowed P-T region (SVM class 1).
        currnet_mode = self.Monitoring_Operation_Mode()
        cont = '[{}] 현재 운전 모드 : [Mode-{}]\n'.format(LCO_name, currnet_mode)
        cont += '=' * 50 + '\n'
        cont += 'Follow up action :\n'
        cont += ' - Enter allowable operation region\n'
        cont += ' - Limit Time : 30 min\n'
        cont += '=' * 50 + '\n'
        cont += '시작 시간\t:\t현재 시간\t:\t종료 시간\n'
        cont += '{}\t:\t{}\t:\t{}\n'.format(self.calculate_time(self.TSMS_State[LCO_name]['Start_time']),
                                            self.calculate_time(self.Call_CNS_time[1]),
                                            self.calculate_time(self.TSMS_State[LCO_name]['End_time']))
        cont += '=' * 50 + '\n'
        if self.predict_SVM(self.mem['UCOLEG1']['V'], self.mem['ZINST65']['V']) != 1:
            if self.TSMS_State[LCO_name]['End_time'] <= self.Call_CNS_time[1]:
                cont += '현재 운전 상태 : Action Fail\n'
            else:
                cont += '현재 운전 상태 : Action Ongoing\n'
        else:
            if self.TSMS_State[LCO_name]['End_time'] <= self.Call_CNS_time[1]:
                cont += '현재 운전 상태 : Action Fail\n'
            else:
                cont += '현재 운전 상태 : Action Success\n'
        cont += '=' * 50 + '\n'
        QMessageBox.information(self, "LCO 정보", cont)
    elif LCO_name == 'LCO 3.1.1':
        # Follow-up is boron injection to restore the shutdown margin.
        currnet_mode = self.Monitoring_Operation_Mode()
        cont = '[{}] 현재 운전 모드 : [Mode-{}]\n'.format(LCO_name, currnet_mode)
        cont += '=' * 50 + '\n'
        cont += 'Follow up action :\n'
        cont += ' - Boron Injectionl\n'
        cont += '=' * 50 + '\n'
        cont += '시작 시간\t:\t현재 시간\t:\t종료 시간\n'
        cont += '{}\t:\t{}\t:\t{}\n'.format(self.calculate_time(self.TSMS_State[LCO_name]['Start_time']),
                                            self.calculate_time(self.Call_CNS_time[1]),
                                            self.calculate_time(self.TSMS_State[LCO_name]['End_time']))
        cont += '=' * 50 + '\n'
        if self.Calculator_SDM() >= 1770:
            if self.TSMS_State[LCO_name]['End_time'] <= self.Call_CNS_time[1]:
                cont += '현재 운전 상태 : Action Fail\n'
            else:
                cont += '현재 운전 상태 : Action Ongoing\n'
        else:
            if self.TSMS_State[LCO_name]['End_time'] <= self.Call_CNS_time[1]:
                cont += '현재 운전 상태 : Action Fail\n'
            else:
                cont += '현재 운전 상태 : Action Success\n'
        cont += '=' * 50 + '\n'
        QMessageBox.information(self, "LCO 정보", cont)
    else:
        pass
def Make_P_T_SVM(self):
    """Train and return the pressure-temperature region SVM from SVM_PT_DATA.csv."""
    training = pd.read_csv('SVM_PT_DATA.csv', header=None)
    features = training.loc[:, 0:1].values
    labels = training[2].values
    # Fit the scaler on the training features, then scale them in place.
    self.scaler.fit(features)
    features = self.scaler.transform(features)
    # RBF-kernel SVM over the scaled (temperature, pressure) plane.
    classifier = svm.SVC(kernel='rbf', gamma='auto', C=1000)
    classifier.fit(features, labels)
    return classifier
def predict_SVM(self, Temp, Pressure):
    """Classify a (temperature, pressure) point with the trained P-T SVM."""
    scaled_point = self.scaler.transform([[Temp, Pressure]])
    prediction = self.model_svm.predict(scaled_point)
    return prediction[0]
# ======================================================================================================================
class sub_strategy_window(QDialog):
def __init__(self, mem):
super().__init__()
self.trig_mem = mem
self.Strategy_ui = Strategy_ui()
self.Strategy_ui.setupUi(self)
self.back_color = {
'gray': "background-color: rgb(229, 229, 229);",
'green': "background-color: rgb(0, 170, 0);",
'yellow': "background-color: rgb(255, 255, 0);",
'orange': "background-color: rgb(255, 85, 0);",
'red': "background-color: rgb(255, 0, 0);",
}
timer = | |
import argparse
import io
import unittest
from io import StringIO
from unittest.mock import Mock, patch, MagicMock, call
import allel
import zarr
from ddt import ddt, data
from scripts.vector_genotype_concordance import ConcordanceResult, classify, classify_sample, classify_chromosome, \
compute_hom_alt_mismatch, HOMOZIGOUS_ALTERNATE_CATEGORY, MISMATCH_CATEGORY, compute_het_mismatch, \
HETEROZIGOUS_CATEGORY, VectorGenotypeCallset, FilteringVectorGenotypeCallset, Commands, ArgumentParserBuilder, Summarizer, to_filtered_callset
# Shared fixtures for the concordance tests below.
A_URL = "A_URL"  # placeholder callset URL
A_FILTER_PATH = "A_FILTER_PATH"  # placeholder site-filter path
SAMPLE_A = 'A'
SAMPLE_B = 'B'
SOME_SAMPLES = [SAMPLE_A, SAMPLE_B]
CHROMOSOME_1 = "YL"
CHROMOSOME_2 = "YR"
SOME_CHROMOSOMES = [CHROMOSOME_1, CHROMOSOME_2]
# Genotype calls per chromosome ([-1, -1] is conventionally a missing call
# in scikit-allel; [2, 0] contains a non-primary allele index).
CHROMOSOME_1_GT = [[0, 1], [1, 1]]
CHROMOSOME_2_GT = [[0, 0], [-1, -1], [2, 0]]
SOME_CHROMOSOMES_GT = {CHROMOSOME_1: CHROMOSOME_1_GT, CHROMOSOME_2: CHROMOSOME_2_GT}
# Per-site boolean filter masks aligned with the genotype arrays above.
CHROMOSOME_1_SITE_FILTERS = [False, True]
CHROMOSOME_2_SITE_FILTERS = [True, False, True]
SOME_CHROMOSOMES_FILTERS = {CHROMOSOME_1: CHROMOSOME_1_SITE_FILTERS, CHROMOSOME_2: CHROMOSOME_2_SITE_FILTERS}
class TestSampleConcordanceResult(unittest.TestCase):
    """ConcordanceResult accumulates per-sample/per-chromosome counts and prints them as TSV."""
    def test_add_and_print(self):
        """Recorded statistics are printed with a header and one row per recorded pair."""
        r = ConcordanceResult()
        recorder = r.record_chromosome_statistics("SAMPLE", "CHROMOSOME")
        recorder.record("LEFT", "RIGHT", 20)
        recorder.record("LEFT1", "RIGHT1", 201)
        # A second recorder targets a different sample/chromosome bucket.
        recorder = r.record_chromosome_statistics("SAMPLE2", "OTHER_CHROMOSOME")
        recorder.record("OLEFT", "ORIGHT", 10)
        stream = io.StringIO()
        r.print(stream)
        self.assertEqual("""sample\tchromosome\tcontrol\ttest\tcount
SAMPLE\tCHROMOSOME\tLEFT\tRIGHT\t20
SAMPLE\tCHROMOSOME\tLEFT1\tRIGHT1\t201
SAMPLE2\tOTHER_CHROMOSOME\tOLEFT\tORIGHT\t10
""", stream.getvalue())
class TestClassify(unittest.TestCase):
    """Tests for classify(): builds a control/test callset per sample and
    delegates each sample to classify_sample."""

    def setUp(self) -> None:
        # Factories that manufacture the (mock) control/test callsets;
        # each returns a fixed MagicMock so dispatch can be asserted exactly.
        self.control_creator = Mock()
        self.control_creator.return_value = MagicMock()
        self.test_creator = Mock()
        self.test_creator.return_value = MagicMock()
        self.results = MagicMock()

    @patch('scripts.vector_genotype_concordance.classify_sample')
    def test_no_sample(self, classify_mock):
        """With no samples, no callsets are created and nothing is classified."""
        classify([], SOME_CHROMOSOMES, self.control_creator, self.test_creator, self.results)
        self.control_creator.assert_not_called()
        self.test_creator.assert_not_called()
        classify_mock.assert_not_called()

    @patch('scripts.vector_genotype_concordance.classify_sample')
    def test_with_sample_and_chromosomes(self, classify_mock):
        """classify_sample is invoked once per sample with the factory products."""
        classify(SOME_SAMPLES, SOME_CHROMOSOMES, self.control_creator, self.test_creator, self.results)
        self.assertCountEqual(classify_mock.call_args_list,
                              [call(SAMPLE_A, SOME_CHROMOSOMES, self.control_creator.return_value,
                                    self.test_creator.return_value, self.results),
                               call(SAMPLE_B, SOME_CHROMOSOMES, self.control_creator.return_value,
                                    self.test_creator.return_value, self.results)])

    @patch('scripts.vector_genotype_concordance.classify_sample')
    def test_no_chromosomes(self, classify_mock):
        """An empty chromosome list is still forwarded per sample."""
        classify(SOME_SAMPLES, [], self.control_creator, self.test_creator, self.results)
        self.assertCountEqual(classify_mock.call_args_list,
                              [call(SAMPLE_A, [], self.control_creator.return_value, self.test_creator.return_value,
                                    self.results),
                               call(SAMPLE_B, [], self.control_creator.return_value, self.test_creator.return_value,
                                    self.results)])
class TestClassifySample(unittest.TestCase):
    """Tests for classify_sample(): per-chromosome dispatch to classify_chromosome
    with the matching genotype vectors and statistics recorder."""

    def setUp(self) -> None:
        self.control = MagicMock()
        self.test = MagicMock()
        self.results = MagicMock()
        # One recorder mock per (sample, chromosome) pair:
        self.record_chromosome_statistics_1 = MagicMock()
        self.record_chromosome_statistics_2 = MagicMock()
        def side_effect(sample, chromosome):
            # Route each (SAMPLE_A, chromosome) request to its dedicated recorder.
            if sample == SAMPLE_A and chromosome == CHROMOSOME_1:
                return self.record_chromosome_statistics_1
            if sample == SAMPLE_A and chromosome == CHROMOSOME_2:
                return self.record_chromosome_statistics_2
            return None
        self.results.record_chromosome_statistics.side_effect = side_effect
        # Distinct genotype mocks so the wiring can be asserted exactly:
        self.control_gt_chromosome_1 = MagicMock()
        self.control_gt_chromosome_2 = MagicMock()
        self.test_gt_chromosome_1 = MagicMock()
        self.test_gt_chromosome_2 = MagicMock()
        control_map = {CHROMOSOME_1: self.control_gt_chromosome_1, CHROMOSOME_2: self.control_gt_chromosome_2}
        self.control.gt.side_effect = lambda chrom: control_map[chrom]
        test_map = {CHROMOSOME_1: self.test_gt_chromosome_1, CHROMOSOME_2: self.test_gt_chromosome_2}
        self.test.gt.side_effect = lambda chrom: test_map[chrom]

    @patch('scripts.vector_genotype_concordance.classify_chromosome')
    def test_no_chromosomes(self, classify_mock):
        """No chromosomes means classify_chromosome is never invoked."""
        classify_sample(SAMPLE_A, [], self.control, self.test, self.results)
        classify_mock.assert_not_called()

    @patch('scripts.vector_genotype_concordance.classify_chromosome')
    def test_with_chromosomes(self, classify_mock):
        """Each chromosome is classified with its own gt pair and recorder."""
        classify_sample(SAMPLE_A, SOME_CHROMOSOMES, self.control, self.test, self.results)
        self.assertCountEqual(classify_mock.call_args_list,
                              [call(control=self.control_gt_chromosome_1, test=self.test_gt_chromosome_1,
                                    recorder=self.record_chromosome_statistics_1),
                               call(control=self.control_gt_chromosome_2, test=self.test_gt_chromosome_2,
                                    recorder=self.record_chromosome_statistics_2)])
class TestClassifyChromosome(unittest.TestCase):
    """Tests for classify_chromosome(): computes het and hom-alt mismatches,
    then passes both counts on to the non-mismatch classifier."""

    def setUp(self) -> None:
        self.control = MagicMock()
        self.test = MagicMock()
        self.recorder = MagicMock()
        # Arbitrary mismatch counts returned by the patched helpers:
        self.het_mismatch = 12
        self.hom_alt_mismatch = 9

    # NOTE: decorators apply bottom-up, so the mock arguments arrive in
    # the order het, hom_alt, non_mismatch.
    @patch('scripts.vector_genotype_concordance._classify_non_mismatch')
    @patch('scripts.vector_genotype_concordance.compute_hom_alt_mismatch')
    @patch('scripts.vector_genotype_concordance.compute_het_mismatch')
    def test_classify_chromosome(self, het_mock, hom_alt_mock, non_mismatch_mock):
        """Both mismatch helpers run, and their results feed _classify_non_mismatch."""
        het_mock.return_value = self.het_mismatch
        hom_alt_mock.return_value = self.hom_alt_mismatch
        classify_chromosome(control=self.control, test=self.test, recorder=self.recorder)
        self.assertCountEqual(het_mock.call_args_list,
                              [call(self.control, self.test, self.recorder)])
        self.assertCountEqual(hom_alt_mock.call_args_list,
                              [call(self.control, self.test, self.recorder)])
        self.assertCountEqual(non_mismatch_mock.call_args_list,
                              [call(self.het_mismatch, self.hom_alt_mismatch, self.control, self.test, self.recorder)])
@ddt
class TestMismatchChromosome(unittest.TestCase):
    """Data-driven tests for the het / hom-alt mismatch counters."""

    def setUp(self) -> None:
        self.recorder = MagicMock()

    # Each tuple is (control genotype, test genotype, expected mismatch count).
    # Het mismatches only occur between two heterozygous calls whose allele
    # sets differ; missing (-1,-1) and homozygous calls never count.
    @data(
        ([-1, -1], [0, 1], 0),
        ([0, 0], [0, 1], 0),
        ([1, 1], [0, 1], 0),
        ([0, 1], [-1, -1], 0),
        ([0, 1], [0, 0], 0),
        ([0, 1], [1, 1], 0),
        ([0, 1], [0, 1], 0),
        ([0, 1], [0, 2], 1),
        ([0, 1], [0, 3], 1),
        ([0, 2], [0, 1], 1),
        ([0, 2], [0, 2], 0),
        ([1, 2], [1, 2], 0),
        ([1, 2], [1, 3], 1),
    )
    def test_compute_het_mismatch(self, value):
        control, test, expected = value
        lgt = allel.GenotypeVector([control])
        rgt = allel.GenotypeVector([test])
        actual = compute_het_mismatch(control=lgt, test=rgt, recorder=self.recorder)
        self.assertEqual(expected, actual)
        # The count must also be recorded under the het/mismatch categories:
        self.assertCountEqual(self.recorder.record.call_args_list,
                              [call(HETEROZIGOUS_CATEGORY, MISMATCH_CATEGORY, expected)])

    # Hom-alt mismatches only occur between two homozygous-alternate calls
    # carrying different alternate alleles.
    @data(
        ([0, 0], [0, 0], 0),
        ([-1, -1], [1, 1], 0),
        ([0, 0], [1, 1], 0),
        ([0, 1], [1, 1], 0),
        ([1, 1], [-1, -1], 0),
        ([1, 1], [0, 0], 0),
        ([1, 1], [0, 1], 0),
        ([1, 1], [1, 1], 0),
        ([2, 2], [2, 2], 0),
        ([3, 3], [3, 3], 0),
        ([1, 1], [2, 2], 1),
        ([1, 1], [3, 3], 1),
        ([2, 2], [1, 1], 1),
        ([2, 2], [3, 3], 1),
        ([3, 3], [1, 1], 1),
        ([3, 3], [2, 2], 1),
    )
    def test_compute_hom_alt_mismatch(self, value):
        control, test, expected = value
        lgt = allel.GenotypeVector([control])
        rgt = allel.GenotypeVector([test])
        actual = compute_hom_alt_mismatch(control=lgt, test=rgt, recorder=self.recorder)
        self.assertEqual(expected, actual)
        self.assertCountEqual(self.recorder.record.call_args_list,
                              [call(HOMOZIGOUS_ALTERNATE_CATEGORY, MISMATCH_CATEGORY, expected)])
class TestCallset(unittest.TestCase):
    """Tests for VectorGenotypeCallset: reads per-chromosome genotype vectors
    out of a zarr hierarchy."""

    def test_call_set(self):
        """gt(chromosome) returns the stored genotypes for that chromosome."""
        zarr_group = generate_gt_data(SAMPLE_A, SOME_CHROMOSOMES_GT)
        under_test = VectorGenotypeCallset(sample=SAMPLE_A, callset=zarr_group)
        actual = under_test.gt(CHROMOSOME_1)
        self.assertTrue((allel.GenotypeVector(CHROMOSOME_1_GT) == actual).all())
        actual = under_test.gt(CHROMOSOME_2)
        self.assertTrue((allel.GenotypeVector(CHROMOSOME_2_GT) == actual).all())

    @patch('scripts.vector_genotype_concordance.zarr.open')
    def test_new_instance(self, mock):
        """new_instance opens the formatted path read-only and wraps the result."""
        expected = MagicMock()
        def side_effect(path, mode='z'):
            # Only the expected (path, mode='r') combination yields the store.
            if mode == 'r' and path == SAMPLE_A:
                return expected
            return None
        mock.side_effect = side_effect
        actual = VectorGenotypeCallset.new_instance(sample=SAMPLE_A, file_path_format="{sample}")
        self.assertEqual(expected, actual.callset)
        self.assertEqual(SAMPLE_A, actual.sample)
class TestFilteredCallset(unittest.TestCase):
    """Tests for FilteringVectorGenotypeCallset: applies per-site boolean
    filters to the genotypes of a wrapped callset."""

    def setUp(self) -> None:
        self.callset = MagicMock()
        # The wrapped callset serves the fixture genotypes for any chromosome.
        self.callset.gt.side_effect = lambda chromosome: allel.GenotypeVector(SOME_CHROMOSOMES_GT[chromosome])

    def test_call_filtered_set(self):
        """Only sites whose filter_pass entry is True survive."""
        site_filters = generate_filter(SOME_CHROMOSOMES_FILTERS)
        under_test = FilteringVectorGenotypeCallset(site_filters=site_filters, callset=self.callset)
        actual = under_test.gt(CHROMOSOME_1)
        self.assertTrue((allel.GenotypeVector([[1, 1]]) == actual).all())
        actual = under_test.gt(CHROMOSOME_2)
        self.assertTrue((allel.GenotypeVector([[0, 0], [2, 0]]) == actual).all())

    @patch('scripts.vector_genotype_concordance.zarr.open')
    def test_new_instance(self, mock):
        """new_test_instance opens the filter store read-only and keeps the callset."""
        site_filter_path = "A_PATH"
        callset = MagicMock()
        expected = MagicMock()
        def side_effect(path, mode='z'):
            if mode == 'r' and path == site_filter_path:
                return expected
            return None
        mock.side_effect = side_effect
        actual = FilteringVectorGenotypeCallset.new_test_instance(site_filter_path=site_filter_path, callset=callset)
        self.assertEqual(callset, actual.callset)
        self.assertEqual(expected, actual.site_filters)
class TestToFilteredCallset(unittest.TestCase):
    """Tests for to_filtered_callset(): composes a plain callset with the
    filtering wrapper."""

    def setUp(self) -> None:
        self.callset_mock = MagicMock()
        self.filtered_callset_mock = MagicMock()
        self.a_file_path_format = "A_FILE_PATH_FORMAT"

    # Decorators apply bottom-up: first arg is the FilteringVectorGenotypeCallset
    # factory mock, second the VectorGenotypeCallset factory mock.
    @patch('scripts.vector_genotype_concordance.VectorGenotypeCallset.new_instance')
    @patch('scripts.vector_genotype_concordance.FilteringVectorGenotypeCallset.new_instance')
    def test_to_filtered_callset(self, filtered_callset_mock, callset_mock):
        """The inner callset is built first, then wrapped with the filter."""
        callset_mock.side_effect = self._callset_mock_side_effect()
        filtered_callset_mock.side_effect = self.filtered_callset_mock_side_effect()
        actual = to_filtered_callset(A_URL, A_FILTER_PATH, self.a_file_path_format, SAMPLE_A)
        self.assertEqual(self.filtered_callset_mock, actual)

    def filtered_callset_mock_side_effect(self):
        # Returns the filtered mock only for the exact expected arguments.
        return lambda url, path, callset: self.filtered_callset_mock \
            if callset == self.callset_mock and url == A_URL and path == A_FILTER_PATH else None

    def _callset_mock_side_effect(self):
        # Returns the inner callset mock only for the exact expected arguments.
        return lambda sample='', file_path_format='': self.callset_mock \
            if sample == SAMPLE_A and file_path_format == self.a_file_path_format else None
class TestCountParser(unittest.TestCase):
    """CLI tests for the 'count' subcommand built by ArgumentParserBuilder."""

    def setUp(self):
        # ErrorRaisingArgumentParser raises ValueError instead of exiting,
        # which keeps argparse failures testable in-process.
        self.under_test = ArgumentParserBuilder.new_instance(lambda: ErrorRaisingArgumentParser()) \
            .with_count() \
            .build()

    def test_count(self):
        """All long options parse into the expected namespace."""
        args = self.under_test.parse_args(
            ['count', '--control', 'control', '--test', 'test', '--output', 'output', '--samples',
             'sample1', 'sample2', '--chromosomes', 'chromosome1', 'chromosome2', '--filter-catalog-url',
             'url', '--filter-path', 'path'])
        self.assertEqual(args,
                         argparse.Namespace(control='control', test='test', output='output',
                                            samples=['sample1', 'sample2'], chromosomes=['chromosome1', 'chromosome2'],
                                            url='url', path='path', command=Commands.COUNT))

    def test_single_sample(self):
        """--samples accepts a single value and still yields a list."""
        args = self.under_test.parse_args(
            ['count', '--control', 'control', '--test', 'test', '--output', 'output', '--samples',
             'sample1', '--chromosomes', 'chromosome1', 'chromosome2', '--filter-catalog-url',
             'url', '--filter-path', 'path'])
        self.assertEqual(args,
                         argparse.Namespace(control='control', test='test', output='output',
                                            samples=['sample1'], chromosomes=['chromosome1', 'chromosome2'],
                                            url='url', path='path', command=Commands.COUNT))

    def test_single_chromosome(self):
        """--chromosomes accepts a single value and still yields a list."""
        args = self.under_test.parse_args(
            ['count', '--control', 'control', '--test', 'test', '--output', 'output', '--samples',
             'sample1', 'sample2', '--chromosomes', 'chromosome1', '--filter-catalog-url',
             'url', '--filter-path', 'path'])
        self.assertEqual(args,
                         argparse.Namespace(control='control', test='test', output='output',
                                            samples=['sample1', 'sample2'], chromosomes=['chromosome1'],
                                            url='url', path='path', command=Commands.COUNT))

    def test_output_is_defaulted(self):
        """Omitting --output defaults it to None."""
        args = self.under_test.parse_args(
            ['count', '--control', 'control', '--test', 'test', '--samples',
             'sample1', 'sample2', '--chromosomes', 'chromosome1', 'chromosome2', '--filter-catalog-url',
             'url', '--filter-path', 'path'])
        self.assertEqual(args,
                         argparse.Namespace(control='control', test='test', output=None,
                                            samples=['sample1', 'sample2'], chromosomes=['chromosome1', 'chromosome2'],
                                            url='url', path='path', command=Commands.COUNT))

    def test_chromosomes_is_defaulted(self):
        """Omitting --chromosomes defaults to the Anopheles chromosome arms."""
        args = self.under_test.parse_args(
            ['count', '--control', 'control', '--test', 'test', '--output', 'output', '--samples',
             'sample1', 'sample2', '--filter-catalog-url',
             'url', '--filter-path', 'path'])
        self.assertEqual(args,
                         argparse.Namespace(control='control', test='test', output='output',
                                            samples=['sample1', 'sample2'], chromosomes=['2L', '2R', '3L', '3R', 'X'],
                                            url='url', path='path', command=Commands.COUNT))

    def test_url_is_defaulted(self):
        """Omitting --filter-catalog-url falls back to the MalariaGEN catalog."""
        args = self.under_test.parse_args(
            ['count', '--control', 'control', '--test', 'test', '--output', 'output', '--samples',
             'sample1', 'sample2', '--chromosomes', 'chromosome1', 'chromosome2', '--filter-path', 'path'])
        self.assertEqual(args,
                         argparse.Namespace(control='control', test='test', output='output',
                                            samples=['sample1', 'sample2'], chromosomes=['chromosome1', 'chromosome2'],
                                            url='https://malariagen.github.io/intake/gcs.yml', path='path',
                                            command=Commands.COUNT))

    def test_path_is_defaulted(self):
        """Omitting --filter-path falls back to the default site-filter dataset."""
        args = self.under_test.parse_args(
            ['count', '--control', 'control', '--test', 'test', '--output', 'output', '--samples',
             'sample1', 'sample2', '--chromosomes', 'chromosome1', 'chromosome2', '--filter-catalog-url', 'url'])
        self.assertEqual(args,
                         argparse.Namespace(control='control', test='test', output='output',
                                            samples=['sample1', 'sample2'], chromosomes=['chromosome1', 'chromosome2'],
                                            url='url', path='ag3.site_filters_dt_20200416_gamb_colu_arab',
                                            command=Commands.COUNT))

    def test_count_short_options(self):
        """Short option aliases parse identically to the long forms."""
        args = self.under_test.parse_args(
            ['count', '-c', 'control', '-t', 'test', '-o', 'output', '-s', 'sample1', 'sample2', '--chromosomes',
             'chromosome1', 'chromosome2', '--filter-catalog-url', 'url', '-f', 'path'])
        self.assertEqual(args,
                         argparse.Namespace(control='control', test='test', output='output',
                                            samples=['sample1', 'sample2'], chromosomes=['chromosome1', 'chromosome2'],
                                            url='url', path='path', command=Commands.COUNT))

    def test_fail_if_no_control(self):
        """--control is mandatory."""
        with self.assertRaises(ValueError) as cm:
            self.under_test.parse_args(
                ['count', '--test', 'test', '--output', 'output', '--samples',
                 'sample1', 'sample2', '--chromosomes', 'chromosome1', 'chromosome2', '--filter-catalog-url',
                 'url', '--filter-path', 'path'])
        self.assertEqual(cm.exception.args[0], 'the following arguments are required: --control/-c')

    def test_fail_if_no_test(self):
        """--test is mandatory."""
        with self.assertRaises(ValueError) as cm:
            self.under_test.parse_args(
                ['count', '--control', 'control', '--output', 'output', '--samples',
                 'sample1', 'sample2', '--chromosomes', 'chromosome1', 'chromosome2', '--filter-catalog-url',
                 'url', '--filter-path', 'path'])
        self.assertEqual(cm.exception.args[0], 'the following arguments are required: --test/-t')

    def test_fail_if_no_samples(self):
        """--samples is mandatory."""
        with self.assertRaises(ValueError) as cm:
            self.under_test.parse_args(
                ['count', '--control', 'control', '--test', 'test', '--output', 'output', '--chromosomes',
                 'chromosome1', 'chromosome2', '--filter-catalog-url', 'url', '--filter-path', 'path'])
        self.assertEqual(cm.exception.args[0], 'the following arguments are required: --samples/-s')
class TestSummarizeParser(unittest.TestCase):
    """CLI tests for the 'summarize' subcommand built by ArgumentParserBuilder."""

    def setUp(self):
        # Same in-process error handling as TestCountParser.
        self.under_test = ArgumentParserBuilder.new_instance(lambda: ErrorRaisingArgumentParser()) \
            .with_summarize() \
            .build()

    def test_summarize(self):
        """--inputs and --output parse into the expected namespace."""
        args = self.under_test.parse_args(
            ['summarize', '--inputs', 'input1', 'input2', '--output', 'output'])
        self.assertEqual(args,
                         argparse.Namespace(output='output', inputs=['input1', 'input2'], command=Commands.SUMMARIZE))

    def test_single_input(self):
        """--inputs accepts a single value and still yields a list."""
        args = self.under_test.parse_args(
            ['summarize', '--inputs', 'input1', '--output', 'output'])
        self.assertEqual(args,
                         argparse.Namespace(output='output', inputs=['input1'], command=Commands.SUMMARIZE))

    def test_output_is_defaulted(self):
        """Omitting --output defaults it to None."""
        args = self.under_test.parse_args(
            ['summarize', '--inputs', 'input1', 'input2'])
        self.assertEqual(args,
                         argparse.Namespace(output=None, inputs=['input1', 'input2'], command=Commands.SUMMARIZE))

    def test_summarize_short_options(self):
        """Short option aliases parse identically to the long forms."""
        args = self.under_test.parse_args(
            ['summarize', '-i', 'input1', 'input2', '-o', 'output'])
        self.assertEqual(args,
                         argparse.Namespace(output='output', inputs=['input1', 'input2'], command=Commands.SUMMARIZE))

    def test_fail_if_no_input(self):
        """--inputs is mandatory."""
        with self.assertRaises(ValueError) as cm:
            self.under_test.parse_args(
                ['summarize', '--output', 'output'])
        self.assertEqual(cm.exception.args[0], 'the following arguments are required: --inputs/-i')
def mock_function():
    """No-op placeholder callable used as a stand-in in tests."""
    return None
class ErrorRaisingArgumentParser(argparse.ArgumentParser):
    """ArgumentParser variant that raises instead of exiting on parse errors.

    argparse's default error() prints usage and calls sys.exit(); raising a
    ValueError keeps parse failures observable inside unit tests.
    """

    def error(self, message):
        raise ValueError(message)
def generate_gt_data(sample: str, chromosome_gt: dict, chunks=1):
    """Build an in-memory zarr hierarchy <sample>/<chrom>/calldata/GT.

    chromosome_gt maps chromosome name to a list of [allele, allele] calls;
    each call is stored for a single sample (middle axis of size 1).
    """
    root = zarr.group()
    sample_group = root.create_group(sample)
    sample_group.create_groups(*chromosome_gt.keys())
    for chromosome in sample_group:
        genotypes = chromosome_gt[chromosome]
        n_sites = len(genotypes)
        # Wrap each call in a singleton list to add the one-sample axis.
        per_sample_calls = [[call] for call in genotypes]
        calldata_group = sample_group[chromosome].create_group("calldata")
        calldata_group.create_dataset('GT', shape=(n_sites, 1, 2), chunks=(chunks, 1, 2),
                                      dtype='int8', data=per_sample_calls)
    return root
def generate_filter(chromosome_bool: dict):
    """Build an in-memory zarr hierarchy <chrom>/variants/filter_pass.

    chromosome_bool maps chromosome name to a list of per-site booleans.
    """
    root = zarr.group()
    root.create_groups(*chromosome_bool.keys())
    for chromosome in root:
        mask = chromosome_bool[chromosome]
        n_sites = len(mask)
        variants_group = root[chromosome].create_group('variants')
        variants_group.create_dataset('filter_pass', shape=(n_sites,), chunks=(n_sites,),
                                      dtype='bool', data=mask)
    return root
# Fixture: full concordance-summary TSV for sample AA0052-C, chromosomes 2L/2R.
SUMMARY_AA0052 = """sample\tchromosome\tcontrol\ttest\tcount
AA0052-C\t2L\thet\tmismatch\t0
AA0052-C\t2L\thom_alt\tmismatch\t0
AA0052-C\t2L\thom_ref\thom_ref\t31905424
AA0052-C\t2L\thom_ref\thet\t11
AA0052-C\t2L\thom_ref\thom_alt\t0
AA0052-C\t2L\thom_ref\tmissing\t35
AA0052-C\t2L\thet\thom_ref\t15
AA0052-C\t2L\thet\thet\t317310
AA0052-C\t2L\thet\thom_alt\t16
AA0052-C\t2L\thet\tmissing\t0
AA0052-C\t2L\thom_alt\thom_ref\t0
AA0052-C\t2L\thom_alt\thet\t9
AA0052-C\t2L\thom_alt\thom_alt\t248559
AA0052-C\t2L\thom_alt\tmissing\t0
AA0052-C\t2L\tmissing\thom_ref\t55
AA0052-C\t2L\tmissing\thet\t0
AA0052-C\t2L\tmissing\thom_alt\t1
AA0052-C\t2L\tmissing\tmissing\t58548
AA0052-C\t2R\thet\tmismatch\t0
AA0052-C\t2R\thom_alt\tmismatch\t0
AA0052-C\t2R\thom_ref\thom_ref\t39944849
AA0052-C\t2R\thom_ref\thet\t12
AA0052-C\t2R\thom_ref\thom_alt\t0
AA0052-C\t2R\thom_ref\tmissing\t27
AA0052-C\t2R\thet\thom_ref\t12
AA0052-C\t2R\thet\thet\t371735
AA0052-C\t2R\thet\thom_alt\t3
AA0052-C\t2R\thet\tmissing\t0
AA0052-C\t2R\thom_alt\thom_ref\t0
AA0052-C\t2R\thom_alt\thet\t4
AA0052-C\t2R\thom_alt\thom_alt\t175609
AA0052-C\t2R\thom_alt\tmissing\t0
AA0052-C\t2R\tmissing\thom_ref\t38
AA0052-C\t2R\tmissing\thet\t0
AA0052-C\t2R\tmissing\thom_alt\t1
AA0052-C\t2R\tmissing\tmissing\t69377
"""
# Fixture: full concordance-summary TSV for sample AA0053-C, chromosomes 2L/2R.
SUMMARY_AA0053 = """sample\tchromosome\tcontrol\ttest\tcount
AA0053-C\t2L\thet\tmismatch\t0
AA0053-C\t2L\thom_alt\tmismatch\t0
AA0053-C\t2L\thom_ref\thom_ref\t25437963
AA0053-C\t2L\thom_ref\thet\t9
AA0053-C\t2L\thom_ref\thom_alt\t0
AA0053-C\t2L\thom_ref\tmissing\t46
AA0053-C\t2L\thet\thom_ref\t7
AA0053-C\t2L\thet\thet\t268531
AA0053-C\t2L\thet\thom_alt\t3
AA0053-C\t2L\thet\tmissing\t0
AA0053-C\t2L\thom_alt\thom_ref\t0
AA0053-C\t2L\thom_alt\thet\t2
AA0053-C\t2L\thom_alt\thom_alt\t113941
AA0053-C\t2L\thom_alt\tmissing\t0
AA0053-C\t2L\tmissing\thom_ref\t18
AA0053-C\t2L\tmissing\thet\t0
AA0053-C\t2L\tmissing\thom_alt\t0
AA0053-C\t2L\tmissing\tmissing\t48865
AA0053-C\t2R\thet\tmismatch\t0
AA0053-C\t2R\thom_alt\tmismatch\t0
AA0053-C\t2R\thom_ref\thom_ref\t32809697
AA0053-C\t2R\thom_ref\thet\t11
AA0053-C\t2R\thom_ref\thom_alt\t0
AA0053-C\t2R\thom_ref\tmissing\t55
AA0053-C\t2R\thet\thom_ref\t19
AA0053-C\t2R\thet\thet\t365106
AA0053-C\t2R\thet\thom_alt\t8
AA0053-C\t2R\thet\tmissing\t4
AA0053-C\t2R\thom_alt\thom_ref\t0
AA0053-C\t2R\thom_alt\thet\t1
AA0053-C\t2R\thom_alt\thom_alt\t151264
AA0053-C\t2R\thom_alt\tmissing\t0
AA0053-C\t2R\tmissing\thom_ref\t26
AA0053-C\t2R\tmissing\thet\t0
AA0053-C\t2R\tmissing\thom_alt\t1
AA0053-C\t2R\tmissing\tmissing\t64670
"""
class TestSummarization(unittest.TestCase):
def setUp(self) | |
name = read_scalars(folds_file, True, True)
>>> fold_number = 4
>>> indices = [i for i,x in enumerate(folds) if x == fold_number]
>>> edge_indices = extract_edge(indices, neighbor_lists)
>>> (len(indices), len(edge_indices))
(1151, 111)
Write results to vtk file and view (skip test):
>>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
>>> IDs = -1 * np.ones(len(folds)) # doctest: +SKIP
>>> IDs[indices] = 1 # doctest: +SKIP
>>> IDs[edge_indices] = 2 # doctest: +SKIP
>>> rewrite_scalars(vtk_file, 'extract_edge.vtk', IDs, 'edges_of_fold', IDs) # doctest: +SKIP
>>> plot_surfaces('extract_edge.vtk') # doctest: +SKIP
"""
from mindboggle.guts.mesh import find_neighborhood
N1 = find_neighborhood(neighbor_lists, indices, nedges=1)
N2 = find_neighborhood(neighbor_lists, N1, nedges=1)
edge_indices = list(set(N2).intersection(indices))
return edge_indices
def topo_test(index, values, neighbor_lists):
    """
    Test to see if vertex is a "simple point".

    A simple point is a vertex that when added to or removed from an object
    (e.g., a curve) on a surface mesh does not alter the object's topology.

    "Simple" is not to be mistaken with the following usage:
    "A vertex is usually assigned one of five possible classifications:
    simple, complex, boundary, interior edge, or corner vertex.
    A simple vertex is surrounded by a closed fan of triangles".

    Parameters
    ----------
    index : integer
        index of vertex
    values : numpy array of integers or floats
        values for all vertices
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex

    Returns
    -------
    sp : bool
        simple point or not?
    n_inside : integer
        number of neighboring vertices with a value greater than threshold

    Examples
    --------
    >>> # Square with a center vertex:
    >>> # indices [[0,1,2],[3,4,6],[7,8,9]] = 0 and indices [2,4,6] = 1:
    >>> import numpy as np
    >>> from mindboggle.guts.mesh import topo_test
    >>> values = np.array([0,0,1,0,1,0,1,0,0])
    >>> neighbor_lists = [[1,3],[0,2,3,4],[1,4,5],
    ...                   [0,1,4,6],[1,2,3,5,6,7],[2,4,7,8],
    ...                   [3,4,7],[4,5,6,8],[5,7]]
    >>> sps = []
    >>> for index in range(9):
    ...     sp, n_inside = topo_test(index, values, neighbor_lists)
    ...     sps.append(sp)
    >>> sps
    [False, True, True, True, False, True, True, True, False]

    """
    import numpy as np

    # Make sure argument is a numpy array:
    if not isinstance(values, np.ndarray):
        values = np.array(values)

    # Find neighbors to the input vertex, and binarize them
    # into those greater or less than a class boundary threshold equal to 0.5
    # ("inside" and "outside"); count inside and outside neighbors:
    I_neighbors = neighbor_lists[index]
    neighbor_values = values[I_neighbors]
    inside = [I_neighbors[i] for i, x in enumerate(neighbor_values) if x > 0.5]
    n_inside = len(inside)
    n_outside = len(I_neighbors) - n_inside

    # If the number of inside or outside neighbors is zero,
    # then the vertex IS NOT a simple point:
    if n_outside * n_inside == 0:
        sp = False
    # Or if either the number of inside or outside neighbors is one,
    # then the vertex IS a simple point:
    elif n_outside == 1 or n_inside == 1:
        sp = True
    # Otherwise, test to see if all of the inside neighbors share neighbors
    # with each other, in which case the vertex IS a simple point:
    else:
        # For each neighbor exceeding the threshold,
        # find its neighbors that also exceed the threshold,
        # then store these neighbors' indices in a sublist of "N":
        labels = list(range(1, n_inside + 1))
        N = []
        for i_in in range(n_inside):
            new_neighbors = neighbor_lists[inside[i_in]]
            new_neighbors = [x for x in new_neighbors
                             if values[x] > 0.5 if x != index]
            new_neighbors.append(inside[i_in])
            N.append(new_neighbors)

        # Consolidate labels of connected vertices --
        # Loop through neighbors (lists within "N"),
        # reassigning the labels for the lists until each label's
        # list(s) has a unique set of vertices:
        change = True
        while change:
            change = False

            # Loop through pairs of inside neighbors
            # and continue if their two labels are different:
            for i in range(n_inside - 1):
                for j in range(i + 1, n_inside):
                    if labels[i] != labels[j]:

                        # Assign the two subsets the same label
                        # if they share at least one vertex,
                        # and continue looping:
                        if set(N[i]).intersection(N[j]):
                            labels[i] = max([labels[i], labels[j]])
                            labels[j] = labels[i]
                            change = True

        # The vertex is a simple point if all of its neighbors
        # (if any) share neighbors with each other (one unique label).
        # (Replaces the original side-effect list comprehension that
        # appended into a temporary list to deduplicate labels.)
        sp = len(set(labels)) == 1

    return sp, n_inside
# def fill_holes(regions, neighbor_lists, values=[], exclude_range=[],
# background_value=-1):
# """
# Fill holes in regions on a surface mesh by using region boundaries.
#
# NOTE: assumes one set of connected vertices per region
#
# Steps ::
#
# 1. Segment region vertex neighbors into connected vertices (region boundaries).
# 2. Remove the largest region boundary, presumably the
# outer contour of the region, leaving smaller boundaries,
# presumably the contours of holes within the region.
# 3. Call label_holes() to fill holes with surrounding region numbers.
#
# Parameters
# ----------
# regions : numpy array of integers
# region numbers for all vertices
# neighbor_lists : list of lists of integers
# each list contains indices to neighboring vertices for each vertex
# values : list of integers
# values for vertices, for use in determining which holes to remove
# exclude_range : list of two floats
# hole is not filled if it contains values within this range
# (prevents cases where surface connected by folds mistaken for holes)
# background_value : integer
# background value
#
# Returns
# -------
# regions : numpy array of integers
# region numbers for all vertices
#
# Examples
# --------
# >>> import numpy as np
# >>> from mindboggle.guts.mesh import fill_holes
# >>> from mindboggle.guts.mesh import find_neighbors_from_file
# >>> from mindboggle.mio.vtks import read_scalars
# >>> from mindboggle.mio.fetch_data import prep_tests
# >>> urls, fetch_data = prep_tests()
# >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
# >>> background_value = -1
# >>> # Select one fold
# >>> folds, name = read_scalars(folds_file, True, True)
# >>> fold_number = 4
# >>> folds[folds != fold_number] = background_value
# >>> I = np.where(folds==fold_number)[0]
# >>> neighbor_lists = find_neighbors_from_file(folds_file)
# >>> ## Find vertex whose removal (with its neighbors) would create a hole:
# >>> #for index in I:
# ... # N1 = neighbor_lists[index]
# ... # stop = True
# ... # for n in N1:
# ... # if any(folds[neighbor_lists[n]] == background_value):
# ... # stop = False
# ... # break
# ... # else:
# ... # for f in neighbor_lists[n]:
# ... # if any(folds[neighbor_lists[f]] == background_value):
# ... # stop = False
# ... # break
# ... # if stop:
# ... # break
# >>> index = I[100]
# >>> N = neighbor_lists[index]
# >>> N.append(index)
# >>> N
# [36768, 37670, 36769, 37679, 38522, 38529, 37688, 37689, 37678]
# >>> folds[N] = background_value
# >>> I = [x for x in I if x not in N]
#
# View hole (skip test):
#
# >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP
# >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
# >>> rewrite_scalars(folds_file, 'hole.vtk', folds, 'hole', folds) # doctest: +SKIP
# >>> plot_surfaces('hole.vtk') # doctest: +SKIP
#
# Fill hole:
#
# >>> exclude_range = []
# >>> regions = np.copy(folds)
# >>> values = np.copy(folds)
# >>> regions = fill_holes(regions, neighbor_lists, values, exclude_range,
# ... background_value)
# >>> indices = [i for i,x in enumerate(regions) if x != background_value]
# >>> indices[0:10]
# [34148, 34149, 34150, 34151, 34152, 34153, 34154, 34155, 34157, 34158]
#
# View filled hole (skip test):
#
# >>> rewrite_scalars(folds_file, 'fill_hole.vtk', regions, 'fill_hole', regions) # doctest: +SKIP
# >>> plot_surfaces('fill_hole.vtk') # doctest: +SKIP
#
# """
# import numpy as np
# from mindboggle.guts.segment import segment
#
# # Make sure argument is a numpy array
# if not isinstance(regions, np.ndarray):
# regions = np.array(regions)
#
# def label_holes(holes, regions, neighbor_lists):
# """
# Fill holes in regions on a surface mesh.
#
# Parameters
# ----------
# holes : list or array of integers
# hole numbers for all vertices
# regions : numpy array of integers
# region numbers for all vertices
# neighbor_lists : list of lists of integers
# each list contains indices to neighboring vertices for each vertex
#
# Returns
# -------
# regions : numpy array of integers
# region numbers for all vertices
#
# """
# import numpy as np
#
# # Make sure argument is a numpy array
# if not isinstance(regions, np.ndarray):
# regions = np.array(regions)
#
# # Identify the vertices for each hole
# hole_numbers = [x for x in np.unique(holes) if x != background_value]
# for n_hole | |
<filename>cuhpy.py
'''
cuhpy - A python implementation of the Colorado Urban Hydrograph Procedure (2.0.0)
Version P2.0.0
by <NAME> / RESPEC
June 2017
Code converted from CUHP 2.0.0 VBA / Excel
Requires math & numpy - Download windows binaries from (http://www.lfd.uci.edu/~gohlke/pythonlibs/)
Requires cuhpdata.py
matplotlib is optional optional for plotting functions
'''
import math
import numpy as np
import cuhpdata
import datetime
import collections
_dataLib = cuhpdata._Data()  # coefficient tables bundled with cuhpdata
almostZero = 0.00001 #Global Value used in VBA version, might as well use it here
errorMessages = [] #List of various error messages
class Subcatchment():
"""The Subcatchment object represents individual subcatchments and their attributes
It computes the parameters for each subcatchment as described and is independent of rainfall and raingage data"""
    def __init__(self,name,area,centroidLength,length,slope,impervious,perviousDepressionStorage,
                 imperviousDepressionStorage,hortonsInitial,hortonsDecay,hortonsFinal,dciaLevel=0,
                 dcifOverride=None,rpfOverride=None,ctOverride=None,cpOverride=None,w50Override=None,
                 w75Override=None,k50Override=None,k75Override=None,swmmNode=None,rainGageName=None,comment=None):
        """Store the user-supplied subcatchment attributes and derive the
        pervious/impervious fractions and average infiltration.

        Required physical inputs are coerced to float; overrides default to
        None and are validated later in _computePerviousFractions. Units are
        per the trailing comments on each attribute.
        """
        # --User Input Variables--
        self.name=str(name) #subcatchment name (string)
        self.area=float(area) #subcatchment area (mi^2)
        self.centroidLength=float(centroidLength) #length to Centroid (mi)
        self.length=float(length) #subcatchment length (mi)
        self.slope=float(slope) #subcatchment slope (ft/ft)
        self.impervious=float(impervious) #impervious percentage(%) - as a whole interger, i.e. 9.1 not .0091 for 9.1%
        self.perviousDepressionStorage=float(perviousDepressionStorage) #Maximum pervious depression storage (in)
        self.imperviousDepressionStorage=float(imperviousDepressionStorage) #Maximum impervious depression storage (in)
        self.hortonsInitial=float(hortonsInitial) #Horton's initial infiltration rate (in/hr)
        self.hortonsDecay=float(hortonsDecay) #Horton's decay coefficient
        self.hortonsFinal=float(hortonsFinal) #Horton's final infiltration rate (in/hr)
        self.dciaLevel=int(dciaLevel) #Directly connected impervious area level (0,1,2)(optional)
        self.dcifOverride=dcifOverride #User input override for directly connected impervious area fraction (optional)
        self.rpfOverride=rpfOverride #User input override value for receiving pervious area fraction (optional)
        self.ctOverride=ctOverride #User input override value for time to peak coefficient (optional)
        self.cpOverride=cpOverride #User input override value for time to peak runoff rate (optional)
        self.w50Override = w50Override #User input override value for width of unit hydrograph at 50% PeakQ (optional)
        self.w75Override = w75Override #User input override value for width of unit hydrograph at 75% PeakQ (optional)
        self.k50Override = k50Override #User input override for fraction of hydrograph width before peak at 50% PeakQ (optional)
        self.k75Override = k75Override #User input override value for fraction of hydrograph width before peak at 75% PeakQ (optional)
        self.swmmNode=swmmNode #Corresponding SWMMnode (optional)
        self.rainGageName = rainGageName #Raingage name (optional)
        self.comment=comment #User Input Comment (optional)
        # --Computed Variables (filled in by the helper methods below)--
        self.rpf=None # recieving pervious fraction
        self.spf = None # separate pervious fraction
        self.dcif=None # directly connected impervious fraction
        self.uif = None # unconnected impervious fraction
        self.dcia = None # directly connected impervious area (mi^2)
        self.uia = None # unconnected impervious area (mi^2)
        self.rpa = None # recieving pervious area (mi^2)
        self.spa = None # separate pervious area (mi^2)
        self.avgInfiltration = None # Average Infiltration
        # --Run these functions on initialization--
        self._computePerviousFractions()
        self._computeAvgInfiltration()
def _computePerviousFractions(self):
""" Calculates rpf and dcif for the subcatchment"""
decImperv = self.impervious / 100 #easier than dividing by 100 every time
#-----Get Paramaters-----
for i in range(0,len(_dataLib.rpfCoeff[self.dciaLevel])): #Find the appropriate rpf paramaters from the library
if decImperv <= _dataLib.rpfCoeff[self.dciaLevel][i][0]:
rpfParams = _dataLib.rpfCoeff[self.dciaLevel][i][1:]
break
for i in range(0,len(_dataLib.dcifCoeff[self.dciaLevel])):#Find the appropriate dcif paramaters from the library
if decImperv <= _dataLib.dcifCoeff[self.dciaLevel][i][0]:
dcifParams=_dataLib.dcifCoeff[self.dciaLevel][i][1:]
break
#-----Compute Pervious Fractions and areas-----
self.dcif = self.impervious / 100 * dcifParams[0] + dcifParams[1] #Compute DCIF
if self.dcifOverride is not None: #Look for DCIF override
if 0 <= self.dcifOverride <= 1:
self.dcif=self.dcifOverride
else:
errorMessages.append("DCIF Override out of Range, Ignoring")
if self.dcif >= 1: self.dcif=1-almostZero
if self.dcif <= 0: self.dcif=almostZero
self.rpf = self.impervious / 100 * rpfParams[0] + rpfParams[1] #Compute RPF
if self.rpfOverride is not None: #Look for RPF override
if 0 <= self.rpfOverride <= 1:
self.dcif=self.rpfOverride
else:
errorMessages.append("RPF Override out of Range, Ignoring")
if self.rpf >= 1: self.rpf = 1 - almostZero
if self.rpf <= 0: self.rpf = almostZero
self.uif = 1 - self.dcif
self.spf = 1 - self.rpf
self.uia=((1-self.dcif)*self.area*self.impervious/100) # Compute unconnected impervious area, Eqn B-21
self.rpa=self.rpf*((1-self.impervious/100)*self.area)# Compute recieving pervious area, Eqn B-22
self.dcia = self.dcif * self.impervious/100 * self.area # Compute directly connected impervious area
self.spa = self.spf * ((1 - self.impervious / 100) * self.area) # Compute separate pervious area
return True
def _computeAvgInfiltration(self): #Computes the Average Infiltration per Eqn B-25
self.avgInfiltration=(self.hortonsFinal+((self.hortonsInitial-self.hortonsFinal)/(7200*self.hortonsDecay))
*(1-math.exp(-7200*self.hortonsDecay))) #Eqn B-25, average
return True
class RainGage():
    """Computes and holds rain-gage hyetograph data.

    Three gage types are supported, each with different required inputs:

    * ``"UserInput"``     -- caller supplies ``timeStep`` and ``userInputDepths``.
    * ``"Standard"``      -- standard CUHP 5-minute distribution; requires
                             ``oneHourDepth`` and ``returnPeriod``.
    * ``"AreaCorrected"`` -- standard distribution, optionally extended to six
                             hours and scaled by depth-area reduction factors;
                             requires ``oneHourDepth``, ``sixHourDepth``,
                             ``returnPeriod`` and ``correctionArea``.
    """
    def __init__(self, rgType, rgName=None, timeStep=None, userInputDepths=None, oneHourDepth=None, sixHourDepth=None, correctionArea=None, returnPeriod=None):
        """Build the gage's incremental rainfall depths for the given type.

        Raises:
            ValueError: if required inputs for ``rgType`` are missing, the
                return period is not in the distribution table, or ``rgType``
                is not recognized.
        """
        # --User input variables
        self.rgType = rgType
        self.rgName = rgName  # raingage name (optional)
        self.timeStep = timeStep  # 5 min for standard CUHP raingages, variable for user input
        self.oneHourDepth = oneHourDepth
        self.sixHourDepth = sixHourDepth
        self.correctionArea = correctionArea
        self.returnPeriod = returnPeriod
        self.userInputDepths = userInputDepths
        # --Computed Variables
        self.twoHourDepth = None
        self.threeHourDepth = None
        self.rainDepths = []  # incremental depths, one per time step
        if self.rgType == "UserInput":
            if None in (self.timeStep, self.userInputDepths):
                raise ValueError("User defined gage requires timestep and depths")
            time, totalDepth = 0, 0
            for value in self.userInputDepths:
                time += self.timeStep
                self.rainDepths.append(value)
                totalDepth += value
                # The first step at/after 60 min fixes the one-hour depth
                # unless the caller supplied one explicitly.
                if time >= 60 and self.oneHourDepth is None:
                    self.oneHourDepth = totalDepth
        elif self.rgType == "Standard":  # Standard distribution with 5-minute hyetograph
            if None in (self.oneHourDepth, self.returnPeriod):
                raise ValueError("Standard distribution gage requires one hour depth and return period")
            self.timeStep = 5
            hyetograph = _dataLib.oneHourDistribution
            if self.returnPeriod not in hyetograph[0][1]:
                raise ValueError("Invalid Return Period for RainGage")
            rindex = hyetograph[0][1].index(self.returnPeriod)
            time = self.timeStep
            while time <= 120:
                # BUG FIX: floor division -- "time / 5" is a float in
                # Python 3 and cannot be used as a list index.
                thisHyetoDepth = hyetograph[time // 5 + 1][1][rindex]
                thisGageDepth = thisHyetoDepth * self.oneHourDepth
                self.rainDepths.append(thisGageDepth)
                time += self.timeStep
            self.rainDepths.append(0.0)
            self._adjustDips()
        elif self.rgType == "AreaCorrected":
            if None in (self.oneHourDepth, self.sixHourDepth, self.returnPeriod, self.correctionArea):
                raise ValueError(
                    "Area corrected gage requires 1- and 6- hour depths, return period and correction area")
            self.timeStep = 5
            self.twoHourDepth = (self.sixHourDepth - self.oneHourDepth) * 0.342
            self.threeHourDepth = (self.sixHourDepth - self.oneHourDepth) * 0.597
            hyetograph = _dataLib.oneHourDistribution
            if self.returnPeriod not in hyetograph[0][1]:
                raise ValueError("Invalid Return Period for RainGage")
            rindex = hyetograph[0][1].index(self.returnPeriod)
            time = self.timeStep
            # Fill out the first two hours from the standard hyetograph
            # (600 / timeStep == 120 minutes at the fixed 5-minute step).
            while time <= 600 / self.timeStep:
                thisHyetoDepth = hyetograph[time // 5 + 1][1][rindex]  # BUG FIX: floor division for list index
                thisGageDepth = thisHyetoDepth * self.oneHourDepth
                self.rainDepths.append(thisGageDepth)
                time += self.timeStep
            darfChart = None
            if self.correctionArea >= 15:  # >= 15 sq mi: fill the storm out to six hours
                while time < 180 + self.timeStep:  # uniformly distribute the 2-3 hour depth
                    time += self.timeStep
                    self.rainDepths.append((self.threeHourDepth - self.twoHourDepth) / 12)
                threeToSixHourIncrement = (self.sixHourDepth - np.sum(self.rainDepths)) / 36
                while time < 360 + self.timeStep:  # evenly distribute the remaining rainfall
                    time += self.timeStep
                    self.rainDepths.append(threeToSixHourIncrement)
                self.rainDepths.append(0.0)
                # Get the appropriate Depth Area Reduction Factors (DARF).
                # BUG FIX: removed a trailing "elif" after this if/else -- it
                # was a SyntaxError, and its condition compared the area
                # against the chart object itself; the if/else below already
                # covers both return-period cases.
                if self.returnPeriod <= 10:
                    if self.correctionArea >= _dataLib.darf_under_10yr[0][1][0]:
                        darfChart = _dataLib.darf_under_10yr
                else:
                    if self.correctionArea >= _dataLib.darf_over_10yr[0][1][0]:
                        darfChart = _dataLib.darf_over_10yr
            self._adjustDips()
            # Apply DARF
            if darfChart is not None:
                self.uncorrectedDepths = self.rainDepths
                if self.correctionArea in darfChart[0][1]:  # exact match: scale by that chart column
                    dIndex = darfChart[0][1].index(self.correctionArea)
                    for i in range(len(self.rainDepths)):
                        self.rainDepths[i] = self.uncorrectedDepths[i] * darfChart[i + 2][1][dIndex]
                else:
                    # Interpolate between the two bracketing chart columns.
                    j = 1
                    lIndex, uIndex, multiplier = None, None, None
                    while multiplier is None:
                        if self.correctionArea <= darfChart[0][1][j]:
                            lIndex = j - 1
                            # BUG FIX: upper index was "j + 1", which skipped
                            # the bracketing column the multiplier is built on.
                            uIndex = j
                            multiplier = (float(self.correctionArea) - float(darfChart[0][1][j - 1])) / (float(darfChart[0][1][j]) - float(darfChart[0][1][j - 1]))
                        j += 1
                        if j >= len(darfChart[0][1]) and multiplier is None:
                            # BUG FIX: the old guard "lIndex, uIndex = j" was a
                            # TypeError and left the loop infinite; clamp to the
                            # last charted area instead.
                            lIndex = uIndex = len(darfChart[0][1]) - 1
                            multiplier = 0.0
                    for i in range(len(self.rainDepths)):
                        # BUG FIX: the lower endpoint read chart row i+1 while
                        # every other lookup uses row i+2; use i+2 consistently.
                        lower = darfChart[i + 2][1][lIndex]
                        upper = darfChart[i + 2][1][uIndex]
                        self.rainDepths[i] = self.uncorrectedDepths[i] * (lower + multiplier * (upper - lower))
        else:
            raise ValueError("Invalid Rain Gage Type")
def _adjustDips(self):
isSmooth = False
numIterations = 0
adjustment = 0.0
while not isSmooth:
dips_found = False
for i in range(self.rainDepths.index(max(self.rainDepths)), len(self.rainDepths) - 1):
if self.rainDepths[i] < self.rainDepths[i + 1]:
dips_found = True
for j in range(i - 1, self.rainDepths.index(max(self.rainDepths)), -1):
if self.rainDepths[j] > self.rainDepths[i + 1]:
avg = (self.rainDepths[j] + self.rainDepths[i + 1]) / 2
if avg > self.rainDepths[i + 1]:
self.rainDepths[i + 1] = avg
adjustment += avg - self.rainDepths[i + 1]
break
if dips_found:
numIterations += 1
else:
isSmooth = True
if numIterations > 100: raise ValueError("Error generating curve: Unusual rainfall values and/or multiple peaks detected.")
if adjustment != 0:
peakIndex = self.rainDepths.index(max(self.rainDepths))
totalRainfall = 0
for i in range(peakIndex - 2, peakIndex + 3):
totalRainfall += self.rainDepths[i]
test_value = self.rainDepths[peakIndex - 2] - adjustment * self.rainDepths[peakIndex - 2] / | |
"""User resources unit tests."""
import unittest
import common
from notelist.responses import (
METHOD_NOT_ALLOWED, MISSING_TOKEN, INVALID_TOKEN, OK,
ERROR_METHOD_NOT_ALLOWED, ERROR_INVALID_CREDENTIALS, ERROR_MISSING_TOKEN,
ERROR_INVALID_TOKEN, ERROR_VALIDATION)
from notelist.views.authentication import (
USER_LOGGED_IN, TOKEN_REFRESHED, USER_LOGGED_OUT, INVALID_CREDENTIALS)
class LoginTestCase(common.BaseTestCase):
    """Login resource unit tests."""

    def _check_message(self, r, message=None, message_type=None):
        """Assert the response JSON has the message keys and expected values.

        :param r: Response object.
        :param message: Expected "message" value (not checked if None).
        :param message_type: Expected "message_type" value (not checked if
            None).
        """
        res_data = r.json
        for key in ("message", "message_type"):
            self.assertIn(key, res_data)
        if message is not None:
            self.assertEqual(res_data["message"], message)
        if message_type is not None:
            self.assertEqual(res_data["message_type"], message_type)

    def test_get(self):
        """Test the Get method of the Login view.

        This test tries to call the Get method, which shouldn't work.
        """
        r = self.client.get("/auth/login")
        # Check status code and message
        self.assertEqual(r.status_code, 405)
        self._check_message(r, METHOD_NOT_ALLOWED, ERROR_METHOD_NOT_ALLOWED)

    def test_post(self):
        """Test the Post method of the Login view.

        This test tries to log in as some user with valid credentials, which
        should work.
        """
        # Log in
        data = {
            "username": self.reg1["username"],
            "password": self.reg1["password"]}
        r = self.client.post("/auth/login", json=data)
        # Check status code and message
        self.assertEqual(r.status_code, 200)
        self._check_message(r, USER_LOGGED_IN, OK)
        # Check result: every session field must be a non-empty string
        res_data = r.json
        self.assertIn("result", res_data)
        result = res_data["result"]
        self.assertEqual(type(result), dict)
        for i in (
            "user_id", "access_token", "access_token_expiration",
            "refresh_token", "refresh_token_expiration"
        ):
            self.assertIn(i, result)
            v = result[i]
            self.assertEqual(type(v), str)
            self.assertNotEqual(v, "")

    def test_post_missing_fields(self):
        """Test the Post method of the Login view.

        This test tries to log in as some user with some mandatory field
        missing, which shouldn't work.
        """
        # Log in (without data)
        r1 = self.client.post("/auth/login")
        # Log in (without username).
        # BUG FIX: an anonymization placeholder had replaced "self.reg1"
        # here, leaving invalid syntax.
        data = {"password": self.reg1["password"]}
        r2 = self.client.post("/auth/login", json=data)
        # Log in (without password)
        data = {"username": self.reg1["username"]}
        r3 = self.client.post("/auth/login", json=data)
        # Check status codes and messages
        for r in (r1, r2, r3):
            self.assertEqual(r.status_code, 400)
            self._check_message(r, message_type=ERROR_VALIDATION)

    def test_post_disabled_user(self):
        """Test the Post method of the Login view.

        This test tries to log in as some disabled user, which shouldn't work.
        """
        # Log in as the disabled user ("reg2").
        # BUG FIX: an anonymization placeholder had replaced "self.reg2"
        # here, leaving invalid syntax.
        data = {
            "username": self.reg2["username"],
            "password": self.reg2["password"]}
        r = self.client.post("/auth/login", json=data)
        # Check status code and message
        self.assertEqual(r.status_code, 401)
        self._check_message(r, INVALID_CREDENTIALS, ERROR_INVALID_CREDENTIALS)

    def test_post_user_not_found(self):
        """Test the Post method of the Login view.

        This test tries to log in as a user that doesn't exist, which
        shouldn't work.
        """
        # Log in (the username doesn't exist, so any password fails)
        data = {"username": "test", "password": "1234"}
        r = self.client.post("/auth/login", json=data)
        # Check status code and message
        self.assertEqual(r.status_code, 401)
        self._check_message(r, INVALID_CREDENTIALS, ERROR_INVALID_CREDENTIALS)

    def test_post_invalid_password(self):
        """Test the Post method of the Login view.

        This test tries to log in as some user providing an invalid password,
        which shouldn't work.
        """
        # Log in with a corrupted password
        data = {
            "username": self.reg1["username"],
            "password": self.reg1["password"] + "_"}
        r = self.client.post("/auth/login", json=data)
        # Check status code and message
        self.assertEqual(r.status_code, 401)
        self._check_message(r, INVALID_CREDENTIALS, ERROR_INVALID_CREDENTIALS)

    def test_put(self):
        """Test the Put method of the Login view.

        This test tries to call the Put method, which shouldn't work.
        """
        r = self.client.put("/auth/login")
        # Check status code and message
        self.assertEqual(r.status_code, 405)
        self._check_message(r, METHOD_NOT_ALLOWED, ERROR_METHOD_NOT_ALLOWED)

    def test_delete(self):
        """Test the Delete method of the Login view.

        This test tries to call the Delete method, which shouldn't work.
        """
        r = self.client.delete("/auth/login")
        # Check status code and message
        self.assertEqual(r.status_code, 405)
        self._check_message(r, METHOD_NOT_ALLOWED, ERROR_METHOD_NOT_ALLOWED)
class TokenRefreshTestCase(common.BaseTestCase):
    """Token Refresh resource unit tests."""

    def _check_message(self, r, message=None, message_type=None):
        """Assert the response JSON has the message keys and expected values.

        :param r: Response object.
        :param message: Expected "message" value (not checked if None).
        :param message_type: Expected "message_type" value (not checked if
            None).
        """
        res_data = r.json
        for key in ("message", "message_type"):
            self.assertIn(key, res_data)
        if message is not None:
            self.assertEqual(res_data["message"], message)
        if message_type is not None:
            self.assertEqual(res_data["message_type"], message_type)

    def test_get(self):
        """Test the Get method of the Token Refresh view.

        This test tries to get a new, not fresh, access token providing the
        user refresh token, which should work.
        """
        # Log in.
        # BUG FIX: an anonymization placeholder had replaced "self.reg1"
        # here, leaving invalid syntax.
        data = {
            "username": self.reg1["username"],
            "password": self.reg1["password"]}
        r = self.client.post("/auth/login", json=data)
        refresh_token = r.json["result"]["refresh_token"]
        # Get a new, not fresh, access token
        headers = {"Authorization": f"Bearer {refresh_token}"}
        r = self.client.get("/auth/refresh", headers=headers)
        # Check status code and message
        self.assertEqual(r.status_code, 200)
        self._check_message(r, TOKEN_REFRESHED, OK)
        # Check result
        self.assertIn("result", r.json)
        result = r.json["result"]
        self.assertEqual(type(result), dict)
        # Check that the new access token is a non-empty string
        self.assertIn("access_token", result)
        access_token = result["access_token"]
        self.assertEqual(type(access_token), str)
        self.assertNotEqual(access_token, "")

    def test_get_missing_refresh_token(self):
        """Test the Get method of the Token Refresh view.

        This test tries to get a new, not fresh, access token without
        providing a refresh token, which shouldn't work.
        """
        # Get access token without any Authorization header
        r = self.client.get("/auth/refresh")
        # Check status code and message
        self.assertEqual(r.status_code, 401)
        self._check_message(r, MISSING_TOKEN, ERROR_MISSING_TOKEN)

    def test_get_invalid_refresh_token(self):
        """Test the Get method of the Token Refresh view.

        This test tries to get a new, not fresh, access token given an
        invalid refresh token, which shouldn't work.
        """
        # Get a new access token providing an invalid token ("1234")
        headers = {"Authorization": "Bearer 1234"}
        r = self.client.get("/auth/refresh", headers=headers)
        # Check status code and message
        self.assertEqual(r.status_code, 422)
        self._check_message(r, INVALID_TOKEN, ERROR_INVALID_TOKEN)

    def test_post(self):
        """Test the Post method of the Token Refresh view.

        This test tries to call the Post method, which shouldn't work.
        """
        r = self.client.post("/auth/refresh")
        # Check status code and message
        self.assertEqual(r.status_code, 405)
        self._check_message(r, METHOD_NOT_ALLOWED, ERROR_METHOD_NOT_ALLOWED)

    def test_put(self):
        """Test the Put method of the Token Refresh view.

        This test tries to call the Put method, which shouldn't work.
        """
        r = self.client.put("/auth/refresh")
        # Check status code and message
        self.assertEqual(r.status_code, 405)
        self._check_message(r, METHOD_NOT_ALLOWED, ERROR_METHOD_NOT_ALLOWED)

    def test_delete(self):
        """Test the Delete method of the Token Refresh view.

        This test tries to call the Delete method, which shouldn't work.
        """
        r = self.client.delete("/auth/refresh")
        # Check status code and message
        self.assertEqual(r.status_code, 405)
        self._check_message(r, METHOD_NOT_ALLOWED, ERROR_METHOD_NOT_ALLOWED)
class LogoutTestCase(common.BaseTestCase):
"""Logout resource unit tests."""
def test_get(self):
"""Test the Get method of the Logout view.
This test logs in as some user with valid credentials and then tries to
log out, which should work.
"""
# Log in
data = {
"username": self.reg1["username"],
"password": self.reg1["password"]}
r = self.client.post("/auth/login", json=data)
access_token = r.json["result"]["access_token"]
# Log out
headers = {"Authorization": f"Bearer {access_token}"}
r = self.client.get("/auth/logout", headers=headers)
# Check status code
self.assertEqual(r.status_code, 200)
# Check message
res_data = r.json
keys = ("message", "message_type")
for i in keys:
self.assertIn(i, res_data)
self.assertEqual(res_data[keys[0]], USER_LOGGED_OUT)
self.assertEqual(res_data[keys[1]], OK)
def test_get_missing_access_token(self):
"""Test the Get method of the Logout view.
This test tries to log out without providing an access token, which
shouldn't work.
"""
# Log out without providing the access token
r = self.client.get("/auth/logout")
# Check status code
self.assertEqual(r.status_code, 401)
# Check message
res_data = r.json
keys = ("message", "message_type")
for i in keys:
self.assertIn(i, res_data)
self.assertEqual(res_data[keys[0]], MISSING_TOKEN)
self.assertEqual(res_data[keys[1]], ERROR_MISSING_TOKEN)
def test_get_invalid_access_token(self):
"""Test the Get method of the Logout view.
This test tries to log out providing an invalid access token, which
shouldn't work.
"""
# Log out providing an invalid access token ("<PASSWORD>")
headers = {"Authorization": "Bearer 1234"}
r = self.client.get("/auth/refresh", headers=headers)
# Check status code
self.assertEqual(r.status_code, 422)
# Check message
res_data = r.json
keys = ("message", "message_type")
for i in keys:
self.assertIn(i, res_data)
self.assertEqual(res_data[keys[0]], INVALID_TOKEN)
self.assertEqual(res_data[keys[1]], ERROR_INVALID_TOKEN)
def test_post(self):
"""Test the Post method of the Logout view.
This test tries to call the Post method, which shouldn't work.
"""
r = self.client.post("/auth/logout")
# Check status code
self.assertEqual(r.status_code, 405)
# Check message
res_data = r.json
keys = ("message", "message_type")
for i in keys:
self.assertIn(i, res_data)
self.assertEqual(res_data[keys[0]], METHOD_NOT_ALLOWED)
self.assertEqual(res_data[keys[1]], ERROR_METHOD_NOT_ALLOWED)
def test_put(self):
"""Test the Put method of the Logout view.
This test tries to | |
right_shift = 2 * (node.get_descendant_count() + 1)
return space_target, level_change, left_right_change, parent, right_shift
def _close_gap(self, size, target, tree_id):
"""
Closes a gap of a certain ``size`` after the given ``target``
point in the tree identified by ``tree_id``.
"""
self._manage_space(-size, target, tree_id)
def _create_space(self, size, target, tree_id):
"""
Creates a space of a certain ``size`` after the given ``target``
point in the tree identified by ``tree_id``.
"""
self._manage_space(size, target, tree_id)
    def _create_tree_space(self, target_tree_id, num_trees=1):
        """
        Creates space for a new tree by incrementing all tree ids
        greater than ``target_tree_id``.

        After this call, ``num_trees`` consecutive tree ids starting at
        ``target_tree_id + 1`` are free for use.
        """
        qs = self._mptt_filter(tree_id__gt=target_tree_id)
        # F() keeps the increment inside SQL: one UPDATE for all affected rows.
        self._mptt_update(qs, tree_id=F(self.tree_id_attr) + num_trees)
        # Notify delayed-updates tracking that these tree ids shifted.
        self.tree_model._mptt_track_tree_insertions(target_tree_id + 1, num_trees)
def _get_next_tree_id(self):
"""
Determines the next largest unused tree id for the tree managed
by this manager.
"""
max_tree_id = list(self.aggregate(Max(self.tree_id_attr)).values())[0]
max_tree_id = max_tree_id or 0
return max_tree_id + 1
    def _inter_tree_move_and_close_gap(
            self, node, level_change,
            left_right_change, new_tree_id):
        """
        Removes ``node`` from its current tree, with the given set of
        changes being applied to ``node`` and its descendants, closing
        the gap left by moving ``node`` as it does so.

        ``level_change`` and ``left_right_change`` are *subtracted* from
        the subtree's level and left/right values; ``new_tree_id`` is the
        tree id the subtree is reassigned to. Everything happens in a
        single UPDATE over the node's current tree.
        """
        connection = self._get_connection(instance=node)
        qn = connection.ops.quote_name
        opts = self.model._meta
        # CASE logic:
        #  * rows whose left falls inside [node.left, node.right] (the moved
        #    subtree) get their level shifted, tree id reassigned, and
        #    left/right shifted by left_right_change;
        #  * rows to the right of the subtree get left/right pulled back by
        #    the gap size, closing the hole the subtree leaves behind.
        inter_tree_move_query = """
        UPDATE %(table)s
        SET %(level)s = CASE
            WHEN %(left)s >= %%s AND %(left)s <= %%s
                THEN %(level)s - %%s
            ELSE %(level)s END,
            %(tree_id)s = CASE
            WHEN %(left)s >= %%s AND %(left)s <= %%s
                THEN %%s
            ELSE %(tree_id)s END,
            %(left)s = CASE
            WHEN %(left)s >= %%s AND %(left)s <= %%s
                THEN %(left)s - %%s
            WHEN %(left)s > %%s
                THEN %(left)s - %%s
            ELSE %(left)s END,
            %(right)s = CASE
            WHEN %(right)s >= %%s AND %(right)s <= %%s
                THEN %(right)s - %%s
            WHEN %(right)s > %%s
                THEN %(right)s - %%s
            ELSE %(right)s END
        WHERE %(tree_id)s = %%s""" % {
            'table': qn(self.tree_model._meta.db_table),
            'level': qn(opts.get_field(self.level_attr).column),
            'left': qn(opts.get_field(self.left_attr).column),
            'tree_id': qn(opts.get_field(self.tree_id_attr).column),
            'right': qn(opts.get_field(self.right_attr).column),
        }
        left = getattr(node, self.left_attr)
        right = getattr(node, self.right_attr)
        gap_size = right - left + 1  # number of left/right slots the subtree occupied
        gap_target_left = left - 1  # rows beyond this point close up by gap_size
        # Parameter order must mirror the %%s placeholders above exactly.
        params = [
            left, right, level_change,
            left, right, new_tree_id,
            left, right, left_right_change,
            gap_target_left, gap_size,
            left, right, left_right_change,
            gap_target_left, gap_size,
            getattr(node, self.tree_id_attr)
        ]
        cursor = connection.cursor()
        cursor.execute(inter_tree_move_query, params)
    def _make_child_root_node(self, node, new_tree_id=None):
        """
        Removes ``node`` from its tree, making it the root node of a new
        tree.

        If ``new_tree_id`` is not specified a new tree id will be
        generated.

        ``node`` will be modified to reflect its new tree state in the
        database.
        """
        left = getattr(node, self.left_attr)
        right = getattr(node, self.right_attr)
        level = getattr(node, self.level_attr)
        if not new_tree_id:
            new_tree_id = self._get_next_tree_id()
        # As a root, the node's left must become 1, so the whole subtree
        # shifts down by (left - 1); the level drops by the full current
        # level (new level is 0), which is why ``level`` itself is passed
        # as the level change below.
        left_right_change = left - 1
        self._inter_tree_move_and_close_gap(node, level, left_right_change, new_tree_id)
        # Update the node to be consistent with the updated
        # tree in the database.
        setattr(node, self.left_attr, left - left_right_change)
        setattr(node, self.right_attr, right - left_right_change)
        setattr(node, self.level_attr, 0)
        setattr(node, self.tree_id_attr, new_tree_id)
        setattr(node, self.parent_attr, None)
        # Keep the cached FK in sync so a later save() doesn't see a
        # phantom parent change.
        node._mptt_cached_fields[self.parent_attr] = None
    def _make_sibling_of_root_node(self, node, target, position):
        """
        Moves ``node``, making it a sibling of the given ``target`` root
        node as specified by ``position`` (``'left'`` or ``'right'``).

        ``node`` will be modified to reflect its new tree state in the
        database.

        Since we use tree ids to reduce the number of rows affected by
        tree management during insertion and deletion, root nodes are not
        true siblings; thus, making an item a sibling of a root node is
        a special case which involves shuffling tree ids around.
        """
        if node == target:
            raise InvalidMove(_('A node may not be made a sibling of itself.'))
        opts = self.model._meta
        tree_id = getattr(node, self.tree_id_attr)
        target_tree_id = getattr(target, self.tree_id_attr)
        if node.is_child_node():
            # A child node becomes a root: open a free tree id next to the
            # target, then promote the node into it.
            if position == 'left':
                space_target = target_tree_id - 1
                new_tree_id = target_tree_id
            elif position == 'right':
                space_target = target_tree_id
                new_tree_id = target_tree_id + 1
            else:
                raise ValueError(_('An invalid position was given: %s.') % position)
            self._create_tree_space(space_target)
            if tree_id > space_target:
                # The node's tree id has been incremented in the
                # database - this change must be reflected in the node
                # object for the method call below to operate on the
                # correct tree.
                setattr(node, self.tree_id_attr, tree_id + 1)
            self._make_child_root_node(node, new_tree_id)
        else:
            # Both nodes are roots: rotate the tree ids in the span between
            # them. ``shift`` is the direction the other trees move to make
            # room; ``new_tree_id`` is where ``node`` ends up.
            if position == 'left':
                if target_tree_id > tree_id:
                    left_sibling = target.get_previous_sibling()
                    if node == left_sibling:
                        return  # already in position; nothing to do
                    new_tree_id = getattr(left_sibling, self.tree_id_attr)
                    lower_bound, upper_bound = tree_id, new_tree_id
                    shift = -1
                else:
                    new_tree_id = target_tree_id
                    lower_bound, upper_bound = new_tree_id, tree_id
                    shift = 1
            elif position == 'right':
                if target_tree_id > tree_id:
                    new_tree_id = target_tree_id
                    lower_bound, upper_bound = tree_id, target_tree_id
                    shift = -1
                else:
                    right_sibling = target.get_next_sibling()
                    if node == right_sibling:
                        return  # already in position; nothing to do
                    new_tree_id = getattr(right_sibling, self.tree_id_attr)
                    lower_bound, upper_bound = new_tree_id, tree_id
                    shift = 1
            else:
                raise ValueError(_('An invalid position was given: %s.') % position)
            connection = self._get_connection(instance=node)
            qn = connection.ops.quote_name
            # Single UPDATE: the moved tree takes new_tree_id; every other
            # tree in [lower_bound, upper_bound] shifts by ``shift``.
            root_sibling_query = """
            UPDATE %(table)s
            SET %(tree_id)s = CASE
                WHEN %(tree_id)s = %%s
                    THEN %%s
                ELSE %(tree_id)s + %%s END
            WHERE %(tree_id)s >= %%s AND %(tree_id)s <= %%s""" % {
                'table': qn(self.tree_model._meta.db_table),
                'tree_id': qn(opts.get_field(self.tree_id_attr).column),
            }
            cursor = connection.cursor()
            cursor.execute(root_sibling_query, [tree_id, new_tree_id, shift,
                                                lower_bound, upper_bound])
            setattr(node, self.tree_id_attr, new_tree_id)
    def _manage_space(self, size, target, tree_id):
        """
        Manages spaces in the tree identified by ``tree_id`` by changing
        the values of the left and right columns by ``size`` after the
        given ``target`` point.

        A positive ``size`` opens a gap (room for an insertion); a
        negative ``size`` closes one (after a removal).
        """
        if self.tree_model._mptt_is_tracking:
            # Delayed-updates mode: just record that this tree changed; the
            # left/right values are rebuilt when tracking is flushed.
            self.tree_model._mptt_track_tree_modified(tree_id)
        else:
            connection = self._get_connection()
            qn = connection.ops.quote_name
            opts = self.model._meta
            # Shift every left/right value strictly greater than ``target``
            # by ``size`` in a single UPDATE scoped to the affected tree.
            space_query = """
            UPDATE %(table)s
            SET %(left)s = CASE
                WHEN %(left)s > %%s
                THEN %(left)s + %%s
                ELSE %(left)s END,
            %(right)s = CASE
                WHEN %(right)s > %%s
                THEN %(right)s + %%s
                ELSE %(right)s END
            WHERE %(tree_id)s = %%s
            AND (%(left)s > %%s OR %(right)s > %%s)""" % {
                'table': qn(self.tree_model._meta.db_table),
                'left': qn(opts.get_field(self.left_attr).column),
                'right': qn(opts.get_field(self.right_attr).column),
                'tree_id': qn(opts.get_field(self.tree_id_attr).column),
            }
            cursor = connection.cursor()
            # Parameter order mirrors the %%s placeholders above.
            cursor.execute(space_query, [target, size, target, size, tree_id,
                                         target, target])
def _move_child_node(self, node, target, position):
"""
Calls the appropriate method to move child node ``node``
relative to the given ``target`` node as specified by
``position``.
"""
tree_id = getattr(node, self.tree_id_attr)
target_tree_id = getattr(target, self.tree_id_attr)
if tree_id == target_tree_id:
self._move_child_within_tree(node, target, position)
else:
self._move_child_to_new_tree(node, target, position)
    def _move_child_to_new_tree(self, node, target, position):
        """
        Moves child node ``node`` to a different tree, inserting it
        relative to the given ``target`` node in the new tree as
        specified by ``position``.

        ``node`` will be modified to reflect its new tree state in the
        database.
        """
        left = getattr(node, self.left_attr)
        right = getattr(node, self.right_attr)
        level = getattr(node, self.level_attr)
        new_tree_id = getattr(target, self.tree_id_attr)
        # Where to open the gap in the target tree and how the subtree's
        # level / left / right values must shift (changes are *subtracted*
        # by _inter_tree_move_and_close_gap). new_parent_right is unused here.
        space_target, level_change, left_right_change, parent, new_parent_right = \
            self._calculate_inter_tree_move_values(node, target, position)
        tree_width = right - left + 1  # number of left/right slots the subtree needs
        # Make space for the subtree which will be moved
        self._create_space(tree_width, space_target, new_tree_id)
        # Move the subtree
        self._inter_tree_move_and_close_gap(
            node, level_change, left_right_change, new_tree_id)
        # Update the node to be consistent with the updated
        # tree in the database.
        setattr(node, self.left_attr, left - left_right_change)
        setattr(node, self.right_attr, right - left_right_change)
        setattr(node, self.level_attr, level - level_change)
        setattr(node, self.tree_id_attr, new_tree_id)
        setattr(node, self.parent_attr, parent)
        # Sync the cached FK so a later save() doesn't re-run parent logic.
        node._mptt_cached_fields[self.parent_attr] = parent.pk
def _move_child_within_tree(self, node, target, position):
"""
Moves child node ``node`` within its current tree relative to
the given ``target`` node as specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
width = right - left + 1
tree_id = getattr(node, self.tree_id_attr)
target_left = getattr(target, self.left_attr)
target_right = getattr(target, self.right_attr)
target_level = getattr(target, self.level_attr)
if position == 'last-child' or position == 'first-child':
if node == target:
raise InvalidMove(_('A node may not be made a child of itself.'))
elif left < target_left < right:
raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
if position == 'last-child':
if target_right > right:
new_left = target_right - width
new_right = target_right - 1
else:
new_left = target_right
new_right = target_right + width - 1
else:
if target_left > left:
new_left = target_left - width + 1
new_right = target_left
else:
new_left = target_left + 1
new_right = target_left + width
level_change = level - target_level - 1
parent = target
elif position == 'left' or position == 'right':
if node == target:
raise InvalidMove(_('A node may not be made a sibling of itself.'))
elif left < | |
# Repository: thewahome/msgraph-cli
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class BookingBusinessesOperations(object):
"""BookingBusinessesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~bookings.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_appointments(
self,
booking_business_id, # type: str
orderby=None, # type: Optional[List[Union[str, "models.Enum13"]]]
select=None, # type: Optional[List[Union[str, "models.Enum14"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfBookingAppointment"]
"""Get appointments from bookingBusinesses.
Get appointments from bookingBusinesses.
:param booking_business_id: key: id of bookingBusiness.
:type booking_business_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~bookings.models.Enum13]
:param select: Select properties to be returned.
:type select: list[str or ~bookings.models.Enum14]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfBookingAppointment or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~bookings.models.CollectionOfBookingAppointment]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfBookingAppointment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_appointments.metadata['url'] # type: ignore
path_format_arguments = {
'bookingBusiness-id': self._serialize.url("booking_business_id", booking_business_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfBookingAppointment', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_appointments.metadata = {'url': '/bookingBusinesses/{bookingBusiness-id}/appointments'} # type: ignore
def create_appointments(
self,
booking_business_id, # type: str
body, # type: "models.MicrosoftGraphBookingAppointment"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphBookingAppointment"
"""Create new navigation property to appointments for bookingBusinesses.
Create new navigation property to appointments for bookingBusinesses.
:param booking_business_id: key: id of bookingBusiness.
:type booking_business_id: str
:param body: New navigation property.
:type body: ~bookings.models.MicrosoftGraphBookingAppointment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphBookingAppointment, or the result of cls(response)
:rtype: ~bookings.models.MicrosoftGraphBookingAppointment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphBookingAppointment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_appointments.metadata['url'] # type: ignore
path_format_arguments = {
'bookingBusiness-id': self._serialize.url("booking_business_id", booking_business_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphBookingAppointment')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphBookingAppointment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_appointments.metadata = {'url': '/bookingBusinesses/{bookingBusiness-id}/appointments'} # type: ignore
def get_appointments(
self,
booking_business_id, # type: str
booking_appointment_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum15"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphBookingAppointment"
"""Get appointments from bookingBusinesses.
Get appointments from bookingBusinesses.
:param booking_business_id: key: id of bookingBusiness.
:type booking_business_id: str
:param booking_appointment_id: key: id of bookingAppointment.
:type booking_appointment_id: str
:param select: Select properties to be returned.
:type select: list[str or ~bookings.models.Enum15]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphBookingAppointment, or the result of cls(response)
:rtype: ~bookings.models.MicrosoftGraphBookingAppointment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphBookingAppointment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_appointments.metadata['url'] # type: ignore
path_format_arguments = {
'bookingBusiness-id': self._serialize.url("booking_business_id", booking_business_id, 'str'),
'bookingAppointment-id': self._serialize.url("booking_appointment_id", booking_appointment_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphBookingAppointment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_appointments.metadata = {'url': '/bookingBusinesses/{bookingBusiness-id}/appointments/{bookingAppointment-id}'} # type: ignore
def update_appointments(
self,
booking_business_id, # type: str
booking_appointment_id, # type: str
body, # type: "models.MicrosoftGraphBookingAppointment"
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property appointments in bookingBusinesses.
Update the navigation property appointments in bookingBusinesses.
:param booking_business_id: key: id of bookingBusiness.
:type booking_business_id: str
:param booking_appointment_id: key: id of bookingAppointment.
:type booking_appointment_id: str
:param body: New navigation property values.
:type body: ~bookings.models.MicrosoftGraphBookingAppointment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_appointments.metadata['url'] # type: ignore
path_format_arguments = {
'bookingBusiness-id': self._serialize.url("booking_business_id", booking_business_id, 'str'),
'bookingAppointment-id': self._serialize.url("booking_appointment_id", booking_appointment_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphBookingAppointment')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_appointments.metadata = {'url': '/bookingBusinesses/{bookingBusiness-id}/appointments/{bookingAppointment-id}'} # type: ignore
def delete_appointments(
self,
booking_business_id, # type: str
booking_appointment_id, # type: str
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Delete navigation property appointments for bookingBusinesses.
Delete navigation property appointments for bookingBusinesses.
:param | |
integers. For the transfer function for a single channel pass it's index as integer.
n_bins (None): number of bins in the transfer function (determines frequency resolution).
If None, use the maximum number of bins.
show (bool): whether to show the plot right after drawing.
axis (matplotlib.axes.Axes | None): axis to plot to. If None create a new plot.
Returns:
(numpy.ndarray): the frequency bins in the range from 0 Hz to the Nyquist frequency.
(numpy.ndarray: the magnitude of each frequency in `w`.
None: If `show` is True OR and `axis` was specified, a plot is drawn and nothing is returned.
Examples::
filt = slab.Filter.band(frequency=(100, 1500), kind='bp') # bandpass filter
filt.tf(show=True) # compute and plot the transfer functions
w, h = filt.tf(show=False) # compute and return the transfer functions
"""
if isinstance(channels, int):
if channels > self.n_filters: # check chan is in range of n_filters
raise IndexError("Channel index out of range!")
else:
channels = [channels]
elif isinstance(channels, list): # check that all the elements are unique and in range
if len(channels) != len(set(channels)):
raise ValueError("There should be no duplicates in the list of channels!")
if min(channels) < 0 or max(channels) < self.n_filters:
raise IndexError("Channel index out of range!")
if not all([isinstance(i, int) for i in channels]):
raise ValueError("Channels must be integers!")
elif channels == 'all':
channels = list(range(self.n_filters)) # now we have a list of filter indices to process
if n_bins is None:
n_bins = self.data.shape[0]
if self.fir:
if scipy is False:
raise ImportError('Computing transfer functions of FIR filters requires Scipy.')
h = numpy.empty((n_bins, len(channels)))
w = numpy.empty(n_bins)
for i, idx in enumerate(channels):
w, _h = scipy.signal.freqz(self.channel(idx), worN=n_bins, fs=self.samplerate)
h[:, i] = 20 * numpy.log10(numpy.abs(_h.flatten()))
else:
w = self.frequencies
data = self.data[:, channels]
data[data == 0] += numpy.finfo(float).eps
h = 20 * numpy.log10(data)
if not n_bins == len(w): # interpolate if necessary
w_interp = numpy.linspace(0, w[-1], n_bins)
h_interp = numpy.zeros((n_bins, len(channels)))
for idx, _ in enumerate(channels):
h_interp[:, idx] = numpy.interp(w_interp, w, h[:, idx])
h = h_interp
w = w_interp
if show or (axis is not None):
if plt is False:
raise ImportError('Plotting transfer functions requires matplotlib.')
if axis is None:
_, axis = plt.subplots()
axis.plot(w, h)
axis.set(title='Frequency [Hz]', xlabel='Amplitude [dB]', ylabel='Frequency Response')
axis.grid(True)
if show:
plt.show()
else:
return w, h
@staticmethod
# TODO: oversampling factor needed for cochleagram!
def cos_filterbank(length=5000, bandwidth=1/3, low_cutoff=0, high_cutoff=None, pass_bands=False, samplerate=None):
"""
Generate a set of Fourier filters. Each filter's transfer function is given by the positive phase of a
cosine wave. The amplitude of the cosine is that filters central frequency. Following the organization of the
cochlea, the width of the filter increases in proportion to it's center frequency. This increase is defined
by Moore & Glasberg's formula for the equivalent rectangular bandwidth (ERB) of auditory filters. This
functions is used for example to divide a sound into bands for equalization.
Attributes:
length (int): The number of bins in each filter, determines the frequency resolution.
bandwidth (float): Width of the sub-filters in octaves. The smaller the bandwidth, the more filters
will be generated.
low_cutoff (int | float): The lower limit of frequency range in Hz.
high_cutoff (int | float): The upper limit of frequency range in Hz. If None, use the Nyquist frequency.
pass_bands (bool): Whether to include a half cosine at the filter bank's lower and upper edge frequency.
If True, allows reconstruction of original bandwidth when collapsing subbands.
samplerate (int | None): the samplerate of the sound that the filter shall be applied to.
If None, use the default samplerate.s
Examples::
sig = slab.Sound.pinknoise(samplerate=44100)
fbank = slab.Filter.cos_filterbank(length=sig.n_samples, bandwidth=1/10, low_cutoff=100,
samplerate=sig.samplerate)
fbank.tf()
# apply the filter bank to the data. The filtered sound will contain as many channels as there are
# filters in the bank. Every channel is a copy of the original sound with one filter applied.
# In this context, the channels are the signals sub-bands:
sig_filt = fbank.apply(sig)
"""
if samplerate is None:
samplerate = slab.signal._default_samplerate
if not high_cutoff:
high_cutoff = samplerate / 2
freq_bins = numpy.fft.rfftfreq(length, d=1/samplerate)
n_freqs = len(freq_bins)
center_freqs, bandwidth, erb_spacing = Filter._center_freqs(
low_cutoff=low_cutoff, high_cutoff=high_cutoff, bandwidth=bandwidth, pass_bands=pass_bands)
n_filters = len(center_freqs)
filts = numpy.zeros((n_freqs, n_filters))
freqs_erb = Filter._freq2erb(freq_bins)
for i in range(n_filters):
l = center_freqs[i] - erb_spacing
h = center_freqs[i] + erb_spacing
avg = center_freqs[i] # center of filter
width = erb_spacing * 2 # width of filter
filts[(freqs_erb > l) & (freqs_erb < h), i] = numpy.cos(
(freqs_erb[(freqs_erb > l) & (freqs_erb < h)] - avg) / width * numpy.pi)
return Filter(data=filts, samplerate=samplerate, fir=False)
@staticmethod
def _center_freqs(low_cutoff, high_cutoff, bandwidth=1/3, pass_bands=False):
ref_freq = 1000 # Hz, reference for conversion between oct and erb bandwidth
ref_erb = Filter._freq2erb(ref_freq)
erb_spacing = Filter._freq2erb(ref_freq*2**bandwidth) - ref_erb
h = Filter._freq2erb(high_cutoff)
l = Filter._freq2erb(low_cutoff)
n_filters = int(numpy.round((h - l) / erb_spacing))
center_freqs, erb_spacing = numpy.linspace(l, h, n_filters, retstep=True)
if not pass_bands:
center_freqs = center_freqs[1:-1] # exclude low and highpass filters
bandwidth = numpy.log2(Filter._erb2freq(ref_erb + erb_spacing) /
ref_freq) # convert erb_spacing to octaves
return center_freqs, bandwidth, erb_spacing
@staticmethod
def collapse_subbands(subbands, filter_bank=None):
"""
Sum a sound that has been filtered with a filterbank and which channels represent the sub-bands of
the original sound. For each sound channel, the fourier transform is calculated and the result is
multiplied with the corresponding filter in the filter bank. For the resulting spectrum, and inverse
fourier transform is performed. The resulting sound is summed over the all channels.
Arguments:
subbands (slab.Signal): The sound which is divided into subbands by filtering. The number of channels
in the sound must be equal to the number of filters in the filter bank.
filter_bank (None | slab.Filter): The filter bank applied to the sound's subbands. The number of
filters must be equal to the number of channels in the sound. If None a filter bank with the default
parameters is generated. Note that the filters must have a number of frequency bins equal to the
number of samples in the sound.
Returns:
(slab.Signal): A sound generated from summing the spectra of the subbands.
Examples::
sig = slab.Sound.whitenoise() # generate a sound
fbank = slab.Filter.cos_filterbank(length=sig.n_samples) # generate a filter bank
subbands = fbank.apply(sig) # divide the sound into subbands by applying the filter
# by collapsing the subbands, a new sound is generated that is (almost) equal to the original sound:
collapsed = fbank.collapse_subbands(subbands, fbank)
"""
new = copy.deepcopy(subbands)
if not filter_bank:
filter_bank = Filter.cos_filterbank(
length=subbands.n_samples, samplerate=subbands.samplerate)
elif filter_bank.fir:
raise ValueError("Not implemented for FIR filters!")
if subbands.samplerate != filter_bank.samplerate:
raise ValueError('Signal and filter bank need to have the same samplerate!')
subbands_rfft = numpy.fft.rfft(subbands.data, axis=0)
subbands = numpy.fft.irfft(subbands_rfft * filter_bank.data, axis=0)
collapsed = Signal(data=subbands.sum(axis=1), samplerate=filter_bank.samplerate)
new.data, new.samplerate = collapsed.data, collapsed.samplerate
return new
def filter_bank_center_freqs(self):
"""
Get the maximum of each filter in a filter bank. For filter banks generated with the `cos_filterbank`
method this corresponds to the filters center frequency.
Returns:
(numpy.ndarray): array with length equal to the number of filters in the bank, containing each filter's
center frequency.
"""
if self.fir:
raise NotImplementedError('Not implemented for FIR filter banks.')
freqs = self.frequencies
center_freqs = numpy.zeros(self.n_filters)
for i in range(self.n_filters): # for each filter
idx = numpy.argmax(self.channel(i).data) # get index of max Gain
center_freqs[i] = freqs[idx] # look-up freqe of index -> centre_freq for that filter
return center_freqs
@staticmethod
def equalizing_filterbank(reference, sound, length=1000, bandwidth=1/8, low_cutoff=200, high_cutoff=None,
alpha=1.0):
"""
Generate an equalizing filter from the spectral difference between a `sound` and a `reference`. Both are
divided into sub-bands using the `cos_filterbank` and the level difference per sub-band is calculated. The
sub-band frequencies and level differences are then used to generate an equalizing filter that makes the
spectrum of the `sound` more equal to the one of the `reference`. The main use case is equalizing the
differences between transfer functions of individual loudspeakers.
Arguments:
reference (slab.Sound): The reference for equalization, i.e. what the sound should look like after
applying the equalization. | |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import open
from builtins import int
from future import standard_library
standard_library.install_aliases()
import os
import json
import requests
import types
import re
import time
import copy
import traceback
from flask import (
jsonify,
Blueprint,
request,
Response,
render_template,
make_response,
g,
url_for,
redirect,
)
from flask_login import login_required
from pprint import pformat
from string import Template
from urllib.parse import urljoin
from datetime import datetime
import functools
import operator
from hysds.celery import app as celery_app
from hysds.task_worker import do_submit_task
import hysds_commons.action_utils
import hysds_commons.mozart_utils
import hysds_commons.container_utils
import hysds_commons.job_spec_utils
from tosca import app
import tosca.lib.grq_utils
import werkzeug.routing
mod = Blueprint("services/user_rules", __name__)
def get_utc_time():
    """Return the current UTC time as a ``YYYY-MM-DDTHH:MM:SSZ`` string (no subseconds)."""
    now = datetime.utcnow()
    return now.strftime("%Y-%m-%dT%H:%M:%SZ")
def create_user_rules_index(es_url, es_index):
    """Create the user rules index, applying the percolator mapping."""
    # load the percolator mapping shipped alongside the app's configs
    mapping_file = os.path.normpath(
        os.path.join(app.root_path, "..", "configs", "user_rules_dataset.mapping")
    )
    with open(mapping_file) as mapping_fh:
        mapping = mapping_fh.read()
    response = requests.put("%s/%s" % (es_url, es_index), data=mapping)
    response.raise_for_status()
def add_grq_mappings(es_url, es_index):
    """Copy doc-type mappings from the GRQ product indexes into the user rules index."""
    # mappings already present in the user rules index
    r = requests.get("%s/%s/_mapping" % (es_url, es_index))
    r.raise_for_status()
    existing = r.json()[es_index]["mappings"]
    # all mappings reachable through the GRQ product index alias
    grq_index = app.config["ES_INDEX"]
    r = requests.get("%s/%s/_mapping" % (es_url, grq_index))
    r.raise_for_status()
    for idx_info in r.json().values():
        for doc_type, doc_mapping in idx_info["mappings"].items():
            if doc_type in existing:
                continue  # keep mappings that already exist untouched
            put = requests.put(
                "%s/%s/_mapping/%s" % (es_url, es_index, doc_type),
                data=json.dumps(doc_mapping),
            )
            put.raise_for_status()
@mod.route("/user_rules/get_jobspec_names", methods=["GET"])
def get_jobspecs():
    """Get the list of jobspecs."""
    try:
        jspecs = hysds_commons.job_spec_utils.get_job_spec_types(
            app.config["MOZART_ES_URL"], logger=app.logger
        )
    except requests.exceptions.HTTPError as e:
        # a missing job-specs index simply means no jobspecs exist yet
        if e.response.status_code != 404:
            raise
        jspecs = []
    return jsonify({"jobspecs": jspecs})
@mod.route("/user_rules/get_container_names", methods=["GET"])
def get_containers():
    """Get the list of containers."""
    try:
        cspecs = hysds_commons.container_utils.get_container_types(
            app.config["MOZART_ES_URL"], logger=app.logger
        )
    except requests.exceptions.HTTPError as e:
        # a missing containers index simply means no containers exist yet
        if e.response.status_code != 404:
            raise
        cspecs = []
    return jsonify({"containers": cspecs})
@mod.route("/user_rules/actions_config", methods=["GET"])
@login_required
def get_actions_config():
    """Return actions config."""
    # map the requesting interface to the permission flag it cares about
    ifc = request.args.get("ifc")
    ifc_filters = {"monitor": "monitoring_allowed", "process": "processing_allowed"}
    if ifc not in ifc_filters:
        raise RuntimeError("Invalid interface: %s" % ifc)
    ifc_filter = ifc_filters[ifc]
    # enable actions that need special auth
    try:
        action_specs = sorted(
            hysds_commons.action_utils.get_action_spec(
                app.config["ES_URL"],
                app.config["MOZART_ES_URL"],
                app.config["OPS_USER"],
            ),
            key=lambda s: s["label"].lower(),
        )
    except requests.exceptions.HTTPError as e:
        if e.response.status_code != 404:
            raise
        action_specs = []
    actions = []
    for action in action_specs:
        if not action[ifc_filter]:
            continue
        # non-public actions become visible when this user is explicitly allowed
        if action["public"] is False and g.user.id in action.get("allowed_accounts", []):
            action["public"] = True
        actions.append(action)
    return jsonify({"actions": actions})
@mod.route("/user_rules/get_job_queues", methods=["GET"])
@login_required
def get_job_queues():
    """Return list of job queues."""
    # the mozart REST API knows which queues are valid for the given job type
    job_type = request.args.get("job_type")
    return jsonify(
        hysds_commons.mozart_utils.get_queue_list(app.config["MOZART_REST_API"], job_type)
    )
@mod.route("/user_rules/add", methods=["POST"])
@login_required
def add_user_rule():
    """Add a user rule.

    Reads the rule definition from the POSTed form, verifies the rule name is not
    already taken by this user and that the selected workflow maps to a known job
    type, then inserts the rule as a percolator query document into the user
    rules index. Returns a JSON ``{success, message, result}`` payload; failures
    respond with HTTP 500.
    """
    # get rule fields from the submitted form
    rule_name = request.form["rule_name"]
    workflow = request.form["workflow"]
    priority = int(request.form.get("priority", 0))
    query_string = request.form["query_string"]
    kwargs = request.form["kwargs"]
    queue = request.form["queue"]
    if workflow is None:
        return (
            jsonify(
                {
                    "success": False,
                    "message": "Workflow not specified.",
                    "result": None,
                }
            ),
            500,
        )
    # app.logger.debug("user: %s" % g.user.id)
    # app.logger.debug("rule_name: %s" % rule_name)
    # app.logger.debug("workflow: %s" % workflow)
    # app.logger.debug("priority: %s" % priority)
    # app.logger.debug("query_string: %s" % query_string)
    # app.logger.debug("kwargs: %s" % kwargs)
    # app.logger.debug("Adding tag '%s' to id '%s'." % (tag, id))
    # get es url and index
    es_url = app.config["ES_URL"]
    es_index = app.config["USER_RULES_INDEX"]
    # if the user rules index doesn't exist yet, create it
    r = requests.get("%s/%s" % (es_url, es_index))
    if r.status_code == 404:
        create_user_rules_index(es_url, es_index)
    # ensure GRQ product index mappings exist in percolator index
    add_grq_mappings(es_url, es_index)
    # query for an existing rule with the same name owned by this user
    query = {
        "query": {
            "bool": {
                "must": [
                    {"term": {"username": g.user.id}},
                    {"term": {"rule_name": rule_name}},
                ]
            }
        }
    }
    r = requests.post(
        "%s/%s/.percolator/_search" % (es_url, es_index), data=json.dumps(query)
    )
    result = r.json()
    if r.status_code != 200:
        app.logger.debug(
            "Failed to query ES. Got status code %d:\n%s"
            % (r.status_code, json.dumps(result, indent=2))
        )
    r.raise_for_status()
    # reject duplicate rule names for the same user
    if result["hits"]["total"] == 1:
        app.logger.debug("Found a rule using that name already: %s" % rule_name)
        return (
            jsonify(
                {
                    "success": False,
                    "message": "Found a rule using that name already: %s" % rule_name,
                    "result": None,
                }
            ),
            500,
        )
    # resolve the workflow to its job type and query flags from the action specs
    job_type = None
    passthru_query = False
    query_all = False
    mtime = get_utc_time()
    for action in sorted(
        hysds_commons.action_utils.get_action_spec(
            app.config["ES_URL"], app.config["MOZART_ES_URL"], app.config["OPS_USER"]
        ),
        key=lambda s: s["label"].lower(),
    ):
        if action["type"] == workflow:
            job_type = action["job_type"]
            passthru_query = action.get("passthru_query", False)
            query_all = action.get("query_all", False)
    if job_type is None:
        app.logger.debug("No job_type find for '%s'." % workflow)
        return (
            jsonify(
                {
                    "success": False,
                    "message": "No job_type found for '%s'." % workflow,
                    "result": None,
                }
            ),
            500,
        )
    # upsert new percolator document describing the rule
    new_doc = {
        "workflow": workflow,
        "priority": priority,
        "rule_name": rule_name,
        "username": g.user.id,
        "query_string": query_string,
        "kwargs": kwargs,
        "job_type": job_type,
        "enabled": True,
        "query": json.loads(query_string),
        "passthru_query": passthru_query,
        "query_all": query_all,
        "queue": queue,
        "modified_time": mtime,
        "creation_time": mtime,
    }
    r = requests.post(
        "%s/%s/.percolator/" % (es_url, es_index), data=json.dumps(new_doc)
    )
    result = r.json()
    # ES returns 201 Created on successful document insertion
    if r.status_code != 201:
        app.logger.debug(
            "Failed to insert new rule for %s. Got status code %d:\n%s"
            % (g.user.id, r.status_code, json.dumps(result, indent=2))
        )
    r.raise_for_status()
    return jsonify({"success": True, "message": "", "result": result,})
@mod.route("/user_rules/list", methods=["GET"])
@login_required
def get_user_rules():
    """Get user rules.

    Returns every rule owned by the logged-in user (or all rules when the user is
    the OPS user) by scanning the percolator documents of the user rules index
    with the ES scan/scroll API.
    """
    # get url and index
    es_url = app.config["ES_URL"]
    es_index = app.config["USER_RULES_INDEX"]
    # if the user rules index doesn't exist yet, create it
    r = requests.get("%s/%s" % (es_url, es_index))
    if r.status_code == 404:
        create_user_rules_index(es_url, es_index)
    # ensure GRQ product index mappings exist in percolator index
    add_grq_mappings(es_url, es_index)
    # OPS user sees every rule; everyone else only their own
    if g.user.id == app.config["OPS_USER"]:
        query = {"query": {"match_all": {}}}
    else:
        query = {"query": {"bool": {"must": [{"term": {"username": g.user.id}}]}}}
    # open a scan-type scroll so large result sets are fetched in batches of 100
    r = requests.post(
        "%s/%s/.percolator/_search?search_type=scan&scroll=10m&size=100"
        % (es_url, es_index),
        data=json.dumps(query),
    )
    if r.status_code != 200:
        app.logger.debug(
            "Failed to query ES. Got status code %d:\n%s"
            % (r.status_code, json.dumps(r.content, indent=2))
        )
    r.raise_for_status()
    # app.logger.debug("result: %s" % pformat(r.json()))
    scan_result = r.json()
    count = scan_result["hits"]["total"]
    scroll_id = scan_result["_scroll_id"]
    # drain the scroll: each batch returns hits until the cursor is exhausted
    rules = []
    rule_count = 0
    while True:
        r = requests.post("%s/_search/scroll?scroll=10m" % es_url, data=scroll_id)
        res = r.json()
        scroll_id = res["_scroll_id"]  # each response carries the next cursor
        if len(res["hits"]["hits"]) == 0:
            break
        for hit in res["hits"]["hits"]:
            rule_count += 1
            rule = hit["_source"]
            rule["#"] = rule_count  # running row number for the UI table
            rule["id"] = hit["_id"]
            rules.append(rule)
    return jsonify({"success": True, "rules": rules})
@mod.route("/user_rules/remove", methods=["POST"])
@login_required
def remove_user_rule():
    """Remove a user rule."""
    rule_id = request.form["id"]
    if rule_id is None:
        return jsonify({"success": False, "message": "Rule ID not specified."}), 500
    app.logger.debug("Removing rule id '%s'." % rule_id)
    # delete the percolator document holding the rule
    es_url = app.config["ES_URL"]
    es_index = app.config["USER_RULES_INDEX"]
    r = requests.delete("%s/%s/.percolator/%s" % (es_url, es_index, rule_id))
    result = r.json()
    if r.status_code != 200:
        app.logger.debug(
            "Failed to delete rule with ID %s. Got status code %d" % (rule_id, r.status_code)
        )
        r.raise_for_status()
    return jsonify({"success": True, "message": "", "result": result,})
@mod.route("/user_rules/toggle_status", methods=["POST"])
@login_required
def toggle_status():
    """Toggle enabled parameter."""
    rule_id = request.form["id"]
    if rule_id is None:
        return jsonify({"success": False, "message": "Product ID not specified."}), 500
    enabled = request.form["enabled"]
    app.logger.debug("Setting enabled to '%s' to id '%s'." % (enabled, rule_id))
    # the form sends the flag as a string; anything but "true" disables the rule
    enabled = enabled == "true"
    # partial update of the rule document, creating it if missing
    new_doc = {
        "doc": {"enabled": enabled, "modified_time": get_utc_time(),},
        "doc_as_upsert": True,
    }
    es_url = app.config["ES_URL"]
    es_index = app.config["USER_RULES_INDEX"]
    r = requests.post(
        "%s/%s/.percolator/%s/_update" % (es_url, es_index, rule_id),
        data=json.dumps(new_doc),
    )
    result = r.json()
    if r.status_code != 200:
        app.logger.debug(
            "Failed to update enabled field for %s. Got status code %d:\n%s"
            % (rule_id, r.status_code, json.dumps(result, indent=2))
        )
        r.raise_for_status()
    return jsonify({"success": True, "message": ""})
@mod.route("/user_rules/edit", methods=["POST"])
@login_required
def edit_user_rule():
"""Edit a user rule."""
# get rule
id = request.form["id"]
rule_name = request.form["rule_name"]
workflow = request.form["workflow"]
priority = int(request.form.get("priority", 0))
queue = request.form["queue"]
query_string = request.form["query_string"]
kwargs = request.form["kwargs"]
if workflow is None:
return (
jsonify(
{
"success": False,
"message": "Workflow not specified.",
"result": None,
}
),
500,
)
# app.logger.debug("user: %s" % g.user.id)
# app.logger.debug("rule_id: %s" % id)
# app.logger.debug("rule_name: %s" % rule_name)
# app.logger.debug("workflow: %s" % workflow)
# app.logger.debug("query_string: %s" % query_string)
# app.logger.debug("kwargs: %s" % kwargs)
# app.logger.debug("Adding tag '%s' to id '%s'." % (tag, id))
# get job type
job_type = None
passthru_query = False
query_all = False
for action in sorted(
hysds_commons.action_utils.get_action_spec(
app.config["ES_URL"], app.config["MOZART_ES_URL"], app.config["OPS_USER"]
),
key=lambda s: s["label"].lower(),
):
if action["type"] == workflow:
job_type = action["job_type"]
passthru_query = action.get("passthru_query", False)
query_all = action.get("query_all", False)
if job_type is None:
app.logger.debug("No job_type find for '%s'." % workflow)
return (
jsonify(
{
"success": False,
"message": "No job_type found for '%s'." % workflow,
"result": None,
}
),
500,
)
# upsert new document
new_doc = {
"doc": {
"workflow": workflow,
"priority": priority,
"rule_name": rule_name,
"username": g.user.id,
"query_string": query_string,
"kwargs": kwargs,
"job_type": job_type,
"query": json.loads(query_string),
"passthru_query": passthru_query,
"query_all": query_all,
"queue": | |
from rpython.jit.backend.aarch64 import registers as r
from rpython.jit.backend.aarch64 import locations
from rpython.jit.backend.arm import conditions as c
from rpython.jit.backend.aarch64.arch import WORD, JITFRAME_FIXED_SIZE
from rpython.jit.metainterp.history import (Const, ConstInt, ConstFloat,
ConstPtr,
INT, REF, FLOAT)
from rpython.jit.metainterp.history import TargetToken
from rpython.jit.metainterp.resoperation import rop
from rpython.jit.backend.llsupport.regalloc import FrameManager, \
RegisterManager, TempVar, compute_vars_longevity, BaseRegalloc, \
get_scale
from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory
from rpython.jit.backend.aarch64 import registers as r
from rpython.jit.backend.aarch64.jump import remap_frame_layout_mixed
from rpython.jit.backend.aarch64.locations import imm
from rpython.jit.backend.llsupport.gcmap import allocate_gcmap
from rpython.jit.backend.llsupport.descr import CallDescr
from rpython.jit.codewriter.effectinfo import EffectInfo
from rpython.jit.codewriter import longlong
from rpython.rlib.rarithmetic import r_uint
class TempInt(TempVar):
    """Temporary box carrying an integer value during register allocation."""
    type = INT
    def __repr__(self):
        return "<TempInt at " + str(id(self)) + ">"
class TempPtr(TempVar):
    """Temporary box carrying a GC pointer during register allocation."""
    type = REF
    def __repr__(self):
        return "<TempPtr at " + str(id(self)) + ">"
class TempFloat(TempVar):
    """Temporary box carrying a float value during register allocation."""
    type = FLOAT
    def __repr__(self):
        return "<TempFloat at " + str(id(self)) + ">"
class ARMFrameManager(FrameManager):
    # Maps spilled boxes to stack slots of the JIT frame for the AArch64
    # backend.  `base_ofs` is the byte offset of the first slot inside the
    # jitframe object.
    def __init__(self, base_ofs):
        FrameManager.__init__(self)
        self.base_ofs = base_ofs
    def frame_pos(self, i, box_type):
        # Translate slot index `i` into a StackLocation carrying the concrete
        # frame-pointer-relative offset.
        return locations.StackLocation(i, locations.get_fp_offset(self.base_ofs, i), box_type)
    @staticmethod
    def frame_size(type):
        # Every box type occupies a single frame slot on this backend.
        return 1
    @staticmethod
    def get_loc_index(loc):
        # Only stack locations have a frame-slot index.
        assert loc.is_stack()
        return loc.position
class ARMRegisterManager(RegisterManager):
    """Common constant handling shared by the core and VFP managers."""
    def return_constant(self, v, forbidden_vars=[], selected_reg=None):
        # Constants are materialised into a scratch register of the right
        # type; everything else falls back to the generic implementation.
        self._check_type(v)
        if not isinstance(v, Const):
            return RegisterManager.return_constant(self, v, forbidden_vars,
                                                   selected_reg)
        if isinstance(v, ConstPtr):
            tp = REF
        elif isinstance(v, ConstFloat):
            tp = FLOAT
        else:
            tp = INT
        # avoid both the caller's forbidden vars and temps already taken
        scratch = self.get_scratch_reg(tp, self.temp_boxes + forbidden_vars,
                                       selected_reg=selected_reg)
        self.assembler.load(scratch, self.convert_to_imm(v))
        return scratch
class VFPRegisterManager(ARMRegisterManager):
    """Register manager for the AArch64 floating-point (VFP) registers."""
    all_regs = r.all_vfp_regs
    box_types = [FLOAT]
    save_around_call_regs = r.all_vfp_regs
    def __init__(self, longevity, frame_manager=None, assembler=None):
        RegisterManager.__init__(self, longevity, frame_manager, assembler)
    def convert_to_imm(self, c):
        # Floats cannot be encoded inline; store the value in the
        # assembler's data block and hand back its address.
        mem = self.assembler.datablockwrapper.malloc_aligned(8, 8)
        rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), mem)[0] = c.getfloatstorage()
        return locations.ConstFloatLoc(mem)
    def call_result_location(self, v):
        # float call results arrive in d0
        return r.d0
    def get_scratch_reg(self, type=FLOAT, forbidden_vars=[], selected_reg=None):
        assert type == FLOAT # for now
        tmp = TempFloat()
        self.temp_boxes.append(tmp)
        return self.force_allocate_reg(tmp, forbidden_vars=forbidden_vars,
                                       selected_reg=selected_reg)
class CoreRegisterManager(ARMRegisterManager):
    """Register manager for the AArch64 general-purpose (core) registers."""
    all_regs = r.all_regs
    box_types = None       # or a list of acceptable types
    no_lower_byte_regs = all_regs
    save_around_call_regs = r.caller_resp
    frame_reg = r.fp
    def __init__(self, longevity, frame_manager=None, assembler=None):
        RegisterManager.__init__(self, longevity, frame_manager, assembler)
    def call_result_location(self, v):
        # integer/pointer call results arrive in x0
        return r.x0
    def convert_to_imm(self, c):
        """Return an ImmLocation for an integer or pointer constant."""
        if isinstance(c, ConstInt):
            val = rffi.cast(lltype.Signed, c.value)
            return locations.ImmLocation(val)
        # CLEANUP: the original had a dead `assert 0` after this if/else --
        # both branches already return, so it was unreachable and is removed.
        assert isinstance(c, ConstPtr)
        return locations.ImmLocation(rffi.cast(lltype.Signed, c.value))
    def get_scratch_reg(self, type=INT, forbidden_vars=[], selected_reg=None):
        """Allocate a temporary INT or REF register, tracked in temp_boxes."""
        assert type == INT or type == REF
        if type == INT:
            box = TempInt()
        else:
            box = TempPtr()
        self.temp_boxes.append(box)
        reg = self.force_allocate_reg(box, forbidden_vars=forbidden_vars,
                                      selected_reg=selected_reg)
        return reg
    def get_free_reg(self):
        """Return a free callee-saved register, or None when none is free.

        Walks the free list from the back and skips caller-saved registers
        so the result survives calls.  The None return (previously implicit)
        is now explicit.
        """
        free_regs = self.free_regs
        for i in range(len(free_regs) - 1, -1, -1):
            if free_regs[i] not in self.save_around_call_regs:
                return free_regs[i]
        return None
DEFAULT_IMM_SIZE = 4096

def check_imm_arg(arg, size=DEFAULT_IMM_SIZE, allow_zero=True):
    """Return True if `arg` fits in an immediate field of `size` values.

    The value must be strictly below `size` and non-negative (strictly
    positive when `allow_zero` is False).
    """
    if arg >= size:
        return False
    if allow_zero:
        return arg >= 0
    return arg > 0
def check_imm_box(arg, size=DEFAULT_IMM_SIZE, allow_zero=True):
    """Return True if `arg` is a ConstInt whose value fits as an immediate."""
    if not isinstance(arg, ConstInt):
        return False
    return check_imm_arg(arg.getint(), size, allow_zero)
class Regalloc(BaseRegalloc):
    def __init__(self, assembler):
        # The frame manager and jump bookkeeping are filled in lazily by
        # _prepare() / the label-handling code.
        self.cpu = assembler.cpu
        self.assembler = assembler
        self.frame_manager = None
        self.jump_target_descr = None
        self.final_jump_op = None
    def _prepare(self, inputargs, operations, allgcrefs):
        """Common setup for allocating a trace: rewrite the operations for
        the GC, compute variable longevity, and create the frame manager and
        the two register managers.  Returns the rewritten operations."""
        cpu = self.cpu
        self.fm = ARMFrameManager(cpu.get_baseofs_of_frame_field())
        self.frame_manager = self.fm
        operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations,
                                                       allgcrefs)
        # compute longevity of variables
        longevity = compute_vars_longevity(inputargs, operations)
        self.longevity = longevity
        fm = self.frame_manager
        asm = self.assembler
        self.vfprm = VFPRegisterManager(longevity, fm, asm)
        self.rm = CoreRegisterManager(longevity, fm, asm)
        return operations
    def prepare_loop(self, inputargs, operations, looptoken, allgcrefs):
        # Entry point for a new loop: bind the input arguments to their
        # initial locations, then release them so their registers/slots can
        # be reused once their longevity ends.
        operations = self._prepare(inputargs, operations, allgcrefs)
        self._set_initial_bindings(inputargs, looptoken)
        self.possibly_free_vars(list(inputargs))
        return operations
def loc(self, var):
if var.type == FLOAT:
return self.vfprm.loc(var)
else:
return self.rm.loc(var)
def possibly_free_var(self, var):
if var.type == FLOAT:
self.vfprm.possibly_free_var(var)
else:
self.rm.possibly_free_var(var)
def force_spill_var(self, var):
if var.type == FLOAT:
self.vfprm.force_spill_var(var)
else:
self.rm.force_spill_var(var)
def possibly_free_vars_for_op(self, op):
for i in range(op.numargs()):
var = op.getarg(i)
if var is not None: # xxx kludgy
self.possibly_free_var(var)
if op.is_guard():
self.possibly_free_vars(op.getfailargs())
def possibly_free_vars(self, vars):
for var in vars:
if var is not None: # xxx kludgy
self.possibly_free_var(var)
def get_scratch_reg(self, type, forbidden_vars=[], selected_reg=None):
if type == FLOAT:
return self.vfprm.get_scratch_reg(type, forbidden_vars,
selected_reg)
else:
return self.rm.get_scratch_reg(type, forbidden_vars, selected_reg)
    def get_free_reg(self):
        # Delegate to the core manager (may return None; see
        # CoreRegisterManager.get_free_reg).
        return self.rm.get_free_reg()
    def free_temp_vars(self):
        # Release the scratch registers allocated by both managers.
        self.rm.free_temp_vars()
        self.vfprm.free_temp_vars()
def make_sure_var_in_reg(self, var, forbidden_vars=[],
selected_reg=None, need_lower_byte=False):
if var.type == FLOAT:
return self.vfprm.make_sure_var_in_reg(var, forbidden_vars,
selected_reg, need_lower_byte)
else:
return self.rm.make_sure_var_in_reg(var, forbidden_vars,
selected_reg, need_lower_byte)
def convert_to_imm(self, value):
if isinstance(value, ConstInt):
return self.rm.convert_to_imm(value)
else:
assert isinstance(value, ConstFloat)
return self.vfprm.convert_to_imm(value)
    def compute_hint_frame_locations(self, operations):
        """Optimization only: record preferred frame slots for jump args."""
        # optimization only: fill in the 'hint_frame_locations' dictionary
        # of rm and xrm based on the JUMP at the end of the loop, by looking
        # at where we would like the boxes to be after the jump.
        op = operations[-1]
        if op.getopnum() != rop.JUMP:
            return
        self.final_jump_op = op
        descr = op.getdescr()
        assert isinstance(descr, TargetToken)
        if descr._ll_loop_code != 0:
            # if the target LABEL was already compiled, i.e. if it belongs
            # to some already-compiled piece of code
            self._compute_hint_frame_locations_from_descr(descr)
        #else:
        #   The loop ends in a JUMP going back to a LABEL in the same loop.
        #   We cannot fill 'hint_frame_locations' immediately, but we can
        #   wait until the corresponding prepare_op_label() to know where the
        #   we would like the boxes to be after the jump.
    def _compute_hint_frame_locations_from_descr(self, descr):
        # For each non-constant jump argument whose target location is a
        # stack slot, record that slot as the box's preferred frame position.
        arglocs = descr._arm_arglocs
        jump_op = self.final_jump_op
        assert len(arglocs) == jump_op.numargs()
        for i in range(jump_op.numargs()):
            box = jump_op.getarg(i)
            if not isinstance(box, Const):
                loc = arglocs[i]
                if loc is not None and loc.is_stack():
                    self.frame_manager.hint_frame_pos[box] = (
                        self.fm.get_loc_index(loc))
    def position(self):
        # Current instruction index (both managers advance in lockstep).
        return self.rm.position
    def next_instruction(self):
        # Advance both register managers to the next operation.
        self.rm.next_instruction()
        self.vfprm.next_instruction()
    def prepare_op_increment_debug_counter(self, op):
        # Returns [base, scratch]: a register holding the counter's address
        # plus a scratch INT register for the assembler to use.
        boxes = op.getarglist()
        a0, = boxes
        base_loc = self.make_sure_var_in_reg(a0, boxes)
        value_loc = self.get_scratch_reg(INT, boxes)
        self.free_temp_vars()
        return [base_loc, value_loc]
    def void(self, op):
        # Operations that need no locations at all.
        return []
    prepare_op_jit_debug = void
    prepare_op_enter_portal_frame = void
    prepare_op_leave_portal_frame = void
    prepare_op_zero_array = void # dealt with in opassembler.py
    prepare_op_keepalive = void
    def prepare_int_ri(self, op, res_in_cc):
        """Allocate [l0, l1, res] for a reg/imm integer operation.

        If exactly one argument is an immediate-encodable ConstInt it is
        passed as an immediate (swapping the operands when the constant is
        first -- valid because callers are commutative ops); otherwise both
        arguments go into registers.
        """
        boxes = op.getarglist()
        a0, a1 = boxes
        imm_a0 = check_imm_box(a0)
        imm_a1 = check_imm_box(a1)
        if not imm_a0 and imm_a1:
            l0 = self.make_sure_var_in_reg(a0, boxes)
            l1 = self.convert_to_imm(a1)
        elif imm_a0 and not imm_a1:
            l1 = self.convert_to_imm(a0)
            l0 = self.make_sure_var_in_reg(a1, boxes)
        else:
            l0 = self.make_sure_var_in_reg(a0, boxes)
            l1 = self.make_sure_var_in_reg(a1, boxes)
        self.possibly_free_vars_for_op(op)
        res = self.force_allocate_reg(op)
        # note that we always allocate res, even if res_in_cc is True,
        # that only means overflow is in CC
        return [l0, l1, res]
    def prepare_op_int_add(self, op):
        return self.prepare_int_ri(op, False)
    def prepare_op_int_sub(self, op):
        # Subtraction is not commutative, so only the second operand may be
        # an immediate; returns [l0, l1, res].
        boxes = op.getarglist()
        a0, a1 = boxes
        imm_a1 = check_imm_box(a1)
        if imm_a1:
            l0 = self.make_sure_var_in_reg(a0, boxes)
            l1 = self.convert_to_imm(a1)
        else:
            l0 = self.make_sure_var_in_reg(a0, boxes)
            l1 = self.make_sure_var_in_reg(a1, boxes)
        self.possibly_free_vars_for_op(op)
        res = self.force_allocate_reg(op)
        return [l0, l1, res]
    def prepare_comp_op_int_sub_ovf(self, op, res_in_cc):
        # ignore res_in_cc
        return self.prepare_op_int_sub(op)
    def prepare_op_int_mul(self, op):
        # Both operands in registers; no immediate form is used here.
        boxes = op.getarglist()
        a0, a1 = boxes
        reg1 = self.make_sure_var_in_reg(a0, forbidden_vars=boxes)
        reg2 = self.make_sure_var_in_reg(a1, forbidden_vars=boxes)
        # NOTE(review): the boxes are freed here and again via
        # possibly_free_vars_for_op, and the result is freed right after
        # being allocated -- looks redundant but kept as-is; confirm before
        # simplifying.
        self.possibly_free_vars(boxes)
        self.possibly_free_vars_for_op(op)
        res = self.force_allocate_reg(op)
        self.possibly_free_var(op)
        return [reg1, reg2, res]
    def prepare_comp_op_int_mul_ovf(self, op, res_in_cc):
        return self.prepare_op_int_mul(op)
    def prepare_op_int_force_ge_zero(self, op):
        # [arg, res]; the argument is kept out of the result register.
        argloc = self.make_sure_var_in_reg(op.getarg(0))
        resloc = self.force_allocate_reg(op, [op.getarg(0)])
        return [argloc, resloc]
    def prepare_op_int_signext(self, op):
        # [arg, imm(numbytes), res]; numbytes is the source width in bytes
        # to sign-extend from.
        argloc = self.make_sure_var_in_reg(op.getarg(0))
        numbytes = op.getarg(1).getint()
        resloc = self.force_allocate_reg(op)
        return [argloc, imm(numbytes), resloc]
# some of those have forms of imm that they accept, but they're rather
# obscure. Can be future optimization
prepare_op_int_and = prepare_op_int_mul
prepare_op_int_or = prepare_op_int_mul
prepare_op_int_xor = prepare_op_int_mul
prepare_op_int_lshift = prepare_op_int_mul
prepare_op_int_rshift = prepare_op_int_mul
prepare_op_uint_rshift = prepare_op_int_mul
prepare_op_uint_mul_high = prepare_op_int_mul
    def prepare_int_cmp(self, op, res_in_cc):
        """Allocate locations for an integer/pointer comparison.

        Returns [l0, l1] when the result is only needed in the condition
        codes (res_in_cc), or [l0, l1, res] when a boolean result register
        must be produced.  The second operand may be an immediate.
        """
        boxes = op.getarglist()
        arg0, arg1 = boxes
        imm_a1 = check_imm_box(arg1)
        l0 = self.make_sure_var_in_reg(arg0, forbidden_vars=boxes)
        if imm_a1:
            l1 = self.convert_to_imm(arg1)
        else:
            l1 = self.make_sure_var_in_reg(arg1, forbidden_vars=boxes)
        self.possibly_free_vars_for_op(op)
        self.free_temp_vars()
        if not res_in_cc:
            res = self.force_allocate_reg(op)
            return [l0, l1, res]
        return [l0, l1]
    prepare_comp_op_int_lt = prepare_int_cmp
    prepare_comp_op_int_le = prepare_int_cmp
    prepare_comp_op_int_ge = prepare_int_cmp
    prepare_comp_op_int_gt = prepare_int_cmp
    prepare_comp_op_int_ne = prepare_int_cmp
    prepare_comp_op_int_eq = prepare_int_cmp
    prepare_comp_op_ptr_eq = prepare_comp_op_instance_ptr_eq = prepare_int_cmp
    prepare_comp_op_ptr_ne = prepare_comp_op_instance_ptr_ne = prepare_int_cmp
    prepare_comp_op_uint_lt = prepare_int_cmp
    prepare_comp_op_uint_le = prepare_int_cmp
    prepare_comp_op_uint_ge = prepare_int_cmp
    prepare_comp_op_uint_gt = prepare_int_cmp
    def prepare_float_op(self, op, res_in_cc):
        # Float comparisons always deliver their result via the condition
        # codes; both operands go into (VFP) registers.
        assert res_in_cc
        loc1 = self.make_sure_var_in_reg(op.getarg(0))
        loc2 = self.make_sure_var_in_reg(op.getarg(1))
        return [loc1, loc2]
    prepare_comp_op_float_lt = prepare_float_op
    prepare_comp_op_float_le = prepare_float_op
    prepare_comp_op_float_gt = prepare_float_op
    prepare_comp_op_float_ge = prepare_float_op
    prepare_comp_op_float_eq = prepare_float_op
    prepare_comp_op_float_ne = prepare_float_op
    def prepare_op_int_le(self, op):
        # Comparison variant that materialises a boolean result register
        # (res_in_cc=False).
        return self.prepare_int_cmp(op, False)
    prepare_op_int_lt = prepare_op_int_le
    prepare_op_int_gt = prepare_op_int_le
    prepare_op_int_ge = prepare_op_int_le
    prepare_op_int_eq = prepare_op_int_le
    prepare_op_int_ne = prepare_op_int_le
    prepare_op_uint_lt = prepare_op_int_le
    prepare_op_uint_le = prepare_op_int_le
    prepare_op_uint_gt = prepare_op_int_le
    prepare_op_uint_ge = prepare_op_int_le
    def prepare_unary(self, op):
        # [reg, res]: single (non-constant) register operand plus a result
        # register.
        a0 = op.getarg(0)
        assert not isinstance(a0, Const)
        reg = self.make_sure_var_in_reg(a0)
        self.possibly_free_vars_for_op(op)
        res = self.force_allocate_reg(op)
        return [reg, res]
    prepare_op_int_is_true = prepare_unary
    prepare_op_int_is_zero = prepare_unary
    prepare_op_int_neg = prepare_unary
    prepare_op_int_invert = prepare_unary
    def prepare_comp_unary(self, op, res_in_cc):
        # Result is delivered via the condition codes; only the operand
        # register is needed.
        a0 = op.getarg(0)
        assert not isinstance(a0, Const)
        reg = self.make_sure_var_in_reg(a0)
        return [reg]
    prepare_comp_op_int_is_true = prepare_comp_unary
    prepare_comp_op_int_is_zero = prepare_comp_unary
# --------------------------------- floats --------------------------
| |
pd.concat.
def concat(
objs: List[Union[DataFrame, Series]],
axis: Axis = 0,
join: str = "outer",
ignore_index: bool = False,
sort: bool = False,
) -> Union[Series, DataFrame]:
"""
Concatenate pandas-on-Spark objects along a particular axis with optional set logic
along the other axes.
Parameters
----------
objs : a sequence of Series or DataFrame
Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
ignore_index : bool, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
sort : bool, default False
Sort non-concatenation axis if it is not already aligned.
Returns
-------
object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
See Also
--------
Series.append : Concatenate Series.
DataFrame.join : Join DataFrames using indexes.
DataFrame.merge : Merge DataFrames by indexes or columns.
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
Combine two ``Series``.
>>> s1 = ps.Series(['a', 'b'])
>>> s2 = ps.Series(['c', 'd'])
>>> ps.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> ps.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = ps.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = ps.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> ps.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` and ``Series`` objects with different columns.
>>> ps.concat([df2, s1])
letter number 0
0 c 3.0 None
1 d 4.0 None
0 None NaN a
1 None NaN b
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``None`` values.
>>> df3 = ps.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> ps.concat([df1, df3])
letter number animal
0 a 1 None
1 b 2 None
0 c 3 cat
1 d 4 dog
Sort the columns.
>>> ps.concat([df1, df3], sort=True)
animal letter number
0 None a 1
1 None b 2
0 cat c 3
1 dog d 4
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> ps.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
>>> df4 = ps.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
Combine with column axis.
>>> ps.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
>>> reset_option("compute.ops_on_diff_frames")
"""
if isinstance(objs, (DataFrame, IndexOpsMixin)) or not isinstance(
objs, Iterable
): # TODO: support dict
raise TypeError(
"first argument must be an iterable of pandas-on-Spark "
"objects, you passed an object of type "
'"{name}"'.format(name=type(objs).__name__)
)
if len(cast(Sized, objs)) == 0:
raise ValueError("No objects to concatenate")
objs = list(filter(lambda obj: obj is not None, objs))
if len(objs) == 0:
raise ValueError("All objects passed were None")
for obj in objs:
if not isinstance(obj, (Series, DataFrame)):
raise TypeError(
"cannot concatenate object of type "
"'{name}"
"; only ps.Series "
"and ps.DataFrame are valid".format(name=type(objs).__name__)
)
if join not in ["inner", "outer"]:
raise ValueError("Only can inner (intersect) or outer (union) join the other axis.")
axis = validate_axis(axis)
psdf: DataFrame
if axis == 1:
psdfs: List[DataFrame] = [
obj.to_frame() if isinstance(obj, Series) else obj for obj in objs
]
level: int = min(psdf._internal.column_labels_level for psdf in psdfs)
psdfs = [
DataFrame._index_normalized_frame(level, psdf)
if psdf._internal.column_labels_level > level
else psdf
for psdf in psdfs
]
concat_psdf = psdfs[0]
column_labels: List[Label] = concat_psdf._internal.column_labels.copy()
psdfs_not_same_anchor = []
for psdf in psdfs[1:]:
duplicated = [label for label in psdf._internal.column_labels if label in column_labels]
if len(duplicated) > 0:
pretty_names = [name_like_string(label) for label in duplicated]
raise ValueError(
"Labels have to be unique; however, got duplicated labels %s." % pretty_names
)
column_labels.extend(psdf._internal.column_labels)
if same_anchor(concat_psdf, psdf):
concat_psdf = DataFrame(
concat_psdf._internal.with_new_columns(
[
concat_psdf._psser_for(label)
for label in concat_psdf._internal.column_labels
]
+ [psdf._psser_for(label) for label in psdf._internal.column_labels]
)
)
else:
psdfs_not_same_anchor.append(psdf)
if len(psdfs_not_same_anchor) > 0:
@no_type_check
def resolve_func(psdf, this_column_labels, that_column_labels):
raise AssertionError("This should not happen.")
for psdf in psdfs_not_same_anchor:
if join == "inner":
concat_psdf = align_diff_frames(
resolve_func,
concat_psdf,
psdf,
fillna=False,
how="inner",
)
elif join == "outer":
concat_psdf = align_diff_frames(
resolve_func,
concat_psdf,
psdf,
fillna=False,
how="full",
)
concat_psdf = concat_psdf[column_labels]
if ignore_index:
concat_psdf.columns = list(map(str, _range(len(concat_psdf.columns)))) # type: ignore[assignment]
if sort:
concat_psdf = concat_psdf.sort_index()
return concat_psdf
# Series, Series ...
# We should return Series if objects are all Series.
should_return_series = all(map(lambda obj: isinstance(obj, Series), objs))
# DataFrame, Series ... & Series, Series ...
# In this case, we should return DataFrame.
new_objs: List[DataFrame] = []
num_series = 0
series_names = set()
for obj in objs:
if isinstance(obj, Series):
num_series += 1
series_names.add(obj.name)
new_objs.append(obj.to_frame(DEFAULT_SERIES_NAME))
else:
assert isinstance(obj, DataFrame)
new_objs.append(obj)
column_labels_levels: Set[int] = set(obj._internal.column_labels_level for obj in new_objs)
if len(column_labels_levels) != 1:
raise ValueError("MultiIndex columns should have the same levels")
# DataFrame, DataFrame, ...
# All Series are converted into DataFrame and then compute concat.
if not ignore_index:
indices_of_psdfs = [psdf.index for psdf in new_objs]
index_of_first_psdf = indices_of_psdfs[0]
for index_of_psdf in indices_of_psdfs:
if index_of_first_psdf.names != index_of_psdf.names:
raise ValueError(
"Index type and names should be same in the objects to concatenate. "
"You passed different indices "
"{index_of_first_psdf} and {index_of_psdf}".format(
index_of_first_psdf=index_of_first_psdf.names,
index_of_psdf=index_of_psdf.names,
)
)
column_labels_of_psdfs = [psdf._internal.column_labels for psdf in new_objs]
index_names_of_psdfs: List[List[Optional[Label]]]
if ignore_index:
index_names_of_psdfs = [[] for _ in new_objs]
else:
index_names_of_psdfs = [psdf._internal.index_names for psdf in new_objs]
if all(name == index_names_of_psdfs[0] for name in index_names_of_psdfs) and all(
idx == column_labels_of_psdfs[0] for idx in column_labels_of_psdfs
):
# If all columns are in the same order and values, use it.
psdfs = new_objs
else:
if join == "inner":
interested_columns = set.intersection(*map(lambda x: set(x), column_labels_of_psdfs))
# Keep the column order with its firsts DataFrame.
merged_columns = [
label for label in column_labels_of_psdfs[0] if label in interested_columns
]
# When multi-index column, although pandas is flaky if `join="inner" and sort=False`,
# always sort to follow the `join="outer"` case behavior.
if (len(merged_columns) > 0 and len(merged_columns[0]) > 1) or sort:
# FIXME: better ordering
merged_columns = sorted(merged_columns, key=name_like_string)
psdfs = [psdf[merged_columns] for psdf in new_objs]
elif join == "outer":
merged_columns = []
for labels in column_labels_of_psdfs:
merged_columns.extend(label for label in labels if label not in merged_columns)
assert len(merged_columns) > 0
# Always sort when multi-index columns or there are more than two Series,
# and if there is only one Series, never sort.
sort = len(merged_columns[0]) > 1 or num_series > 1 or (num_series != 1 and sort)
if sort:
# FIXME: better ordering
merged_columns = sorted(merged_columns, key=name_like_string)
psdfs = []
for psdf in new_objs:
columns_to_add = list(set(merged_columns) - set(psdf._internal.column_labels))
# TODO: NaN and None difference for missing values. pandas seems filling NaN.
sdf = psdf._internal.resolved_copy.spark_frame
for label in columns_to_add:
sdf = sdf.withColumn(name_like_string(label), SF.lit(None))
data_columns = psdf._internal.data_spark_column_names + [
name_like_string(label) for label in columns_to_add
]
psdf = DataFrame(
psdf._internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in psdf._internal.index_spark_column_names
],
column_labels=(psdf._internal.column_labels + columns_to_add),
data_spark_columns=[scol_for(sdf, col) for | |
from __future__ import absolute_import
import pathlib
import subprocess
import tempfile
from io import BytesIO
from typing import Generator
from unittest.case import TestCase
from unittest.mock import (
MagicMock,
Mock,
call,
mock_open,
patch,
)
import pytest
from sagemaker.processing import ProcessingInput, ProcessingOutput
from sagemaker.spark.processing import (
PySparkProcessor,
SparkJarProcessor,
_SparkProcessorBase,
_HistoryServer,
FileType,
)
SPARK_EVENT_LOGS_S3_URI = "s3://bucket/spark-events"
REGION = "us-east-1"
BUCKET_NAME = "bucket"
@pytest.fixture
def processing_output():
    # Expected ProcessingOutput for continuous Spark event-log upload to S3.
    return ProcessingOutput(
        source="/opt/ml/processing/spark-events/",
        destination=SPARK_EVENT_LOGS_S3_URI,
        s3_upload_mode="Continuous",
    )
@pytest.fixture
def processing_input():
    # Minimal ProcessingInput used as the expected staged-configuration input.
    return ProcessingInput(
        source="s3_uri",
        destination="destination",
    )
@pytest.fixture()
def sagemaker_session():
    # Fully mocked SageMaker session (no AWS calls); default_bucket() returns
    # the test BUCKET_NAME.
    boto_mock = MagicMock(name="boto_session", region_name=REGION)
    session_mock = MagicMock(
        name="sagemaker_session",
        boto_session=boto_mock,
        boto_region_name=REGION,
        config=None,
        local_mode=False,
    )
    session_mock.default_bucket = Mock(name="default_bucket", return_value=BUCKET_NAME)
    return session_mock
@pytest.fixture
def spark_processor_base(sagemaker_session) -> _SparkProcessorBase:
    """A _SparkProcessorBase wired to the mocked SageMaker session."""
    return _SparkProcessorBase(
        base_job_name="sm-spark",
        role="AmazonSageMaker-ExecutionRole",
        framework_version="2.4",
        instance_count=1,
        instance_type="ml.c5.xlarge",
        image_uri="790336243319.dkr.ecr.us-west-2.amazonaws.com/sagemaker-spark:0.1",
        sagemaker_session=sagemaker_session,
    )
@pytest.fixture
def py_spark_processor(sagemaker_session) -> PySparkProcessor:
    """A PySparkProcessor wired to the mocked SageMaker session."""
    return PySparkProcessor(
        base_job_name="sm-spark",
        role="AmazonSageMaker-ExecutionRole",
        framework_version="2.4",
        instance_count=1,
        instance_type="ml.c5.xlarge",
        image_uri="790336243319.dkr.ecr.us-west-2.amazonaws.com/sagemaker-spark:0.1",
        sagemaker_session=sagemaker_session,
    )
@pytest.fixture
def tempdir() -> Generator[tempfile.TemporaryDirectory, None, None]:
    """Yield a TemporaryDirectory, removing it after the test.

    FIX: the local no longer shadows the `dir` builtin, and cleanup moved
    into a try/finally so the directory is removed even when the consuming
    test raises.
    """
    tmp_dir = tempfile.TemporaryDirectory()
    try:
        yield tmp_dir
    finally:
        tmp_dir.cleanup()
@pytest.fixture
def empty_tempdir_path() -> Generator[pathlib.Path, None, None]:
    """Yield the resolved pathlib.Path of a fresh, empty temp directory.

    FIX: return annotation corrected from Generator[str, ...] -- the fixture
    yields a pathlib.Path, not a str.  The local no longer shadows the `dir`
    builtin, and cleanup now runs even if the consuming test raises.
    """
    tmp_dir = tempfile.TemporaryDirectory()
    try:
        yield pathlib.Path(tmp_dir.name).resolve()
    finally:
        tmp_dir.cleanup()
@pytest.fixture
def jars_dir(tempdir: tempfile.TemporaryDirectory) -> str:
    # Path (str) of the temporary directory used to stage jar files.
    return tempdir.name
@pytest.fixture
def jar_file(jars_dir: str) -> Generator[str, None, None]:
    """Yield the path of a throwaway .jar file inside jars_dir.

    FIX: close the file in a finally block so it is deleted even when the
    consuming test raises.
    """
    jar = tempfile.NamedTemporaryFile(dir=jars_dir, prefix="1", suffix=".jar")
    try:
        yield jar.name
    finally:
        jar.close()
def test_pyspark_processor_instantiation(sagemaker_session):
    # This just tests that the import is right and that the processor can be instantiated
    # Functionality is tested in project root container directory.
    PySparkProcessor(
        base_job_name="sm-spark",
        role="AmazonSageMaker-ExecutionRole",
        framework_version="2.4",
        instance_count=1,
        instance_type="ml.c5.xlarge",
        sagemaker_session=sagemaker_session,
    )
# ---- Fixture data for configuration-validation tests ----
# A single valid configuration entry.
happy_config_dict = {
    "Classification": "core-site",
    "Properties": {"hadoop.security.groups.cache.secs": "250"},
}
# A list of valid configuration entries.
happy_config_list = [
    {"Classification": "core-site", "Properties": {"hadoop.security.groups.cache.secs": "250"}},
    {"Classification": "spark-defaults", "Properties": {"spark.driver.memory": "2"}},
]
# A valid entry containing nested sub-configurations.
nested_config = [
    {
        "Classification": "yarn-env",
        "Properties": {},
        "Configurations": [
            {
                "Classification": "export",
                "Properties": {
                    "YARN_RESOURCEMANAGER_OPTS": "-Xdebug -Xrunjdwp:transport=dt_socket"
                },
                "Configurations": [],
            }
        ],
    }
]
# Invalid inputs: unknown classification, or required keys missing.
invalid_classification_dict = {"Classification": "invalid-site", "Properties": {}}
invalid_classification_list = [invalid_classification_dict]
missing_classification_dict = {"Properties": {}}
missing_classification_list = [missing_classification_dict]
missing_properties_dict = {"Classification": "core-site"}
missing_properties_list = [missing_properties_dict]
@pytest.mark.parametrize(
    "config,expected",
    [
        (happy_config_dict, None),
        (invalid_classification_dict, ValueError),
        (happy_config_list, None),
        (invalid_classification_list, ValueError),
        (nested_config, None),
        (missing_classification_dict, ValueError),
        (missing_classification_list, ValueError),
        (missing_properties_dict, ValueError),
        (missing_properties_list, ValueError),
    ],
)
def test_configuration_validation(config, expected, sagemaker_session) -> None:
    # _validate_configuration must accept valid configs (expected is None)
    # and raise the expected exception class for invalid ones.
    spark = PySparkProcessor(
        base_job_name="sm-spark",
        role="AmazonSageMaker-ExecutionRole",
        framework_version="2.4",
        instance_count=1,
        instance_type="ml.c5.xlarge",
        sagemaker_session=sagemaker_session,
    )
    if expected is None:
        spark._validate_configuration(config)
    else:
        with pytest.raises(expected):
            spark._validate_configuration(config)
@patch("sagemaker.processing.ScriptProcessor.run")
def test_spark_processor_base_run(mock_super_run, spark_processor_base):
    # run() should forward submit_app plus default arguments positionally
    # to ScriptProcessor.run.
    spark_processor_base.run(submit_app="app")
    mock_super_run.assert_called_with("app", None, None, None, True, True, None, None, None)
@pytest.mark.parametrize(
    "config, expected",
    [
        (
            {
                "spark_event_logs_s3_uri": None,
                "configuration": None,
                "inputs": None,
                "outputs": None,
            },
            {"inputs": None, "outputs": None},
        ),
        (
            {
                "spark_event_logs_s3_uri": SPARK_EVENT_LOGS_S3_URI,
                "configuration": None,
                "inputs": None,
                "outputs": None,
            },
            {"inputs": None, "outputs": [processing_output]},
        ),
        (
            {
                "spark_event_logs_s3_uri": None,
                "configuration": happy_config_dict,
                "inputs": None,
                "outputs": None,
            },
            {"inputs": [processing_input], "outputs": None},
        ),
        (
            {
                "spark_event_logs_s3_uri": None,
                "configuration": happy_config_dict,
                "inputs": [],
                "outputs": None,
            },
            {"inputs": [processing_input], "outputs": None},
        ),
    ],
)
@patch("sagemaker.spark.processing.ProcessingOutput")
@patch("sagemaker.spark.processing._SparkProcessorBase._stage_configuration")
@patch("sagemaker.processing.ScriptProcessor.run")
def test_spark_processor_base_extend_processing_args(
    mock_super_run,
    mock_stage_configuration,
    mock_processing_output,
    spark_processor_base,
    config,
    expected,
    sagemaker_session,
):
    # _extend_processing_args should add a staged-configuration input when a
    # configuration is given, and an event-logs output when an S3 URI is
    # given.  NOTE(review): the `expected` values deliberately reference the
    # fixture *functions* (processing_input/processing_output) because the
    # patched constructors are set to return those same objects below.
    mock_stage_configuration.return_value = processing_input
    mock_processing_output.return_value = processing_output
    extended_inputs, extended_outputs = spark_processor_base._extend_processing_args(
        inputs=config["inputs"],
        outputs=config["outputs"],
        configuration=config["configuration"],
        spark_event_logs_s3_uri=config["spark_event_logs_s3_uri"],
    )
    assert extended_inputs == expected["inputs"]
    assert extended_outputs == expected["outputs"]
# Stand-in for the serialized configuration BytesIO created by the processor.
serialized_configuration = BytesIO("test".encode("utf-8"))
@patch("sagemaker.spark.processing.BytesIO")
@patch("sagemaker.spark.processing.S3Uploader.upload_string_as_file_body")
def test_stage_configuration(mock_s3_upload, mock_bytesIO, py_spark_processor, sagemaker_session):
    # _stage_configuration should serialize the config, upload it to the
    # default bucket, and return a ProcessingInput sourced at that S3 URI.
    desired_s3_uri = "s3://bucket/None/input/conf/configuration.json"
    mock_bytesIO.return_value = serialized_configuration
    result = py_spark_processor._stage_configuration({})
    mock_s3_upload.assert_called_with(
        body=serialized_configuration,
        desired_s3_uri=desired_s3_uri,
        sagemaker_session=sagemaker_session,
    )
    assert result.source == desired_s3_uri
@pytest.mark.parametrize(
    "config, expected",
    [
        # Missing deps, missing channel name, or unsupported URL schemes -> ValueError.
        ({"submit_deps": None, "input_channel_name": "channelName"}, ValueError),
        ({"submit_deps": ["s3"], "input_channel_name": None}, ValueError),
        ({"submit_deps": ["other"], "input_channel_name": "channelName"}, ValueError),
        ({"submit_deps": ["file"], "input_channel_name": "channelName"}, ValueError),
        # Pure S3 deps need no staging: only a comma-separated spark option is built.
        (
            {"submit_deps": ["s3", "s3"], "input_channel_name": "channelName"},
            (None, "s3://bucket,s3://bucket"),
        ),
        # Local jars are uploaded and exposed through a processing input channel.
        (
            {"submit_deps": ["jar"], "input_channel_name": "channelName"},
            (processing_input, "s3://bucket"),
        ),
    ],
)
@patch("sagemaker.spark.processing.S3Uploader")
def test_stage_submit_deps(mock_s3_uploader, py_spark_processor, jar_file, config, expected):
    """Validation and staging behaviour of _stage_submit_deps.

    Cleanup over the previous version: a byte-identical duplicate of the
    ``["file"]`` error case was removed from the parametrize table, and the
    redundant ``isinstance(e.value, expected)`` re-check was dropped —
    ``pytest.raises(expected)`` already guarantees the exception type.
    """
    submit_deps_dict = {
        None: None,
        "s3": "s3://bucket",
        "jar": jar_file,
        "file": "file://test",
        "other": "test://",
    }
    submit_deps = None
    if config["submit_deps"] is not None:
        submit_deps = [submit_deps_dict[submit_dep] for submit_dep in config["submit_deps"]]
    if expected is ValueError:
        with pytest.raises(expected):
            py_spark_processor._stage_submit_deps(submit_deps, config["input_channel_name"])
    else:
        input_channel, spark_opt = py_spark_processor._stage_submit_deps(
            submit_deps, config["input_channel_name"]
        )
        if expected[0] is None:
            # Nothing was staged: no input channel, only the spark option string.
            assert input_channel is None
            assert spark_opt == expected[1]
        else:
            # Staged deps land under the (mocked, None) job name and are mounted
            # into the container at the conventional processing input path.
            expected_source = "s3://bucket/None/input/channelName"
            assert input_channel.source == expected_source
            assert spark_opt == "/opt/ml/processing/input/channelName"
@pytest.mark.parametrize(
    "config, expected",
    [
        # Fully specified version triple -> versioned tag.
        (
            {
                "image_uri": None,
                "framework_version": "2.4",
                "py_version": "py37",
                "container_version": "1",
                "instance_type": "ml.c5.xlarge",
            },
            "153931337802.dkr.ecr.us-west-2.amazonaws.com/sagemaker-spark-processing:2.4-cpu-py37-v1",
        ),
        # No py/container version -> short "<framework>-cpu" tag.
        (
            {
                "image_uri": None,
                "framework_version": "2.4",
                "py_version": None,
                "container_version": None,
                "instance_type": "ml.c5.xlarge",
            },
            "153931337802.dkr.ecr.us-west-2.amazonaws.com/sagemaker-spark-processing:2.4-cpu",
        ),
        (
            {
                "image_uri": None,
                "framework_version": "3.0",
                "py_version": None,
                "container_version": None,
                "instance_type": "ml.c5.xlarge",
            },
            "153931337802.dkr.ecr.us-west-2.amazonaws.com/sagemaker-spark-processing:3.0-cpu",
        ),
        # An explicit image_uri bypasses retrieval entirely.
        (
            {
                "image_uri": "image_uri",
                "framework_version": "2.4",
                "py_version": None,
                "container_version": None,
                "instance_type": "ml.c5.xlarge",
            },
            "image_uri",
        ),
        # Unknown framework version -> ValueError.
        (
            {
                "image_uri": None,
                "framework_version": "invalidFrameworkVersion",
                "py_version": None,
                "container_version": None,
                "instance_type": "ml.c5.xlarge",
            },
            ValueError,
        ),
        # py_version and container_version must be given together -> ValueError.
        (
            {
                "image_uri": None,
                "framework_version": "2.4",
                "py_version": None,
                "container_version": "1",
                "instance_type": "ml.c5.xlarge",
            },
            ValueError,
        ),
        (
            {
                "image_uri": None,
                "framework_version": "2.4",
                "py_version": "py37",
                "container_version": None,
                "instance_type": "ml.c5.xlarge",
            },
            ValueError,
        ),
    ],
)
def test_retrieve_image_uri(py_spark_processor, config, expected):
    """_retrieve_image_uri builds the ECR image URI for us-west-2 (or raises
    ValueError for inconsistent/unknown version combinations)."""
    if expected is ValueError:
        with pytest.raises(expected):
            py_spark_processor._retrieve_image_uri(
                config["image_uri"],
                config["framework_version"],
                config["py_version"],
                config["container_version"],
                "us-west-2",
                config["instance_type"],
            )
    else:
        assert expected == py_spark_processor._retrieve_image_uri(
            config["image_uri"],
            config["framework_version"],
            config["py_version"],
            config["container_version"],
            "us-west-2",
            config["instance_type"],
        )
@patch("sagemaker.spark.processing._HistoryServer")
def test_terminate_history_server(history_server_mock, py_spark_processor):
    """terminate_history_server() delegates shutdown to the server's down()."""
    # Install the patched history server on the processor before terminating.
    py_spark_processor.history_server = history_server_mock
    py_spark_processor.terminate_history_server()
    # Exactly one shutdown call must have been issued.
    history_server_mock.down.assert_called_once()
@patch("sagemaker.spark.processing._ecr_login_if_needed")
@patch("sagemaker.spark.processing._pull_image")
@patch("sagemaker.spark.processing._SparkProcessorBase._prepare_history_server_env_variables")
@patch("sagemaker.spark.processing._HistoryServer.run")
@patch("sagemaker.spark.processing._SparkProcessorBase._check_history_server")
def test_start_history_server(
    mock_check_history_server,
    mock_history_server_run,
    mock_prepare_history_server_env_variables,
    mock_pull_image,
    mock_ecr_login,
    py_spark_processor,
):
    """start_history_server() pulls the image (after an ECR login), prepares the
    environment, launches the server container, and health-checks it."""
    # ECR login reporting True forces the image-pull branch.
    mock_ecr_login.return_value = True
    py_spark_processor.start_history_server()
    mock_pull_image.assert_called_once()
    mock_prepare_history_server_env_variables.assert_called_once()
    mock_history_server_run.assert_called_once()
    mock_check_history_server.assert_called_once()
@patch("sagemaker.spark.processing._SparkProcessorBase._get_notebook_instance_domain")
@patch("sagemaker.spark.processing._SparkProcessorBase._config_aws_credentials")
@patch("sagemaker.spark.processing._SparkProcessorBase._is_notebook_instance")
def test_prepare_history_server_env_variables(
    mock_is_notebook_instance,
    mock_config_aws_credentials,
    mock_get_notebook_instance_domain,
    py_spark_processor,
):
    """_prepare_history_server_env_variables env-var assembly in three modes."""
    # Case 1: on a notebook instance -> remote domain + event logs URI + region.
    mock_is_notebook_instance.return_value = True
    mock_get_notebook_instance_domain.return_value = "domain"
    result = py_spark_processor._prepare_history_server_env_variables(SPARK_EVENT_LOGS_S3_URI)
    assert len(result) == 3
    assert result[_HistoryServer.arg_remote_domain_name] == "domain"
    assert result[_HistoryServer.arg_event_logs_s3_uri] == SPARK_EVENT_LOGS_S3_URI
    assert result["AWS_REGION"] == REGION
    # Case 2: not a notebook instance -> AWS credentials are injected instead
    # of the remote domain.
    mock_is_notebook_instance.return_value = False
    mock_get_notebook_instance_domain.return_value = "domain"
    mock_config_aws_credentials.return_value = {
        "AWS_ACCESS_KEY_ID": "123",
        "AWS_SECRET_ACCESS_KEY": "456",
        "AWS_SESSION_TOKEN": "789",
    }
    result = py_spark_processor._prepare_history_server_env_variables(SPARK_EVENT_LOGS_S3_URI)
    assert len(result) == 5
    assert result[_HistoryServer.arg_event_logs_s3_uri] == SPARK_EVENT_LOGS_S3_URI
    assert result["AWS_ACCESS_KEY_ID"] == "123"
    assert result["AWS_SECRET_ACCESS_KEY"] == "456"
    assert result["AWS_SESSION_TOKEN"] == "789"
    assert result["AWS_REGION"] == REGION
    # Case 3: no explicit URI -> falls back to the processor's stored
    # _spark_event_logs_s3_uri from a previous run().
    py_spark_processor._spark_event_logs_s3_uri = SPARK_EVENT_LOGS_S3_URI
    mock_is_notebook_instance.return_value = True
    mock_get_notebook_instance_domain.return_value = "domain"
    result = py_spark_processor._prepare_history_server_env_variables(None)
    assert len(result) == 6
    assert result[_HistoryServer.arg_remote_domain_name] == "domain"
    assert result[_HistoryServer.arg_event_logs_s3_uri] == SPARK_EVENT_LOGS_S3_URI
    assert result["AWS_REGION"] == REGION
@patch("os.path.isfile")
def test_is_notebook_instance(isfile_mock, py_spark_processor):
    """_is_notebook_instance() is truthy when the notebook metadata file exists."""
    isfile_mock.return_value = True
    assert py_spark_processor._is_notebook_instance()
@pytest.mark.parametrize(
    "config, expected",
    # On a notebook instance the container shares the host network; otherwise
    # the history-server ports are published explicitly.
    [(True, "--network host"), (False, "-p 80:80 -p 15050:15050")],
)
@patch("sagemaker.spark.processing._SparkProcessorBase._is_notebook_instance")
def test_get_network_config(mock_is_notebook_instance, py_spark_processor, config, expected):
    """_get_network_config returns docker network flags per environment."""
    mock_is_notebook_instance.return_value = config
    assert py_spark_processor._get_network_config() == expected
@patch(
    "sagemaker.spark.processing.open", new_callable=mock_open, read_data='{"ResourceName":"abc"}'
)
@patch("sagemaker.spark.processing._SparkProcessorBase._is_notebook_instance")
def test_get_notebook_instance_domain(
    mock_is_notebook_instance, mock_open_file, py_spark_processor
):
    """_get_notebook_instance_domain builds the notebook URL from the
    ResourceName found in the instance metadata file (read via mocked open)."""
    mock_is_notebook_instance.return_value = True
    assert (
        py_spark_processor._get_notebook_instance_domain()
        == "https://abc.notebook.us-east-1.sagemaker.aws"
    )
@pytest.mark.parametrize(
    "config, expected",
    [
        # Started on a notebook instance -> proxied URL on the notebook domain.
        (
            {
                "is_history_server_started": True,
                "is_note_book_instance": True,
                "instance_domain": "http://test",
            },
            "History server is up on http://test/proxy/15050",
        ),
        # Started elsewhere -> local 0.0.0.0 URL.
        (
            {"is_history_server_started": True, "is_note_book_instance": False},
            "History server is up on http://0.0.0.0/proxy/15050",
        ),
        # Not started -> failure message with a docker-logs hint.
        (
            {"is_history_server_started": False, "is_note_book_instance": False},
            "History server failed to start. Please run 'docker logs history_server' to see logs",
        ),
    ],
)
@patch("sagemaker.spark.processing._SparkProcessorBase._get_notebook_instance_domain")
@patch("sagemaker.spark.processing._SparkProcessorBase._is_history_server_started")
@patch("sagemaker.spark.processing._SparkProcessorBase._is_notebook_instance")
def test_check_history_server(
    mock_is_notebook_instance,
    mock_is_history_server_started,
    mock_get_notebook_instance_domain,
    py_spark_processor,
    config,
    expected,
):
    """_check_history_server logs the right status message for each scenario."""
    mock_is_notebook_instance.return_value = config["is_note_book_instance"]
    mock_is_history_server_started.return_value = config["is_history_server_started"]
    if "instance_domain" in config:
        mock_get_notebook_instance_domain.return_value = config["instance_domain"]
    # NOTE(review): assertLogs is called on the TestCase *class* with the logger
    # name bound as self — unconventional, but presumably works for the success
    # path here; confirm before reusing this idiom elsewhere.
    with TestCase.assertLogs("sagemaker", level="INFO") as cm:
        py_spark_processor._check_history_server(ping_timeout=3)
        assert expected in cm.output[0]
@pytest.mark.parametrize(
    "config, expected",
    [
        # No submit files -> bare command, inputs unchanged.
        (
            {
                "inputs": None,
                "submit_files": None,
                "files_input": [processing_input],
                "files_opt": "opt",
                "file_type": FileType.JAR,
            },
            {"command": ["smspark-submit"], "inputs": None},
        ),
        # Jar deps -> "--jars" option appended and staged input collected.
        (
            {
                "inputs": None,
                "submit_files": ["file1"],
                "files_input": processing_input,
                "files_opt": "opt",
                "file_type": FileType.JAR,
            },
            {"command": ["smspark-submit", "--jars", "opt"], "inputs": [processing_input]},
        ),
        # Python deps -> "--py-files" option; staged input appended to existing inputs.
        (
            {
                "inputs": [processing_input],
                "submit_files": ["file1"],
                "files_input": processing_input,
                "files_opt": "opt",
                "file_type": FileType.PYTHON,
            },
            {
                "command": ["smspark-submit", "--py-files", "opt"],
                "inputs": [processing_input, processing_input],
            },
        ),
        # Staging produced nothing -> command and inputs untouched.
        (
            {
                "inputs": [processing_input],
                "submit_files": ["file1"],
                "files_input": None,
                "files_opt": "",
                "file_type": FileType.PYTHON,
            },
            {"command": ["smspark-submit"], "inputs": [processing_input]},
        ),
    ],
)
@patch("sagemaker.spark.processing._SparkProcessorBase._stage_submit_deps")
def test_handle_script_dependencies(mock_stage_submit_deps, py_spark_processor, config, expected):
    """_handle_script_dependencies extends the command and inputs per file type."""
    mock_stage_submit_deps.return_value = (config["files_input"], config["files_opt"])
    inputs = py_spark_processor._handle_script_dependencies(
        config["inputs"], config["submit_files"], config["file_type"]
    )
    assert py_spark_processor.command == expected["command"]
    assert inputs == expected["inputs"]
@pytest.mark.parametrize(
    "config, expected",
    [
        # HTTP 200 -> started; 500 or a raised error -> not started.
        ({"response": MagicMock(status=200)}, True),
        ({"response": MagicMock(status=500)}, False),
        ({"response": ValueError}, False),
    ],
)
@patch("urllib.request.urlopen")
def test_is_history_server_started(mock_urlopen, py_spark_processor, config, expected):
    """_is_history_server_started pings the server URL and maps the result."""
    mock_urlopen.return_value = config["response"]
    assert py_spark_processor._is_history_server_started() == expected
def test_validate_s3_uri(py_spark_processor):
    """_validate_s3_uri rejects URIs that do not use the s3 scheme."""
    with pytest.raises(ValueError) as excinfo:
        py_spark_processor._validate_s3_uri("http")
    # Redundant with pytest.raises, preserved from the original contract check.
    assert isinstance(excinfo.value, ValueError)
def test_config_aws_credentials(py_spark_processor):
    """_config_aws_credentials maps boto credentials to env-var names and
    degrades to an empty/falsy result when credential lookup raises."""
    # NOTE(review): "<PASSWORD>" looks like an anonymization placeholder for a
    # token value — the test only requires that input and expectation match.
    expected_result = {
        "AWS_ACCESS_KEY_ID": "123",
        "AWS_SECRET_ACCESS_KEY": "456",
        "AWS_SESSION_TOKEN": "<PASSWORD>",
    }
    creds = MagicMock(access_key="123", secret_key="456", token="<PASSWORD>")
    py_spark_processor.sagemaker_session.boto_session.get_credentials = MagicMock(
        name="get_credentials", return_value=creds
    )
    assert py_spark_processor._config_aws_credentials() == expected_result
    # A failing credential provider must not propagate the exception.
    py_spark_processor.sagemaker_session.boto_session.get_credentials = MagicMock(
        name="get_credentials", side_effect=ValueError
    )
    assert not py_spark_processor._config_aws_credentials()
@pytest.mark.parametrize(
    "config, expected",
    [
        # submit_app is mandatory -> ValueError.
        ({"submit_app": None, "files": ["test"], "inputs": [], "opt": None}, ValueError),
        # No dep files -> inputs pass through unchanged.
        (
            {"submit_app": "test.py", "files": None, "inputs": [processing_input], "opt": None},
            [processing_input],
        ),
        # One staged input per dep kind (py/jars/files) is appended; the mock
        # returns the same processing_input object each time.
        (
            {
                "submit_app": "test.py",
                "files": ["test"],
                "inputs": [processing_input],
                "opt": None,
            },
            [processing_input, processing_input, processing_input, processing_input],
        ),
        (
            {"submit_app": "test.py", "files": ["test"], "inputs": None, "opt": None},
            [processing_input, processing_input, processing_input],
        ),
        (
            {"submit_app": "test.py", "files": ["test"], "inputs": None, "opt": "opt"},
            [processing_input, processing_input, processing_input],
        ),
    ],
)
@patch("sagemaker.spark.processing._SparkProcessorBase.run")
@patch("sagemaker.spark.processing._SparkProcessorBase._stage_submit_deps")
@patch("sagemaker.spark.processing._SparkProcessorBase._generate_current_job_name")
def test_py_spark_processor_run(
    mock_generate_current_job_name,
    mock_stage_submit_deps,
    mock_super_run,
    py_spark_processor,
    config,
    expected,
):
    """PySparkProcessor.run stages deps and delegates to the base run()."""
    mock_stage_submit_deps.return_value = (processing_input, "opt")
    mock_generate_current_job_name.return_value = "jobName"
    if expected is ValueError:
        with pytest.raises(expected):
            py_spark_processor.run(
                submit_app=config["submit_app"],
                submit_py_files=config["files"],
                submit_jars=config["files"],
                submit_files=config["files"],
                inputs=config["inputs"],
            )
    else:
        py_spark_processor.run(
            submit_app=config["submit_app"],
            submit_py_files=config["files"],
            submit_jars=config["files"],
            submit_files=config["files"],
            inputs=config["inputs"],
        )
        # The base run() receives the extended inputs and default run options.
        mock_super_run.assert_called_with(
            submit_app=config["submit_app"],
            inputs=expected,
            outputs=None,
            arguments=None,
            wait=True,
            logs=True,
            job_name="jobName",
            experiment_config=None,
        )
@patch("sagemaker.spark.processing._SparkProcessorBase.run")
@patch("sagemaker.spark.processing._SparkProcessorBase._stage_submit_deps")
@patch("sagemaker.spark.processing._SparkProcessorBase._generate_current_job_name")
def test_py_spark_processor_run_twice(
mock_generate_current_job_name, mock_stage_submit_deps, mock_super_run, py_spark_processor
):
mock_stage_submit_deps.return_value = (processing_input, "opt")
mock_generate_current_job_name.return_value = "jobName"
expected_command = ["smspark-submit", "--py-files", "opt", "--jars", "opt", "--files", "opt"]
py_spark_processor.run(
submit_app="submit_app",
submit_py_files="files",
submit_jars="test",
submit_files="test",
inputs=[],
)
py_spark_processor.run(
submit_app="submit_app",
submit_py_files="files",
submit_jars="test",
submit_files="test",
inputs=[],
)
assert | |
singular simple past subjunctive of",
"second-person singular simple present form of",
"simple past and past participle of",
"simple past of",
"simple past plural of",
"simple past singular of",
"simple past tense and past participle of" ,
"simple past tense of",
"singular of",
"spurious plural of",
"superlative degree of",
"superlative form of",
"third-person plural present indicative of",
"third-person plural simple present of",
"third-person simple past of",
"third-person singular of",
"third-person singular present simple form of",
"third-person singular simple past indicative of",
"third-person singular simple present indicative of",
"third-person singular simple present subjunctive of",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
]
# Prefixes that mark a definition as a spelling variant ("X spelling of Y").
# Entries are matched literally later (after "-" escaping); the trailing empty
# strings are padding and are skipped by the pattern builder.
# NOTE(review): "Irelandstandard" and "standardspelling" below look like
# line-wrap typos (missing space) carried over from the dump — confirm against
# the source data before "fixing", since these strings are matched verbatim.
# NOTE(review): "An English surname, Alternative spelling of" appears twice.
spelling_pattern = [
    "Alternative spelling of",
    "American spelling and Oxford British English standard spelling of",
    "American spelling form of",
    "American spelling spelling of",
    "American spelling standard form of",
    "American spelling standard spelling of",
    "An English surname, Alternative spelling of",
    "An English surname, Alternative spelling of",
    "An Irish surname, Alternative spelling of",
    "Archaic spelling of",
    "Australia, Canada, Ireland, New Zealand, and Britain standard spelling of",
    "Britain and Australia spelling of",
    "Britain and Canada spelling of",
    "Britain and Canada standard spelling of",
    "Britain and Ireland standard spelling of",
    "Britain spelling of",
    "Britain standard spelling of",
    "Britain, Australia, New Zealand, and Canada spelling of",
    "Britain, Australia, and Canada spelling of",
    "Britain, Australia, and New Zealand standard spelling of",
    "Britain, Canada, Australia, New Zealand, Ireland, and South Africa spelling of",
    "Britain, Canada, Australia, and New Zealand standard spelling of",
    "Britain, Canada, Ireland, South Africa, Australia, and New Zealand spelling of",
    "Britain, Canada, New Zealand, Australia, and Ireland spelling of",
    "Britain, Ireland, Australia, New Zealand, and South Africa spelling of",
    "Britain, Ireland, Canada, Australia, New Zealand, and South Africa spelling of",
    "British spelling and Canada standard spelling of",
    "British spelling and Canadian spelling spelling of",
    "British spelling and Canadian spelling standard spelling of",
    "British spelling form of",
    "British spelling spelling of",
    "British spelling standard form of",
    "British spelling standard spelling of",
    "British spelling, Canadian spelling, Commonwealth of Nations, and Irelandstandard spelling of",
    "British, Australian, New Zealand spelling and Canadian spelling standardspelling of",
    "Canada spelling of",
    "Canada standard spelling of",
    "Canada, US standard spelling of",
    "Canadian spelling of",
    "Commonwealth of Nations spelling of",
    "Commonwealth of Nations standard spelling of",
    "Dated spelling of",
    "Deliberate misspelling of",
    "Etymologically incorrect rare spelling of",
    "Etymologically incorrect spelling of",
    "Euphemistic spelling of",
    "Eye dialect spelling of",
    "Federal Reserve System. Alternative spelling of",
    "Feminist spelling of",
    "Former spelling of",
    "Latinised spelling of",
    "Leet spelling of",
    "Misspelling of",
    "Most common English spelling of",
    "New Zealand spelling of",
    "Non-Oxford British English and New Zealand standard spelling of",
    "Non-Oxford British English spelling of",
    "Non-Oxford British English standard spelling of",
    "Nonstandard spelling of",
    "North American spelling standard spelling of",
    "Obsolete spelling of",
    "Oxford British English spelling of",
    "Partly Latinised spelling of",
    "Phonetic alternative spelling of",
    "Pronunciation spelling of",
    "Rare spelling of",
    "Sixteenth-Century Scottish spelling of",
    "Standard spelling of",
    "US alternative spelling of",
    "US and Oxford British English standard spelling of",
    "US spelling of",
    "US standard spelling of",
    "US, Canada, and Oxford British English standard spelling of",
    "Uncommon spelling of",
    "alternative spelling of",
    "archaic, chiefly Scottish spelling of",
    "chiefly US spelling of",
    "deliberate misspelling of",
    "misspelled form of",
    "nonstandard or archaic spelling of",
    "rare spelling of",
    "",
    "",
    "",
    "",
    "",
    "",
]
# Prefixes that mark a definition as an abbreviation/contraction of another
# word; the trailing empty string is padding skipped by the pattern builder.
short_pattern = [
    "Ellipsis of" ,
    "Contraction of",
    "Initialism of" ,
    "abbreviation of",
    "contraction of",
    "contracted form of",
    "Postal abbreviation of",
    "shortened form of",
    "Abbreviated form of",
    ": Initialism of",
    ": Abbreviation of",
    "",
]
# Placeholder list of empty strings — every entry is skipped by the pattern
# builder; presumably kept as a template for future pattern groups.
dummy = [
    "",
    "",
    "",
    "",
    "",
    "",
    "",
    "",
    "",
    "",
    "",
    "",
    "",
    "",
    "",
]
#parse_csv_2()
def parse_csv_3():
    """Extract "simple past" and "plural of" relations from the wiktionary dump.

    Reads ``enwiktionary_en_20200106.csv_3`` (``word|||lang|||pos|||definition``
    per line, definition in HTML) and writes classified word/base-form pairs to
    ``enwiktionary_en_20200106_past_tense.csv``.

    Fixes over the previous version: both file handles are now managed by
    ``with`` (they leaked when the early ``return`` in the except clause fired,
    which made the trailing ``close()`` calls unreachable), and the regexes use
    raw strings so ``\\s`` is not an invalid escape sequence.
    """
    h = html2text.HTML2Text()
    h.ignore_links = True
    h.ignore_emphasis = True
    with open("d:\\enwiktionary_en_20200106_past_tense.csv", 'w', -1, encoding="utf_8_sig") as f_dump, \
         open("d:\\enwiktionary_en_20200106.csv_3", 'r', -1, encoding="utf_8_sig") as f_src:
        for line in f_src:
            line = line.strip()
            # Skip blank lines.
            if len(line) < 1:
                continue
            try:
                m = re.search(r"^(?P<word>.+)[|]{3}(?P<lang>.+)[|]{3}(?P<pos>.+)[|]{3}(?P<definition>.*)$", line)
                # Analyze the word's inflection from its rendered definition.
                word = m.group("word").replace("_", " ")
                lang = m.group("lang")
                pos = m.group("pos")
                html = m.group("definition")
                text = h.handle(html).replace("\n", "") + "\n"

                # --- "simple past tense and past participle of" (currently a no-op) ---
                pattern1 = r"^simple past tense and past participle of (?P<form>.+)$"
                pattern2 = r"^simple past tense and past participle of (?P<form>[a-zA-Z0-9_' -]+)[.;]?\s*$"
                m = re.search(pattern1, text)
                if m:
                    n = re.search(pattern2, text)
                    if n:
                        # f_dump.write("*,"+word+","+n.group('form')+"\n")
                        pass
                    else:
                        # f_dump.write(":,"+word+","+m.group('form')+"\n")
                        pass
                    continue

                # --- "plural of" ---
                pattern1 = r"^plural of (?P<form>.+)$"
                pattern2 = r"^plural of (?P<form>[a-zA-Z0-9_' -]+)[.;]?\s*$"
                pattern3 = r"^plural of (?P<form>[a-zA-Z0-9_' -]+)[.;]?\s*(?P<other>.+?)$"
                m = re.search(pattern1, text)
                if m:
                    n = re.search(pattern2, text)
                    if n:
                        # A compound headword is suspect when the base form does
                        # not have the same number of words as the headword.
                        form = n.group('form')
                        if len(form.split(" ")) != len(word.split(" ")):
                            f_dump.write("*2," + word + "," + form + "\n")
                        else:
                            f_dump.write("*1," + word + "," + form + "\n")
                    else:
                        k = re.search(pattern3, text)
                        if k:
                            if len(k.group('form').split(" ")) != len(word.split(" ")):
                                f_dump.write("*3," + word + "," + k.group('form') + "," + k.group('other') + "\n")
                            else:
                                f_dump.write("*4," + word + "," + k.group('form') + "," + k.group('other') + "\n")
                        else:
                            f_dump.write("*5," + word + "," + m.group('form') + "\n")
                    continue
                # Abbreviations are intentionally not handled here, e.g.:
                #   Abbreviation of (fire) extinguisher.
                #   Abbreviated form of deceased.
            except Exception as e:
                # Abort on the first malformed record (original behaviour kept);
                # the context managers still close both files.
                print(line + str(e))
                return
def parse_csv_4():
    """Classify wiktionary definitions against the combined "form of" patterns.

    Builds one big regex alternation from the module-level pattern lists
    (``simple_form_pattern``, ``important_pattern``, ``spelling_pattern``,
    ``short_pattern``) and writes classified word/base-form pairs to the
    past-tense CSV.

    Bug fix: the HTML-to-text converter ``h`` was never created in this
    function (it was a local variable of ``parse_csv_3``), so the first record
    raised ``NameError``, which the except clause turned into an immediate
    return — the function produced no output. ``h`` is now instantiated
    locally, configured the same way as in the sibling parsers. File handles
    are managed by ``with`` and regexes use raw strings.
    """
    h = html2text.HTML2Text()
    h.ignore_links = True
    h.ignore_emphasis = True
    with open("d:\\enwiktionary_en_20200106_past_tense.csv", 'w', -1, encoding="utf_8_sig") as f_dump, \
         open("d:\\enwiktionary_en_20200106.csv_3", 'r', -1, encoding="utf_8_sig") as f_src:
        # Build the alternation from every non-empty fragment; "-" is wrapped in
        # a character class so it matches literally. The "===" seed is removed
        # when the final group is assembled.
        full_pattern = "==="
        p = simple_form_pattern + important_pattern + spelling_pattern + short_pattern
        for item in p:
            if len(item.strip()) < 1:
                continue
            item = item.replace("-", "[-]")
            full_pattern += "|" + item
        full_pattern = "(" + full_pattern.replace("===|", "") + ")"
        for line in f_src:
            line = line.strip()
            # Skip blank lines.
            if len(line) < 1:
                continue
            try:
                m = re.search(r"^(?P<word>.+)[|]{3}(?P<lang>.+)[|]{3}(?P<pos>.+)[|]{3}(?P<definition>.*)$", line)
                # Analyze the word's inflection from its rendered definition.
                word = m.group("word").replace("_", " ")
                lang = m.group("lang")
                pos = m.group("pos")
                html = m.group("definition")
                # Number of space-separated words in the (possibly compound) headword.
                subword_count = len(word.split(" "))
                text = h.handle(html).replace("\n", "")
                pattern1 = "^" + full_pattern + r"(?P<form>.+)$"
                pattern2 = "^" + full_pattern + r"(?P<form>[a-zA-Z0-9_' -]+)\s*[.;]?(?P<more>\s*: (most|most)\s*[a-zA-Z0-9_' -]+)?\s*$"
                pattern3 = "^" + full_pattern + r"(?P<form>[a-zA-Z0-9_' -]+)\s*[.;]?\s*(?P<other>.+?)$"
                m = re.search(pattern1, text, re.I)
                if m:
                    n = re.search(pattern2, text, re.I)
                    if n:
                        # A compound headword is suspect when the base form does
                        # not have the same number of words as the headword.
                        form = n.group('form').strip()
                        if len(form.split(" ")) == subword_count:
                            f_dump.write("*1," + word + "," + form + "\n")
                        else:
                            f_dump.write("*2," + word + "," + form + "\n")
                    else:
                        k = re.search(pattern3, text, re.I)
                        if k:
                            form = k.group('form').strip()
                            if len(form.split(" ")) != subword_count:
                                f_dump.write("*3," + word + "," + form + "," + k.group('other') + "\n")
                            else:
                                f_dump.write("*4," + word + "," + form + "," + k.group('other') + "===" + text + "\n")
                        else:
                            f_dump.write("*5," + word + "," + m.group('form') + "\n")
                    continue
                else:
                    f_dump.write("*6," + text + "\n")
            except Exception as e:
                # Abort on the first malformed record (original behaviour kept);
                # the context managers still close both files.
                print(line + str(e))
                return
#parse_csv_4()
def parse_csv_5():
    """Split rendered definitions into segments and dump two/three-word prefixes.

    Used to discover new "form of" phrasings: each definition is split on
    punctuation and the first three words of every segment are written with a
    "** " marker for later frequency analysis.

    Fixes over the previous version: the ``HTML2Text`` converter is created
    once instead of per input line (it is loop-invariant), the dead
    ``html = None`` assignment was removed, file handles are managed by
    ``with`` (they leaked on the early ``return`` in the except clause), and
    regexes use raw strings.
    """
    h = html2text.HTML2Text()
    h.ignore_links = True
    h.ignore_emphasis = True
    with open("d:\\enwiktionary_en_20200106.csv_dump_split", 'w', -1, encoding="utf_8_sig") as f_dump, \
         open("d:\\enwiktionary_en_20200106.csv", 'r', -1, encoding="utf_8_sig") as f_input:
        word_pattern = "[a-zA-Z0-9_'&-]+"
        word2_pattern = "^(?P<word2>" + word_pattern + r"\s+" + word_pattern + r"\s+" + word_pattern + ")"
        for line in f_input:
            line = line.strip()
            # Skip blank lines.
            if len(line) < 1:
                continue
            try:
                m = re.search(r"^(?P<word>.+)[|]{3}(?P<lang>.+)[|]{3}(?P<pos>.+)[|]{3}(?P<definition>.*)$", line)
                # Analyze the word's inflection from its rendered definition.
                word = m.group("word").replace("_", " ")
                lang = m.group("lang")
                pos = m.group("pos")
                html = m.group("definition")
                # Number of space-separated words in the (possibly compound) headword.
                subword_count = len(word.split(" "))
                text = h.handle(html).replace("\n", " ")
                # Split on sentence/clause punctuation and emit the first three
                # words of each segment (marked "** "), or the whole segment.
                text_segs = re.split("[;.:,()]", text)
                for seg in text_segs:
                    seg = seg.strip().replace("\r\n", " ").replace("\n", " ")
                    m = re.search(word2_pattern, seg)
                    if m:
                        f_dump.write("** " + m.group("word2") + "\n")
                    else:
                        f_dump.write(seg + "\n")
            except Exception as e:
                # Abort on the first malformed record (original behaviour kept);
                # the context managers still close both files.
                print(line + str(e))
                return
# Inflection prefixes ("<form description> of") ordered longest/most specific
# first so that regex alternation prefers the most specific match.
pattern = [
    "third-person singular simple present subjunctive of",
    "third-person singular simple present indicative of",
    "third-person singular simple present indicative form of",
    "third-person singular simple past indicative of",
    "third-person singular present simple form of",
    "third-person singular of",
    "third-person simple past of",
    "third-person plural simple present of",
    "third-person plural present indicative of",
    "simple past tense of",
    "simple past tense and past participle of",
    "simple past singular of",
    "simple past plural of",
    "simple past and past participle of",
    "second-person singular simple present of",
    "second-person singular simple present form of",
    "second-person singular simple past subjunctive of",
    "second-person singular simple past indicative of",
    "second-person singular simple past form of",
    "second-person singular present subjunctive of",
    "second-person singular of",
    "second-person plural simple present of",
    "second-person plural",
    "present participle of",
    "plural simple past of",
    "first-person singular present indicative of",
    "first-person simple past of",
    "first-person plural simple present of",
    "first-person plural simple past indicative of",
    "alternative third-person singular past of",
    "alternative simple past of",
    "alternative plural of",
    "alternative past participle of",
    "alternative past of",
    "Third-person singular simple present indicative form of",
    "Present participle and gerund of",
    "singular of",
    "simple past of",
    "plural of",
    "plural simple present of",
    "past participle of",
    "past tense of",
]
# Escape hyphens as "[-]" so they match literally inside a regex alternation,
# then join everything into one big "a|b|c" pattern string.
_escaped_fragments = [fragment.strip().replace("-", "[-]") for fragment in pattern]
whole_pattern = "|".join(_escaped_fragments)
#print(whole_pattern)
#parse_csv_5()
def parse_csv_6():
f_dump = open("d:\\enwiktionary_en_20200106_without_form.csv", 'w', -1, encoding="utf_8_sig")
f_input = open("d:\\enwiktionary_en_20200106.csv", 'r', -1, encoding="utf_8_sig")
for line in f_input:
line = line.strip()
# 空行
if len(line)<1: continue
try:
m= re.search("^(?P<word>.+)[|]{3}(?P<lang>.+)[|]{3}(?P<pos>.+)[|]{3}(?P<definition>.*)$", line)
# 分析词的变形
word = | |
<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# # 2019 MolSSI Summer School QM project: semiempirical model of Argon
#
# ## 1. Introduction
#
# In this project, we will simulate a cluster of Argon atoms using quantum mechanics (QM). First-principles (a.k.a. ab initio) QM simulations are complicated and expensive, and a quick implementation would rely on a substantial amount of pre-existing software infrastructure (e.g. PySCF or Psi4). Instead, we will implement a much simpler semiempirical QM simulation that has been designed and parameterized to reproduce first-principles QM data using a minimal model. We can then limit our external dependencies to the standard numerical functionality of Python:
# In[1]:
import numpy as np
# As is typically the case in quantum chemistry, we will input a set of atomic coordinates $\vec{r}_i$ and compute the ground state energy of Argon atoms with those coordinates. All physical quantities in this project will be in Hartree atomic units, where the bohr is the unit of length and the hartree is the unit of energy.
# In[2]:
# Input geometry: one row of (x, y, z) in bohr per Argon atom.
atomic_coordinates = np.array([
    [0.0, 0.0, 0.0],
    [3.0, 4.0, 5.0],
])
number_of_atoms = atomic_coordinates.shape[0]
print('coordinates =\n', atomic_coordinates)
print('# of atoms =', number_of_atoms)
# More complicated and featureful software would be able to compute other properties besides the ground state energy like atomic forces. It would also have more convenient options for specifying inputs and returning outputs.
#
# Note that this QM project is primarily meant as a programming exercise. It contains a theoretical specification of a model, and we will implement the components of the model in software as they are specified. If you have a strong background in quantum chemistry, then you should find the theory reasonably familiar. However, it is only really necessary for you to understand each mathematical expression one-at-a-time to effectively implement this software.
#
# ## 2. Model Hamiltonian
#
# As is standard in quantum chemistry, we will assume that the total energy of our system is defined to be the ground state energy of a quantum many-body Hamiltonian $\hat{H}$. In second quantization notation, we can write it as
#
# $$ \hat{H} = E_{\mathrm{ion}} + \sum_{p,q} \sum_{\sigma \in \{ \uparrow , \downarrow \} } h_{p,q} \hat{a}_{p,\sigma}^{\dagger} \hat{a}_{q,\sigma} + \tfrac{1}{2}\sum_{p,q,r,s} \sum_{\sigma,\sigma' \in \{ \uparrow , \downarrow \} } V_{p,q,r,s} \hat{a}_{p,\sigma}^{\dagger} \hat{a}_{r,\sigma'}^{\dagger} \hat{a}_{s,\sigma'} \hat{a}_{q,\sigma} , $$
#
# where $\hat{a}_{p,\sigma}^{\dagger}$ and $\hat{a}_{p,\sigma}$ are the electron raising and lowering operators for an atomic orbital index $p$ and spin $\sigma$. We will not be using $\hat{H}$ itself in our calculations, but we will make use of the coefficient tensors $h_{p,q}$ and $V_{p,q,r,s}$. In first-principles calculations, each element of $h_{p,q}$ and $V_{p,q,r,s}$ would require the evaluation of a complicated integral. In our semiempirical model, we will set most of them to zero and assign a simple analytical form to the rest of them. The notation being used here is mostly consistent with modern quantum chemistry notation, but some objects, particularly $V_{p,q,r,s}$, have multiple conventions in practice.
#
# ### A. Model design & parameters
#
# This semiempirical model combines some standard concepts and methods used in physics and chemistry. First, it will use a minimal number of electronic degrees of freedom. Because Argon is a noble gas, it interacts primarily through London dispersion forces that are mediated by quantum dipole fluctuations. The lowest energy dipole transition is from the occupied $3p$ states to the unoccupied $4s$ state, and we will include these 4 atomic orbitals per atom. Similarly, we will use a multipole expansion to simplify electronic excitations and retain only the monopole and dipole terms, which also restricts electronic polarization to 4 degrees of freedom per atom. We will use $\{s, p_x, p_y, p_z\}$ to label both atomic orbitals and multipole moments on each atom. The nuclear charge of Argon is 18, but our model combines the nucleus and the 12 neglected electrons ($1s^2$, $2s^2$, $2p^6$, and $3s^2$) into an ionic point charge with $Z = 6$.
# In[3]:
ionic_charge = 6
orbital_types = ['s', 'px' ,'py', 'pz']
orbitals_per_atom = len(orbital_types)
p_orbitals = orbital_types[1:]
print('all orbitals =', orbital_types)
print('p orbitals =', p_orbitals)
# The index of an atomic orbital specifies which atom it is located on and what type it is. We will often extract these individual pieces of information using $\mathrm{atom}(p)$ to denote the atom's index and $\mathrm{orb}(p)$ to denote the orbital type. This is the first of many instances in this project where we could either represent something as a pre-tabulate list or a function. We will always make the simpler choice, in this case functions:
# In[52]:
def atom(ao_index):
'''Returns the atom index part of an atomic orbital index.'''
return ao_index // orbitals_per_atom
def orb(ao_index):
'''Returns the orbital type of an atomic orbital index.'''
orb_index = ao_index % orbitals_per_atom
return orbital_types[orb_index]
def ao_index(atom_p, orb_p):
    """Inverse of atom()/orb(): rebuild the flat atomic-orbital index
    from an atom index and an orbital-type label."""
    return atom_p * orbitals_per_atom + orbital_types.index(orb_p)
# Round-trip sanity check: decompose every flat index with atom()/orb()
# and rebuild it with ao_index(); the printed pairs should match.
total_orbitals = number_of_atoms * orbitals_per_atom
for flat in range(total_orbitals):
    print('index', flat, 'atom', atom(flat), 'orbital', orb(flat))
print('index test:')
for flat in range(total_orbitals):
    print(flat, ao_index(atom(flat), orb(flat)))
# We will discuss the model parameters in more detail as they are used, but it is a good idea to first collect them all in a common data structure, a Python dictionary, for convenient access throughout the notebook:
# In[5]:
# REMINDER: atomic units w/ energies in hartree, distances in bohr
# Pre-optimized semiempirical parameters (fitting procedure and reference
# data are described at the end of the project).
model_parameters = {
    'r_hop': 3.1810226927827516,        # hopping length scale
    't_ss': 0.03365982238611262,        # s-s hopping energy scale
    't_sp': -0.029154833035109226,      # s-p hopping energy scale
    't_pp1': -0.0804163845390335,       # 1st p-p hopping energy scale
    't_pp2': -0.01393611496959445,      # 2nd p-p hopping energy scale
    'r_pseudo': 2.60342991362958,       # pseudopotential length scale
    'v_pseudo': 0.022972992186364977,   # pseudopotential energy scale
    'dipole': 2.781629275106456,        # dipole strength of s-p transition
    'energy_s': 3.1659446174413004,     # onsite energy of s orbital
    'energy_p': -2.3926873325346554,    # onsite energy of p orbital
    'coulomb_s': 0.3603533286088998,    # Coulomb self-energy of monopole
    'coulomb_p': -0.003267991835806299, # Coulomb self-energy of dipole
}
# There are no parameters related to orbital overlap because all atomic orbitals are assumed to be orthogonal. The parameter values have been pre-optimized for this project, but the fitting process and reference data are both listed at the end of the project if you'd like to learn more about them.
#
# ### B. Slater-Koster tight-binding model
#
# We will describe the kinetic energy of electrons using a simplified [Slater-Koster tight-binding method](https://en.wikipedia.org/wiki/Tight_binding). Because of the symmetry of atomic orbitals and the translational invariance of the kinetic energy operator, there are 4 distinct, distance-dependent "hopping" energies that characterize the interatomic kinetic energy between s and p orbitals:
#
# 
#
# All other atomic orientations can be related to these cases by a change of coordinates. While it is compatible with very general functional forms, we will use a Gaussian form to simplify the model and its implementation. The distance-dependence of this simple version is controlled by a single hopping length scale $r_{\mathrm{hop}}$ and the strength of each type of hopping energy,
#
# $$ t_{o,o'}(\vec{r}) = \exp(1-r^2/r_{\mathrm{hop}}^2) \times \begin{cases}
# t_{ss} , & o = o' = s \\
# [\vec{o}' \cdot (\vec{r}/r_{\mathrm{hop}})] t_{sp}, & o = s \ \& \ o' \in \{p_x, p_y, p_z\} \\
# -[\vec{o} \cdot (\vec{r}/r_{\mathrm{hop}})] t_{sp} , & o' = s \ \& \ o \in \{p_x, p_y, p_z\} \\
# (r^2/r_{\mathrm{SK}}^2)\,(\vec{o} \cdot \vec{o}') t_{pp2} - [\vec{o} \cdot (\vec{r}/r_{\mathrm{SK}})] [\vec{o}' \cdot (\vec{r}/r_{\mathrm{SK}})] (t_{pp1} + t_{pp2}), & o,o' \in \{p_x, p_y, p_z\}
# \end{cases} $$
#
# where $o$ and $o'$ are the orbital types of the 1st and 2nd atoms and $\vec{r}$ is a vector pointing from the 2nd atom to the 1st atom. We are assigning direction vectors to the p orbitals, $\vec{p}_x \equiv (1,0,0)$, $\vec{p}_y \equiv (0,1,0)$, and $\vec{p}_z \equiv (0,0,1)$, to simplify the notation. This project has multiple case-based formulas, and we will implement them using a code structure similar to each formula:
# In[33]:
# Unit direction vectors assigned to the p orbitals for the Slater-Koster
# hopping formulas: p_x=(1,0,0), p_y=(0,1,0), p_z=(0,0,1).
vec = {'px': [1, 0, 0], 'py': [0, 1, 0], 'pz': [0, 0, 1]}
def hopping_energy(o1, o2, r12, model_parameters):
'''Returns the hopping matrix element for a pair of orbitals of type o1 & o2 separated by a vector | |
    def getFactorizedArea(self, factorize, invertband = True):
        """Convert contour areas into the sound-area table.

        Args:
            factorize: when True, scale each area by REF307200 divided by
                the image pixel count, normalizing areas to a reference
                resolution.
            invertband: when True, reflect each area inside the
                [MIN103, MAX88116] band so large shapes map to small values.

        Returns:
            list of int areas; also stored in ``self.soundArea``.

        NOTE(review): REF307200, MAX88116 and MIN103 are module-level
        constants defined elsewhere in the file — presumably 640*480, a
        max area and a min area; confirm against their definitions.
        """
        factorizedArea = []
        nonfactorizedArea = []
        self.soundArea = []
        if(factorize == True):
            # Normalize to the reference pixel count of the current image.
            factor = (REF307200/(self.resolution[0]*self.resolution[1]))
        else:
            factor = 1
        for contour in self.contours:
            area = cv2.contourArea(contour)
            if 'defDebug' in globals():
                # Keep the raw areas only when debugging is enabled.
                nonfactorizedArea.append(int(area))
            if (invertband):
                # Reflect the area within the configured band (absolute
                # value guards against negative intermediate results).
                area = math.fabs(area-(MAX88116/factor)-(MIN103/factor))
            factorizedArea.append(int(area * factor))
        printDebug (("Factor by", factor))
        printDebug (("AREA before factor: ",nonfactorizedArea))
        printDebug (("AREA after factor: ",factorizedArea))
        self.soundArea = factorizedArea
        return factorizedArea
## return a log scaling area tab, default log basis is 10 (for use with -b)
def getLogArea(self, scaletolog = True, logbasis = 10):
# ~ TODO:should be autoselected with logscale and -b option
logArea = []
nonlogArea = []
logMax = log(MAX88116/MIN103, logbasis)
if(scaletolog == True):
for area in self.soundArea:
if 'defDebug' in globals():
nonlogArea.append(area)
newarea = (MAX88116 * log(area / MIN103, logbasis)) / logMax
logArea.append(int(newarea))
printDebug (("AREA before log: ",nonlogArea))
printDebug (("AREA after log: ",logArea))
self.soundArea = logArea
return logArea
# ~ Hi, I have a program which reads a file containing integers in [0,10].
# ~ The program reads the value of a variable every 2 seconds, then maps it
# ~ to another interval, say [20,22000],
# ---------------------------
# ~ You are looking for a function that maps a linear variable x onto an
# ~ exponential variable y.
# ~ y = A*exp(bx)
# ~ using a for log(A), this is the same as
# ~ log(y) = a + bx
# ~ We have a form with two unknowns and we have two data points:
# ~ log(20) = a + 0*b
# ~ log(22000) = a + 10b
# ~ so we know immediately that
# ~ a = log(20)
# ~ or A = 20
# ~ and that
# ~ b = (log(22000) - log(20)) / 10
# ~ or
# ~ b = log(1100) / 10
# ~ so the mapping is
# ~ log(y) = log(20) + x * log(1100)/10
# ~ y = 20 * pow(1100, x/10);
# maps a linear variable x onto an exponential variable y.
def getLogAreaMA(self, scaletolog = True, logbasis = 10):
logArea = []
nonlogArea = []
# ~ logMax = (log(MAX88116, logbasis) - log(MIN103, logbasis)) / MAX88116
if(scaletolog == True):
for area in self.soundArea:
if 'defDebug' in globals():
nonlogArea.append(area)
newarea = MIN103 * pow(MAX88116/MIN103, area/MAX88116)
logArea.append(int(newarea))
printDebug (("AREA before log: ",nonlogArea))
printDebug (("AREA after log: ",logArea))
self.soundArea = logArea
return logArea
# ~ Instead of the function log(x), rather you have
# ~ to use the following one: log(x - 1), for x >= 0.
# ~ Then the interpolation formula for x in [x_1,x_2]
# ~ with ratio f = a/(a+b), looks as follows:
# ~ (log(x_2 -1) - log(x - 1)) / (log(x - 1) - log(x_1 - 1)) = (1/f) - 1
# ~ After a simple calculation one can get
# ~ x = (x_1 - 1)^{f - 1} * (x_2 - 1)^{f} + 1 ,
    ## TESTING: should return an inverse(?) of the log-scaled area tab, default log basis is 10
    def getLogAreaN(self, scaletolog = True, logbasis = 10):
        """Experimental log rescaling of self.soundArea.

        Maps each area to MIN103 + (MAX88116-MIN103)*ln(area)/sqrt(MAX88116).
        NOTE(review): ``logbasis`` is ignored — ``log(area)`` uses the
        natural logarithm; confirm whether that is intended.
        """
        logArea = []
        nonlogArea = []
        # ~ logMax = log(MAX88116/MIN103, logbasis)
        if(scaletolog == True):
            for area in self.soundArea:
                if 'defDebug' in globals():
                    nonlogArea.append(area)
                # Natural-log growth, offset by the band minimum.
                newarea = MIN103 + (((MAX88116-MIN103) * log(area)) / sqrt(MAX88116))
                logArea.append(int(newarea))
        printDebug (("AREA before log: ",nonlogArea))
        printDebug (("AREA after log: ",logArea))
        self.soundArea = logArea
        return logArea
    ## TESTING: should return an inverse(?) of the log-scaled area tab, default log basis is 10
    def getLogAreaY(self, scaletolog = True, logbasis = 10):
        """Experimental inverse-log (exponential) rescaling of self.soundArea.

        Maps each area to MIN103 * logbasis ** (logMax * area / MAX88116)
        where logMax = log(MAX88116/MIN103, logbasis).
        """
        logArea = []
        nonlogArea = []
        logMax = log(MAX88116/MIN103, logbasis)
        if(scaletolog == True):
            for area in self.soundArea:
                if 'defDebug' in globals():
                    nonlogArea.append(area)
                # Exponential growth from MIN103 up to MAX88116 band.
                newarea = (MIN103 * logbasis ** ((logMax * area) / MAX88116))
                logArea.append(int(newarea))
        printDebug (("AREA before log: ",nonlogArea))
        printDebug (("AREA after log: ",logArea))
        self.soundArea = logArea
        return logArea
def getSimpleBound(self, cnt):
top = tuple(cnt[cnt[:,:,1].argmin()][0])
bottom = tuple(cnt[cnt[:,:,1].argmax()][0])
right = tuple(cnt[cnt[:,:,0].argmax()][0])
left = tuple(cnt[cnt[:,:,0].argmin()][0])
return (top, bottom, right, left)
def getSimpleBounds(self):
simpleBounds = []
for cnt in self.contours:
simpleBounds.append(self.getSimpleBound(cnt))
return simpleBounds
class ArchiMusik():
    """Turn an image of a building facade into sound.

    Detects contours in a thresholded image and plays them either as a
    moving read-head sweep (MODE_HEAD) or one after another
    (MODE_SEQUENCE), via the configured audio/MIDI sound server.
    """
    def __init__(self, mode, direction, matchshape, thershold, normalize, factorize, invertband, scaletolog):
        # Playback mode (MODE_HEAD or MODE_SEQUENCE) and sweep direction.
        self.mode = mode
        self.direction = direction
        self.matchshape = matchshape
        # NOTE(review): "thershold" (sic) is the binary threshold value;
        # the misspelling is part of the public interface.
        self.thershold = thershold
        self.normalize = normalize
        self.factorize = factorize
        self.invertband = invertband
        self.scaletolog = scaletolog
        self.logbasis = 10 #FIXME has arg
    def play(self, typeaudio):
        """Run the playback loop matching self.mode; typeaudio selects
        audio vs MIDI output."""
        self.output = typeaudio
        if self.mode == MODE_HEAD:
            self.LoopReadHead()
        elif self.mode == MODE_SEQUENCE:
            self.LoopSequence()
    def loadImage(self, img):
        # load the image, convert it to grayscale, blur it slightly,
        # and threshold it by given value
        self.image = cv2.imread(img)
        if (self.image is None):
            raise ValueError('A very bad thing happened : can\'t load file : ' + img)
        else :
            self.gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
            self.blurred = cv2.GaussianBlur(self.gray, (5, 5), 0)
            self.thresh = cv2.threshold(self.blurred, self.thershold, 255, cv2.THRESH_BINARY)[1]
            # (width, height) of the loaded image in pixels.
            self.resolution = (int(self.image.shape[1]), int(self.image.shape[0]))
    def showImage(self, img, delay=0):
        """Display img in the ARchiMusik window; delay=0 blocks for a key."""
        cv2.imshow("ARchiMusik", img)
        cv2.waitKey(delay) # Refresh the opencv window, needed from 3.4
    def setSoundServer(self, sndSrv):
        """Attach the (pyo) sound server used for playback; raises on None."""
        if (sndSrv is None):
            raise ValueError('A very bad thing happened : sound server is not valid ')
        self.soundServer = sndSrv
    def prepareGUI(self):
        """Create the fullscreen OpenCV display window."""
        # ~ cv2.startWindowThread()
        cv2.namedWindow("ARchiMusik", cv2.WND_PROP_FULLSCREEN | cv2.WINDOW_GUI_NORMAL)
        cv2.setWindowTitle("ARchiMusik", "Listen to the facade")
        cv2.setWindowProperty("ARchiMusik",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
    def findMonitorRes(self):
        """Store the monitor width/height in self.monitorX/self.monitorY,
        derived from the window image rect (origin offset counted twice)."""
        imgRect = cv2.getWindowImageRect("ARchiMusik")
        self.monitorX = (imgRect[0]*2+imgRect[2])
        self.monitorY = (imgRect[1]*2+imgRect[3])
        printDebug(("monitor image rect",imgRect, "X:Y",self.monitorX,self.monitorY))
    def findContours(self, normalize=None):
        """Detect contours and precompute the drawing and sound tables.

        normalize: optional override of the instance's normalize flag.
        """
        self.contoursHelper = ContoursHelper(self.thresh, self.resolution)
        if (normalize != None):
            self.normalize = normalize
        if (self.normalize):
            self.contoursHelper.normalizedContours() #FIXME less contours loop
        #drawing data
        self.simpleBounds = self.contoursHelper.getSimpleBounds()
        self.approxContours = self.contoursHelper.approxContours()
        #sound data
        # Second call rescales the factorized areas; it overwrites the first.
        self.soundArea = self.contoursHelper.getFactorizedArea (self.factorize, self.invertband)
        self.soundArea = self.contoursHelper.getLogAreaMA(self.scaletolog, self.logbasis)
    def LoopReadHead (self):
        """Sweep a read-head line across the image; every shape whose
        leading edge the line reaches is highlighted and triggers a note
        whose pitch comes from self.soundArea and whose duration is
        proportional to the shape's extent along the sweep axis."""
        rows,cols = self.thresh.shape[:2]
        readSpeed = .05 #FIXME hardcoded
        if (False): #TEST single point TEST
            cnt = self.contours[1]
            topmost = tuple(cnt[cnt[:,:,1].argmin()][0])
            # ~ cv2.circle(thresh, topmost, 5, (255,255,0))
            printDebug ("mon topmost",topmost)
            # ~ simpleBounds = getSimpleBounds(cnts)
            # ~ printDebug (len(simpleBounds))
        self.soundServer.start()
        tpFactory = ThreadPlayFactory()
        tpType = ''
        if (self.output == AUDIOCONFIG):
            tpType = 'ThreadPlaySineLoop'
        elif (self.output == MIDICONFIG):
            tpType = 'ThreadPlayMidiNote'
        dh = DirectionHelper(self.direction, rows, cols)
        midiChannel = 0
        for readhead_position in range(dh.index):
            readheadImg = readHeadDraw((dh.x0,dh.y0), (dh.x1,dh.y1), self)
            i = 0
            # ~ printDebug (("x0y0 x1y1", (dh.x0,dh.y0), (dh.x1,dh.y1)))
            for sb in self.simpleBounds:
                # ~ isCollision((x0, y0, x1-x0,1),())
                if (sb[dh.shapeENTRY][dh.Axe] == dh.readHead):
                    # ~ Yes!!!! Let's do something with that now!
                    #self.drawsomething(i)
                    cv2.drawContours(self.thresh, [self.approxContours[i]], 0, (80,80,80), 5)
                    dh.getTextCoord(sb)
                    self.contoursHelper.drawName(self.approxContours[i], self.thresh, dh.textX, dh.textY)
                    self.contoursHelper.drawFourPoints(sb, self.thresh)
                    # ~ Test code for MIDI channels / WTF Pyo? channel is zero based, check TPMidiNote channel+=1
                    # ~ print (midiChannel)
                    # Alternate between two MIDI channels on each trigger.
                    midiChannel = not midiChannel
                    length = sb[dh.shapeMAX][dh.Axe] - sb[dh.shapeMIN][dh.Axe]
                    tpFactory.create_tp(tpType, self.soundArea[i], length*readSpeed, self.soundServer, midiChannel).start()
                i+=1
            self.showImage(cv2.add (readheadImg, self.thresh),1)
            dh.next(readhead_position)
            time.sleep(readSpeed)
        self.soundServer.stop()
    def LoopSequence(self):
        """Play the detected shapes one after another: each contour is
        drawn, annotated and sounded with a blocking playSine() call."""
        self.showImage(self.thresh)
        # ~ imgRect = cv2.getWindowImageRect("ARchiMusik")
        # ~ print(imgRect)
        # FIXME
        # ~ tpFactory = ThreadPlayFactory()
        # ~ tpType = ''
        # ~ if (self.output == AUDIOCONFIG):
            # ~ tpType = 'ThreadPlaySine'
        # ~ elif (self.output == MIDICONFIG):
            # ~ tpType = 'ThreadPlayMidiNote'
        i = 0
        for approx in self.approxContours:
            # ~ approx = approxContour(cnt)
            cv2.drawContours(self.thresh, [approx], 0, (0), 5)
            self.contoursHelper.drawName(approx, self.thresh, approx.ravel()[0], approx.ravel()[1])
            simpleBound = self.simpleBounds[i]
            self.contoursHelper.drawFourPoints(self.simpleBounds[i], self.thresh)
            self.showImage(self.thresh)
            # FIXME !
            # ~ tpFactory.create_tp(tpType, self.soundArea[i], length*readSpeed, self.soundServer).start()
            playSine(approx)
            i = i + 1
        cv2.destroyAllWindows()
def exitme(code=0):
    """Terminate the program with the given exit status."""
    # sys.exit() simply raises SystemExit; raise it directly.
    raise SystemExit(code)
def printDebug (data): #FIXME defDebug = True (comment for no debug)
    """Print *data* only when the module-level debug flag `defDebug` exists.

    Debugging is toggled by defining or commenting out the global name
    `defDebug`; there is no runtime switch.
    """
    if 'defDebug' in globals():
        print (data)
def printError (data):
    """Print *data* framed by fatal-error banners (does not exit)."""
    header = "***Fatal ERROR detected***\n------------------------------"
    footer = "----------------------------------------\nProgram stop now"
    for chunk in (header, data, footer):
        print (chunk)
def playSine (contour):
    """Play a short sine dyad whose base frequency is the contour's area.

    Blocking: starts the module-level sound server, holds the note for
    0.3 s, stops the server and rests 0.1 s.
    NOTE(review): `Sine` (pyo) and `soundServer` are module-level names
    defined elsewhere in the file.
    """
    freq = cv2.contourArea(contour)
    # ~ a = Sine(mul=0.01).out()
    # add a bit of dissonance to left channel TODO rnd +/- ?
    bit_of_disso = 100
    a = Sine(freq=[freq, freq+bit_of_disso], mul=0.3).out()#FIXME area aka freq
    soundServer.start()
    time.sleep(.3)#FIXME smarter sleep
    soundServer.stop()
    time.sleep(.1)
def readHeadDraw (startPos, endPos, archiMusik):
    """Render the read-head line on a fresh single-channel image.

    Returns a uint8 image the size of archiMusik.image containing only the
    white read-head line from startPos to endPos, ready to be blended onto
    the thresholded display with cv2.add().
    """
    if (False):#TODO full img white and ghost readHead
        readhead = np.full((archiMusik.image.shape[0],archiMusik.image.shape[1]), 255, np.uint8) #FIXME clean the image in place of create a new one
        cv2.line(readhead, startPos, endPos, (0,255,255), 2)
    if (True): # img thre + readhead white
        readhead = np.full((archiMusik.image.shape[0],archiMusik.image.shape[1]), 0, np.uint8) #FIXME clean the image in place of create a new one
        cv2.line(readhead, startPos, endPos, (255,255,255), 2)
    return readhead
# ~ def isCollision (a, b): # (x,y,width,height)
# ~ return ((abs(a[0] - b[0]) * 2 < (a[2] + b[2])) & (abs(a[1] - b[1]) | |
# -*- coding: utf8 -*-
import pytest
from unittest.mock import Mock
from pandas import DataFrame
import pandas as pd
from scipy import sparse
from sklearn.datasets import load_iris
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import (
StandardScaler, OneHotEncoder, LabelBinarizer)
from sklearn.impute import SimpleImputer as Imputer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.base import BaseEstimator, TransformerMixin
import sklearn.decomposition
import numpy as np
from numpy.testing import assert_array_equal
import pickle
from sklearn.compose import make_column_selector
from sklearn_pandas import DataFrameMapper
from sklearn_pandas.dataframe_mapper import _handle_feature, _build_transformer
from sklearn_pandas.pipeline import TransformerPipeline
class MockXTransformer(object):
    """Minimal transformer stub whose fit() takes no ``y`` argument."""

    def fit(self, X):
        # Fitting is a no-op; return self to allow chaining.
        return self

    def transform(self, X):
        # Identity transform: hand back the input untouched.
        return X
class MockTClassifier(object):
    """Stub implementing transformer and classifier interfaces at once."""

    def fit(self, X, y=None):
        # No state is learned; return self for chaining.
        return self

    def transform(self, X):
        # Identity transform.
        return X

    def predict(self, X):
        # Constant prediction, sufficient for pipeline wiring tests.
        return True
class DateEncoder():
    """Expand a datetime Series into (year, month, day) columns."""

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        # Use the .dt accessor to pull the calendar components apart.
        parts = X.dt
        return pd.concat([parts.year, parts.month, parts.day], axis=1)
class ToSparseTransformer(BaseEstimator, TransformerMixin):
    """Convert a dense numpy matrix into scipy CSR sparse format."""

    def fit(self, X):
        # Stateless: nothing to learn.
        return self

    def transform(self, X):
        return sparse.csr_matrix(X)
class CustomTransformer(BaseEstimator, TransformerMixin):
    """
    Example of transformer in which the number of classes
    is not equal to the number of output columns.
    """

    def fit(self, X, y=None):
        # Remember the minimum and the set of classes seen during fit.
        self.min = X.min()
        self.classes_ = np.unique(X)
        return self

    def transform(self, X):
        # Reject any value that was never seen during fit.
        unseen = np.setdiff1d(np.unique(X), self.classes_)
        if len(unseen) > 0:
            raise ValueError('Unknown values found.')
        return X - self.min
@pytest.fixture
def simple_dataframe():
    """Single-column frame with three integer rows."""
    return pd.DataFrame({'a': [1, 2, 3]})


@pytest.fixture
def complex_dataframe():
    """Three-column frame mixing a string target with numeric features."""
    data = {'target': ['a', 'a', 'b', 'b', 'c', 'c'],
            'feat1': [1, 2, 3, 4, 5, 6],
            'feat2': [1, 2, 3, 2, 3, 4]}
    return pd.DataFrame(data)


@pytest.fixture
def multiindex_dataframe():
    """Example MultiIndex DataFrame, taken from pandas documentation"""
    iterables = [['bar', 'baz', 'foo', 'qux'], ['one', 'two']]
    columns = pd.MultiIndex.from_product(iterables, names=['first', 'second'])
    return pd.DataFrame(np.random.randn(10, 8), columns=columns)


@pytest.fixture
def multiindex_dataframe_incomplete(multiindex_dataframe):
    """Example MultiIndex DataFrame with missing entries"""
    frame = multiindex_dataframe
    # Mask 20 randomly-placed cells as NaN.
    flags = np.zeros(frame.size)
    flags[:20] = 1
    np.random.shuffle(flags)
    frame.mask(flags.reshape(frame.shape).astype(bool), inplace=True)
    return frame
def test_transformed_names_simple(simple_dataframe):
    """
    A plain passthrough column appears unchanged in `transformed_names_`.
    """
    mapper = DataFrameMapper([('a', None)])
    mapper.fit_transform(simple_dataframe)
    assert mapper.transformed_names_ == ['a']


def test_transformed_names_binarizer(complex_dataframe):
    """
    A column-multiplying transformer expands `transformed_names_`
    with one suffixed name per output column.
    """
    mapper = DataFrameMapper([('target', LabelBinarizer())])
    mapper.fit_transform(complex_dataframe)
    assert mapper.transformed_names_ == ['target_a', 'target_b', 'target_c']


def test_logging(caplog, complex_dataframe):
    """
    fit_transform emits an INFO log record mentioning the mapped column.
    """
    import logging
    logging.getLogger('sklearn_pandas').setLevel(logging.INFO)
    mapper = DataFrameMapper([('target', LabelBinarizer())])
    mapper.fit_transform(complex_dataframe)
    assert '[FIT_TRANSFORM] target:' in caplog.text


def test_transformed_names_binarizer_unicode():
    """Non-ASCII class labels survive into the transformed names."""
    frame = pd.DataFrame({'target': [u'ñ', u'á', u'é']})
    mapper = DataFrameMapper([('target', LabelBinarizer())])
    mapper.fit_transform(frame)
    assert set(mapper.transformed_names_) == {u'target_ñ', u'target_á', u'target_é'}


def test_transformed_names_transformers_list(complex_dataframe):
    """
    When using a list of transformers, they are applied in inverse order
    to derive the transformed names.
    """
    mapper = DataFrameMapper([
        ('target', [LabelBinarizer(), MockXTransformer()])
    ])
    mapper.fit_transform(complex_dataframe)
    assert mapper.transformed_names_ == ['target_a', 'target_b', 'target_c']


def test_transformed_names_simple_alias(simple_dataframe):
    """
    An alias on a single-output column names the output directly.
    """
    mapper = DataFrameMapper([('a', None, {'alias': 'new_name'})])
    mapper.fit_transform(simple_dataframe)
    assert mapper.transformed_names_ == ['new_name']


def test_transformed_names_complex_alias(complex_dataframe):
    """
    An alias on a multi-output column prefixes every output name.
    """
    mapper = DataFrameMapper([('target', LabelBinarizer(), {'alias': 'new'})])
    mapper.fit_transform(complex_dataframe)
    assert mapper.transformed_names_ == ['new_a', 'new_b', 'new_c']
def test_exception_column_context_transform(simple_dataframe):
    """
    Transform-time exceptions are annotated with the failing column name.
    """
    class FailingTransformer(object):
        def fit(self, X):
            pass

        def transform(self, X):
            raise Exception('Some exception')

    mapper = DataFrameMapper([('a', FailingTransformer())])
    mapper.fit(simple_dataframe)
    with pytest.raises(Exception, match='a: Some exception'):
        mapper.transform(simple_dataframe)


def test_exception_column_context_fit(simple_dataframe):
    """
    Fit-time exceptions are annotated with the failing column name.
    """
    class FailingFitter(object):
        def fit(self, X):
            raise Exception('Some exception')

    mapper = DataFrameMapper([('a', FailingFitter())])
    with pytest.raises(Exception, match='a: Some exception'):
        mapper.fit(simple_dataframe)
def test_simple_df(simple_dataframe):
    """
    df_out=True yields a pandas DataFrame for a simple mapping.
    """
    mapper = DataFrameMapper([('a', None)], df_out=True)
    result = mapper.fit_transform(simple_dataframe)
    assert type(result) == pd.DataFrame
    assert len(result["a"]) == len(simple_dataframe["a"])


def test_complex_df(complex_dataframe):
    """
    df_out=True preserves every column of a multi-column mapping.
    """
    mapper = DataFrameMapper(
        [('target', None), ('feat1', None), ('feat2', None)],
        df_out=True)
    result = mapper.fit_transform(complex_dataframe)
    assert len(result) == len(complex_dataframe)
    for col in complex_dataframe.columns:
        assert len(result[col]) == len(complex_dataframe[col])


def test_numeric_column_names(complex_dataframe):
    """
    df_out=True also works when column names are integers.
    """
    complex_dataframe.columns = [0, 1, 2]
    mapper = DataFrameMapper(
        [(0, None), (1, None), (2, None)], df_out=True)
    result = mapper.fit_transform(complex_dataframe)
    assert len(result) == len(complex_dataframe)
    for col in complex_dataframe.columns:
        assert len(result[col]) == len(complex_dataframe[col])


def test_multiindex_df(multiindex_dataframe_incomplete):
    """
    df_out=True handles MultiIndex input with missing values; output
    columns are keyed by the stringified tuple names.
    """
    frame = multiindex_dataframe_incomplete
    mapper = DataFrameMapper([([c], Imputer()) for c in frame.columns],
                             df_out=True)
    result = mapper.fit_transform(frame)
    assert len(result) == len(frame)
    for col in frame.columns:
        assert len(result[str(col)]) == len(frame[col])
def test_binarizer_df():
    """
    LabelBinarizer output columns carry per-class suffixes.
    """
    frame = pd.DataFrame({'target': ['a', 'a', 'b', 'b', 'c', 'a']})
    mapper = DataFrameMapper([('target', LabelBinarizer())], df_out=True)
    columns = mapper.fit_transform(frame).columns
    assert len(columns) == 3
    assert columns[0] == 'target_a'
    assert columns[1] == 'target_b'
    assert columns[2] == 'target_c'


def test_binarizer_int_df():
    """
    Numeric class labels also become column-name suffixes.
    """
    frame = pd.DataFrame({'target': [5, 5, 6, 6, 7, 5]})
    mapper = DataFrameMapper([('target', LabelBinarizer())], df_out=True)
    columns = mapper.fit_transform(frame).columns
    assert len(columns) == 3
    assert columns[0] == 'target_5'
    assert columns[1] == 'target_6'
    assert columns[2] == 'target_7'


def test_binarizer2_df():
    """
    A two-class binarizer yields a single, unsuffixed output column.
    """
    frame = pd.DataFrame({'target': ['a', 'a', 'b', 'b', 'a']})
    mapper = DataFrameMapper([('target', LabelBinarizer())], df_out=True)
    columns = mapper.fit_transform(frame).columns
    assert len(columns) == 1
    assert columns[0] == 'target'


def test_onehot_df():
    """
    OneHotEncoder output columns carry the encoded level ids.
    """
    frame = pd.DataFrame({'target': [0, 0, 1, 1, 2, 3, 0]})
    mapper = DataFrameMapper([(['target'], OneHotEncoder())], df_out=True)
    columns = mapper.fit_transform(frame).columns
    assert len(columns) == 4
    assert columns[0] == 'target_x0_0'
    assert columns[3] == 'target_x0_3'


def test_customtransform_df():
    """
    A transformer may expose more classes than output columns; the
    single output keeps the input column's name.
    """
    frame = pd.DataFrame({'target': [6, 5, 7, 5, 4, 8, 8]})
    mapper = DataFrameMapper([(['target'], CustomTransformer())], df_out=True)
    columns = mapper.fit_transform(frame).columns
    assert len(mapper.features[0][1].classes_) == 5
    assert len(columns) == 1
    assert columns[0] == 'target'
def test_preserve_df_index():
    """
    df_out=True keeps the original index.
    """
    frame = pd.DataFrame({'target': [1, 2, 3]},
                         index=['a', 'b', 'c'])
    mapper = DataFrameMapper([('target', None)],
                             df_out=True)
    result = mapper.fit_transform(frame)
    assert_array_equal(result.index, frame.index)


def test_preserve_df_index_rows_dropped():
    """
    When the transformed row count no longer matches the input index,
    a fresh numeric index is substituted.
    """
    class DropLastRowTransformer(object):
        def fit(self, X):
            return self

        def transform(self, X):
            return X[:-1]

    frame = pd.DataFrame({'target': [1, 2, 3]},
                         index=['a', 'b', 'c'])
    mapper = DataFrameMapper([('target', DropLastRowTransformer())],
                             df_out=True)
    result = mapper.fit_transform(frame)
    assert_array_equal(result.index, np.array([0, 1]))


def test_pca(complex_dataframe):
    """
    Multi-input/multi-output PCA columns get a joined-name prefix.
    """
    mapper = DataFrameMapper(
        [(['feat1', 'feat2'], sklearn.decomposition.PCA(2))],
        df_out=True)
    columns = mapper.fit_transform(complex_dataframe).columns
    assert len(columns) == 2
    assert columns[0] == 'feat1_feat2_0'
    assert columns[1] == 'feat1_feat2_1'
def test_fit_transform(simple_dataframe):
    """
    A transformer's own fit_transform method is preferred when present.
    """
    mock_transformer = Mock()
    # Return something of measurable length, but otherwise do nothing.
    mock_transformer.fit_transform.return_value = np.array([1, 2, 3])
    mapper = DataFrameMapper([("a", mock_transformer)])
    mapper.fit_transform(simple_dataframe)
    assert mock_transformer.fit_transform.called


def test_fit_transform_equiv_mock(simple_dataframe):
    """
    fit_transform equals fit().transform() for a transformer without a
    custom fit_transform implementation.
    """
    mapper = DataFrameMapper([('a', MockXTransformer())])
    combined = mapper.fit_transform(simple_dataframe)
    separate = mapper.fit(simple_dataframe).transform(simple_dataframe)
    assert np.all(combined == separate)


def test_fit_transform_equiv_pca(complex_dataframe):
    """
    fit_transform equals fit().transform() for PCA, which defines its own
    fit_transform. Only the mapper-level equivalence is asserted here; the
    transformer-level equivalence is covered by scikit-learn's own suite
    (scikit-learn/sklearn/decomposition/tests/test_pca.py).
    """
    mapper = DataFrameMapper(
        [(['feat1', 'feat2'], sklearn.decomposition.PCA(2))],
        df_out=True)
    combined = mapper.fit_transform(complex_dataframe)
    separate = mapper.fit(complex_dataframe).transform(complex_dataframe)
    assert np.allclose(combined, separate)
def test_input_df_true_first_transformer(simple_dataframe, monkeypatch):
"""
If input_df is True, the first transformer is passed
a pd.Series instead | |
= bottom + (top - bottom) / 2.0
verts = [
(left, top),
(right, top),
(right, bottom),
(left, bottom),
(right, middle),
(very_right, middle)
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.MOVETO,
Path.LINETO
]
path = Path(verts, codes)
patch = patches.PathPatch(path, lw=1, facecolor="none")
patch.set_clip_on(False)
self.fig.axes[0].add_patch(patch)
factor = self._normalization_factor
# Manually determine the number of digits after decimal
if factor >= 1000:
fmt_string = "%.0f %s"
elif factor >= 100:
fmt_string = "%.1f %s"
else:
fmt_string = "%.2f %s"
self.fig.axes[0].text(
very_right + 3, middle,
fmt_string % (self._normalization_factor, unit), ha="left",
va="center", fontsize="small")
    def __plotStraight(self, trace, ax, *args, **kwargs): # @UnusedVariable
        """
        Just plots the data samples in the self.stream. Useful for smaller
        datasets up to around 1000000 samples (depending on the machine on
        which it's being run).
        Slow and high memory consumption for large datasets.

        :param trace: list of traces to draw on one axes.
        :param ax: matplotlib axes object to plot into.
        """
        # trace argument seems to actually be a list of traces..
        st = Stream(traces=trace)
        self._draw_overlap_axvspans(st, ax)
        for trace in st:
            # Check if it is a preview file and adjust accordingly.
            # XXX: Will look weird if the preview file is too small.
            if trace.stats.get('preview'):
                # Mask the gaps.
                trace.data = np.ma.masked_array(trace.data)
                trace.data[trace.data == -1] = np.ma.masked
                # Recreate the min_max scene: each preview sample is expanded
                # into a symmetric +/- half-amplitude pair.
                dtype = trace.data.dtype
                old_time_range = trace.stats.endtime - trace.stats.starttime
                data = np.empty(2 * trace.stats.npts, dtype=dtype)
                data[0::2] = trace.data / 2.0
                data[1::2] = -trace.data / 2.0
                trace.data = data
                # The times are not supposed to change.
                trace.stats.delta = (
                    old_time_range / float(trace.stats.npts - 1))
            # Convert to float64 and apply the calibration factor.
            trace.data = np.require(trace.data, np.float64) * trace.stats.calib
            if self.type == 'relative':
                # use seconds of relative sample times and shift by trace's
                # start time, which was set relative to `reftime`.
                x_values = (
                    trace.times() + (trace.stats.starttime - self.reftime))
            else:
                # convert seconds of relative sample times to days and add
                # start time of trace.
                x_values = ((trace.times() / SECONDS_PER_DAY) +
                            date2num(trace.stats.starttime.datetime))
            ax.plot(x_values, trace.data, color=self.color,
                    linewidth=self.linewidth, linestyle=self.linestyle)
        # Write to self.ids (used later for per-axes title labels).
        trace = st[0]
        if trace.stats.get('preview'):
            tr_id = trace.id + ' [preview]'
        elif hasattr(trace, 'label'):
            tr_id = trace.label
        else:
            tr_id = trace.id
        self.ids.append(tr_id)
def __plotMinMax(self, trace, ax, *args, **kwargs): # @UnusedVariable
"""
Plots the data using a min/max approach that calculated the minimum and
maximum values of each "pixel" and then plots only these values. Works
much faster with large data sets.
"""
self._draw_overlap_axvspans(Stream(traces=trace), ax)
# Some variables to help calculate the values.
starttime = self._time_to_xvalue(self.starttime)
endtime = self._time_to_xvalue(self.endtime)
# The same trace will always have the same sampling_rate.
sampling_rate = trace[0].stats.sampling_rate
# width of x axis in seconds
x_width = endtime - starttime
# normal plots have x-axis in days, so convert x_width to seconds
if self.type != "relative":
x_width = x_width * SECONDS_PER_DAY
# number of samples that get represented by one min-max pair
pixel_length = int(
np.ceil((x_width * sampling_rate + 1) / self.width))
# Loop over all the traces. Do not merge them as there are many samples
# and therefore merging would be slow.
for _i, tr in enumerate(trace):
trace_length = len(tr.data)
pixel_count = int(trace_length // pixel_length)
remaining_samples = int(trace_length % pixel_length)
remaining_seconds = remaining_samples / sampling_rate
if self.type != "relative":
remaining_seconds /= SECONDS_PER_DAY
# Reference to new data array which does not copy data but can be
# reshaped.
if remaining_samples:
data = tr.data[:-remaining_samples]
else:
data = tr.data
data = data.reshape(pixel_count, pixel_length)
min_ = data.min(axis=1) * tr.stats.calib
max_ = data.max(axis=1) * tr.stats.calib
# Calculate extreme_values and put them into new array.
if remaining_samples:
extreme_values = np.empty((pixel_count + 1, 2), dtype=np.float)
extreme_values[:-1, 0] = min_
extreme_values[:-1, 1] = max_
extreme_values[-1, 0] = \
tr.data[-remaining_samples:].min() * tr.stats.calib
extreme_values[-1, 1] = \
tr.data[-remaining_samples:].max() * tr.stats.calib
else:
extreme_values = np.empty((pixel_count, 2), dtype=np.float)
extreme_values[:, 0] = min_
extreme_values[:, 1] = max_
# Finally plot the data.
start = self._time_to_xvalue(tr.stats.starttime)
end = self._time_to_xvalue(tr.stats.endtime)
if remaining_samples:
# the last minmax pair is inconsistent regarding x-spacing
x_values = np.linspace(start, end - remaining_seconds,
num=extreme_values.shape[0] - 1)
x_values = np.concatenate([x_values, [end]])
else:
x_values = np.linspace(start, end, num=extreme_values.shape[0])
x_values = np.repeat(x_values, 2)
y_values = extreme_values.flatten()
ax.plot(x_values, y_values, color=self.color)
# remember xlim state and add callback to warn when zooming in
self._initial_xrange = (self._time_to_xvalue(self.endtime) -
self._time_to_xvalue(self.starttime))
self._minmax_plot_xrange_dangerous = False
ax.callbacks.connect("xlim_changed", self._warn_on_xaxis_zoom)
# set label, write to self.ids
if hasattr(trace[0], 'label'):
tr_id = trace[0].label
else:
tr_id = trace[0].id
self.ids.append(tr_id)
def __plotSetXTicks(self, *args, **kwargs): # @UnusedVariable
"""
Goes through all axes in pyplot and sets time ticks on the x axis.
"""
self.fig.subplots_adjust(hspace=0)
# Loop over all but last axes.
for ax in self.axis[:-1]:
plt.setp(ax.get_xticklabels(), visible=False)
# set bottom most axes:
ax = self.axis[-1]
if self.type == "relative":
locator = MaxNLocator(5)
else:
ax.xaxis_date()
# if getMatplotlibVersion() < [1, 0, 0]:
locator = AutoDateLocator()
# else:
# locator = AutoDateLocator(minticks=3, maxticks=6)
# locator.intervald[MINUTELY] = [1, 2, 5, 10, 15, 30]
# locator.intervald[SECONDLY] = [1, 2, 5, 10, 15, 30]
ax.xaxis.set_major_formatter(ObsPyAutoDateFormatter(locator))
ax.xaxis.set_major_locator(locator)
plt.setp(ax.get_xticklabels(), fontsize='small',
rotation=self.tick_rotation)
def __plotSetYTicks(self, *args, **kwargs): # @UnusedVariable
"""
"""
if self.equal_scale:
ylims = np.vstack([ax.get_ylim() for ax in self.axis])
yranges = np.diff(ylims).flatten()
yrange_max = yranges.max()
yrange_paddings = -yranges + yrange_max
ylims[:, 0] -= yrange_paddings[:] / 2
ylims[:, 1] += yrange_paddings[:] / 2
for ax, ylims_ in zip(self.axis, ylims):
ax.set_ylim(*ylims_)
for _i, ax in enumerate(self.axis):
# Set the title of each plot.
ax.text(0.02, 0.95, self.ids[_i], transform=ax.transAxes,
fontdict=dict(fontsize="small", ha='left', va='top'),
bbox=dict(boxstyle="round", fc="w", alpha=0.8))
plt.setp(ax.get_yticklabels(), fontsize='small')
ax.yaxis.set_major_locator(MaxNLocator(7, prune="both"))
ax.yaxis.set_major_formatter(ScalarFormatter())
def __dayplotGetMinMaxValues(self, *args, **kwargs): # @UnusedVariable
"""
Takes a Stream object and calculates the min and max values for each
pixel in the dayplot.
Writes a three dimensional array. The first axis is the step, i.e
number of trace, the second is the pixel in that step and the third
contains the minimum and maximum value of the pixel.
"""
# Helper variables for easier access.
trace = self.stream[0]
trace_length = len(trace.data)
# Samples per interval.
spi = int(self.interval * trace.stats.sampling_rate)
# Check the approximate number of samples per pixel and raise
# error as fit.
spp = float(spi) / self.width
if spp < 1.0:
msg = """
Too few samples to use dayplot with the given arguments.
Adjust your arguments or use a different plotting method.
"""
msg = " ".join(msg.strip().split())
raise ValueError(msg)
# Number of intervals plotted.
noi = float(trace_length) / spi
inoi = int(round(noi))
# Plot an extra interval if at least 2 percent of the last interval
# will actually contain data. Do it this way to lessen floating point
# inaccuracies.
if abs(noi - inoi) > 2E-2:
noi = inoi + 1
else:
noi = inoi
# Adjust data. Fill with masked values in case it is necessary.
number_of_samples = noi * spi
delta = number_of_samples - trace_length
if delta < 0:
trace.data = trace.data[:number_of_samples]
elif delta > 0:
trace.data = np.ma.concatenate(
[trace.data, createEmptyDataChunk(delta, trace.data.dtype)])
# Create array for min/max values. Use masked arrays to handle gaps.
extreme_values = np.ma.empty((noi, self.width, 2))
trace.data.shape = (noi, spi)
ispp = int(spp)
fspp = spp % 1.0
if fspp == 0.0:
delta = None
else:
delta = spi - ispp * self.width
# Loop over each interval to avoid larger errors towards the end.
for _i in range(noi):
if delta:
cur_interval = trace.data[_i][:-delta]
rest = trace.data[_i][-delta:]
else:
cur_interval = trace.data[_i]
cur_interval.shape = (self.width, ispp)
extreme_values[_i, :, 0] = cur_interval.min(axis=1)
extreme_values[_i, :, 1] = cur_interval.max(axis=1)
# Add the rest.
if delta:
extreme_values[_i, -1, 0] = min(extreme_values[_i, -1, 0],
rest.min())
extreme_values[_i, -1, 1] = max(extreme_values[_i, -1, 0],
rest.max())
# Set class variable.
self.extreme_values = extreme_values
def __dayplotNormalizeValues(self, *args, **kwargs): # @UnusedVariable
"""
Normalizes all values in the 3 dimensional array, so that the minimum
value will be 0 and the maximum value will be 1.
It will also convert all values to floats.
"""
# Convert to native floats.
self.extreme_values = self.extreme_values.astype(np.float) * \
self.stream[0].stats.calib
# Make sure that the mean value is at 0
self.extreme_values -= self.extreme_values.mean()
# Scale so that 99.5 % of the data will fit the given range.
if self.vertical_scaling_range | |
(6 тонн)':1,
'|---Производство 6-тонных автокранов (на базе военного грузовика)':1,
'-Длина колонны обеспечения (метров)':50,
}
# NOTE: the dictionary keys below are game data that is looked up elsewhere
# by its exact Russian text, so the keys must stay untranslated.
metadict_detail['Военный грузовик (бурильная машина)'] = {
    # http://saper.isnet.ru/texnica/bgm.html
    'Платформа военного грузовика':1,
    'Оборудование бурильной машины':1,
    '|---Производство бурильных машин (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (пожарная машина)'] = {
    'Платформа военного грузовика':1,
    'Оборудование пожарной машины':1,
    '|---Производство пожарных машин (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (авторазливочная станция)'] = {
    # One tank load treats/decontaminates a 0.5 km stretch of road.
    'Платформа военного грузовика':1,
    'Оборудование авторазливочной станции':1,
    '|---Производство авторазливочных станций (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (тепловая машина специальной обработки)'] = {
    # http://www.russianarms.ru/forum/index.php?topic=5491.0
    'Платформа военного грузовика':1,
    'Оборудование тепловой машины специальной обработки':1,
    '|---Производство тепловых машин специальной обработки (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (автомобильная фильтровальная станция)'] = {
    # http://saper.isnet.ru/texnica-2/MAFS.html
    # http://www.arms-expo.ru/armament/samples/811/76755/
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование автомобильной фильтровальной станции':1,
    '|---Производство автомобильных фильтровальных станций (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (минный заградитель)'] = {
    # http://saper.isnet.ru/texnica/umz.html
    'Платформа военного грузовика':1,
    'Оборудование универсального минного заградителя':1,
    '|---Производство минных заградителей (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
#----
# Trucks (reconnaissance and communications):
metadict_detail['Военный грузовик (радиопеленгационный метеорологический комплекс)'] = {
    # RPMK-1 "Ulybka" (1B44) - radio direction-finding meteorological complex
    # http://www.russianarms.ru/forum/index.php?topic=4193.0
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование радиопеленгационного метеорологического комплекса':1,
    '|---Производство радиопеленг. метео. комплексов (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (станция РТР)'] = {
    # 1L222 "Avtobaza" ELINT station
    # http://militaryrussia.ru/blog/topic-598.html
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование станции дальней радиотехнической разведки':1,
    '|---Производство станций дальней радиотехнической разведки (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (станция помех наведению)'] = {
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование станции помех наведению':1,
    '|---Производство станций помех наведению (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (КВ-радиостанция)'] = {
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование КВ-радиосвязи':1,
    '|---Производство КВ-радиостанций (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (КШМ)'] = {
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование командно-штабной машины':1,
    '|---Производство командно-штабных машин (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (сервер АСУ)'] = {
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование сервера АСУ':1,
    '|---Производство серверов АСУ (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (спутниковая связь)'] = {
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование спутниковой связи':1,
    '|---Производство станций спутниковой связи (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
#----
# Trucks (support):
metadict_detail['Военный грузовик (КУНГ)'] = {
    # FIXME
    # Body made of K500 reinforced foam plastic.
    # But the ponies prefer pegasus-portable containers.
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    '|---Производство унифицированных герметичных кузовов (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (дезинфекционно-душевой комплекс)'] = {
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование дезинфекционно-душевого комплекса':1,
    '|---Производство дезинфекционно-душевых комплексов (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (полевая баня)'] = {
    # BPO-32 field bath unit
    # http://www.russianarms.ru/forum/index.php?topic=5320.0
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование полевой бани':1,
    '|---Производство полевых бань (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (полевая кухня)'] = {
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование автомобильной полевой кухни':1,
    '|---Производство полевых кухонь (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (мастерская)'] = {
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование мастерской':1,
    '|---Производство мастерских (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (автомастерская)'] = {
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование автомастерской':1,
    '|---Производство автомастерских (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (мастерская бронетехники)'] = {
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование мастерской бронетехники':1,
    '|---Производство мастерских бронетехники (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
metadict_detail['Военный грузовик (слесарная мастерская)'] = {
    'Платформа военного грузовика':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование слесарной мастерской':1,
    '|---Производство слесарных мастерских (на базе военного грузовика)':1,
    '-Длина колонны обеспечения (метров)':50,
    }
#----
# Trucks (supply):
metadict_detail['Военный грузовик (буксировщик техники)'] = {
    'Платформа военного грузовика':1,
    '|---Производство буксировщиков (на базе военного грузовика)':1,
    '-Длина колонны буксировщиков (метров)':50,
    }
metadict_detail['Военный грузовик (перевозчик зенитных ракет большой дальности)'] = {
    'Платформа военного грузовика':1,
    'Ложемент зенитной ракеты большой дальности':1,
    '|---Производство буксировщиков (на базе военного грузовика)':1,
    '-Длина колонны буксировщиков (метров)':50,
    }
metadict_detail['Военный грузовик (перевозчик авиации)'] = {
    'Платформа военного грузовика':1,
    'Тентованный кузов':1,
    '|---Производство буксировщиков (на базе военного грузовика)':1,
    '-Длина колонны буксировщиков (метров)':50,
    }
metadict_detail['Военный грузовик (перевозчик пусковой установки ЗРК)'] = {
    'Платформа военного грузовика':1,
    'Тентованный кузов':1,
    '|---Производство буксировщиков (на базе военного грузовика)':1,
    '-Длина колонны буксировщиков (метров)':50,
    }
metadict_detail['Военный грузовик (перевозчик артиллерии)'] = {
    'Платформа военного грузовика':1,
    'Тентованный кузов':1,
    '|---Производство буксировщиков (на базе военного грузовика)':1,
    '-Длина колонны буксировщиков (метров)':50,
    }
metadict_detail['Военный грузовик (самосвал)'] = {
    'Платформа военного грузовика':1,
    'Кузов самосвала (8 кубометров)':1,
    '-Грузооборот (тонно-километров/час)':5 * 30,
    '|---Производство самосвалов (на базе военного грузовика)':1,
    '-Длина колонны снабжения (метров)':50,
    }
metadict_detail['Военный грузовик (перевозчик артиллерии и боеприпасов)'] = {
    'Платформа военного грузовика':1,
    'Тентованный кузов':1,
    '-Место под боеприпасы (тонн)':3,
    '-Грузооборот (тонно-километров/час)':3 * 30,
    '|---Производство буксировщиков (на базе военного грузовика)':1,
    '-Длина колонны буксировщиков (метров)':50,
    }
metadict_detail['Военный грузовик (водная цистерна)'] = {
    'Платформа военного грузовика':1,
    'Автомобильная водная цистерна':1,
    '-Грузооборот (тонно-километров/час)':5 * 30,
    '|---Производство автоцистерн (на базе военного грузовика)':1,
    '-Длина колонны снабжения (метров)':50,
    }
metadict_detail['Военный грузовик (топливная цистерна)'] = {
    'Платформа военного грузовика':1,
    'Автомобильная топливная цистерна':1,
    '-Грузооборот (тонно-километров/час)':5 * 30,
    '|---Производство автоцистерн (на базе военного грузовика)':1,
    '-Длина колонны снабжения (метров)':50,
    }
metadict_detail['Военный грузовик (боеприпасы)'] = {
    'Платформа военного грузовика':1,
    'Тентованный кузов':1,
    '-Место под боеприпасы (тонн)':5,
    '-Грузооборот (тонно-километров/час)':5 * 30,
    '|---Производство грузовых машин (на базе военного грузовика)':1,
    '-Длина колонны снабжения (метров)':50,
    }
metadict_detail['Военный грузовик (зенитные ракеты)'] = {
    'Платформа военного грузовика':1,
    'Тентованный кузов':1,
    '-Место под зенитные ракеты (тонн)':6,
    '-Грузооборот (тонно-километров/час)':5 * 30,
    '|---Производство грузовых машин (на базе военного грузовика)':1,
    '-Длина колонны снабжения (метров)':50,
    }
metadict_detail['Военный грузовик (маховики)'] = {
    'Платформа военного грузовика':1,
    'Тентованный кузов':1,
    '-Место под маховики (тонн)':5,
    '-Грузооборот (тонно-километров/час)':5 * 30,
    '|---Производство грузовых машин (на базе военного грузовика)':1,
    '-Длина колонны снабжения (метров)':50,
    }
metadict_detail['Военный грузовик (взрывчатка и мины)'] = {
    'Платформа военного грузовика':1,
    'Тентованный кузов':1,
    '-Место под взрывчатку и мины (тонн)':5,
    '-Грузооборот (тонно-километров/час)':5 * 30,
    '|---Производство грузовых машин (на базе военного грузовика)':1,
    '-Длина колонны снабжения (метров)':50,
    }
metadict_detail['Военный грузовик (продовольствие)'] = {
    'Платформа военного грузовика':1,
    'Тентованный кузов':1,
    '-Место под продовольствие (тонн)':5,
    '-Грузооборот (тонно-километров/час)':5 * 30,
    '|---Производство грузовых машин (на базе военного грузовика)':1,
    '-Длина колонны снабжения (метров)':50,
    }
metadict_detail['Военный грузовик (снаряжение)'] = {
    'Платформа военного грузовика':1,
    'Тентованный кузов':1,
    '-Место под снаряжение (тонн)':5,
    '-Грузооборот (тонно-километров/час)':5 * 30,
    '|---Производство грузовых машин (на базе военного грузовика)':1,
    '-Длина колонны снабжения (метров)':50,
    }
metadict_detail['Военный грузовик (строительные материалы)'] = {
    'Платформа военного грузовика':1,
    'Тентованный кузов':1,
    '-Место под стройматериалы (тонн)':5,
    '-Грузооборот (тонно-километров/час)':5 * 30,
    '|---Производство грузовых машин (на базе военного грузовика)':1,
    '-Длина колонны снабжения (метров)':50,
    }
metadict_detail['Военный грузовик (твёрдое топливо)'] = {
    'Платформа военного грузовика':1,
    'Тентованный кузов':1,
    '-Место под уголь (тонн)':5,
    '-Грузооборот (тонно-километров/час)':5 * 30,
    '|---Производство грузовых машин (на базе военного грузовика)':1,
    '-Длина колонны снабжения (метров)':50,
    }
#----
# Trailers:
metadict_detail['Прицеп (передвижная хлебопекарная печь)'] = {
    'Автомобильный прицеп':1,
    'Оборудование передвижной хлебопекарной печи':1,
    '|---Производство передвижных хлебопекарных печей (на базе автомобильного прицепа)':1,
    }
metadict_detail['Прицеп (передвижной тестоприготовительный агрегат)'] = {
    'Автомобильный прицеп':1,
    'Оборудование передвижного тестоприготовительного агрегата':1,
    '|---Производство тестоприготовительных агрегатов (на базе автомобильного прицепа)':1,
    }
metadict_detail['Прицеп (углевыжигательная печь)'] = {
    'Автомобильный прицеп':1,
    'Оборудование углевыжигательной печи':1,
    '|---Производство углевыжигательных печей (на базе автомобильного прицепа)':1,
    }
metadict_detail['Прицеп (60-квт газогенератор)'] = {
    'Автомобильный прицеп':1,
    'Оборудование 60-квт газогенератора':1,
    '|---Производство 60-квт газогенераторов (на базе автомобильного прицепа)':1,
    }
metadict_detail['Прицеп (200-квт газогенератор)'] = {
    'Автомобильный прицеп':1,
    'Оборудование 200-квт газогенератора':1,
    '|---Производство 200-квт газогенераторов (на базе автомобильного прицепа)':1,
    }
metadict_detail['Прицеп (60-квт электрогенератор)'] = {
    'Автомобильный прицеп':1,
    'Оборудование 60-квт электрогенератора':1,
    '|---Производство 60-квт электрогенераторов (на базе автомобильного прицепа)':1,
    }
metadict_detail['Прицеп (200-квт дизель-электрическая станция)'] = {
    # FIXME
    # An ordinary trailer is clearly not enough here.
    # EDS-200: mass -- 12 tons, efficiency -- 0.65:
    # http://saper.isnet.ru/texnica-2/esd-20.html
    'Автомобильный прицеп':1,
    'Оборудование 200-квт дизель-электрической станции':1,
    '|---Производство 200-квт дизель-электрических станций (на базе автомобильного прицепа)':1,
    }
metadict_detail['Прицеп (центральный распределительный пост)'] = {
    # Distribution post of the mobile power station:
    'Автомобильный прицеп':1,
    'Унифицированный герметичный кузов':1,
    'Оборудование центрального распределительного поста':1,
    '|---Производство центральных распределительных постов (на базе автомобильного прицепа)':1,
    }
metadict_detail['Прицеп (радиолокационный высотомер)'] = {
# Есть мнение, что высотомеры не нужны, у понек полноценные трёхкоординатные РЛС.
# https://ru.wikipedia.org/wiki/ПРВ-13
# https://ru.wikipedia.org/wiki/ПРВ-16
'Автомобильный прицеп':1,
'Унифицированный герметичный кузов':1,
'Оборудование радиолокационного высотомера':1,
'|---Производство радиолокационных высотомеров (на базе автомобильного | |
# <reponame>bbbboom/nonebot-plugin
# -*- coding: utf-8 -*-
import os
import random
import aiofiles
import ujson
from . import Utils2plus
from .customException import GrailExcept
# Non-fatal status sentinels: the helper coroutines below return these
# strings (instead of raising) for expected failure modes.
error = 'error'  # operation failed / required file missing
ok = 'ok'  # operation succeeded
insufficient = 'insufficient'  # not enough resources; returned as 'insufficient <amount>'
async def takeTheNameOfThePerson(bot, userQQ, userGroup):
    """Return the display name of a group member.

    Prefers the member's group card, then the nickname. Falls back to
    '您' when the lookup fails entirely and to ' ' when both fields
    exist but are empty.
    """
    nickname = '您'
    try:
        info = await bot.get_group_member_info(user_id = int(userQQ), group_id = int(userGroup),
            no_cache = True)
        nickname = info['card']
        if nickname == '':
            nickname = info['nickname']
        if nickname == '':
            nickname = ' '
    # BUG FIX: was a bare `except:`, which also swallows CancelledError
    # and KeyboardInterrupt in async code; only ordinary lookup/API
    # failures should fall back to the default name.
    except Exception:
        pass
    return nickname
async def getUserNickname(bot, userQQ, userGroup):
    """Return a group member's nickname (ignoring the group card).

    Falls back to '您' when the lookup fails and to ' ' when the
    nickname is empty.
    """
    nickname = '您'
    try:
        info = await bot.get_group_member_info(user_id = int(userQQ), group_id = int(userGroup),
            no_cache = True)
        nickname = info['nickname']
        if nickname == '':
            nickname = ' '
    # BUG FIX: was a bare `except:` (see takeTheNameOfThePerson); only
    # ordinary lookup/API failures should fall back to the default.
    except Exception:
        pass
    return nickname
async def atQQ(userQQ):
    """Build a CQ "at" code for *userQQ*, followed by a newline."""
    return f'[CQ:at,qq={userQQ}]\n'
async def readFileToJSON(p):
    """Read the JSON file at path *p* and return the parsed object.

    Returns the module-level ``error`` sentinel when the file does not
    exist.
    """
    if not os.path.exists(p):
        return error
    async with aiofiles.open(p, 'r', encoding='utf-8') as handle:
        raw = await handle.read()
    return ujson.loads(raw)
async def writeTo(p, info):
    """Serialize *info* as JSON and overwrite the file at path *p*."""
    async with aiofiles.open(p, 'w', encoding='utf-8') as handle:
        await handle.write(ujson.dumps(info))
    return ok
async def getItemInformation(id):
    """Look up an item by *id* in the goods configuration.

    Returns the matching goods dict, or ``error`` when no item carries
    that id. Raises GrailExcept when the configuration file is missing
    or malformed.
    """
    p = './HolyGrailWar/Config/Goods/Goods.json'
    content = await readFileToJSON(p)
    if content == error or 'goodslist' not in content:
        raise GrailExcept
    wanted = int(id)
    for goods in content['goodslist']:
        if goods['id'] == wanted:
            return goods
    return error
async def userInformationQuery(userQQ):
    """Load the stored profile of *userQQ*.

    Returns the parsed profile dict, or the module-level ``error``
    sentinel when the user has no data file.
    """
    p = './HolyGrailWar/User/Data/' + str(userQQ) + '.json'
    # CONSISTENCY: this duplicated readFileToJSON line-for-line
    # (existence check + read + ujson.loads); delegate instead.
    return await readFileToJSON(p)
async def getUserRemainingResources(userQQ):
    """Build the "remaining food / gold" summary line for a user.

    Raises GrailExcept when the user has no stored profile.
    """
    user = await userInformationQuery(int(userQQ))
    if user == error:
        raise GrailExcept
    resources = user['resources']
    return f"剩余粮食:{resources['foodstuff']},金币:{resources['gold']}"
async def writeUserInformation(userQQ, info):
    """Overwrite the stored profile of *userQQ* with *info*.

    Returns ``ok`` on success, or ``error`` when the user has no
    existing data file (profiles are never created here).
    """
    p = './HolyGrailWar/User/Data/' + str(userQQ) + '.json'
    if not os.path.exists(p):
        return error
    async with aiofiles.open(p, 'w', encoding='utf-8') as handle:
        await handle.write(ujson.dumps(info))
    return ok
async def generalUserInformationOperation(userQQ, classification, field, afterOperation):
    """Overwrite one field of a user's profile and persist it.

    With *classification* == 'native' the field lives at the top level
    of the profile; otherwise it lives under ``user[classification]``.
    Returns ``ok`` on success, ``error`` when the write fails; raises
    GrailExcept for a missing profile or an unknown nested key.
    """
    user = await userInformationQuery(userQQ)
    if user == error:
        raise GrailExcept
    if classification == 'native':
        # Top-level fields are set unconditionally.
        user[field] = afterOperation
    else:
        if classification not in user:
            raise GrailExcept
        if field not in user[classification]:
            raise GrailExcept
        user[classification][field] = afterOperation
    return ok if await writeUserInformation(userQQ, user) == ok else error
async def additionAndSubtractionOfUserInformationData(userQQ, classification, field, operationValue):
    """Adjust a numeric profile field by *operationValue* and persist it.

    Refuses adjustments that would drive the value below zero and, in
    that case, returns ``'insufficient <current value>'``. On success
    returns the new value; returns ``error`` when the write fails.
    Raises GrailExcept for a missing profile or an unknown nested key.
    """
    user = await userInformationQuery(userQQ)
    if user == error:
        raise GrailExcept
    # Resolve the container holding the field ('native' == top level;
    # only nested keys are membership-checked, mirroring the setter).
    if classification == 'native':
        container = user
    else:
        if classification not in user:
            raise GrailExcept
        if field not in user[classification]:
            raise GrailExcept
        container = user[classification]
    surplus = container[field] + operationValue
    if surplus < 0:
        return insufficient + ' ' + str(container[field])
    container[field] = surplus
    status = await writeUserInformation(userQQ, user)
    if status == error:
        return error
    return surplus
async def rigidGoldCoinAdditionAndSubtraction(userQQ, operationValue):
    """Add *operationValue* (possibly negative) to the user's gold.

    Unlike additionAndSubtractionOfUserInformationData, there is no
    "insufficient" guard: the balance may go negative. Returns the new
    balance, or ``error`` when the write fails; raises GrailExcept for
    a missing profile.
    """
    user = await userInformationQuery(userQQ)
    if user == error:
        raise GrailExcept
    # CLEANUP: removed an unused local that captured the old balance.
    user['resources']['gold'] += operationValue
    surplus = user['resources']['gold']
    # Write information
    status = await writeUserInformation(userQQ, user)
    if status == error:
        return error
    return surplus
async def addItems(userQQ, itemNumber, number):
    """Add *number* copies of item *itemNumber* to the user's knapsack.

    Increments an existing stack when present, otherwise appends a new
    entry. Returns ``ok`` on success, ``error`` when the write fails;
    raises GrailExcept for a missing profile.
    """
    user = await userInformationQuery(userQQ)
    if user == error:
        raise GrailExcept
    knapsack = user['knapsack']
    for entry in knapsack:
        if entry['id'] == itemNumber:
            entry['number'] += number
            break
    else:
        # No existing stack -- create a fresh slot.
        knapsack.append({"id": int(itemNumber), "number": int(number)})
    user['knapsack'] = knapsack
    status = await writeUserInformation(userQQ, user)
    return error if status == error else ok
async def singleAdventureExtract(bot, userQQ, userGroup):
    """Run one exploration for *userQQ* and build the reply message.

    Costs 100 food, picks one random event from the SingleAdventure
    config, applies its gold/item rewards to the profile and returns
    the formatted reply text (or the ``error`` sentinel on a failed
    write). Raises GrailExcept when the config is missing/malformed.
    """
    p = './HolyGrailWar/Config/Explore/SingleAdventure.json'
    content = await readFileToJSON(p)
    if content == error:
        raise GrailExcept
    replyMessage = ''
    replyMessage += await atQQ(userQQ)
    # Grain consumption
    status = await additionAndSubtractionOfUserInformationData(userQQ, 'resources', 'foodstuff', -100)
    if status == error:
        return error
    # 'insufficient <amount>' -- refund is not needed, nothing was deducted.
    if str(status).find(insufficient) != -1:
        return replyMessage + '探险一次至少需要100粮食,你还剩下' + status.split(' ')[1] + '粮食,无法继续探险。'
    # Take the maximum number of events
    if 'eventlist' not in content:
        raise GrailExcept
    idList = []
    eventList = content['eventlist']
    for e in eventList:
        idList.append(e['id'])
    # Take random ID
    selectedID = int( random.choice(idList) )
    event = {}
    for e in eventList:
        if int( e['id'] ) == selectedID:
            event = e
    if event == {}:
        return error
    # event processing
    replyMessage += '【探险】' + await takeTheNameOfThePerson(bot, userQQ, userGroup)
    settlementInformation = []
    # Settlement of gold coins: either a fixed 'gold' amount or a random
    # value drawn from [goldmin, goldmax].
    selectGold = 0
    if 'gold' in event:
        selectGold = event['gold']
        event['event'] = event['event'].replace(r'{gold}',str( event['gold'] ))
        settlementInformation.append('获得' + str(selectGold) + '金币')
    else:
        if ('goldmax' in event) and ('goldmin' in event):
            selectGold = random.randint(event['goldmin'], event['goldmax'])
            event['event'] = event['event'].replace(r'{gold}',str( selectGold ))
            # NOTE(review): a 'type' value other than 'loss' adds gold but
            # appends no settlement line -- confirm against the event config.
            if 'type' in event:
                if event['type'] == 'loss':
                    settlementInformation.append('失去' + str(selectGold) + '金币')
                    selectGold *= -1
            else:
                settlementInformation.append('获得' + str(selectGold) + '金币')
    # Operating user gold
    if selectGold != 0:
        status = await rigidGoldCoinAdditionAndSubtraction(userQQ, selectGold)
        if status == error:
            return error
    # Settlement items
    if 'goodsid' in event:
        goods = await getItemInformation(int( event['goodsid'] ))
        if goods == error:
            raise GrailExcept
        event['event'] = event['event'].replace(r'{goodsid}',('[' + str( goods['name'] ) + 'x' +
            str( event['amount'] ) + ']'))
        settlementInformation.append('获得' + ('[' + str( goods['name'] ) + 'x' +
            str( event['amount'] ) + ']'))
        # Operate user items
        status = await addItems(userQQ, goods['id'], event['amount'])
        if status == error:
            return error
    replyMessage += event['event'] + '\n\n' + '探险结算:\n' + str(','.join(settlementInformation)) + '\n\n'
    replyMessage += await getUserRemainingResources(userQQ)
    return replyMessage
async def getMultipleExplorationEventTable(number):
    """Roll *number* random exploration events and aggregate the loot.

    Returns a dict with numeric totals under 'addGold', 'lossGold' and
    'addFood', the list of won goods ids under 'addItem' (one entry per
    item) and a de-duplicated, '、'-joined description string under
    'event'. Returns ``error`` for a non-positive count, a missing
    config file, or a malformed event entry.
    """
    p = './HolyGrailWar/Config/Explore/MultipleExploration.json'
    if not os.path.exists(p):
        return error
    events = await readFileToJSON(p)
    eventList = events['eventlist']
    profit = {
        "addGold": 0,
        "lossGold": 0,
        "addFood": 0,
        "addItem": []
    }
    # A set, so repeated events are described only once.
    # NOTE(review): set iteration order is arbitrary, so the joined
    # description string is not deterministic across runs.
    profit['event'] = set()
    if number <= 0:
        return error
    for n in range(number):
        isThereMistake = 0
        e = random.choice(eventList)
        # Goods handling
        if 'goodsid' in e:
            isThereMistake = 1
            if 'amount' not in e:
                return error
            # Failed item type exists
            if 'goodsid_fail' in e:
                # Per item: 40% chance to receive the "failed" variant.
                # BUG FIX: the loop variable used to be named `number`,
                # shadowing the function parameter.
                for _attempt in range(e['amount']):
                    rollPoint = random.randint(1,100)
                    if rollPoint > 60:
                        profit['addItem'].append(e['goodsid'])
                    else:
                        profit['addItem'].append(e['goodsid_fail'])
            else:
                for j in range(e['amount']):
                    profit['addItem'].append(e['goodsid'])
        # Settlement of gold coins
        if ('goldmax' in e) and ('goldmin' in e):
            isThereMistake = 1
            selectTheNumberOfGoldCoins = random.randint(e['goldmin'],e['goldmax'])
            # NOTE(review): any 'type' key counts as a loss here, whereas
            # singleAdventureExtract checks for type == 'loss' -- confirm.
            if 'type' in e:
                profit['lossGold'] += int(selectTheNumberOfGoldCoins)
            else:
                profit['addGold'] += int(selectTheNumberOfGoldCoins)
        # Settling grain
        if ('foodmax' in e) and ('foodmin' in e):
            isThereMistake = 1
            selectGrainQuantity = random.randint(e['foodmin'],e['foodmax'])
            profit['addFood'] += int(selectGrainQuantity)
        # Every event must grant at least one kind of reward.
        if isThereMistake == 0:
            return error
        # Add event
        profit['event'].add(str(e['event']))
    profit['event'] = str('、'.join(profit['event']))
    return profit
async def grainOperation(userQQ):
    """Consume the user's food in batches of 100.

    Returns the number of 100-food batches consumed, or the string
    ``'insufficient <amount>'`` when the user holds less than 100 food.
    Raises GrailExcept for a missing profile.
    """
    user = await userInformationQuery(userQQ)
    if user == error:
        raise GrailExcept
    foodstuff = user['resources']['foodstuff']
    if foodstuff < 100:
        return insufficient + ' ' + str(foodstuff)
    batches = int(int(foodstuff) / 100)
    # Deduct exactly the consumed batches; any remainder below 100 stays.
    await additionAndSubtractionOfUserInformationData(userQQ, 'resources', 'foodstuff', -(100 * batches))
    return batches
async def multipleExplorationExtraction(bot, userQQ, userGroup):
# Judge whether it can be consumed
status = await grainOperation(userQQ)
if status == error:
raise GrailExcept
replyMessage = ''
replyMessage += await atQQ(userQQ)
if str(status).find(insufficient) != -1:
return replyMessage + '探险一次至少需要100粮食,你还剩下' + status.split(' ')[1] + '粮食,无法继续探险。'
# Continue event
profit = await getMultipleExplorationEventTable(int(status))
replyMessage += ('【探险】' + await takeTheNameOfThePerson(bot, userQQ, userGroup) + '进行了' +
str(status) + '次探险,经历了' + profit['event'] +
'等大量的磨炼,脱胎换骨,收获颇丰。\n\n')
replyMessage += '探险结算:\n'
settlementContents = []
isThereAnyOperationOnGoldCoin = 0
# add Gold coin settlement
if profit['addGold'] != 0 :
isThereAnyOperationOnGoldCoin = 1
settlementContents.append('获得' + str(profit['addGold']) + '金币')
whetherThereIsGrainOperation = 0
# foodstuff
if profit['addFood'] != 0:
whetherThereIsGrainOperation = 1
settlementContents.append('获得' + str(profit['addFood']) + '粮食')
status = await additionAndSubtractionOfUserInformationData(userQQ, 'resources', 'foodstuff',
int(profit['addFood']))
if status == error:
return error
# loss Gold coin settlement
if profit['lossGold'] != 0:
isThereAnyOperationOnGoldCoin = 1
settlementContents.append('失去' + str(profit['lossGold']) + '金币')
if isThereAnyOperationOnGoldCoin:
status = await rigidGoldCoinAdditionAndSubtraction(userQQ,
int(profit['addGold'])-int(profit['lossGold']))
if status == error:
return error
# Goods settlement
if profit['addItem'] != []:
listOfConvertedItems = {}
for item in profit['addItem']:
if str(item) in listOfConvertedItems:
listOfConvertedItems[str(item)] += 1
else:
listOfConvertedItems[str(item)] = 1
if listOfConvertedItems == {}:
return error
listOfSettlementItems = []
for key,value in listOfConvertedItems.items():
goods = await getItemInformation(int( key ))
if goods == error:
raise GrailExcept
listOfSettlementItems.append('[' + str( goods['name'] ) + 'x' +
str( value ) + ']')
# Operate user items
status = await addItems(userQQ, goods['id'], int(value))
if status == error:
return error
settlementContents.append('获得' + (','.join(listOfSettlementItems)))
# Add settlement message
replyMessage += ','.join(settlementContents) + '\n\n'
# Determine | |
return data
class AddAgeFeature(MapTransformation):
    """
    Adds an 'age' feature to the data_entry.

    The age feature starts with a small value at the start of the time
    series and grows over time. In training mode (`is_train=True`) it has
    the same length as the `target` field; otherwise it is extended by
    `pred_length` additional time steps.

    Parameters
    ----------
    target_field
        Field with target values (array) of time series
    output_field
        Field name to use for the output.
    pred_length
        Prediction length
    log_scale
        If set to true the age feature grows logarithmically otherwise
        linearly over time.
    """

    @validated()
    def __init__(
        self,
        target_field: str,
        output_field: str,
        pred_length: int,
        log_scale: bool = True,
    ) -> None:
        self.target_field = target_field
        self.feature_name = output_field
        self.pred_length = pred_length
        self.log_scale = log_scale
        self._age_feature = np.zeros(0)

    def map_transform(self, data: DataEntry, is_train: bool) -> DataEntry:
        length = target_transformation_length(
            data[self.target_field], self.pred_length, is_train=is_train
        )
        indices = np.arange(length, dtype=np.float32)
        # Logarithmic growth starts at log10(2) so the feature is never 0.
        age = np.log10(2.0 + indices) if self.log_scale else indices
        data[self.feature_name] = age.reshape((1, length))
        return data
class InstanceSplitter(FlatMapTransformation):
    """
    Selects training instances, by slicing the target and other time series
    like arrays at random points in training mode or at the last time point in
    prediction mode. Assumption is that all time like arrays start at the same
    time point.
    The target and each time_series_field is removed and instead two
    corresponding fields with prefix `past_` and `future_` are included. E.g.
    If the target array is one-dimensional, the resulting instance has shape
    (len_target). In the multi-dimensional case, the instance has shape (dim,
    len_target).
    target -> past_target and future_target
    The transformation also adds a field 'past_is_pad' that indicates whether
    values were padded or not.
    Convention: time axis is always the last axis.
    Parameters
    ----------
    target_field
        field containing the target
    is_pad_field
        output field indicating whether padding happened
    start_field
        field containing the start date of the time series
    forecast_start_field
        output field that will contain the time point where the forecast starts
    train_sampler
        instance sampler that provides sampling indices given a time-series
    past_length
        length of the target seen before making prediction
    future_length
        length of the target that must be predicted
    output_NTC
        whether to have time series output in (time, dimension) or in
        (dimension, time) layout
    time_series_fields
        fields that contains time-series, they are split in the same interval
        as the target
    pick_incomplete
        whether training examples can be sampled with only a part of
        past_length time-units
        present for the time series. This is useful to train models for
        cold-start. In such case, is_pad_out contains an indicator whether
        data is padded or not.
    """
    @validated()
    def __init__(
        self,
        target_field: str,
        is_pad_field: str,
        start_field: str,
        forecast_start_field: str,
        train_sampler: InstanceSampler,
        past_length: int,
        future_length: int,
        output_NTC: bool = True,
        time_series_fields: Optional[List[str]] = None,
        pick_incomplete: bool = True,
    ) -> None:
        # A zero-length prediction window would make the split meaningless.
        assert future_length > 0
        self.train_sampler = train_sampler
        self.past_length = past_length
        self.future_length = future_length
        self.output_NTC = output_NTC
        # Fresh list per instance; None default avoids a shared mutable default.
        self.ts_fields = (
            time_series_fields if time_series_fields is not None else []
        )
        self.target_field = target_field
        self.is_pad_field = is_pad_field
        self.start_field = start_field
        self.forecast_start_field = forecast_start_field
        self.pick_incomplete = pick_incomplete
    def _past(self, col_name):
        """Name of the output field holding the history slice of `col_name`."""
        return f'past_{col_name}'
    def _future(self, col_name):
        """Name of the output field holding the forecast slice of `col_name`."""
        return f'future_{col_name}'
    def flatmap_transform(
        self, data: DataEntry, is_train: bool
    ) -> Iterator[DataEntry]:
        """Yield one instance per sampled split point (training mode), or a
        single instance split at the end of the series (prediction mode)."""
        pl = self.future_length
        slice_cols = self.ts_fields + [self.target_field]
        target = data[self.target_field]
        # Convention: time is always the last axis.
        len_target = target.shape[-1]
        if is_train:
            if len_target < self.future_length:
                # We currently cannot handle time series that are shorter than
                # the prediction length during training, so we just skip these.
                # If we want to include them we would need to pad and to mask
                # the loss.
                sampling_indices: List[int] = []
            else:
                if self.pick_incomplete:
                    # Split points may fall before a full `past_length` history
                    # exists; the missing prefix is zero-padded below.
                    sampling_indices = self.train_sampler(
                        target, 0, len_target - self.future_length
                    )
                else:
                    # Only sample points that have a complete history.
                    sampling_indices = self.train_sampler(
                        target,
                        self.past_length,
                        len_target - self.future_length,
                    )
        else:
            # Prediction mode: split exactly at the end of the observed data.
            sampling_indices = [len_target]
        for i in sampling_indices:
            # Number of missing history steps that must be left-padded.
            pad_length = max(self.past_length - i, 0)
            if not self.pick_incomplete:
                assert pad_length == 0
            d = data.copy()
            for ts_field in slice_cols:
                if i > self.past_length:
                    # truncate to past_length
                    past_piece = d[ts_field][..., i - self.past_length : i]
                elif i < self.past_length:
                    # Left-pad with zeros up to past_length.
                    pad_block = np.zeros(
                        d[ts_field].shape[:-1] + (pad_length,),
                        dtype=d[ts_field].dtype,
                    )
                    past_piece = np.concatenate(
                        [pad_block, d[ts_field][..., :i]], axis=-1
                    )
                else:
                    past_piece = d[ts_field][..., :i]
                d[self._past(ts_field)] = past_piece
                d[self._future(ts_field)] = d[ts_field][..., i : i + pl]
                # The original (unsplit) field is replaced by the two slices.
                del d[ts_field]
            pad_indicator = np.zeros(self.past_length)
            if pad_length > 0:
                pad_indicator[:pad_length] = 1
            if self.output_NTC:
                # Switch from (dimension, time) to (time, dimension) layout.
                for ts_field in slice_cols:
                    d[self._past(ts_field)] = d[
                        self._past(ts_field)
                    ].transpose()
                    d[self._future(ts_field)] = d[
                        self._future(ts_field)
                    ].transpose()
            d[self._past(self.is_pad_field)] = pad_indicator
            d[self.forecast_start_field] = compute_date(d[self.start_field], i)
            yield d
class CanonicalInstanceSplitter(FlatMapTransformation):
    """
    Selects instances, by slicing the target and other time series
    like arrays at random points in training mode or at the last time point in
    prediction mode. Assumption is that all time like arrays start at the same
    time point.
    In training mode, the returned instances contain past_`target_field`
    as well as past_`time_series_fields`.
    In prediction mode, one can set `use_prediction_features` to get
    future_`time_series_fields`.
    If the target array is one-dimensional, the `target_field` in the resulting instance has shape
    (`instance_length`). In the multi-dimensional case, the instance has shape (`dim`, `instance_length`),
    where `dim` can also take a value of 1.
    In the case of insufficient number of time series values, the
    transformation also adds a field 'past_is_pad' that indicates whether
    values were padded or not, and the value is padded with
    `default_pad_value` with a default value 0.
    This is done only if `allow_target_padding` is `True`,
    and the length of `target` is smaller than `instance_length`.
    Parameters
    ----------
    target_field
        fields that contains time-series
    is_pad_field
        output field indicating whether padding happened
    start_field
        field containing the start date of the time series
    forecast_start_field
        field containing the forecast start date
    instance_sampler
        instance sampler that provides sampling indices given a time-series
    instance_length
        length of the target seen before making prediction
    output_NTC
        whether to have time series output in (time, dimension) or in
        (dimension, time) layout
    time_series_fields
        fields that contains time-series, they are split in the same interval
        as the target; defaults to no extra fields
    allow_target_padding
        flag to allow padding
    pad_value
        value to be used for padding
    use_prediction_features
        flag to indicate if prediction range features should be returned
    prediction_length
        length of the prediction range, must be set if
        use_prediction_features is True
    """
    @validated()
    def __init__(
        self,
        target_field: str,
        is_pad_field: str,
        start_field: str,
        forecast_start_field: str,
        instance_sampler: InstanceSampler,
        instance_length: int,
        output_NTC: bool = True,
        time_series_fields: Optional[List[str]] = None,
        allow_target_padding: bool = False,
        pad_value: float = 0.0,
        use_prediction_features: bool = False,
        prediction_length: Optional[int] = None,
    ) -> None:
        self.instance_sampler = instance_sampler
        self.instance_length = instance_length
        self.output_NTC = output_NTC
        # Fixed: the previous signature used a mutable default argument
        # (`time_series_fields: List[str] = []`), a single list object shared
        # by every instance created with the default. Use None and build a
        # fresh list here instead (same convention as InstanceSplitter).
        self.dynamic_feature_fields = (
            time_series_fields if time_series_fields is not None else []
        )
        self.target_field = target_field
        self.allow_target_padding = allow_target_padding
        self.pad_value = pad_value
        self.is_pad_field = is_pad_field
        self.start_field = start_field
        self.forecast_start_field = forecast_start_field
        assert (
            not use_prediction_features or prediction_length is not None
        ), "You must specify `prediction_length` if `use_prediction_features`"
        self.use_prediction_features = use_prediction_features
        self.prediction_length = prediction_length
    def _past(self, col_name):
        """Name of the output field holding the history slice of `col_name`."""
        return f'past_{col_name}'
    def _future(self, col_name):
        """Name of the output field holding the forecast slice of `col_name`."""
        return f'future_{col_name}'
    def flatmap_transform(
        self, data: DataEntry, is_train: bool
    ) -> Iterator[DataEntry]:
        """Yield one instance per sampled split point (training mode), or a
        single instance split at the end of the series (prediction mode)."""
        ts_fields = self.dynamic_feature_fields + [self.target_field]
        ts_target = data[self.target_field]
        # Convention: time is always the last axis.
        len_target = ts_target.shape[-1]
        if is_train:
            if len_target < self.instance_length:
                sampling_indices = (
                    # Returning [] for all time series will cause this to be in loop forever!
                    [len_target]
                    if self.allow_target_padding
                    else []
                )
            else:
                sampling_indices = self.instance_sampler(
                    ts_target, self.instance_length, len_target
                )
        else:
            sampling_indices = [len_target]
        for i in sampling_indices:
            d = data.copy()
            # Number of missing history steps that must be left-padded.
            pad_length = max(self.instance_length - i, 0)
            # update start field
            d[self.start_field] = compute_date(
                data[self.start_field], i - self.instance_length
            )
            # set is_pad field
            is_pad = np.zeros(self.instance_length)
            if pad_length > 0:
                is_pad[:pad_length] = 1
            d[self.is_pad_field] = is_pad
            # update time series fields
            for ts_field in ts_fields:
                full_ts = data[ts_field]
                if pad_length > 0:
                    # Left-pad with pad_value up to instance_length.
                    pad_pre = self.pad_value * np.ones(
                        shape=full_ts.shape[:-1] + (pad_length,)
                    )
                    past_ts = np.concatenate(
                        [pad_pre, full_ts[..., :i]], axis=-1
                    )
                else:
                    past_ts = full_ts[..., (i - self.instance_length) : i]
                # (time, dimension) layout when output_NTC is set.
                past_ts = past_ts.transpose() if self.output_NTC else past_ts
                d[self._past(ts_field)] = past_ts
                if self.use_prediction_features and not is_train:
                    # Only feature fields get a future slice; the target is
                    # what we are predicting.
                    if ts_field != self.target_field:
                        future_ts = full_ts[
                            ..., i : i + self.prediction_length
                        ]
                        future_ts = (
                            future_ts.transpose()
                            if self.output_NTC
                            else future_ts
                        )
                        d[self._future(ts_field)] = future_ts
                # The original (unsplit) field is replaced by the slices.
                del d[ts_field]
            d[self.forecast_start_field] = compute_date(
                d[self.start_field], self.instance_length
            )
            yield d
class SelectFields(MapTransformation):
"""
Only | |
#!/usr/bin/python
# Code for generating and reading graphrat mazes
# Parameters:
# k: Grid size will be k * k nodes
# Type:
# u : uniform
# t : tiled
# v : vertical slices
# h : horizontal slices
# p : parquet
# i : irregular
import getopt
import sys
import math
import string
import datetime
import rutil
def usage(name):
print "Usage: %s [-h] [-k K] [-t (u|t|v|h|p|i)] [-l L:H] [-o OUT] [-s SEED]"
print "\t-h Print this message"
print "\t-k K Base graph as k x k grid"
print "\t-t Specify graph type:"
print "\t u : uniform"
print "\t t : tiled"
print "\t v : vertical slices"
print "\t h : horizontal slices"
print "\t p : parquet"
print "\t i : irregular"
print "\t-l L:H Specify range of ideal load factors"
print "\t-o OUT Specify output file"
sys.exit(0)
def trim(s):
    """Return s without any trailing newline/carriage-return characters."""
    # str.rstrip with an explicit character set is the stdlib equivalent of
    # the previous hand-rolled while loop.
    return s.rstrip('\r\n')
class RatMode:
    """Enumerates the options for the initial rat distribution."""
    uniform, diagonal, upleft, lowright = range(4)
    # Human-readable labels, indexed by the codes above.
    modeNames = ["uniform", "diagonal", "upper-left", "lower-right"]
class GraphType:
    """Enumerates the supported graph layouts and related helpers."""
    uniform, tiled, vertical, horizontal, parquet, irregular = range(6)
    # Display names (leading spaces preserved for aligned output) and the
    # one-letter command-line tags, both indexed by the codes above.
    modeNames = ["uniform", " tiled", " vertical", " horizontal", " parquet", " irregular"]
    modeTags = ['u', 't', 'v', 'h', 'p', 'i']
    def __init__(self):
        pass
    def getType(self, tag):
        """Map a one-letter tag to its graph-type code, or -1 if unknown."""
        if tag in self.modeTags:
            return self.modeTags.index(tag)
        return -1
    def feasible(self, gtype, k):
        """Check whether grid dimension k divides evenly for this type."""
        divisor = 1
        if gtype in [self.vertical, self.horizontal, self.irregular]:
            divisor = 12
        if gtype in [self.tiled, self.parquet]:
            divisor = 6
        return k % divisor == 0
class Graph:
k = 0
nodeCount = 0
edges = {} # Maps edges to True. Include both directions
commentList = [] # Documentation about how generated
nodeList = [] # Node ideal load factors
ilfRange = (1.2,1.8) # Range of ideal load factors
rng = None
    def __init__(self, k = 0, gtype = GraphType.uniform, ilf = None):
        # Convenience constructor: build the graph immediately.
        # NOTE(review): generate() also accepts a seed parameter which is not
        # forwarded here -- callers needing a non-default seed must invoke
        # generate() directly.
        self.generate(k, gtype = gtype, ilf = ilf)
    def generate(self, k = 12, gtype = GraphType.uniform, ilf = None, seed = rutil.DEFAULTSEED):
        """Build a k x k grid graph of the given type.
        ilf, when given, is a (low, high) range of ideal load factors.
        Prints an error and leaves the graph unchanged if k is not feasible
        for the requested type."""
        gt = GraphType()
        if not gt.feasible(gtype, k):
            print "Cannot generate graph of type %s for k = %d" % (gt.modeNames[gtype], k)
            return
        self.rng = rutil.RNG([seed])
        if ilf is not None:
            self.ilfRange = ilf
        # Record provenance as '#' comment lines for the output file.
        self.commentList = []
        tgen = datetime.datetime.now()
        self.commentList.append("# Generated %s" % tgen.ctime())
        self.commentList.append("# Parameters: k = %d, type = %s, ilf = (%.2f,%.2f)" % (k, gt.modeNames[gtype], self.ilfRange[0], self.ilfRange[1]))
        self.k = k
        self.nodeCount = k * k
        # Per-node ideal load factors (assignIlf is defined elsewhere in this
        # class -- presumably draws from ilfRange; not visible here).
        self.nodeList = [self.assignIlf(i) for i in range(self.nodeCount)]
        self.edges = {}
        # Generate grid edges
        for r in range(k):
            for c in range(k):
                own = self.id(r, c)
                # id() returns -1 for off-grid neighbors, which are skipped.
                north = self.id(r-1, c)
                if north >= 0:
                    self.addEdge(own, north)
                south = self.id(r+1, c)
                if south >= 0:
                    self.addEdge(own, south)
                west = self.id(r, c-1)
                if west >= 0:
                    self.addEdge(own, west)
                east = self.id(r, c+1)
                if east >= 0:
                    self.addEdge(own, east)
        # Overlay hub edges according to the requested layout.
        if gtype in [gt.tiled, gt.vertical, gt.horizontal]:
            cells = 6 if gtype == gt.tiled else 12
            # Python 2 integer division; feasible() guarantees k % cells == 0.
            unit = self.k/cells
            tileX = unit if gtype in [gt.tiled, gt.vertical] else self.k
            tileY = unit if gtype in [gt.tiled, gt.horizontal] else self.k
            self.tile(tileX, tileY)
        elif gtype == gt.parquet:
            self.parquet()
        elif gtype == gt.irregular:
            self.irregular()
        elif gtype != gt.uniform:
            print "Unknown graph type %d" % gtype
    def tile(self, tileX, tileY):
        """Cover the grid with tileX x tileY tiles, creating hubs in each."""
        # A zero dimension mirrors the other one (square tiles).
        if tileY == 0:
            tileY = tileX
        if tileX == 0:
            tileX = tileY
        for x in range(0, self.k, tileX):
            w = min(tileX, self.k - x)
            # NOTE(review): the y loop stops at k - tileY + 1, dropping a final
            # partial row of tiles, while the x loop keeps partial columns --
            # looks deliberate but verify against the intended layouts.
            for y in range(0, self.k-tileY+1, tileY):
                h = min(tileY, self.k - y)
                self.makeHubs(x, y, w, h)
    def parquet(self):
        """Lay out hubs in a parquet (herringbone-like) pattern of 3x1 and
        1x3 blocks over a 6x6-unit grid.  feasible() guarantees k % 6 == 0."""
        cells = 6
        # Python 2 integer division.
        unit = self.k/cells
        # Upper Left: three horizontal 3x1 strips stacked vertically.
        x = 0
        w = unit * 3
        h = unit
        ystart = 0
        yend = ystart + 3 * h
        for y in range(ystart, yend, h):
            self.makeHubs(x, y, w, h)
        # Upper Right: three vertical 1x3 strips side by side.
        w = unit
        y = 0
        h = unit * 3
        xstart = unit * 3
        xend = xstart + 3 * w
        for x in range(xstart, xend, w):
            self.makeHubs(x, y, w, h)
        # Lower Left: three vertical strips.
        w = unit
        y = unit * 3
        h = unit * 3
        xstart = 0
        xend = xstart + 3 * w
        for x in range(xstart, xend, w):
            self.makeHubs(x, y, w, h)
        # Lower Right: three horizontal strips.
        x = unit * 3
        w = unit * 3
        h = unit
        ystart = unit * 3
        yend = ystart + 3 * h
        for y in range(ystart, yend, h):
            self.makeHubs(x, y, w, h)
    def irregular(self):
        """Lay out hubs in four unequal regions over a 12x12-unit grid.
        feasible() guarantees k % 12 == 0."""
        cells = 12
        # Python 2 integer division.
        unit = self.k/cells
        # Upper Left: one large 4x6 region.
        x = 0
        w = unit * 4
        y = 0
        h = unit * 6
        self.makeHubs(x, y, w, h)
        # Upper Right: three 8x2 horizontal strips.
        x = unit * 4
        w = unit * 8
        h = unit * 2
        ystart = 0
        yend = ystart + 3 * h
        for y in range(ystart, yend, h):
            self.makeHubs(x, y, w, h)
        # Lower Left: two 6x3 horizontal strips.
        x = 0
        w = unit * 6
        h = unit * 3
        ystart = unit * 6
        yend = ystart + 2 * h
        for y in range(ystart, yend, h):
            self.makeHubs(x, y, w, h)
        # Lower Right: two 3x6 vertical strips.
        w = unit * 3
        y = unit * 6
        h = unit * 6
        xstart = unit * 6
        xend = xstart + 2 * w
        for x in range(xstart, xend, w):
            self.makeHubs(x, y, w, h)
    def makeHubs(self, x, y, w, h, xcount=1, ycount=1):
        """Create xcount x ycount hub nodes inside the w x h region anchored
        at (x, y) and connect every node of the region to each hub.
        Python 2 integer division throughout."""
        # Hub spacing: narrow regions pack hubs edge-to-edge, wider ones
        # offset them inward by one spacing unit.
        if w <= 2*xcount:
            wsep = w/xcount
        else:
            wsep = w/(xcount + 1)
        hsep = h/(ycount + 1)
        if w <= xcount:
            cxList = [x + wsep * i for i in range(xcount)]
        elif w <= 2*xcount:
            cxList = [1 + x + wsep * i for i in range(xcount)]
        else:
            cxList = [x + wsep * (i + 1) for i in range(xcount)]
        cyList = [y + hsep * (i + 1) for i in range(ycount)]
        for cx in cxList:
            for cy in cyList:
                # id(row, col): cy is the row, cx the column.
                cid = self.id(cy, cx)
                for j in range(w):
                    for i in range(h):
                        # `id` here shadows the builtin, local to this loop.
                        id = self.id(y+i, x+j)
                        self.addEdge(cid, id)
# Check whether string is a comment
def isComment(self, s):
# Strip off leading whitespace
while len(s) > 0 and s[0] in string.whitespace:
s = s[1:]
return len(s) == 0 or s[0] == '#'
# Load graph from file
def load(self, fname = ""):
self.k = 0
self.nodeList = []
self.edges = {}
if fname == "":
f = sys.stdin
else:
try:
f = open(fname, "r")
except:
sys.stderr.write("Could not open file '%s'\n" % fname)
return False
expectedEgeCount = 0
realEdgeCount = 0
realNodeCount = 0
for line in f:
if self.isComment(line):
continue
args = line.split()
if len(args) == 0:
continue
cmd = args[0]
# Header information
if self.k == 0:
self.nodeCount = int(args[0])
self.nodeList = [1.5 for i in range(self.nodeCount)]
self.k = int(math.sqrt(self.nodeCount))
expectedEdgeCount = int(args[1])
elif cmd == 'n':
ilf = float(args[1])
self.nodeList[realNodeCount] = ilf
realNodeCount += 1
elif cmd == 'e':
i = int(args[1])
j = int(args[2])
if self.addEdge(i,j):
# Since addEdge puts both (i,j) and (j,i) into set, only half of the
# edges will return True from addEdge
realEdgeCount += 2
else:
sys.stderr.write("Error reading graph file '%s'. Invalid line: '%'" % fname, trim(line))
return False
if fname != "":
f.close()
if realNodeCount != self.nodeCount:
sys.stderr.write("Error reading graph file '%s'. Expected %d nodes. Found %d\n" % (fname, self.nodeCount, realNodeCount))
return False
if realEdgeCount != expectedEdgeCount:
sys.stderr.write("Error reading graph file '%s'. Expected %d edges. Found %d\n" % (fname, expectedEdgeCount, realEdgeCount))
return False
else:
sys.stderr.write("Read graph with %d nodes and %d edges\n" % (self.nodeCount, realEdgeCount))
return True
def id(self, r, c):
if r < 0 or r >= self.k:
return -1
if c < 0 or c >= self.k:
return -1
return r * self.k + c
def rowColumn(self, id):
r = id/self.k
c = id - r*self.k
return (r, | |
<gh_stars>0
'''
Backends
--------
NDB stores all the records in an SQL database. By default it uses
the SQLite3 module, which is a part of the Python stdlib, so no
extra packages are required::
# SQLite3 -- simple in-memory DB
ndb = NDB()
# SQLite3 -- file DB
ndb = NDB(db_provider='sqlite3', db_spec='test.db')
It is also possible to use a PostgreSQL database via psycopg2
module::
# PostgreSQL -- local DB
ndb = NDB(db_provider='psycopg2',
db_spec={'dbname': 'test'})
# PostgreSQL -- remote DB
ndb = NDB(db_provider='psycopg2',
db_spec={'dbname': 'test',
'host': 'db1.example.com'})
SQL schema
----------
A file based SQLite3 DB or PostgreSQL may be useful for inspection
of the collected data. Here is an example schema::
rtnl=# \\dt
List of relations
Schema | Name | Type | Owner
--------+-----------------+-------+-------
public | addresses | table | root
public | ifinfo_bond | table | root
public | ifinfo_bridge | table | root
public | ifinfo_gre | table | root
public | ifinfo_vlan | table | root
public | ifinfo_vrf | table | root
public | ifinfo_vti | table | root
public | ifinfo_vti6 | table | root
public | ifinfo_vxlan | table | root
public | interfaces | table | root
public | neighbours | table | root
public | nh | table | root
public | routes | table | root
public | sources | table | root
public | sources_options | table | root
(15 rows)
rtnl=# select f_index, f_ifla_ifname from interfaces;
f_index | f_ifla_ifname
---------+---------------
1 | lo
2 | eth0
28 | ip_vti0
31 | ip6tnl0
32 | ip6_vti0
36445 | br0
11434 | dummy0
3 | eth1
(8 rows)
rtnl=# select f_index, f_ifla_br_stp_state from ifinfo_bridge;
f_index | f_ifla_br_stp_state
---------+---------------------
36445 | 0
(1 row)
There are also some useful views, that join `ifinfo` tables with
`interfaces`::
rtnl=# \\dv
List of relations
Schema | Name | Type | Owner
--------+--------+------+-------
public | bond | view | root
public | bridge | view | root
public | gre | view | root
public | vlan | view | root
public | vrf | view | root
public | vti | view | root
public | vti6 | view | root
public | vxlan | view | root
(8 rows)
'''
import io
import sys
import time
import random
import sqlite3
import threading
import traceback
from functools import partial
from collections import OrderedDict
from pr2modules import config
from pr2modules.common import uuid32
from pr2modules.common import basestring
from .objects import address
from .objects import interface
from .objects import neighbour
from .objects import netns
from .objects import route
from .objects import rule
#
from .messages import cmsg
try:
import queue
except ImportError:
import Queue as queue
#
# the order is important
# NOTE(review): presumably the plugins are consumed in list order during
# schema setup -- verify before reordering.
#
plugins = [interface,
           address,
           neighbour,
           route,
           netns,
           rule]
# Retry budget; the consuming code is outside this chunk.
MAX_ATTEMPTS = 5
def publish(method):
    """Wrap a generator method so it always executes in the DBM thread.

    When called from the owning thread (self.thread), `method` runs
    directly.  From any other thread the call is serialized as a cmsg_req
    onto the NDB event queue and the yielded items are streamed back to
    the caller through a Queue.
    """
    #
    # this wrapper will be published in the DBM thread
    #
    def _do_local(self, target, request):
        try:
            for item in method(self, *request.argv, **request.kwarg):
                request.response.put(item)
            # A StopIteration *instance* is the end-of-stream sentinel.
            request.response.put(StopIteration())
        except Exception as e:
            # Ship the exception across the queue; re-raised on the caller side.
            request.response.put(e)
    #
    # this class will be used to map the requests
    #
    class cmsg_req(cmsg):
        def __init__(self, response, *argv, **kwarg):
            self['header'] = {'target': None}
            self.response = response
            self.argv = argv
            self.kwarg = kwarg
    #
    # this method will replace the source one
    #
    def _do_dispatch(self, *argv, **kwarg):
        if self.thread == id(threading.current_thread()):
            # same thread, run method locally
            for item in method(self, *argv, **kwarg):
                yield item
        else:
            # another thread, run via message bus
            self._allow_read.wait()
            response = queue.Queue()
            request = cmsg_req(response, *argv, **kwarg)
            self.ndb._event_queue.put((request, ))
            while True:
                item = response.get()
                if isinstance(item, StopIteration):
                    return
                elif isinstance(item, Exception):
                    raise item
                else:
                    yield item
    #
    # announce the function so it will be published
    #
    # DBSchema.__init__ scans for this attribute and registers _do_local
    # in the NDB event map under the cmsg_req message class.
    _do_dispatch.publish = (cmsg_req, _do_local)
    return _do_dispatch
def publish_exec(method):
    """Like publish(), but for plain (non-generator) methods.

    The single return value (or raised exception) is passed back through
    a one-slot Queue instead of being streamed.
    """
    #
    # this wrapper will be published in the DBM thread
    #
    def _do_local(self, target, request):
        try:
            (request
             .response
             .put(method(self, *request.argv, **request.kwarg)))
        except Exception as e:
            # Ship the exception across the queue; re-raised on the caller side.
            (request
             .response
             .put(e))
    #
    # this class will be used to map the requests
    #
    class cmsg_req(cmsg):
        def __init__(self, response, *argv, **kwarg):
            self['header'] = {'target': None}
            self.response = response
            self.argv = argv
            self.kwarg = kwarg
    #
    # this method will replace the source one
    #
    def _do_dispatch(self, *argv, **kwarg):
        if self.thread == id(threading.current_thread()):
            # same thread, run method locally
            return method(self, *argv, **kwarg)
        else:
            # another thread, run via message bus
            # exactly one response item is expected
            response = queue.Queue(maxsize=1)
            request = cmsg_req(response, *argv, **kwarg)
            self.ndb._event_queue.put((request, ))
            ret = response.get()
            if isinstance(ret, Exception):
                raise ret
            else:
                return ret
    #
    # announce the function so it will be published
    #
    # DBSchema.__init__ scans for this attribute and registers _do_local
    # in the NDB event map under the cmsg_req message class.
    _do_dispatch.publish = (cmsg_req, _do_local)
    return _do_dispatch
class DBSchema(object):
    """SQL-backed storage schema for NDB records.

    Owned by the DBM thread (see self.thread); methods decorated with
    @publish/@publish_exec are proxied onto that thread when invoked from
    another one.  Supports the 'sqlite3' and 'psycopg2' providers.
    """
    connection = None  # DB-API connection (sqlite3 or psycopg2)
    thread = None  # id() of the owning DBM thread
    event_map = None
    key_defaults = None
    snapshots = None  # <table_name>: <obj_weakref>
    spec = OrderedDict()
    classes = {}
    #
    # OBS: field names MUST go in the same order as in the spec,
    # that's for the load_netlink() to work correctly -- it uses
    # one loop to fetch both index and row values
    #
    indices = {}
    foreign_keys = {}
    def __init__(self, ndb, connection, mode, rtnl_log, tid):
        """Initialize the schema on `connection`.

        ndb -- owning NDB instance (event queue, logging)
        connection -- open DB-API connection
        mode -- 'sqlite3' or 'psycopg2'
        rtnl_log -- flag controlling RTNL logging (stored as-is)
        tid -- id() of the DBM thread that owns this schema
        """
        self.ndb = ndb
        # collect all the dispatched methods and publish them
        for name in dir(self):
            obj = getattr(self, name, None)
            if hasattr(obj, 'publish'):
                event, fbody = obj.publish
                self.ndb._event_map[event] = [partial(fbody, self)]
        self.mode = mode
        self.stats = {}
        self.thread = tid
        self.connection = connection
        self.rtnl_log = rtnl_log
        self.log = ndb.log.channel('schema')
        self.snapshots = {}
        self.key_defaults = {}
        self.event_map = {}
        self._cursor = None
        self._counter = 0
        # Gate flags used by the cross-thread dispatchers.
        self._allow_read = threading.Event()
        self._allow_read.set()
        self._allow_write = threading.Event()
        self._allow_write.set()
        self.share_cursor()
        # Backend-specific setup: SQL parameter placeholder differs between
        # the two supported providers.
        if self.mode == 'sqlite3':
            # SQLite3
            self.connection.execute('PRAGMA foreign_keys = ON')
            self.plch = '?'
        elif self.mode == 'psycopg2':
            # PostgreSQL
            self.plch = '%s'
        else:
            raise NotImplementedError('database provider not supported')
        self.gctime = self.ctime = time.time()
        #
        # compile request lines
        #
        self.compiled = {}
        for table in self.spec.keys():
            self.compiled[table] = (self
                                    .compile_spec(table,
                                                  self.spec[table],
                                                  self.indices[table]))
            self.create_table(table)
        #
        # service tables
        #
        # Recreated from scratch on every start; ON DELETE CASCADE ties the
        # per-source options to their source row.
        self.execute('''
                     DROP TABLE IF EXISTS sources_options
                     ''')
        self.execute('''
                     DROP TABLE IF EXISTS sources
                     ''')
        self.execute('''
                     CREATE TABLE IF NOT EXISTS sources
                     (f_target TEXT PRIMARY KEY,
                      f_kind TEXT NOT NULL)
                     ''')
        self.execute('''
                     CREATE TABLE IF NOT EXISTS sources_options
                     (f_target TEXT NOT NULL,
                      f_name TEXT NOT NULL,
                      f_type TEXT NOT NULL,
                      f_value TEXT NOT NULL,
                      FOREIGN KEY (f_target)
                          REFERENCES sources(f_target)
                          ON UPDATE CASCADE
                          ON DELETE CASCADE)
                     ''')
def merge_spec(self, table1, table2, table, schema_idx):
spec1 = self.compiled[table1]
spec2 = self.compiled[table2]
names = spec1['names'] + spec2['names'][:-1]
all_names = spec1['all_names'] + spec2['all_names'][2:-1]
norm_names = spec1['norm_names'] + spec2['norm_names'][2:-1]
idx = ('target', 'tflags') + schema_idx
f_names = ['f_%s' % x for x in all_names]
f_set = ['f_%s = %s' % (x, self.plch) for x in all_names]
f_idx = ['f_%s' % x for x in idx]
f_idx_match = ['%s.%s = %s' % (table2, x, self.plch) for x in f_idx]
plchs = [self.plch] * len(f_names)
return {'names': names,
'all_names': all_names,
'norm_names': norm_names,
'idx': idx,
'fnames': ','.join(f_names),
'plchs': ','.join(plchs),
'fset': ','.join(f_set),
'knames': ','.join(f_idx),
'fidx': ' AND '.join(f_idx_match)}
def compile_spec(self, table, schema_names, schema_idx):
# e.g.: index, flags, IFLA_IFNAME
#
names = []
#
# same + two internal fields
#
all_names = ['target', 'tflags']
#
#
norm_names = ['target', 'tflags']
bclass = self.classes.get(table)
for name in schema_names:
names.append(name[-1])
all_names.append(name[-1])
iclass = bclass
if len(name) > 1:
for step in name[:-1]:
imap = dict(iclass.nla_map)
iclass = getattr(iclass, imap[step])
norm_names.append(iclass.nla2name(name[-1]))
#
# escaped names: f_index, f_flags, f_IFLA_IFNAME
#
# the reason: words like "index" are keywords in SQL
# and we can not use them; neither can we change the
# C structure
#
f_names = ['f_%s' % x for x in all_names]
#
# set the fields
#
# e.g.: f_flags = ?, f_IFLA_IFNAME = ?
#
# there are different placeholders:
# ? -- SQLite3
# %s -- PostgreSQL
# so use self.plch here
#
f_set = ['f_%s = %s' % (x, self.plch) for x in all_names]
#
# the set of the placeholders to use in the INSERT statements
#
plchs = [self.plch] * len(f_names)
#
# the index schema; use target and tflags in every index
#
idx = ('target', 'tflags') + schema_idx
#
# the same, escaped: f_target, f_tflags etc.
#
f_idx = ['f_%s' % x for x in idx]
#
# normalized idx | |
when failed.
References
----------
>>> oModule.DeleteReports
"""
self.oreportsetup.DeleteReports([PlotName])
return True
    @aedt_exception_handler
    def rename_report(self, PlotName, newname):
        """Rename a plot.
        Parameters
        ----------
        PlotName : str
            Name of the plot.
        newname : str
            New name of the plot.
        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        References
        ----------
        >>> oModule.RenameReport
        """
        # Direct AEDT call; failures are presumably converted to a False
        # return by the @aedt_exception_handler decorator -- TODO confirm.
        self.oreportsetup.RenameReport(PlotName, newname)
        return True
    @aedt_exception_handler
    def get_report_data(
        self, expression="dB(S(1,1))", setup_sweep_name="", domain="Sweep", families_dict=None, report_input_type=None
    ):
        """Generate report data.
        This method returns the data object and the arrays ``solData`` and
        ``FreqVals``.
        Parameters
        ----------
        expression : str or list
            One or more formulas to add to the report. The default is
            ``"dB(S(1,1))"``.
        setup_sweep_name : str, optional
            Name of the setup for computing the report. The
            default is ``""``, in which case the nominal sweep is
            used.
        domain : str or list, optional
            Context type. The options are ``"Sweep"`` or
            ``"Time"``. The default is ``"Sweep".``
        families_dict : dict, optional
            Dictionary of all families including the primary
            sweep. The default is ``{"Freq": ["All"]}``.
        report_input_type : str
            Type of input data for the report.
        Returns
        -------
        pyaedt.modules.PostProcessor.SolutionData
        References
        ----------
        >>> oModule.GetSolutionDataPerVariation
        Examples
        --------
        Generate a report with the default sweep and default variation.
        >>> hfss = HFSS()
        >>> hfss.post.get_report_data("S(1,1)")
        >>> m3d = Maxwell3D()
        >>> m3d.post.get_report_data("SurfaceLoss") # Eddy Current examples
        >>> m3d.post.get_report_data("Wind(LoadA,LaodA)") # Transient Analysis
        """
        if self.post_solution_type in ["3DLayout", "NexximLNA", "NexximTransient"]:
            # Circuit / 3D Layout designs take a "SimValueContext" list
            # rather than a plain Domain context; `did` selects the x-domain
            # (3 = sweep, 1 = time).
            if domain == "Sweep":
                did = 3
            else:
                did = 1
            ctxt = [
                "NAME:Context",
                "SimValueContext:=",
                [did, 0, 2, 0, False, False, -1, 1, 0, 1, 1, "", 0, 0, "IDIID", False, "1"],
            ]
        elif isinstance(domain, list):
            # A caller-supplied context list is passed through untouched.
            ctxt = domain
        else:
            ctxt = ["Domain:=", domain]
        if not isinstance(expression, list):
            expression = [expression]
        if not setup_sweep_name:
            setup_sweep_name = self._app.nominal_sweep
        if not report_input_type:
            report_input_type = self._app.design_solutions.report_type
        # None default avoids a shared mutable default argument.
        if families_dict is None:
            families_dict = {"Freq": ["All"]}
        solution_data = self.get_solution_data_per_variation(
            report_input_type, setup_sweep_name, ctxt, families_dict, expression
        )
        if not solution_data:
            warnings.warn("No Data Available. Check inputs")
            return False
        return solution_data
@aedt_exception_handler
def create_rectangular_plot(
self,
expression="dB(S(1,1))",
setup_sweep_name="",
families_dict={"Freq": ["All"]},
primary_sweep_variable="Freq",
context=None,
plotname=None,
report_category=None,
plot_type="Rectangular Plot",
):
"""Create a 2D rectangular plot in AEDT.
Parameters
----------
expression : str or list, optional
One or more formulas to add to the report. The default is value = ``"dB(S(1,1))"``.
setup_sweep_name : str, optional
Setup name with the sweep. The default is ``""``.
families_dict : dict, optional
Dictionary of all families including the primary sweep. The default is ``{"Freq": ["All"]}``.
primary_sweep_variable : str, optional
Name of the primary sweep. The default is ``"Freq"``.
context : str, optional
The default is ``None``.
plotname : str, optional
Name of the plot. The default is ``None``.
report_category : str, optional
Type of the Report to be created. If `None` default data Report will be used
plot_type : str, optional
The format of Data Visualization. Default is ``Rectangular Plot``
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oModule.CreateReport
"""
ctxt = []
if not setup_sweep_name:
setup_sweep_name = self._app.nominal_sweep
if self.post_solution_type in ["HFSS 3D Layout Design", "NexximLNA", "NexximTransient"]:
if "Freq" == primary_sweep_variable or "Freq" in list(families_dict.keys()):
did = 3
else:
did = 1
ctxt = [
"NAME:Context",
"SimValueContext:=",
[did, 0, 2, 0, False, False, -1, 1, 0, 1, 1, "", 0, 0, "IDIID", False, "1"],
]
elif context:
if type(context) is list:
ctxt = context
else:
ctxt = ["Context:=", context]
if not isinstance(expression, list):
expression = [expression]
if not setup_sweep_name:
setup_sweep_name = self._app.nominal_sweep
if not report_category and not self._app.design_solutions.report_type:
self.logger.info("Solution not supported")
return False
if not report_category:
modal_data = self._app.design_solutions.report_type
else:
modal_data = report_category
if not plotname:
plotname = generate_unique_name("Plot")
families_input = []
families_input.append(primary_sweep_variable + ":=")
if not primary_sweep_variable in families_dict:
families_input.append(["All"])
elif isinstance(families_dict[primary_sweep_variable], list):
families_input.append(families_dict[primary_sweep_variable])
else:
families_input.append([families_dict[primary_sweep_variable]])
for el in families_dict:
if el == primary_sweep_variable:
continue
families_input.append(el + ":=")
if isinstance(families_dict[el], list):
families_input.append(families_dict[el])
else:
families_input.append([families_dict[el]])
self.oreportsetup.CreateReport(
plotname,
modal_data,
plot_type,
setup_sweep_name,
ctxt,
families_input,
["X Component:=", primary_sweep_variable, "Y Component:=", expression],
)
self.logger.info("Report %s correctly created.", plotname)
return True
@aedt_exception_handler
def get_solution_data_per_variation(
self, soltype="Far Fields", setup_sweep_name="", ctxt=None, sweeps=None, expression=""
):
"""Retrieve solution data for each variation.
Parameters
----------
soltype : str, optional
Type of the solution. For example, ``"Far Fields"`` or ``"Modal Solution Data"``. The default
is ``"Far Fields"``.
setup_sweep_name : str, optional
Name of the setup for computing the report. The default is ``""``,
in which case ``"nominal adaptive"`` is used.
ctxt : list, optional
List of context variables. The default is ``None``.
sweeps : dict, optional
Dictionary of variables and values. The default is ``None``,
in which case this list is used:
``{'Theta': 'All', 'Phi': 'All', 'Freq': 'All'}``.
expression : str or list, optional
One or more traces to include. The default is ``""``.
Returns
-------
pyaedt.modules.PostProcessor.SolutionData
References
----------
>>> oModule.GetSolutionDataPerVariation
"""
if sweeps is None:
sweeps = {"Theta": "All", "Phi": "All", "Freq": "All"}
if not ctxt:
ctxt = []
if not isinstance(expression, list):
expression = [expression]
if not setup_sweep_name:
setup_sweep_name = self._app.nominal_adaptive
sweep_list = []
for el in sweeps:
sweep_list.append(el + ":=")
if type(sweeps[el]) is list:
sweep_list.append(sweeps[el])
else:
sweep_list.append([sweeps[el]])
data = list(
self.oreportsetup.GetSolutionDataPerVariation(soltype, setup_sweep_name, ctxt, sweep_list, expression)
)
self.logger.info("Solution Data Correctly Loaded.")
return SolutionData(data)
    @aedt_exception_handler
    def steal_focus_oneditor(self):
        """Remove the selection of an object that would prevent the image from exporting correctly.
        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        References
        ----------
        >>> oDesktop.RestoreWindow
        """
        self._desktop.RestoreWindow()
        # Creating and immediately deleting a throwaway non-model sphere
        # forces the editor to drop its current selection.
        param = ["NAME:SphereParameters", "XCenter:=", "0mm", "YCenter:=", "0mm", "ZCenter:=", "0mm", "Radius:=", "1mm"]
        attr = ["NAME:Attributes", "Name:=", "DUMMYSPHERE1", "Flags:=", "NonModel#"]
        self._oeditor.CreateSphere(param, attr)
        self._oeditor.Delete(["NAME:Selections", "Selections:=", "DUMMYSPHERE1"])
        return True
    @aedt_exception_handler
    def export_report_to_file(self, output_dir, plot_name, extension, unique_file=False):
        """Export the 2D Plot data to a file.
        This method leaves the data in the plot (as data) as a reference
        for the Plot after the loops.
        Parameters
        ----------
        output_dir : str
            Path to the directory of exported report
        plot_name : str
            Name of the plot to export.
        extension : str
            Extension of export , one of
                * (CSV) .csv
                * (Tab delimited) .tab
                * (Post processor format) .txt
                * (Ensight XY data) .exy
                * (Ansoft Plot Data) .dat
                * (Ansoft Report Data Files) .rdat
        unique_file : bool
            If set to True, generates unique file in output_dir
        Returns
        -------
        str
            path of exported file
        References
        ----------
        >>> oModule.ExportReportDataToFile
        >>> oModule.ExportToFile
        """
        npath = os.path.normpath(output_dir)
        # Accept both ".csv" and "csv" forms.
        if "." not in extension:
            extension = "." + extension
        supported_ext = [".csv", ".tab", ".txt", ".exy", ".dat", ".rdat"]
        if extension not in supported_ext:
            msg = "Extension {} is not supported. Use one of {}".format(extension, ", ".join(supported_ext))
            raise ValueError(msg)
        file_path = os.path.join(npath, plot_name + extension)
        if unique_file:
            # Keep generating suffixed names until an unused path is found.
            while os.path.exists(file_path):
                file_name = generate_unique_name(plot_name)
                file_path = os.path.join(npath, file_name + extension)
        # .rdat has a dedicated export entry point; every other format goes
        # through the generic ExportToFile call.
        if extension == ".rdat":
            self.oreportsetup.ExportReportDataToFile(plot_name, file_path)
        else:
            self.oreportsetup.ExportToFile(plot_name, file_path)
        return file_path
@aedt_exception_handler
def export_report_to_csv(self, project_dir, plot_name):
    """Export 2D plot data to a CSV file.

    The data is left in the plot (as data) as a reference for the
    plot after the loops.

    Parameters
    ----------
    project_dir : str
        Path to the project directory. The CSV file is named ``plot_name.csv``.
    plot_name : str
        Name of the plot to export.

    Returns
    -------
    str
        Path of the exported file.

    References
    ----------
    >>> oModule.ExportReportDataToFile
    >>> oModule.ExportToFile
    """
    # Thin convenience wrapper: delegate with a fixed ".csv" extension.
    return self.export_report_to_file(project_dir, plot_name, extension=".csv")
@aedt_exception_handler
def export_report_to_jpg(self, project_dir, plot_name):
    """Export the SParameter plot to a JPG file.

    Parameters
    ----------
    project_dir : str
        Path to the project directory.
    plot_name : str
        Name of the plot to export.

    Returns
    -------
    bool
        ``True`` when successful, ``False`` when failed.

    References
    ----------
    >>> oModule.ExportImageToFile
    """
    # Build the destination path <project_dir>/<plot_name>.jpg.
    image_path = os.path.join(os.path.normpath(project_dir), plot_name + ".jpg")
    self.oreportsetup.ExportImageToFile(plot_name, image_path, 0, 0)
    return True
class PostProcessor(PostProcessorCommon, object):
"""Manages the main AEDT postprocessing functions.
The inherited `AEDTConfig` class contains all `_desktop`
hierarchical calls needed for the class inititialization data
`_desktop` and the design types ``"HFSS"``, ``"Icepak"``, and
``"HFSS3DLayout"``.
.. note::
Some functionalities are available only when AEDT is running in
the graphical mode.
Parameters
----------
app : :class:`pyaedt.application.Analsis3D.FieldAnalysis3D`
Inherited parent object. The parent object must provide the members
``_modeler``, ``_desktop``, ``_odesign``, and ``logger``.
Examples
--------
Basic usage demonstrated with an | |
":is_savearea", pos57, pos59),
(val_add, ":var2", 1),
(assign, ":distance", 10),
(val_mul, ":distance", ":var2"),
(le, ":is_savearea", ":distance"),
(assign, ":safe_zone", 1),
(set_trigger_result, 0),
(try_end),
#(else_try), # check if damage should bleed through the armor due to unmet requirements
# (agent_slot_ge, ":attacked_agent_id", slot_agent_armor_damage_through, 5),
# (agent_get_slot, ":damage_through_multiplier", ":attacked_agent_id", slot_agent_armor_damage_through),
# (gt, reg0, -1),
# (item_get_slot, ":damage_through", reg0, slot_item_max_raw_damage),
# (val_mul, ":damage_through", ":damage_through_multiplier"),
# (val_div, ":damage_through", 100),
# (gt, ":damage_through", ":damage_dealt"),
# (store_random_in_range, ":damage_through", ":damage_dealt", ":damage_through"),
# (set_trigger_result, ":damage_through"),
#(try_end),
#GGG:reduce damage
(else_try),
(agent_is_human, ":attacked_agent_id"),
(agent_is_human, ":attacker_agent_id"),
(gt, ":damage_dealt", 0),
(gt, reg0, -1),
#(item_get_type, ":no_ranged", reg0),
#(neq, ":no_ranged", itp_type_bow),
#(neq, ":no_ranged", itp_type_crossbow),
#(neq, ":no_ranged", itp_couchable),
(try_begin),
(agent_get_item_slot, ":attacked_agent_armor", ":attacked_agent_id", 5),
(gt, ":attacked_agent_armor", -1),
(item_get_body_armor, ":body_armor", ":attacked_agent_armor"),
(ge, ":body_armor", 20),
(try_begin),
(eq, ":body_armor", 43),
(assign, ":reduced_damage", 13),
(else_try),
(eq, ":body_armor", 44),
(assign, ":reduced_damage", 16),
(else_try),
(eq, ":body_armor", 45),
(assign, ":reduced_damage", 19),
(else_try),
(eq, ":body_armor", 46),
(assign, ":reduced_damage", 22),
(else_try),
(eq, ":body_armor", 47),
(assign, ":reduced_damage", 25),
(else_try),
(eq, ":body_armor", 48),
(assign, ":reduced_damage", 28),
(else_try),
(is_between, ":body_armor", 49, 55),
(assign, ":reduced_damage", 31),
(else_try),
(ge, ":body_armor", 55),
(assign, ":reduced_damage", 34),
(else_try),
(assign, ":reduced_damage", 10),
(try_end),
#skl_ironflesh
(try_begin),
(agent_get_troop_id, ":attacked_troop", ":attacked_agent_id"),
(store_skill_level, ":ironflesh_level", "skl_ironflesh", ":attacked_troop"),
(val_mul, ":ironflesh_level", 5),
(val_add, ":reduced_damage", ":ironflesh_level"),
(try_end),
(val_mul, ":reduced_damage", ":damage_dealt"),
(val_div, ":reduced_damage", 100),
(val_sub, ":damage_dealt", ":reduced_damage"),
(val_max, ":damage_dealt", 0),
(set_trigger_result, ":damage_dealt"),
(try_end),
(else_try),
#GGG:skl_riding
(neg|agent_is_human, ":attacked_agent_id"),
(agent_is_human, ":attacker_agent_id"),
(agent_get_rider, ":last_rider_agent", ":attacked_agent_id"),
(agent_is_active, ":last_rider_agent"),
(agent_is_alive, ":last_rider_agent"),
(agent_get_troop_id, ":last_rider_troop", ":last_rider_agent"),
(store_skill_level, ":riding_level", "skl_riding", ":last_rider_troop"),
(try_begin),
(store_mul, ":reduced_damage", ":riding_level", 7),
(val_mul, ":reduced_damage", ":damage_dealt"),
(val_div, ":reduced_damage", 100),
(val_sub, ":damage_dealt", ":reduced_damage"),
(val_max, ":damage_dealt", 0),
(set_trigger_result, ":damage_dealt"), #
(else_try),
(set_trigger_result, ":damage_dealt"),
(try_end),
(try_end),
(agent_get_player_id, ":attacker_player_id", ":attacker_agent_id"),
(agent_get_player_id, ":attacked_player_id", ":attacked_agent_id"),
(try_begin),
(neq, ":attacked_agent_id", ":attacker_agent_id"),
(eq, ":safe_zone", 0),
(ge, ":damage_dealt", 15),
(agent_is_active, ":attacked_agent_id"),
(agent_is_active, ":attacker_agent_id"),
(player_is_active, ":attacker_player_id"),
(player_is_active, ":attacked_player_id"),
(assign, ":hostile_faction", 0),
(player_get_slot, ":attack_fac", ":attacker_player_id", slot_player_faction_id),
(player_get_slot, ":defend_fac", ":attacked_player_id", slot_player_faction_id),
(player_get_slot, ":last_attack", ":attacked_player_id", slot_player_last_attacked_by),
(neq, ":last_attack", ":attacker_player_id"),
(try_begin),
(call_script, "script_cf_factions_are_hostile", ":attack_fac", ":defend_fac"),
(assign, ":hostile_faction", 1),
(try_end),
(agent_get_wielded_item, ":wielded_item_id", ":attacker_agent_id", 0),
(neq, ":wielded_item_id", "itm_surgeon_scalpel"),
(this_or_next|eq, ":hostile_faction", 1),
(this_or_next|eq, ":defend_fac", 1),
(eq, ":attack_fac", 1), #outlaw or hostile
(player_set_slot, ":attacker_player_id", slot_player_battle_time, 3), #2 minute break in between hits
(player_set_slot, ":attacked_player_id", slot_player_last_attacked_by, ":attacker_player_id"),
(try_end),
(try_begin),
(agent_get_wielded_item, ":wielded_item_id", ":attacker_agent_id", 0),
(eq, ":wielded_item_id", "itm_surgeon_scalpel"),
(player_is_active, ":attacked_player_id"),
(player_set_slot, ":attacked_player_id", slot_player_battle_time, 0),
(try_end),
#
(try_begin),
(agent_slot_ge, ":attacked_agent_id", slot_agent_animal_birth_time, 1),
(call_script, "script_animal_hit", ":attacked_agent_id", ":attacker_agent_id", ":damage_dealt", reg0),
(try_end),
(try_begin),
(is_between, reg0, scripted_items_begin, scripted_items_end),
(call_script, "script_agent_hit_with_scripted_item", ":attacked_agent_id", ":attacker_agent_id", ":damage_dealt", reg0),
#GGG:lance break system
(else_try),
(eq, 1, 2),
(this_or_next|eq, reg0, "itm_light_lance"),
(this_or_next|eq, reg0, "itm_lance"),
(this_or_next|eq, reg0, "itm_heavy_lance"),
#(this_or_next|eq, reg0, "itm_great_lance"),
(this_or_next|eq, reg0, "itm_thin_lance"),
(eq, reg0, "itm_wand_of_law"),
(call_script, "script_agent_hit_with_scripted_item", ":attacked_agent_id", ":attacker_agent_id", ":damage_dealt", reg0),
(try_end),
(call_script, "script_cf_hit_logging", ":attacked_agent_id", ":attacker_agent_id", ":damage_dealt", reg0),#GGG:debug add reg0
])
# Warband mission-template trigger: fires when an agent picks up an item.
item_picked_up = (ti_on_item_picked_up, 0, 0, [], # handle agents picking up an item
[(store_trigger_param_1, ":agent_id"),
(store_trigger_param_2, ":item_id"),
(store_trigger_param_3, ":instance_id"),
# Recalculate the agent's stat modifiers for the item (trailing args 1, 1).
(call_script, "script_agent_calculate_stat_modifiers_for_item", ":agent_id", ":item_id", 1, 1),
# Everything below runs on the server only.
(multiplayer_is_server),
(call_script, "script_check_on_item_picked_up", ":agent_id", ":item_id", ":instance_id"),
(call_script, "script_cf_pickup", ":agent_id", ":item_id"),
# script_is_skin presumably reports via reg97; when it is a skin item, consume it at once.
(call_script, "script_is_skin", ":item_id"),
(eq, reg97, 1),
(call_script, "script_cf_agent_consume_item", ":agent_id", ":item_id", 1),
])
# Warband mission-template trigger: fires when an agent drops an item.
item_dropped = (ti_on_item_dropped, 0, 0, [], # handle agents dropping an item
[(store_trigger_param_1, ":agent_id"),
(store_trigger_param_2, ":item_id"),
(store_trigger_param_3, ":instance_id"),
# Trailing args (0, 1) vs (1, 1) on pickup — presumably "remove" rather than "apply" modifiers.
(call_script, "script_agent_calculate_stat_modifiers_for_item", ":agent_id", ":item_id", 0, 1),
# Server-side bookkeeping only below this point.
(multiplayer_is_server),
(call_script, "script_check_on_item_dropped", ":agent_id", ":item_id", ":instance_id", 0),
(call_script, "script_cf_dropd", ":agent_id", ":item_id"),
])
# Warband mission-template trigger: fires when an agent wields an item.
item_wielded = (ti_on_item_wielded, 0, 0, [], # handle agents wielding an item
[(store_trigger_param_1, ":agent_id"),
(store_trigger_param_2, ":item_id"),
# Trailing args (1, 1) — presumably applies the item's stat modifiers; runs on all machines.
(call_script, "script_agent_calculate_stat_modifiers_for_item", ":agent_id", ":item_id", 1, 1),
])
# Warband mission-template trigger: fires when an agent puts an item away.
item_unwielded = (ti_on_item_unwielded, 0, 0, [], # handle agents un-wielding an item
[(store_trigger_param_1, ":agent_id"),
(store_trigger_param_2, ":item_id"),
# Trailing args (0, 1) — presumably removes the item's stat modifiers (mirror of item_wielded).
(call_script, "script_agent_calculate_stat_modifiers_for_item", ":agent_id", ":item_id", 0, 1),
])
# Warband mission-template trigger: fires when an agent mounts a horse.
agent_mount = (ti_on_agent_mount, 0, 0, [], # server: check speed factor and attached carts when agents mount a horse
[(store_trigger_param_1, ":agent_id"),
(store_trigger_param_2, ":horse_agent_id"),
# Remember the rider<->horse pairing on both agents (runs on all machines).
(agent_set_slot, ":horse_agent_id", slot_agent_horse_last_rider, ":agent_id"),
(agent_set_slot, ":agent_id", slot_agent_last_horse_ridden, ":horse_agent_id"),
(multiplayer_is_server),
(call_script, "script_check_agent_horse_speed_factor", ":agent_id", ":horse_agent_id", 0),
(try_begin),
# If the mount is a "skin" horse that does not match the player's stored horse
# skin, kill it and respawn the player with their own equipment in place.
(agent_get_player_id, ":player_id", ":agent_id"),
(player_is_active, ":player_id"),
(agent_get_item_id, ":item_id", ":horse_agent_id"),
(call_script, "script_is_skin", ":item_id"),
(eq, reg97, 1),
(player_get_slot, ":stored_horse", ":player_id", slot_player_horse_skin),
(neq, ":item_id", ":stored_horse"),
(agent_set_hit_points, ":horse_agent_id", 1, 1),
(agent_deliver_damage_to_agent, ":horse_agent_id", ":horse_agent_id", 100), #kill horse if not yours
(player_get_troop_id, ":troop_id", ":player_id"),
(call_script, "script_player_add_equipped_items", ":player_id", ":troop_id"),
(call_script, "script_player_add_spawn_items", ":player_id", 1),
(call_script, "script_player_respawn_in_place", ":player_id"),
(else_try),
# Otherwise try to attach a cart (if any) to the mounting agent.
(call_script, "script_cf_attach_cart", ":agent_id", -1, ":agent_id"),
(try_end),
# Log the mount event with the player's username.
(agent_get_player_id, ":player_id", ":agent_id"),
(str_store_player_username, s1, ":player_id"),
(server_add_message_to_log, "@{s1} mounted a horse"),
])
# Warband mission-template trigger: fires when an agent dismounts a horse.
agent_dismount = (ti_on_agent_dismount, 0, 0, [], # server: make horses stand still after being dismounted from
[(store_trigger_param_1, ":agent_id"),
(store_trigger_param_2, ":horse_agent_id"),
# Remember the rider<->horse pairing on both agents (runs on all machines).
(agent_set_slot, ":horse_agent_id", slot_agent_horse_last_rider, ":agent_id"),
(agent_set_slot, ":agent_id", slot_agent_last_horse_ridden, ":horse_agent_id"),
(multiplayer_is_server),
# Pin the horse to its current position so it does not wander off.
(agent_get_position, pos1, ":horse_agent_id"),
(agent_set_scripted_destination, ":horse_agent_id", pos1, 0),
# Log the dismount event with the player's username.
(agent_get_player_id, ":player_id", ":agent_id"),
(str_store_player_username, s1, ":player_id"),
(server_add_message_to_log, "@{s1} dismounted a horse"),
(player_set_slot, ":player_id", slot_player_equip_horse, -1), #horse glitch fix
])
# Server loop (every 60 s): accumulate the unique ids of players who have
# battle time left into one URL, decrement each player's battle-time counter,
# notify them, then POST the whole batch to the external "combatshard" API.
player_combat_check_loop = (0, 0, 60, [],
#(send_message_to_url, "@http://api.persistentworld.cn/server/logip/?guid={reg44}&ip={s77}&ApiKey={s99}"),
[(multiplayer_is_server),
# Build the base API URL from the configured address and key strings.
(str_store_string, s98, "str_api_address"),
(str_store_string, s99, "str_api_key"),
(str_store_string, s98, "@{s98}combatshard/?ApiKey={s99}&guids=0,"),
(try_for_players, ":curPlayer", 1),
(player_get_slot, ":combat_minutes", ":curPlayer", slot_player_battle_time),
(ge, ":combat_minutes", 1),
# Append this player's unique id to the comma-separated guid list.
(player_get_unique_id, reg99, ":curPlayer"),
(str_store_string, s98, "@{s98}{reg99},"),
(val_sub, ":combat_minutes", 1),
(player_set_slot, ":curPlayer", slot_player_battle_time, ":combat_minutes"),
(multiplayer_send_string_to_player, ":curPlayer", server_event_local_chat, "@You have gained 1 X shard for fighting"),
(try_end),
#(server_add_message_to_log, s98),
(send_message_to_url, s98),
])
# Server loop (every 0.5 s): walk the player list incrementally (resuming from
# $g_loop_player_id) handling deferred kicks, spawn checks, and periodic
# outlaw-rating decay without stalling other triggers.
player_check_loop = (0, 0, 0.5, # server: check all players to see if any need agents spawned, also periodically lowering outlaw ratings
[(multiplayer_is_server),
(store_mission_timer_a, ":time"),
(get_max_players, ":max_players"),
(assign, ":loop_end", ":max_players"),
(try_for_range, ":player_id", "$g_loop_player_id", ":loop_end"), # continue from the last player id checked
(player_is_active, ":player_id"),
(player_get_slot, ":kick_at_time", ":player_id", slot_player_kick_at_time),
(try_begin), # kick after an interval if rejected by the name server
(gt, ":kick_at_time", 0),
(try_begin),
(ge, ":time", ":kick_at_time"),
(player_set_is_admin, ":player_id", 0),
(kick_player, ":player_id"),
(try_end),
(else_try),
# Players that are dead or invulnerable may need an agent spawned.
(try_begin),
(this_or_next|player_slot_eq, ":player_id", slot_player_spawn_state, player_spawn_state_dead),
(player_slot_eq, ":player_id", slot_player_spawn_state, player_spawn_state_invulnerable),
(call_script, "script_cf_player_check_spawn_agent", ":player_id"),
(assign, ":loop_end", -1), # if the spawn checks were run, end the loop to give other triggers a chance to run, then immediately continue
(store_add, "$g_loop_player_id", ":player_id", 1),
(try_end),
#GGG:outlaw rating
# Tick the player's outlaw rating down by 1 when the decay flag is set.
(try_begin),
(eq, "$g_loop_player_check_outlaw", 1),
(player_get_slot, ":outlaw_rating", ":player_id", slot_player_outlaw_rating),
(try_begin),
(ge, ":outlaw_rating", 1),
(call_script, "script_player_change_check_outlaw_rating", ":player_id", -1, 0),
(try_end),
(try_end),
#
(try_end),
(try_end),
(eq, ":loop_end", ":max_players"), # if all players were checked, the trigger will succeed and wait the rearm interval before checking again
(assign, "$g_loop_player_id", 1), # go back to the start (player id 0 is the server)
#GGG:outlaw rating
(try_begin), # only decrease outlaw ratings at certain intervals, not every time
(ge, ":time", "$g_loop_player_check_outlaw_time"),
(val_add, "$g_loop_player_check_outlaw_time", loop_player_check_outlaw_interval),
(assign, "$g_loop_player_check_outlaw", 1),
(else_try),
(assign, "$g_loop_player_check_outlaw", 0),
(try_end),
], [])
# NOTE(review): the drowning check is fully disabled — the entire body is
# commented out, so this trigger fires every second and does nothing.
drowningCheck = (0, 0, 1, [], # disabled drowning check (body commented out)
[ #(multiplayer_is_server),
#(try_for_agents, ":cur_agent", 1),
# (call_script, "script_check_agent_drowning", ":cur_agent"),
# (try_end),
])
agent_check_loop = (0, 0, 0.5, # server: loop over all agents, doing all common repetitive checks together for each agent, to minimize the penalty of using try_for_agents
[(multiplayer_is_server),
(try_begin), # if the loop was not restarted
(gt, "$g_loop_agent_last_checked", -2),
(assign, ":agent_id", -1),
#(assign, ":server_players", 0), ###GGG
(try_for_agents, ":loop_agent_id"), # find the next agent id greater than the previous checked
#(call_script, "script_check_agent_drowning", ":loop_agent_id"),
###GGG:no global chat
#(agent_get_player_id, ":player_id", ":loop_agent_id"),
#(player_is_active, ":player_id"),
#(val_add, ":server_players", 1),
#(try_begin),
# (ge, ":server_players", 2),
# (neg|player_slot_eq, ":player_id", slot_player_no_global_chat, 2),
# (neg|player_is_admin, ":player_id"),
# (player_set_slot, ":player_id", slot_player_no_global_chat, 1),
# (player_set_is_muted, ":player_id", 1, 1),
#(else_try),
# (player_set_slot, ":player_id", slot_player_no_global_chat, 0),
# (player_set_is_muted, ":player_id", 0, 1),
#(try_end),
#(try_begin),
# (player_slot_eq, ":player_id", slot_player_no_global_chat, 2),
# (player_set_is_muted, ":player_id", 0, 1),
#(try_end),
###
(eq, ":agent_id", -1),
(gt, ":loop_agent_id", "$g_loop_agent_last_checked"),
(assign, ":agent_id", ":loop_agent_id"),
###GGG:hunger system
#(try_begin),
# (agent_get_troop_id, ":troop_id", ":agent_id"),
# (neq, ":troop_id", "trp_serf"),
# (neq, ":troop_id", "trp_huntsman"),
# (neq, ":troop_id", "trp_peasant"),
# (neq, ":troop_id", "trp_craftsman"),
# (neq, ":troop_id", "trp_healer"),
# (neq, ":troop_id", "trp_engineer"),
# (neq, ":troop_id", "trp_herdsman"),
# (neq, ":troop_id", "trp_master_smith"),
# (neq, ":troop_id", "trp_doctor"),
# (neq, ":troop_id", "trp_temp_troop"),
# (neq, ":troop_id", "trp_godlike_hero"),
# (try_begin),
# (agent_get_slot, ":food_amount", ":agent_id", slot_agent_food_amount),
# (lt, ":food_amount", max_food_amount),
# (val_add, ":food_amount", 50),
# (val_min, ":food_amount", max_food_amount),
# (assign, ":modifier_number", ":food_amount"),
# (agent_set_damage_modifier, ":agent_id", ":modifier_number"),
# (agent_set_reload_speed_modifier, ":agent_id", ":modifier_number"),
# (agent_set_accuracy_modifier, ":agent_id", ":modifier_number"),
# (try_end),
# ###GGG:thirst system
# (try_begin),
# (agent_get_slot, ":water_amount", ":agent_id", slot_agent_water_amount),
# (lt, ":water_amount", max_food_amount),
# (val_add, ":water_amount", 50),
# (val_min, ":water_amount", max_food_amount),
# (assign, ":modifier_number", ":water_amount"),
# (agent_set_speed_modifier, ":agent_id", ":modifier_number"),
# (try_end),
#(try_end),
###
###GGG:new hunger and thirst system
# (call_script, "script_agent_calculate_stat_modifiers_for_item", ":agent_id", ":item_id", 1, 1),
# (assign, "$g_extra_item_details_damage", reg11),
# (assign, "$g_extra_item_details_speed", reg12),
#(try_begin),
# (agent_get_slot, ":food_amount", ":agent_id", slot_agent_food_amount),
# (lt, ":food_amount", max_food_amount),
# (val_min, ":food_amount", max_food_amount),
# | |
# filename: traffic_control/tests/test_additional_sign_real_api.py (gh_stars: 1-10)
import datetime
import pytest
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from rest_framework import status
from rest_framework_gis.fields import GeoJsonDict
from traffic_control.models import AdditionalSignContentReal, AdditionalSignReal
from .factories import (
add_additional_sign_real_operation,
get_additional_sign_content_real,
get_additional_sign_real,
get_api_client,
get_owner,
get_traffic_control_device_type,
get_traffic_sign_real,
get_user,
)
from .test_base_api_3d import test_point_2_3d
# AdditionalSignReal tests
# ===============================================
@pytest.mark.parametrize("geo_format", ("", "geojson"))
@pytest.mark.django_db
def test__additional_sign_real__list(geo_format):
    """List endpoint returns every sign with its first content id and location."""
    client = get_api_client()
    for owner_name in ("foo", "bar", "baz"):
        sign = get_additional_sign_real(owner=get_owner(name_fi=owner_name))
        get_additional_sign_content_real(parent=sign)

    response = client.get(
        reverse("v1:additionalsignreal-list"), data={"geo_format": geo_format}
    )

    payload = response.json()
    assert response.status_code == status.HTTP_200_OK
    assert payload["count"] == 3
    for result in payload["results"]:
        sign = AdditionalSignReal.objects.get(pk=result["id"])
        assert result["content"][0]["id"] == str(sign.content.first().pk)
        # Location serialization depends on the requested geo_format.
        if geo_format == "geojson":
            assert result["location"] == GeoJsonDict(sign.location.json)
        else:
            assert result["location"] == sign.location.ewkt
@pytest.mark.parametrize("geo_format", ("", "geojson"))
@pytest.mark.django_db
def test__additional_sign_real__detail(geo_format):
    """Detail endpoint returns the sign, its content, and date-ordered operations."""
    client = get_api_client()
    sign = get_additional_sign_real()
    content = get_additional_sign_content_real(parent=sign)
    # Created deliberately out of date order to exercise the ordering.
    op_nov_05 = add_additional_sign_real_operation(
        sign, operation_date=datetime.date(2020, 11, 5)
    )
    op_nov_15 = add_additional_sign_real_operation(
        sign, operation_date=datetime.date(2020, 11, 15)
    )
    op_nov_10 = add_additional_sign_real_operation(
        sign, operation_date=datetime.date(2020, 11, 10)
    )

    response = client.get(
        reverse("v1:additionalsignreal-detail", kwargs={"pk": sign.pk}),
        data={"geo_format": geo_format},
    )

    payload = response.json()
    assert response.status_code == status.HTTP_200_OK
    assert payload["id"] == str(sign.pk)
    assert payload["parent"] == str(sign.parent.pk)
    assert payload["content"][0]["id"] == str(content.pk)
    # Operations must be ordered by operation_date, not by creation order.
    returned_operation_ids = [op["id"] for op in payload["operations"]]
    assert returned_operation_ids == [op_nov_05.id, op_nov_10.id, op_nov_15.id]
    if geo_format == "geojson":
        assert payload["location"] == GeoJsonDict(sign.location.json)
    else:
        assert payload["location"] == sign.location.ewkt
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__create_without_content(admin_user):
    """POST without content data must not raise validation errors.

    Admins get 201 and a sign with zero content rows; non-admins get 403
    and nothing is created.
    """
    client = get_api_client(user=get_user(admin=admin_user))
    parent_sign = get_traffic_sign_real()
    payload = {
        "parent": parent_sign.pk,
        "location": str(parent_sign.location),
        "owner": get_owner().pk,
    }

    response = client.post(reverse("v1:additionalsignreal-list"), data=payload)
    response_data = response.json()

    if not admin_user:
        assert response.status_code == status.HTTP_403_FORBIDDEN
        assert AdditionalSignReal.objects.count() == 0
        assert AdditionalSignContentReal.objects.count() == 0
        return

    assert response.status_code == status.HTTP_201_CREATED
    assert AdditionalSignReal.objects.count() == 1
    assert AdditionalSignContentReal.objects.count() == 0
    created = AdditionalSignReal.objects.first()
    assert response_data["id"] == str(created.pk)
    assert response_data["parent"] == str(payload["parent"])
    assert response_data["owner"] == str(payload["owner"])
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__create_with_content(admin_user):
    """POST with inline content creates AdditionalSignContentReal rows in order."""
    client = get_api_client(user=get_user(admin=admin_user))
    parent_sign = get_traffic_sign_real()
    device_type = get_traffic_control_device_type()
    payload = {
        "parent": parent_sign.pk,
        "location": str(parent_sign.location),
        "owner": get_owner().pk,
        "content": [
            {"text": "Test content 1", "order": 1, "device_type": str(device_type.pk)},
            {"text": "Test content 2", "order": 2, "device_type": str(device_type.pk)},
        ],
    }

    response = client.post(reverse("v1:additionalsignreal-list"), data=payload)
    response_data = response.json()

    if not admin_user:
        assert response.status_code == status.HTTP_403_FORBIDDEN
        assert AdditionalSignReal.objects.count() == 0
        assert AdditionalSignContentReal.objects.count() == 0
        return

    assert response.status_code == status.HTTP_201_CREATED
    assert AdditionalSignReal.objects.count() == 1
    created = AdditionalSignReal.objects.first()
    assert response_data["id"] == str(created.pk)
    assert response_data["parent"] == str(payload["parent"])
    assert response_data["owner"] == str(payload["owner"])
    assert AdditionalSignContentReal.objects.count() == 2
    first_content = created.content.order_by("order").first()
    assert first_content.text == "Test content 1"
    assert first_content.order == 1
    assert first_content.device_type.pk == device_type.pk
    second_content = created.content.order_by("order").last()
    assert second_content.text == "Test content 2"
    assert second_content.order == 2
    assert second_content.device_type.pk == device_type.pk
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__create_with_content_id(admin_user):
    """POST must reject content entries that carry a pre-existing "id".

    Pre-existing content instances cannot be assigned to a newly created
    additional sign; non-admin users are rejected outright.
    """
    client = get_api_client(user=get_user(admin=admin_user))
    parent_sign = get_traffic_sign_real()
    device_type = get_traffic_control_device_type()
    existing_content = get_additional_sign_content_real(device_type=device_type)
    payload = {
        "parent": parent_sign.pk,
        "location": str(parent_sign.location),
        "owner": get_owner().pk,
        "content": [
            {
                "id": str(existing_content.pk),
                "text": "Test content",
                "order": 1,
                "device_type": str(device_type.pk),
            }
        ],
    }

    response = client.post(reverse("v1:additionalsignreal-list"), data=payload)
    response_data = response.json()

    # Any sign other than the one owning the pre-existing content would be new.
    new_sign = AdditionalSignReal.objects.exclude(pk=existing_content.parent.pk).first()
    if admin_user:
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert response_data == {
            "content": [
                {
                    "id": [
                        (
                            "Creating new additional sign with pre-existing "
                            "content instance is not allowed. Content objects "
                            'must not have "id" defined.'
                        )
                    ]
                }
            ]
        }
    else:
        assert response.status_code == status.HTTP_403_FORBIDDEN
    assert not new_sign
    assert AdditionalSignContentReal.objects.count() == 1
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__create_with_incomplete_data(admin_user):
    """POST with content missing "device_type" must fail validation."""
    client = get_api_client(user=get_user(admin=admin_user))
    parent_sign = get_traffic_sign_real()
    payload = {
        "parent": parent_sign.pk,
        "location": str(parent_sign.location),
        "owner": get_owner().pk,
        "content": [{"text": "Test content", "order": 1}],
    }

    response = client.post(reverse("v1:additionalsignreal-list"), data=payload)
    response_data = response.json()

    if admin_user:
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert response_data == {
            "content": [{"device_type": [_("This field is required.")]}]
        }
    else:
        assert response.status_code == status.HTTP_403_FORBIDDEN
    # Nothing may be created in either case.
    assert AdditionalSignReal.objects.count() == 0
    assert AdditionalSignContentReal.objects.count() == 0
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__update_without_content(admin_user):
    """PUT without content must succeed and delete the old content rows."""
    client = get_api_client(user=get_user(admin=admin_user))
    device_type = get_traffic_control_device_type(code="A1234")
    sign = get_additional_sign_real()
    get_additional_sign_content_real(parent=sign)
    parent_sign = get_traffic_sign_real(device_type=device_type)
    payload = {
        "parent": parent_sign.pk,
        "location": str(parent_sign.location),
        "owner": get_owner(name_en="New owner").pk,
    }
    assert AdditionalSignContentReal.objects.count() == 1

    response = client.put(
        reverse("v1:additionalsignreal-detail", kwargs={"pk": sign.pk}), data=payload
    )
    response_data = response.json()

    if admin_user:
        assert response.status_code == status.HTTP_200_OK
        assert response_data["id"] == str(sign.pk)
        assert response_data["owner"] == str(payload["owner"])
        # PUT is a full replacement: omitted content wipes the existing rows.
        assert AdditionalSignContentReal.objects.count() == 0
    else:
        assert response.status_code == status.HTTP_403_FORBIDDEN
        assert sign.owner != payload["owner"]
        assert AdditionalSignContentReal.objects.count() == 1
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__update_with_content(admin_user):
    """PUT with id-less content must replace the old content with a new row."""
    client = get_api_client(user=get_user(admin=admin_user))
    device_type = get_traffic_control_device_type(code="A1234")
    sign = get_additional_sign_real()
    original_content = get_additional_sign_content_real(parent=sign)
    parent_sign = get_traffic_sign_real(device_type=device_type)
    payload = {
        "parent": parent_sign.pk,
        "location": str(parent_sign.location),
        "owner": get_owner().pk,
        "content": [
            {"text": "New content", "order": 123, "device_type": str(device_type.pk)}
        ],
    }

    response = client.put(
        reverse("v1:additionalsignreal-detail", kwargs={"pk": sign.pk}), data=payload
    )
    response_data = response.json()
    sign.refresh_from_db()

    if not admin_user:
        assert response.status_code == status.HTTP_403_FORBIDDEN
        assert sign.owner != payload["owner"]
        assert sign.content.count() == 1
        original_content.refresh_from_db()
        assert original_content.parent == sign
        return

    assert response.status_code == status.HTTP_200_OK
    assert response_data["id"] == str(sign.pk)
    assert response_data["owner"] == str(payload["owner"])
    replacement = sign.content.first()
    returned_content = response_data["content"][0]
    assert returned_content["id"] == str(replacement.pk)
    assert returned_content["text"] == "New content"
    assert returned_content["order"] == 123
    # The pre-existing content instance must be gone, not merely detached.
    assert not AdditionalSignContentReal.objects.filter(pk=original_content.pk).exists()
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__update_with_content_id(admin_user):
    """
    Test that AdditionalSignReal API endpoint PUT request updates
    AdditionalSignContent instances successfully when id is defined.

    Non-admin users must get a 403 and the content text must stay unchanged.
    """
    client = get_api_client(user=get_user(admin=admin_user))
    dt = get_traffic_control_device_type(code="A1234")
    asr = get_additional_sign_real()
    ascr = get_additional_sign_content_real(parent=asr)
    traffic_sign_real = get_traffic_sign_real(device_type=dt)
    data = {
        "parent": traffic_sign_real.pk,
        "location": str(traffic_sign_real.location),
        "owner": get_owner().pk,
        "content": [
            {
                "id": str(ascr.pk),
                "text": "Updated content",
                "order": 100,
                "device_type": str(dt.pk),
            }
        ],
    }
    response = client.put(
        reverse("v1:additionalsignreal-detail", kwargs={"pk": asr.pk}), data=data
    )
    response_data = response.json()
    asr.refresh_from_db()
    ascr.refresh_from_db()
    if admin_user:
        assert response.status_code == status.HTTP_200_OK
        assert response_data["id"] == str(asr.pk)
        assert response_data["owner"] == str(data["owner"])
        content = response_data["content"][0]
        assert content["id"] == str(ascr.pk)
        assert content["text"] == "Updated content"
        assert content["order"] == 100
    else:
        assert response.status_code == status.HTTP_403_FORBIDDEN
        assert asr.owner != data["owner"]
        # BUG FIX: the request sends text "Updated content"; the old comparison
        # against the never-used literal "Updated text" was vacuously true and
        # could not detect an unauthorized update.
        assert ascr.text != "Updated content"
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__update_with_unrelated_content_id(admin_user):
    """
    Test that AdditionalSignReal API endpoint PUT request raises
    validation error if content is not related to the parent
    AdditionalSignReal.

    Non-admin users must get a 403 and nothing may change.
    """
    client = get_api_client(user=get_user(admin=admin_user))
    dt = get_traffic_control_device_type(code="A1234")
    asr = get_additional_sign_real()
    # Content that belongs to a *different* additional sign.
    ascr = get_additional_sign_content_real(
        parent=get_additional_sign_real(location=test_point_2_3d)
    )
    traffic_sign_real = get_traffic_sign_real(device_type=dt)
    data = {
        "parent": traffic_sign_real.pk,
        "location": str(traffic_sign_real.location),
        "owner": get_owner().pk,
        "content": [
            {
                "id": str(ascr.pk),
                "text": "Updated content",
                "order": 100,
                "device_type": str(dt.pk),
            }
        ],
    }
    response = client.put(
        reverse("v1:additionalsignreal-detail", kwargs={"pk": asr.pk}), data=data
    )
    response_data = response.json()
    asr.refresh_from_db()
    ascr.refresh_from_db()
    if admin_user:
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert response_data == {
            "content": [
                {
                    "id": [
                        (
                            "Updating content instances that do not belong to "
                            "this additional sign is not allowed."
                        )
                    ]
                }
            ]
        }
        assert ascr.parent != asr
    else:
        assert response.status_code == status.HTTP_403_FORBIDDEN
        assert asr.owner != data["owner"]
        # BUG FIX: the attempted text is "Updated content"; the old comparison
        # against the never-used literal "Updated text" was vacuously true.
        assert ascr.text != "Updated content"
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__partial_update_without_content(admin_user):
    """PATCH without content must succeed and keep the old content rows."""
    client = get_api_client(user=get_user(admin=admin_user))
    device_type = get_traffic_control_device_type(code="A1234")
    sign = get_additional_sign_real()
    get_additional_sign_content_real(parent=sign)
    parent_sign = get_traffic_sign_real(device_type=device_type)
    payload = {
        "parent": parent_sign.pk,
        "location": str(parent_sign.location),
        "owner": get_owner(name_en="New owner").pk,
    }
    assert AdditionalSignContentReal.objects.count() == 1

    response = client.patch(
        reverse("v1:additionalsignreal-detail", kwargs={"pk": sign.pk}), data=payload
    )
    response_data = response.json()
    sign.refresh_from_db()

    # Unlike PUT, a partial update leaves unspecified content untouched.
    assert AdditionalSignContentReal.objects.count() == 1
    assert sign.content.exists()
    if admin_user:
        assert response.status_code == status.HTTP_200_OK
        assert response_data["id"] == str(sign.pk)
        assert response_data["owner"] == str(payload["owner"])
    else:
        assert response.status_code == status.HTTP_403_FORBIDDEN
        assert sign.owner != payload["owner"]
        assert AdditionalSignContentReal.objects.count() == 1
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__partial_update_with_content(admin_user):
"""
Test that AdditionalSignReal API endpoint PATCH request replaces
AdditionalSignContentReal instances when content does not have
id defined. A new content instance should be created.
"""
client = get_api_client(user=get_user(admin=admin_user))
dt = get_traffic_control_device_type(code="A1234")
asr = get_additional_sign_real()
original_ascr = get_additional_sign_content_real(parent=asr)
traffic_sign_real = get_traffic_sign_real(device_type=dt)
data = {
"parent": traffic_sign_real.pk,
"location": str(traffic_sign_real.location),
"owner": get_owner().pk,
"content": [{"text": "New content", "order": 123, "device_type": str(dt.pk)}],
}
response = client.patch(
reverse("v1:additionalsignreal-detail", kwargs={"pk": asr.pk}), data=data
)
| |
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Copyright (c) 2018, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by <NAME>, <EMAIL>.
LLNL-CODE-755518.
All rights reserved.
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Dispatch algorithm for grid services
<NAME> <EMAIL> 925-423-8982
Read device round trip efficincy (eff), ISO energy price (price), elasticity (strike),
and charge state at midnight (charge[0]). Compute optimal value for pairs of
charge-discharge timesteps [i,j].
-*- coding: utf-8 -*-
"""
import numpy as np
import matplotlib.pyplot as plt
import csv
#from numba import jit
#from numpy import arange

# --- Constants ---
INF = 999999     # sentinel "infinite" penalty used to forbid infeasible schedules
timesteps = 288  # number of dispatch periods in a day (5-minute resolution)
nChg = 3         # timesteps required to charge or discharge (< timesteps/2)
tFull = 500      # time by which the system must have full charge

# --- Read input data from csv files ---
# ISO energy prices, one value per timestep.
price = np.genfromtxt('CAISO Apr 2016.csv', delimiter = ',' )
#print('Prices (price):', price)

# Round trip efficiency and strike for pairs of charge & discharge hours.
# eff and strike are square matrices of dimensions [timesteps, timesteps]:
# elements on the diagonal and above are for charge in timestep i and
# discharge in j; elements below the diagonal are for discharge in timestep
# i and charge in j.
eff = np.genfromtxt('Round trip efficiency.csv', delimiter = ',' )
#print('Round trip efficiency (eff):', eff)

# Charge state - for this implementation just use the state at period 0.
charged = np.genfromtxt('Charge state off.csv', delimiter = ',' )
#print('Charge state:', charged)

# Price elasticity (strike price).
strike = np.genfromtxt('Strike price.csv', delimiter = ',' )
#print('Strike:', strike)

# --- Construct profit and value tables ---
# profit[i, j] = -price[i] (buy while charging at i) + eff[i, j] * price[j]
# (sell while discharging at j).
# Profit depends upon the order of charge and discharge:
# if charge period i < discharge period j it must be discharged at midnight;
# if charge period i > discharge period j it must be charged at midnight.
# Vectorized (broadcasting) equivalent of the original element-wise double
# loop over [i, j]; slicing pins the result to [timesteps, timesteps].
profit = (-price[:timesteps, np.newaxis]
          + eff[:timesteps, :timesteps] * price[np.newaxis, :timesteps])

# value[i, j] = profit[i, j] - strike[i, j]
value = profit - strike[:timesteps, :timesteps]
def plotCurve(curve, title, xLabel, yLabel):
    """Plot a single time series across the day's timesteps.

    Args:
        curve: sequence of length ``timesteps`` with one value per period.
        title: legend label for the curve.
        xLabel: x-axis label.
        yLabel: y-axis label.
    """
    hour = range(timesteps)
    plt.plot(hour, curve, label = title)
    plt.legend()
    plt.xlabel(xLabel)
    plt.ylabel(yLabel)
    # Pass the visibility flag positionally: the keyword was renamed
    # b -> visible in Matplotlib 3.5 and the old name removed in 3.6.
    plt.grid(True, which='both')
    plt.show()
def plotHeatMap(matrix, xLabel, yLabel, scaleLabel):
    """Render ``matrix`` as a pseudocolor heat map with a labelled colorbar.

    Args:
        matrix: 2-D array indexed [charge time, discharge time].
        xLabel: x-axis label.
        yLabel: y-axis label.
        scaleLabel: label attached to the colorbar.
    """
    # plt.get_cmap replaces matplotlib.cm.get_cmap, which was deprecated
    # in Matplotlib 3.7 and removed in 3.9.
    cmap = plt.get_cmap('RdYlGn')
    nx, ny = np.shape(matrix)
    cs = plt.pcolor(matrix, cmap=cmap)
    cb = plt.colorbar(cs, orientation='vertical')
    cb.set_label(scaleLabel)
    plt.xlim(0, nx)
    plt.ylim(0, ny)
    plt.xlabel(xLabel)
    plt.ylabel(yLabel)
    plt.grid(True)
    plt.show()
    return
def oneCycleNPer(INF, nChg, value, startT, stopT, tFull):
    """Find the best single charge/discharge cycle inside [startT, stopT].

    One daily cycle with nChg-period contiguous charge and discharge events;
    array indices refer to the FIRST period of a charge or discharge event.
    Reads the module-level globals ``profit`` and ``charged`` and writes
    ``charged[stopT]`` as a side effect.

    Args:
        INF: large sentinel used as an "infinite" penalty.
        nChg: number of contiguous timesteps needed to (dis)charge.
        value: [timesteps, timesteps] table of profit minus strike.
        startT: first timestep of the window (inclusive).
        stopT: last timestep of the window.
        tFull: timestep by which the system must be fully charged.

    Returns:
        (chargeMax, dischargeMax, maxValue, charged[stopT]) where the two
        times are absolute (window offset added back in).

    NOTE(review): if no candidate pair beats -INF, ``chargeMax`` and
    ``dischargeMax`` are never assigned and the return raises
    UnboundLocalError — confirm the inputs guarantee a feasible pair.
    """
    nDim = stopT - startT
    print('nDim = ', nDim)
    # Dimension matrices and fill with NaNs.
    nProfit = np.full([nDim, nDim], np.nan)
    nValue = np.full([nDim, nDim], np.nan)
    # Compute profit array for charge period and discharge period lagged by
    # nChg or more time periods.
    if charged[startT] == 0: # Discharged at start of period
        for i in range(nDim - 2*nChg + 2):
            for j in range (i + nChg, nDim - nChg + 2):
                # Accumulate profit/value over the nChg contiguous periods
                # of the charge event (offset i) and discharge event (offset j).
                nProfit[i, j] = profit[startT + i, startT + j]
                nValue[i, j] = value[startT + i, startT + j]
                for k in range(1, nChg):
                    nProfit[i, j] = nProfit[i , j] + profit[startT + i + k, startT + j + k]
                    nValue[i, j] = nValue[i, j] + value[startT + i + k, startT + j + k]
                # Forbid charging too late, after time tFull.
                if i > (tFull - nChg):
                    nValue[i,j] = - INF
                # Forbid discharging after tFull once charged.
                # NOTE(review): comment in the original said "charged then
                # discharged before time tFull" — confirm intended direction.
                if j > tFull - nChg:
                    nValue[i,j] = - INF
        # Find charge-discharge hours with maximum value for single cycle.
        maxValue = -INF
        for i in range(nDim):
            for j in range(i + nChg, nDim - nChg + 1):
                if nValue[i,j] > maxValue:
                    maxValue = nValue[i,j]
                    chargeMax = i
                    dischargeMax = j
                    # NOTE(review): charge-then-discharge ends the window
                    # discharged, yet the end state is set to 1 (charged),
                    # and only when a new maximum is found — looks like a
                    # bug; confirm intended end-of-window state.
                    charged[stopT] = 1
    else: # Charged at start of period (just change i and j indices in loops)
        # Below-diagonal entries (i > j): discharge first at offset j,
        # charge later at offset i.
        for j in range(nDim - 2*nChg + 2):
            for i in range (j + nChg, nDim - nChg + 2):
                nProfit[i, j] = profit[startT + i, startT + j]
                nValue[i, j] = value[startT + i, startT + j]
                for k in range(1, nChg):
                    nProfit[i, j] = nProfit[i , j] + profit[startT + i + k, startT + j + k]
                    nValue[i, j] = nValue[i, j] + value[startT + i + k, startT + j + k]
                # Forbid pairs discharged before tFull but recharged only
                # after tFull (system must be full at tFull).
                if j > tFull:
                    if i > (tFull - nChg):
                        nValue[i,j] = - INF
        # Find charge-discharge hours with maximum value for single cycle.
        maxValue = -INF
        for j in range(nDim):
            for i in range(j + nChg, nDim - nChg + 1):
                if nValue[i,j] > maxValue:
                    maxValue = nValue[i,j]
                    chargeMax = i
                    dischargeMax = j
                    charged[stopT] = 1
    return(chargeMax + startT, dischargeMax + startT, maxValue, charged[stopT])
def _report_cycle(label, startT, stopT, chargeMax, dischargeMax, maxValue):
    """Print the solution summary for one optimisation run (same text as before)."""
    print('startT = ', startT, 'stopT = ', stopT)
    print('Charge state at beginning of period = ', charged[startT])
    print('Charge state at end of period = ', charged[stopT])
    print(label, nChg, 'timesteps to charge')
    print('Max value = ', maxValue, '$/MWh')
    print('Charge time = ', chargeMax)
    print('Discharge time = ', dischargeMax)
    print()


def _write_dispatch(cycle_name, startT, stopT, maxValue, chargeMax, dischargeMax):
    """Append the dispatch order for one cycle to dispatchOrders.csv.

    Uses a context manager so the file handle is always closed (the
    original re-opened the file three times and closed it once).
    """
    header = [cycle_name + ' output:, timesteps, startT, stopT, charged[startT], nChg, tFull, maxValue, chargeMax, dischargeMax \n']
    data = [str(timesteps), '\n', str(startT), '\n', str(stopT), '\n', str(charged[startT]), '\n',
            str(nChg), '\n', str(tFull), '\n', str(maxValue), '\n', str(chargeMax), '\n', str(dischargeMax), '\n']
    with open('dispatchOrders.csv', 'a') as dispatch:
        dispatch.writelines(header)
        dispatch.writelines(data)


# Visualise the inputs and the derived profit/value tables.
plotCurve(price, 'CAISO Price Mar 2016', 'time', '$/MWh')
plotHeatMap(eff, 'Discharge time', 'Charge time', 'Round Trip efficiency (%)')
plotHeatMap(strike, 'Discharge time', 'Charge time', 'Strike price-elasticity ($/MWh)')
plotHeatMap(profit, 'Discharge time', 'Charge time', 'Energy arbitrage profit ($/MWh)')
plotHeatMap(value, 'Discharge time', 'Charge time', 'Value = Profit - Strike price ($/MWh)')

# --- Single daily cycle over the whole horizon ---
startT = 0
stopT = timesteps - 1
chargeMax, dischargeMax, maxValue, charged[stopT] = oneCycleNPer(INF, nChg, value, startT, stopT, tFull)
_report_cycle('Solution for single daily cycle ', startT, stopT, chargeMax, dischargeMax, maxValue)
_write_dispatch('Single cycle', startT, stopT, maxValue, chargeMax, dischargeMax)

# --- Two cycles per day: first half of the horizon ---
startT = 0
stopT = int(timesteps/2) - 1
chargeMax, dischargeMax, maxValue, charged[stopT] = oneCycleNPer(INF, nChg, value, startT, stopT, tFull)
_report_cycle('Solution for single cycle ', startT, stopT, chargeMax, dischargeMax, maxValue)
_write_dispatch('First cycle', startT, stopT, maxValue, chargeMax, dischargeMax)

# --- Second half, starting from the first half's final charge state ---
startT = int(timesteps/2)
charged[startT] = charged[stopT]
stopT = timesteps - 1
chargeMax, dischargeMax, maxValue, charged[stopT] = oneCycleNPer(INF, nChg, value, startT, stopT, tFull)
_report_cycle('Solution for single cycle ', startT, stopT, chargeMax, dischargeMax, maxValue)
_write_dispatch('Second cycle', startT, stopT, maxValue, chargeMax, dischargeMax)

# Phase 1.1:
# Make data files to illustrate 5-minute dispatch. Include some structure like
# varying round trip efficiency and strike prices to reflect consumer behavior
# such as no discharge before morning or evening consumer demands for hot
# water or PEV readiness.
# Phase 2:
# Build API to interface with devices - get round trip efficiency and charge
# time, and return dispatch orders.
# Phase 3:
# Build mixed integer optimization model with parameters specified by device
# modelers and interface to open source MIP solver.
# Allow non-integer charge state at time 0.
# tests/test_generators.py (deeposlandia)
"""Unit test related to the generator building and feeding
"""
import pytest
import numpy as np
from deeposlandia import generator, utils
def test_feature_detection_labelling_concise():
    """Check `generator.feature_detection_labelling` on a concise labelling.

    Concise means every declared label is represented in the input array:
    * passing string label ids must raise a ValueError
    * the output shape must be (batch size, number of labels)
    * the one-hot output must carry the same information as the raw array
    """
    batch = np.array(
        [[[[10, 10, 200], [10, 10, 200], [10, 10, 200]],
          [[200, 200, 200], [200, 200, 200], [10, 10, 200]],
          [[200, 200, 200], [200, 200, 200], [200, 200, 200]]],
         [[[10, 200, 10], [10, 200, 10], [10, 10, 200]],
          [[200, 10, 10], [10, 200, 10], [10, 10, 200]],
          [[10, 200, 10], [200, 10, 10], [10, 10, 200]]]])
    distinct_colors = np.unique(batch.reshape(-1, 3), axis=0).tolist()
    palette = ([10, 10, 200], [200, 10, 10], [10, 200, 10], [200, 200, 200])
    string_id_config = [{'id': str(rank), 'color': color, 'is_evaluate': True}
                        for rank, color in enumerate(palette)]
    with pytest.raises(ValueError):
        generator.feature_detection_labelling(batch, string_id_config)
    valid_config = [{'id': rank, 'color': color, 'is_evaluate': True}
                    for rank, color in enumerate(palette)]
    onehot = generator.feature_detection_labelling(batch, valid_config)
    assert onehot.shape == (batch.shape[0], len(distinct_colors))
    assert onehot.tolist() == [[True, False, False, True],
                               [True, True, True, False]]
def test_feature_detection_labelling_sparse():
    """Check `generator.feature_detection_labelling` on a sparse labelling.

    Sparse means the input array contains colors that belong to no declared
    label (mimicking the non-evaluated label situations):
    * passing string label ids must raise a ValueError
    * the label list is shorter than the range of values in the array
    * the one-hot output keeps the shape (batch size, number of labels)
    """
    batch = np.array(
        [[[[10, 10, 200], [10, 10, 200], [10, 10, 200], [200, 10, 10]],
          [[200, 200, 200], [200, 200, 200], [10, 10, 200], [200, 10, 10]],
          [[200, 200, 200], [200, 200, 200], [200, 200, 200], [10, 10, 200]],
          [[200, 200, 200], [200, 200, 200], [200, 200, 200], [10, 10, 200]]],
         [[[200, 10, 10], [200, 10, 10], [10, 200, 10], [200, 10, 10]],
          [[200, 200, 200], [10, 200, 10], [10, 200, 10], [10, 200, 10]],
          [[200, 10, 10], [200, 10, 10], [200, 10, 10], [200, 200, 200]],
          [[200, 10, 10], [200, 10, 10], [10, 200, 10], [200, 200, 200]]]])
    evaluated_colors = np.unique(batch.reshape(-1, 3), axis=0).tolist()[:-1]
    palette = ([10, 10, 200], [200, 10, 10], [10, 200, 10])
    string_id_config = [{'id': str(rank), 'color': color, 'is_evaluate': True}
                        for rank, color in enumerate(palette)]
    with pytest.raises(ValueError):
        generator.feature_detection_labelling(batch, string_id_config)
    valid_config = [{'id': rank, 'color': color, 'is_evaluate': True}
                    for rank, color in enumerate(palette)]
    onehot = generator.feature_detection_labelling(batch, valid_config)
    assert len(evaluated_colors) != np.amax(batch) - np.amin(batch) + 1
    assert onehot.tolist() == [[True, True, False],
                               [False, True, True]]
    assert onehot.shape == (batch.shape[0], len(evaluated_colors))
def test_featdet_mapillary_generator(mapillary_image_size,
                                     mapillary_sample,
                                     mapillary_sample_config,
                                     nb_channels):
    """Check the feature-detection data generator on the Mapillary dataset."""
    batch_size = 10
    dataset_config = utils.read_config(mapillary_sample_config)
    labels = dataset_config["labels"]
    label_ids = [label['id'] for label in labels]
    data_gen = generator.create_generator(
        "mapillary", "feature_detection", mapillary_sample,
        mapillary_image_size, batch_size, labels)
    batch = next(data_gen)
    assert len(batch) == 2
    images, targets = batch
    assert images.shape == (batch_size, mapillary_image_size,
                            mapillary_image_size, nb_channels)
    assert targets.shape == (batch_size, len(label_ids))
def test_featdet_shape_generator(shapes_image_size, shapes_sample, shapes_sample_config, nb_channels):
    """Check the feature-detection data generator on the shapes dataset."""
    batch_size = 10
    dataset_config = utils.read_config(shapes_sample_config)
    labels = dataset_config["labels"]
    label_ids = [label['id'] for label in labels]
    data_gen = generator.create_generator(
        "shapes", "feature_detection", shapes_sample,
        shapes_image_size, batch_size, labels)
    batch = next(data_gen)
    assert len(batch) == 2
    images, targets = batch
    assert images.shape == (batch_size, shapes_image_size,
                            shapes_image_size, nb_channels)
    assert targets.shape == (batch_size, len(label_ids))
def test_semantic_segmentation_labelling_concise():
    """Test `semantic_segmentation_labelling` function in `generator` module by considering a
    concise labelling, *i.e.* the labels correspond to array values

    * as a preliminary verification, check if passing string labels raises a ValueError
    exception
    * test if output shape is input shape + an additional dimension given by the
    `label_ids` length
    * test if both representation provides the same information (native array on the
    first hand and its one-hot version on the second hand)
    """
    a = np.array([[[[200, 10, 10], [200, 10, 10], [200, 200, 200]],
                   [[200, 200, 200], [200, 200, 200], [200, 10, 10]],
                   [[200, 200, 200], [200, 200, 200], [200, 200, 200]]],
                  [[[200, 10, 10], [200, 10, 10], [10, 10, 200]],
                   [[10, 200, 10], [10, 200, 10], [10, 10, 200]],
                   [[200, 10, 10], [200, 10, 10], [10, 10, 200]]]])
    labels = np.unique(a.reshape(-1, 3), axis=0).tolist()
    wrong_config = [{'id': '0', 'color': [10, 10, 200], 'is_evaluate': True},
                    {'id': '1', 'color': [200, 10, 10], 'is_evaluate': True},
                    {'id': '2', 'color': [10, 200, 10], 'is_evaluate': True},
                    {'id': '3', 'color': [200, 200, 200], 'is_evaluate': True}]
    # Removed an unused `asum = np.histogram(...)` leftover that was never read.
    with pytest.raises(ValueError):
        generator.semantic_segmentation_labelling(a, wrong_config)
    config = [{'id': 0, 'color': [10, 10, 200], 'is_evaluate': True},
              {'id': 1, 'color': [200, 10, 10], 'is_evaluate': True},
              {'id': 2, 'color': [10, 200, 10], 'is_evaluate': True},
              {'id': 3, 'color': [200, 200, 200], 'is_evaluate': True}]
    b = generator.semantic_segmentation_labelling(a, config)
    assert b.shape == (a.shape[0], a.shape[1], a.shape[2], len(labels))
    assert b.tolist() == [[[[False, True, False, False],
                            [False, True, False, False],
                            [False, False, False, True]],
                           [[False, False, False, True],
                            [False, False, False, True],
                            [False, True, False, False]],
                           [[False, False, False, True],
                            [False, False, False, True],
                            [False, False, False, True]]],
                          [[[False, True, False, False],
                            [False, True, False, False],
                            [True, False, False, False]],
                           [[False, False, True, False],
                            [False, False, True, False],
                            [True, False, False, False]],
                           [[False, True, False, False],
                            [False, True, False, False],
                            [True, False, False, False]]]]
def test_semantic_segmentation_labelling_sparse():
    """Test `semantic_segmentation_labelling` function in `generator` module by considering a
    sparse labelling, *i.e.* the array contains unknown values (to mimic the non-evaluated label
    situations)

    * as a preliminary verification, check if passing string labels raises a ValueError
    exception
    * test if output shape is input shape + an additional dimension given by the
    `label_ids` length
    * test if both representation provides the same information (native array on the
    first hand and its one-hot version on the second hand)
    """
    a = np.array([[[[200, 10, 10], [200, 10, 10], [200, 200, 200]],
                   [[200, 200, 200], [200, 200, 200], [200, 10, 10]],
                   [[200, 200, 200], [100, 100, 100], [200, 200, 200]]],
                  [[[200, 10, 10], [200, 10, 10], [10, 10, 200]],
                   [[200, 200, 200], [100, 100, 100], [10, 10, 200]],
                   [[200, 10, 10], [200, 10, 10], [10, 10, 200]]]])
    # Removed an unused `asum = np.histogram(...)` leftover that was never read.
    wrong_config = [{'id': '0', 'color': [10, 10, 200], 'is_evaluate': True},
                    {'id': '2', 'color': [10, 200, 10], 'is_evaluate': True},
                    {'id': '3', 'color': [200, 200, 200], 'is_evaluate': True}]
    with pytest.raises(ValueError):
        generator.semantic_segmentation_labelling(a, wrong_config)
    config = [{'id': 0, 'color': [10, 10, 200], 'is_evaluate': True},
              {'id': 2, 'color': [10, 200, 10], 'is_evaluate': True},
              {'id': 3, 'color': [200, 200, 200], 'is_evaluate': True}]
    labels = [item["id"] for item in config]
    b = generator.semantic_segmentation_labelling(a, config)
    assert len(labels) != np.amax(a) - np.amin(a) + 1
    assert b.shape == (a.shape[0], a.shape[1], a.shape[2], len(labels))
    assert b.tolist() == [[[[False, False, False],
                            [False, False, False],
                            [False, False, True]],
                           [[False, False, True],
                            [False, False, True],
                            [False, False, False]],
                           [[False, False, True],
                            [False, False, False],
                            [False, False, True]]],
                          [[[False, False, False],
                            [False, False, False],
                            [True, False, False]],
                           [[False, False, True],
                            [False, False, False],
                            [True, False, False]],
                           [[False, False, False],
                            [False, False, False],
                            [True, False, False]]]]
def test_semseg_mapillary_generator(mapillary_image_size,
mapillary_sample,
mapillary_sample_config,
nb_channels):
"""Test the data generator for the Mapillary dataset
"""
BATCH_SIZE = 10
config = utils.read_config(mapillary_sample_config)
label_ids = [x['id'] for x in config["labels"]]
gen = generator.create_generator("mapillary", "semantic_segmentation",
mapillary_sample,
mapillary_image_size,
BATCH_SIZE, config["labels"])
item = next(gen)
assert(len(item)==2)
im_shape = item[0].shape
assert im_shape == (BATCH_SIZE, mapillary_image_size, mapillary_image_size, nb_channels)
| |
"slack": "https://openmoneyico.slack.com",
"telegram": "https://t.me/joinchat/FDNbh0M079p5fnfOHFEJaw",
"twitter": "https://twitter.com/OpenPlatformICO",
"youtube": ""
}
},
"ZEUS": {
"symbol": "ZEUS",
"name": "ZeusNetwork",
"type": "ERC20",
"address": "0xe7E4279b80D319EDe2889855135A22021baf0907",
"ens_address": "",
"decimals": 18,
"website": "https://zeusfundme.com/",
"logo": {
"src": "https://zeusfundme.com/wp-content/uploads/2018/10/website-logo-e1540768468436.png",
"width": "626",
"height": "313",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/ZEUS-coin",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "http://t.me/zeuscrowdfundingofficial",
"twitter": "http://twitter.com/network_zeus",
"youtube": ""
}
},
"3LT": {
"symbol": "3LT",
"type": "ERC20",
"address": "0x430241368c1D293fdA21DBa8Bb7aF32007c59109",
"decimals": 8,
"name": "TrillionToken",
"ens_address": "",
"website": "https://3lt.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"LOCUS": {
"symbol": "LOCUS",
"address": "0xC64500DD7B0f1794807e67802F8Abbf5F8Ffb054",
"decimals": 18,
"name": "<NAME>",
"ens_address": "",
"website": "https://www.locuschain.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": " <EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"CDL": {
"symbol": "CDL",
"address": "0x8a95ca448A52C0ADf0054bB3402dC5e09CD6B232",
"decimals": 18,
"name": "Confideal",
"ens_address": "",
"website": "https://confideal.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/confideal.io",
"forum": "",
"github": "https://github.com/confideal",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/11040676",
"reddit": "https://www.reddit.com/r/Confideal",
"slack": "",
"telegram": "https://t.me/confidealioeng",
"twitter": "https://twitter.com/confideal_io",
"youtube": ""
}
},
"SET": {
"symbol": "SET",
"address": "0xe06eda7435bA749b047380CEd49121ddE93334Ae",
"decimals": 0,
"name": "SET",
"ens_address": "",
"website": "http://sydeth.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/SydEthereum/meetup-token#meetup-token",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://chat.sydeth.com",
"twitter": "https://twitter.com/sydethereum",
"youtube": ""
}
},
"SCRL": {
"symbol": "SCRL",
"name": "SCRL",
"type": "ERC20",
"address": "0x24DCc881E7Dd730546834452F21872D5cb4b5293",
"ens_address": "",
"decimals": 18,
"website": "https://www.scroll.network",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@solutech.scrolla",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/OfficialScroll",
"youtube": ""
}
},
"TRDT": {
"symbol": "TRDT",
"name": "<NAME>",
"type": "ERC20",
"address": "0x33f90Dee07c6E8B9682dD20F73E6C358B2ED0f03",
"ens_address": "",
"decimals": 0,
"website": "https://www.tridentgroup.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@TrustTheTrident",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/TrustTheTrident",
"youtube": ""
}
},
"DIP": {
"symbol": "DIP",
"address": "0xc719d010B63E5bbF2C0551872CD5316ED26AcD83",
"decimals": 18,
"name": "Decentralized Insurance Protocol",
"ens_address": "",
"website": "",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"ECP": {
"symbol": "ECP",
"name": "ECrypto Coin",
"type": "ERC20",
"address": "0x8869b1F9bC8B246a4D7220F834E56ddfdd8255E7",
"ens_address": "",
"decimals": 18,
"website": "https://ecryptotokens.com",
"logo": {
"src": "https://ecryptotokens.com/images/resources/logo.png",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/ecryptopayOffical",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"FTI": {
"symbol": "FTI",
"name": "FansTime",
"type": "ERC20",
"address": "0x943ED852DadB5C3938ECdC6883718df8142DE4C8",
"ens_address": "",
"decimals": 18,
"website": "https://fanstime.org",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/FansTime_FTI",
"youtube": ""
}
},
"LATX": {
"symbol": "LATX",
"name": "LatiumX",
"type": "ERC20",
"address": "0x2f85E502a988AF76f7ee6D83b7db8d6c0A823bf9",
"ens_address": "",
"decimals": 8,
"website": "https://latium.org",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/Latium",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/LatiumCoin",
"youtube": ""
}
},
"BCDN": {
"symbol": "BCDN",
"address": "0x1e797Ce986C3CFF4472F7D38d5C4aba55DfEFE40",
"decimals": 15,
"name": "BlockCDN",
"ens_address": "",
"website": "https://www.blockcdn.org",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/Blockcdnteam",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/BlockCDN",
"youtube": ""
}
},
"SXDT": {
"symbol": "SXDT",
"address": "0x12B306fA98F4CbB8d4457FdFf3a0A0a56f07cCdf",
"decimals": 18,
"name": "Spectre.ai D-Token",
"ens_address": "",
"website": "http://www.spectre.ai",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/teamspectreai",
"chat": "",
"facebook": "https://www.facebook.com/spectrepage",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://t.me/joinchat/GjkGkw7IfwUVuPiWxctD4g",
"twitter": "https://twitter.com/SpectreAI",
"youtube": ""
}
},
"FLIXX": {
"symbol": "FLIXX",
"address": "0xf04a8ac553FceDB5BA99A64799155826C136b0Be",
"decimals": 18,
"name": "Flixxo",
"ens_address": "",
"website": "https://www.flixxo.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": "<EMAIL>"
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/Flixxo",
"slack": "",
"telegram": "t.me/flixxo",
"twitter": "https://twitter.com/flixxo",
"youtube": ""
}
},
"XOV": {
"symbol": "XOV",
"name": "XOVBank",
"type": "ERC20",
"address": "0x153eD9CC1b792979d2Bde0BBF45CC2A7e436a5F9",
"ens_address": "",
"decimals": 18,
"website": "http://www.xov.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/Xovercoin",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/xovercoin",
"youtube": ""
}
},
"CARD": {
"symbol": "CARD",
"address": "0x1ed2B1eaEd8e968bc36EB90a914660A71827A5E9",
"decimals": 0,
"name": "<NAME>",
"ens_address": "",
"website": "https://cardstack.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/cardstack",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/cardstack",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://telegram.me/cardstack",
"twitter": "https://twitter.com/cardstack",
"youtube": ""
}
},
"PMA": {
"symbol": "PMA",
"name": "PumaPay",
"type": "ERC20",
"address": "0x846C66cf71C43f80403B51fE3906B3599D63336f",
"ens_address": "",
"decimals": 18,
"website": "https://pumapay.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://blog.pumapay.io/",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/PumaPay",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/PumaPay",
"youtube": ""
}
},
"SKIN": {
"symbol": "SKIN",
"address": "0x2bDC0D42996017fCe214b21607a515DA41A9E0C5",
"decimals": 6,
"name": "SKIN",
"ens_address": "",
"website": "https://skincoin.org",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/Steamtradenet/smart-contract",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"DGX1": {
"symbol": "DGX1",
"address": "0x55b9a11c2e8351b4Ffc7b11561148bfaC9977855",
"decimals": 9,
"name": "Digix Gold Token 1.0",
"ens_address": "",
"website": "https://digix.global",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/DigixGlobal",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"CDX": {
"symbol": "CDX",
"address": "0x6fFF3806Bbac52A20e0d79BC538d527f6a22c96b",
"decimals": 18,
"name": "Commodity Ad Network",
"ens_address": "",
"website": "https://cdxnet.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://commodityadnetwork.com/contact"
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"SSP": {
| |
# udacity-full-stack catalog application
from redis import Redis
from oauth2client import client, crypt
import time
import sys
import os
from functools import update_wrapper
from flask import request, g, make_response
from flask import Flask, jsonify, render_template
from models import User, Category, Image, Item, engine
from models import uploaded_images
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine, func
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.exc import DataError
from sqlalchemy.exc import InternalError
from sqlalchemy.exc import IntegrityError
from sqlalchemy.exc import InvalidRequestError
from flask import session as login_session
from flask_uploads import configure_uploads, UploadNotAllowed
import random
import string
# Module-level Redis client.
# NOTE(review): no use of ``redis`` is visible in this part of the file —
# presumably rate limiting or caching; confirm against the rest of the app.
redis = Redis()
# Single module-level SQLAlchemy session shared by every request handler.
db_session = sessionmaker()
db_session.configure(bind=engine)
session = db_session()
app = Flask(__name__)
# Configure image upload destination
app.config['UPLOADED_IMAGES_DEST'] = 'static/item_images'
app.config['UPLOADED_IMAGES_URL'] = 'item_images/'
configure_uploads(app, uploaded_images)
def Response(response=None, error=None, response_code=200):
    """Build a (json_body, status_code) tuple for a Flask view.

    Args:
        response: JSON-serializable success payload. Defaults to an
            empty dict.
        error: optional error payload; when set it replaces ``response``
            as the body.
        response_code: HTTP status code. A default 200 combined with a
            set ``error`` is coerced to 500 so errors are never reported
            as success.

    Returns:
        Tuple of (flask Response from ``jsonify``, int status code).
    """
    # Avoid the shared mutable default-argument pitfall (was ``response={}``).
    if response is None:
        response = {}
    if response_code == 200 and error:
        response_code = 500
    return (jsonify(error if error else response), response_code)
def generate_state():
    """Return a 32-character random anti-forgery session-state token."""
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(32))
def requires_authentication(f):
    """Decorator rejecting requests from users who are not logged in."""
    def decorator(*args, **kwargs):
        # Guard clause: pass straight through for authenticated sessions.
        if 'userid' in login_session:
            return f(*args, **kwargs)
        return Response(
            error='Request requires authentication', response_code=401)
    return update_wrapper(decorator, f)
def requires_state_consistency(f):
    """Decorator that checks if the request state parameter is correct.

    Compares the ``state`` request header against the anti-forgery token
    stored in the session; absent or mismatching values are rejected
    with 401.
    """
    def decorator(*args, **kwargs):
        # Use .get() for the session lookup so a session without a stored
        # state token is rejected with 401 instead of raising KeyError
        # (which would surface as a 500 to the client).
        if ('state' not in request.headers)\
                or (request.headers['state'] != login_session.get('state')):
            return Response(error='Invalid state parameter',
                            response_code=401)
        return f(*args, **kwargs)
    return update_wrapper(decorator, f)
def check_authorization(userid):
    """Return True when the currently logged-in user matches *userid*."""
    return 'userid' in login_session and login_session['userid'] == userid
def get_user(email):
    """Searches and returns the user that corresponds to the given email.

    Returns:
        The matching ``User`` row, or ``None`` when no user exists or
        the query fails.
    """
    try:
        return session.query(User).filter(
            User.email == email).one()
    except NoResultFound:
        return None
    except Exception:
        # Keep the best-effort contract (None on failure) but log the
        # unexpected error instead of swallowing it silently; the
        # original had two handlers that both returned None.
        print("Unexpected error:", sys.exc_info()[0])
        return None
def add_user(email, name):
    """Creates user entry.

    Returns:
        The new ``User`` on success, ``None`` on failure.
    """
    try:
        new_user = User(email=email, name=name)
        session.add(new_user)
        session.commit()
        return new_user
    except IntegrityError:
        session.rollback()
        return None
    except InvalidRequestError as e:
        print(e)
        # Roll back so the shared session stays usable for later
        # requests (the original only rolled back on IntegrityError).
        session.rollback()
        return None
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        session.rollback()
        return None
def get_categories(id=None):
    """If id is set, return the category matching the id; otherwise
    return all category entries."""
    if id is None:
        # Serialize every category; an empty table yields Response([]).
        all_entries = [category.tojson()
                       for category in session.query(Category).all()]
        return Response(all_entries)
    try:
        match = session.query(Category).filter(Category.id == id).one()
        result = Response(match.tojson())
    except NoResultFound:
        result = Response([])
    except:
        result = Response(
            error='Unknown error',
            response_code=500)
    return result
@requires_authentication
@requires_state_consistency
def add_category(name):
    """Creates a category entry owned by the logged-in user.

    Returns:
        200 with the new category JSON on success, 400 on invalid
        parameters, 500 on unexpected failure.
    """
    owner_id = login_session['userid']
    try:
        newCategory = Category(name=name, owner_id=owner_id)
        session.add(newCategory)
        session.commit()
        return Response(newCategory.tojson())
    except IntegrityError:
        session.rollback()
        return Response(
            error='Invalid new category parameters. '
            '(name: {}, owner_id: {})'.format(name, owner_id),
            response_code=400)
    except InvalidRequestError as e:
        print(e)
        # Roll back so the shared session stays usable after failure.
        session.rollback()
        return Response(
            error='Failed to add new category',
            response_code=400)
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        session.rollback()
        return Response(
            error='Failed to add new category',
            response_code=500)
@requires_authentication
@requires_state_consistency
def delete_category(id):
    """Deletes the category that matches the given id.

    Only the category owner may delete it; other users get 401.
    """
    try:
        category = session.query(Category).filter(Category.id == id).one()
        if not check_authorization(category.owner_id):
            # BUG FIX: was ``Result(...)`` — an undefined name that
            # raised NameError instead of returning the 401 response.
            return Response(
                error='Permission denied',
                response_code=401)
        session.delete(category)
        session.commit()
        return Response('Success')
    except DataError:
        return Response(
            error='Category id is not an integer',
            response_code=400)
    except NoResultFound:
        return Response(
            error='There is no category that corresponds '
            'to the specified id. (id: {})'.format(id),
            response_code=400)
    except InternalError as e:
        print(e)
        return Response(
            error='Failed to delete category with id: {}'.format(id),
            response_code=500)
    except IntegrityError as e:
        print(e)
        session.rollback()
        return Response(
            error='Failed to delete category with id: {}'.format(id),
            response_code=400)
    except InvalidRequestError as e:
        print(e)
        return Response(
            error='Failed to delete category with id: {}'.format(id),
            response_code=400)
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        return Response(
            error='Failed to delete category with id: {}'.format(id),
            response_code=500)
def get_items(id=None):
    """If id is set, return the item matching the id; otherwise return
    all item entries."""
    if id is None:
        # Serialize every item; an empty table yields Response([]).
        all_entries = [entry.tojson()
                       for entry in session.query(Item).all()]
        return Response(all_entries)
    try:
        match = session.query(Item).filter(Item.id == id).one()
        result = Response(match.tojson())
    except NoResultFound:
        result = Response([])
    except:
        result = Response(
            error='Unknown error',
            response_code=500)
    return result
@requires_authentication
@requires_state_consistency
def add_item(name, category_id, description=None, image_id=None):
    """Creates an item entry owned by the logged-in user.

    The referenced category — and image, when given — must also belong
    to the logged-in user; otherwise a 401 response is returned.
    """
    owner_id = login_session['userid']
    # This ensures that the category set is also owned by the user
    try:
        category = session.query(Category)\
            .filter(Category.id == category_id).one()
    except NoResultFound:
        return Response(
            error='Invalid category id',
            response_code=400)
    except Exception:
        return Response(
            error='Unknown error',
            response_code=500)
    if not check_authorization(category.owner_id):
        # BUG FIX: was ``Result(...)`` — undefined name (NameError).
        return Response(
            error='Permission denied for given category id.',
            response_code=401)
    # This ensures that the image set (if set) is also owned by the user
    if image_id:
        try:
            image = session.query(Image)\
                .filter(Image.id == image_id).one()
        except NoResultFound:
            return Response(
                error='Invalid image id',
                response_code=400)
        except Exception:
            return Response(
                error='Unknown error',
                response_code=500)
        if not check_authorization(image.owner_id):
            # BUG FIX: was ``Result(...)`` — undefined name (NameError).
            return Response(
                error='Permission denied for given image id.',
                response_code=401)
    try:
        newItem = Item(
            name=name,
            description=description,
            owner_id=owner_id,
            category_id=category_id,
            image_id=image_id)
        session.add(newItem)
        session.commit()
        return Response(newItem.tojson())
    except IntegrityError as e:
        print(e)
        session.rollback()
        return Response(
            error='Invalid new item parameters. '
            '(name: {}, '
            'description: {}, '
            'category_id: {}, '
            'owner_id: {}, '
            'image_id: {})'
            .format(name,
                    description,
                    category_id,
                    owner_id,
                    image_id),
            response_code=400)
    except InvalidRequestError as e:
        print(e)
        # Roll back so the shared session stays usable after failure.
        session.rollback()
        return Response(
            error='Failed to add new item',
            response_code=400)
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        session.rollback()
        return Response(
            error='Failed to add new item',
            response_code=500)
@requires_authentication
@requires_state_consistency
def update_item(
        id,
        name=None,
        category_id=None,
        description=None,
        image_id=None):
    """Updates an item entry (owner only).

    Only fields that are provided and actually differ are written; a
    changed category or image must also be owned by the logged-in user.
    """
    try:
        target = session.query(Item).filter(Item.id == id).one()
    except NoResultFound:
        return Response(error='No result found',
                        response_code=400)
    except Exception:
        return Response(
            error='Unknown error',
            response_code=500)
    else:
        if not check_authorization(target.owner_id):
            # BUG FIX: was ``Result(...)`` — undefined name (NameError).
            return Response(
                error='Permission denied',
                response_code=401)
        updated = False
        if name and (target.name != name):
            target.name = name
            updated = True
        if category_id and (target.category_id != category_id):
            # This ensures that the category set is also owned by the user
            try:
                category = session.query(Category)\
                    .filter(Category.id == category_id).one()
            except NoResultFound:
                return Response(
                    error='Invalid category id',
                    response_code=400)
            except Exception:
                return Response(
                    error='Unknown error',
                    response_code=500)
            if not check_authorization(category.owner_id):
                # BUG FIX: was ``Result(...)`` — undefined name.
                return Response(
                    error='Permission denied for given category id.',
                    response_code=401)
            target.category_id = category_id
            updated = True
        if description and (target.description != description):
            target.description = description
            updated = True
        if image_id and (target.image_id != image_id):
            # This ensures that the image set
            # (if set) is also owned by the user
            try:
                image = session.query(Image)\
                    .filter(Image.id == image_id).one()
            except NoResultFound:
                return Response(
                    error='Invalid image id',
                    response_code=400)
            except Exception:
                return Response(
                    error='Unknown error',
                    response_code=500)
            if not check_authorization(image.owner_id):
                # BUG FIX: was ``Result(...)`` — undefined name.
                return Response(
                    error='Permission denied for given image id.',
                    response_code=401)
            target.image_id = image_id
            updated = True
        if updated:
            # Refresh the timestamp so listings can sort by recency.
            target.created_on = func.now()
            try:
                session.add(target)
                session.commit()
                return Response('Success')
            except IntegrityError as e:
                print(e)
                session.rollback()
                return Response(
                    error='Invalid new item parameters. '
                    '(name: {}, '
                    'description: {}, '
                    'category_id: {}, '
                    'image_id: {})'
                    .format(name, description, category_id, image_id),
                    response_code=400)
            except InvalidRequestError as e:
                print(e)
                # Roll back so the shared session stays usable.
                session.rollback()
                return Response(
                    error='Failed to update item',
                    response_code=400)
            except Exception:
                print("Unexpected error:", sys.exc_info()[0])
                session.rollback()
                return Response(
                    error='Failed to update item',
                    response_code=500)
        else:
            return Response('Item has not changed')
@requires_authentication
@requires_state_consistency
def delete_item(id):
    """Deletes the item that matches the given id (owner only).

    NOTE(review): unlike ``delete_image`` this does not remove any
    associated image file from disk (the original fetched
    ``target.image`` into an unused local) — confirm whether file
    cleanup was intended here.
    """
    try:
        target = session.query(Item).filter(Item.id == id).one()
        if not check_authorization(target.owner_id):
            # BUG FIX: was ``Result(...)`` — undefined name (NameError).
            return Response(
                error='Permission denied',
                response_code=401)
        session.delete(target)
        session.commit()
        return Response('Success')
    except DataError:
        return Response(
            error='Item id is not an integer',
            response_code=400)
    except NoResultFound:
        return Response(
            error='There is no item that corresponds '
            'to the specified id. (id: {})'.format(id),
            response_code=400)
    except InternalError as e:
        print(e)
        return Response(
            error='Failed to delete item with id: {}'.format(id),
            response_code=500)
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        return Response(
            error='Failed to delete item with id: {}'.format(id),
            response_code=500)
@requires_authentication
@requires_state_consistency
def add_image(path):
    """Create an image entry owned by the logged-in user."""
    owner_id = login_session['userid']
    try:
        newimage = Image(path=path, owner_id=owner_id)
        session.add(newimage)
        session.commit()
        return Response(newimage.tojson())
    except IntegrityError as e:
        print(e)
        session.rollback()
        return Response(
            error='Invalid new image parameter. (path: {})'
            .format(path),
            response_code=400)
    except InvalidRequestError as e:
        print(e)
        # Roll back so the shared session stays usable after failure.
        session.rollback()
        return Response(
            error='Failed to add new image',
            response_code=400)
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        session.rollback()
        return Response(
            error='Failed to add new image',
            response_code=500)
@requires_authentication
@requires_state_consistency
def update_image(id, path=None):
    """Updates image path entry.

    It also deletes the image file associated with the old path.
    """
    if not path:
        return Response(error='Missing path parameter',
                        response_code=400)
    try:
        target = session.query(Image).filter(Image.id == id).one()
    except NoResultFound:
        return Response(error='No result found',
                        response_code=400)
    except Exception:
        return Response(
            error='Unknown error',
            response_code=500)
    else:
        if not check_authorization(target.owner_id):
            # BUG FIX: was ``Result(...)`` — undefined name (NameError).
            return Response(
                error='Permission denied',
                response_code=401)
        if path == target.path:
            return Response('Item has not changed')
        # Remove the old file from disk before repointing the row.
        file_path = uploaded_images.path(target.path)
        try:
            os.remove(file_path)
        except OSError as e:
            # A missing/undeletable old file should not block the
            # metadata update (previously this crashed the request).
            print(e)
        target.path = path
        try:
            session.add(target)
            session.commit()
            return Response('Success')
        except IntegrityError as e:
            print(e)
            session.rollback()
            return Response(
                error='Invalid new image parameters. (path: {})'
                .format(path),
                response_code=400)
        except InvalidRequestError as e:
            print(e)
            # Roll back so the shared session stays usable.
            session.rollback()
            return Response(
                error='Failed to update image',
                response_code=400)
        except Exception:
            print("Unexpected error:", sys.exc_info()[0])
            session.rollback()
            return Response(
                error='Failed to update image',
                response_code=500)
@requires_authentication
@requires_state_consistency
def delete_image(id):
"""Deletes image and image file that matches given id."""
try:
target = session.query(Image).filter(Image.id == id).one()
if not check_authorization(target.owner_id):
return Result(
error='Permission denied',
response_code=401)
file_path = uploaded_images.path(target.path)
os.remove(file_path)
session.delete(target)
session.commit()
return Response('Success')
except DataError:
return Response(
error='Image id is not an integer',
| |
<reponame>jtyr/ansible-yaml_dynamic_inventory
#!/usr/bin/env python
try:
import ConfigParser as configparser
except:
import configparser
import argparse
import glob
import json
import logging
import os
import re
import sys
import yaml
# Get logger
log = logging.getLogger(__name__)
def create_symlinks(cfg, inv):
    """Create group_vars symlinks pointing at the inventory vars files.

    Walks cfg['vars_path'] and, for every vars file that corresponds to
    a known inventory group (or to 'all'), creates a symlink in
    cfg['group_vars_path'] so Ansible picks the variables up natively.
    Exits the process on any filesystem error.
    """
    for root, dirs, files in os.walk(cfg['vars_path']):
        for f in files:
            src = "%s/%s" % (root, f)
            # Path components relative to the vars root, e.g. a/b/c.yaml
            src_list = src[len(cfg['vars_path'])+1:].split('/')
            # Ignore dotted (e.g. ".git")
            if src_list[0].startswith('.'):
                continue
            # Strip out the YAML file extension
            if src_list[-1].endswith('.yaml'):
                src_list[-1] = src_list[-1][:-5]
            elif src_list[-1].endswith('.yml'):
                src_list[-1] = src_list[-1][:-4]
            elif src_list[-1].endswith('.yaml.vault'):
                src_list[-1] = "%s.vault" % src_list[-1][:-11]
            elif src_list[-1].endswith('.yml.vault'):
                src_list[-1] = "%s.vault" % src_list[-1][:-10]
            # Keep only the top-level "all" file
            if src_list[-1] in ['all', 'all.vault'] and len(src_list) > 1:
                # Keep the .vault extension
                if src_list[-1] == 'all.vault':
                    src_list[-2] += '.vault'
                del src_list[-1]
            # Group names use '-' where the filesystem uses '/'
            src_list_s = '-'.join(src_list)
            dst = []
            # Ignore files which are not groups
            if src_list[0] in ['all', 'all.vault'] or src_list_s in inv.keys():
                dst.append("%s/%s" % (cfg['group_vars_path'], src_list_s))
            # Add templates into the dst list
            for ig in inv.keys():
                if '@' in ig:
                    # Template groups are named "<group>@<template>"
                    g, t = ig.split('@')
                    if t == src_list_s:
                        dst.append("%s/%s" % (cfg['group_vars_path'], ig))
            # Create all destination symlinks
            for d in dst:
                # Make the source relative to the destination
                s = os.path.relpath(src, os.path.dirname(d))
                # Clear files and dirs of the same name
                try:
                    if os.path.isdir(d):
                        os.rmdir(d)
                    elif os.path.exists(d) or os.path.lexists(d):
                        os.remove(d)
                except Exception as e:
                    log.error("E: Cannot delete %s.\n%s" % (d, e))
                    sys.exit(1)
                # Create new symlink
                try:
                    os.symlink(s, d)
                except Exception as e:
                    log.error("E: Cannot create symlink.\n%s" % e)
                    sys.exit(1)
def read_vars_file(inv, group, cfg, vars_always=False):
    """Load the vars file for *group* and merge it into the inventory.

    When cfg['symlinks'] is set, variables are expected to be picked up
    via the group_vars symlinks instead, so only the (empty) group entry
    is created.  Set *vars_always* to force an empty 'vars' dict even
    when no file data exists.  Vault groups are skipped entirely.
    """
    g = group
    # Get template name
    if '@' in group:
        _, g = group.split('@')
    # Do not try to load vault files
    if g.endswith('.vault'):
        return
    # Group name 'a-b-c' maps onto the vars file path 'a/b/c'
    path = "%s/%s" % (cfg['vars_path'], g.replace('-', '/'))
    data = None
    # Check if vars file exists
    if os.path.isfile(path):
        pass
    elif os.path.isfile("%s/all" % path):
        path += '/all'
    else:
        path = None
    # Read the group file or the "all" file from the group dir if exists
    if path is not None:
        try:
            data = yaml.safe_load(read_yaml_file(path, False))
        except yaml.YAMLError as e:
            log.error("E: Cannot load YAML inventory vars file.\n%s" % e)
            sys.exit(1)
    # Create empty group if needed
    if group not in inv:
        inv[group] = {
            'hosts': []
        }
    # Create empty vars if required
    if (
            (
                vars_always or
                (
                    data is not None and
                    not cfg['symlinks'])) and
            'vars' not in inv[group]):
        inv[group]['vars'] = {}
    # Update the vars with the file data if any
    if data is not None and not cfg['symlinks']:
        inv[group]['vars'].update(data)
def add_param(inv, path, param, val, cfg):
    """Set or extend the parameter *param* of the group named by *path*.

    Args:
        inv: inventory dict being built (mutated in place).
        path: list of group-name components; joined with '-' to form
            the group key.
        param: parameter name; a leading ':' is stripped.
        val: dict (merged for 'vars') or list (appended item by item).
        cfg: config dict; when 'symlinks' and 'vaults' are both set, a
            parallel '<group>.vault' group is linked in as well.
    """
    if param.startswith(':'):
        param = param[1:]
    _path = list(path)
    if cfg['symlinks'] and cfg['vaults']:
        # Create link g1.vault -> g1
        _path[-1] += '.vault'
        cfg_tmp = dict(cfg)
        cfg_tmp['symlinks'] = None
        add_param(inv, _path, 'children', ['-'.join(path)], cfg_tmp)
        if isinstance(val, list) and len(val) and param == 'children':
            val[0] += '.vault'
    group = '-'.join(path)
    # Add empty group
    if group not in inv:
        inv[group] = {}
    # Add empty parameter
    if param not in inv[group]:
        if param == 'vars':
            inv[group][param] = {}
        else:
            inv[group][param] = []
    # Add parameter value
    if isinstance(inv[group][param], dict) and isinstance(val, dict):
        inv[group][param].update(val)
    elif isinstance(inv[group][param], list) and isinstance(val, list):
        # Add individual items if they don't exist.
        # BUG FIX: appending only the missing item ``v`` — the original
        # did ``inv[group][param] += val`` which re-appended the whole
        # list (duplicating already-present entries) whenever any one
        # item was missing.
        for v in val:
            if v not in inv[group][param]:
                inv[group][param].append(v)
    # Read inventory vars file
    if not cfg['symlinks']:
        read_vars_file(inv, group, cfg)
def walk_yaml(inv, data, cfg, parent=None, path=[]):
    """Recursively convert the parsed YAML tree into inventory groups.

    Mapping keys starting with ':' are parameters of the current group
    (:hosts, :vars, :templates, :groups, :add_hosts); all other keys are
    child groups and are recursed into.

    NOTE: the mutable default *path* is safe here because the function
    only copies it (``list(path)``, ``path + [g]``) and never mutates it
    in place.
    """
    if data is None:
        return
    # Split the mapping keys into parameters (':'-prefixed) and groups
    params = list(k for k in data.keys() if k[0] == ':')
    groups = list(k for k in data.keys() if k[0] != ':')
    for p in params:
        if parent is None:
            # Top-level parameters belong to the implicit 'all' group
            _path = ['all']
        else:
            _path = list(path)
        if p == ':templates' and parent is not None:
            for t in data[p]:
                # Template groups are named "<group>@<template>"
                _pth = list(_path)
                _pth[-1] += "@%s" % t
                add_param(
                    inv, _pth, 'children', ['-'.join(_path)], cfg)
        elif p == ':hosts':
            for h in data[p]:
                # Add host with vars into the _meta hostvars
                if isinstance(h, dict):
                    host_name = list(h.keys())[0]
                    host_vars = list(h.values())[0]
                    # Add host vars
                    if host_name not in inv['_meta']['hostvars']:
                        inv['_meta']['hostvars'].update(h)
                    else:
                        inv['_meta']['hostvars'][host_name].update(host_vars)
                    # Add host
                    add_param(
                        inv, _path, p, [host_name], cfg)
                else:
                    add_param(inv, _path, p, [h], cfg)
            else:
                # for-else: this runs after the loop completes normally,
                # so an empty :hosts list still creates the parameter
                # (adding [] is a no-op when hosts were already added).
                # Create empty hosts list if :hosts exists but it's empty
                add_param(inv, _path, p, [], cfg)
        elif p == ':vars':
            add_param(inv, _path, p, data[p], cfg)
        elif p == ':groups' and ':hosts' in data:
            for g in data[p]:
                g_path = g.split('-')
                # Add hosts in the same way like above
                for h in data[':hosts']:
                    if isinstance(h, dict):
                        add_param(
                            inv, g_path, 'hosts', [list(h.keys())[0]], cfg)
                    else:
                        add_param(
                            inv, g_path, 'hosts', [h], cfg)
        elif p == ':add_hosts':
            key = '__YAML_INVENTORY'
            if key not in inv:
                inv[key] = []
            record = {
                'path': path,
                'patterns': data[p]
            }
            # Make a list of groups which want to add hosts by regexps
            inv[key].append(record)
    for g in groups:
        if parent is not None:
            if ':templates' in data[g]:
                if data[g] is not None:
                    for t in data[g][':templates']:
                        _path = list(path + [g])
                        _path[-1] += "@%s" % t
                        add_param(
                            inv, path, 'children', ['-'.join(_path)], cfg)
            else:
                add_param(
                    inv, path, 'children', ['-'.join(path + [g])], cfg)
        walk_yaml(inv, data[g], cfg, g, path + [g])
def read_yaml_file(f_path, strip_hyphens=True):
    """Return the content of *f_path* as a string.

    Args:
        f_path: path of the file to read.
        strip_hyphens: when True, drop lines starting with '---' so the
            contents of several YAML files can be concatenated into a
            single document.

    Exits the process with status 1 when the file cannot be read.
    """
    content = ''
    try:
        # Context manager guarantees the file is closed even if reading
        # fails (the original leaked the handle on a read error and
        # needed a second try/except just for close()).
        with open(f_path, 'r') as f:
            for line in f:
                if strip_hyphens and line.startswith('---'):
                    continue
                content += line
    except IOError as e:
        log.error("E: Cannot open file %s.\n%s" % (f_path, e))
        sys.exit(1)
    return content
def read_inventory(inventory_path):
    """Read all YAML inventory files and return the merged data.

    main.yaml is appended last so its YAML anchors/aliases can reference
    definitions from the other files; afterwards only top-level keys
    that also appear in main.yaml survive in the returned structure.
    Exits the process when the directory, main.yaml or the YAML parse
    fails.
    """
    # Check if the path is a directory
    if not os.path.isdir(inventory_path):
        log.error(
            "E: No inventory directory %s.\n"
            "Use YAML_INVENTORY_PATH environment variable to specify the "
            "custom directory." % inventory_path)
        sys.exit(1)
    if not (
            os.path.isfile("%s/main.yaml" % inventory_path) or
            os.path.isfile("%s/main.yml" % inventory_path)):
        log.error(
            "E: Cannot find %s/main.yaml." % inventory_path)
        sys.exit(1)
    # Get names of all YAML files
    yaml_files = glob.glob("%s/*.yaml" % inventory_path)
    yaml_files += glob.glob("%s/*.yml" % inventory_path)
    yaml_main = ''
    yaml_content = ''
    # Read content of all the files
    for f_path in sorted(yaml_files):
        file_name = os.path.basename(f_path)
        # Keep content of the main.yaml file in a separate variable
        if file_name == 'main.yaml' or file_name == 'main.yml':
            yaml_main += read_yaml_file(f_path)
        else:
            yaml_content += read_yaml_file(f_path)
    # Convert YAML string to data structure
    try:
        data = yaml.safe_load(yaml_content + yaml_main)
        # Remove all YAML references
        # (blank out '*alias' values and rename '<<:' merge keys so
        # main.yaml parses standalone; it is only used as a key filter)
        yaml_main = re.sub(r':\s+\*', ': ', yaml_main).replace('<<:', 'k:')
        data_main = yaml.safe_load(yaml_main)
    except yaml.YAMLError as e:
        log.error("E: Cannot load YAML inventory.\n%s" % e)
        sys.exit(1)
    if data is not None:
        # Delete all non-main variables
        for key in list(data.keys()):
            if key not in data_main:
                data.pop(key, None)
    return data
def my_construct_mapping(self, node, deep=False):
    """Delegate to the original mapping constructor, then stringify any
    integer keys so group names stay comparable as strings."""
    mapping = self.construct_mapping_org(node, deep)
    result = {}
    for key, value in mapping.items():
        if isinstance(key, int):
            key = str(key)
        result[key] = value
    return result
def get_vars(config):
cwd = os.getcwd()
inventory_path = "%s/inventory" % cwd
TRUE = ('1', 'yes', 'y', 'true')
# Check if there is the config var specifying the inventory dir
if config.has_option('paths', 'inventory_path'):
inventory_path = config.get('paths', 'inventory_path')
# Check if there is the env var specifying the inventory dir
if 'YAML_INVENTORY_PATH' in os.environ:
inventory_path = os.environ['YAML_INVENTORY_PATH']
vars_path = "%s/vars" % inventory_path
# Check if there is the config var specifying the inventory/vars dir
if config.has_option('paths', 'inventory_vars_path'):
vars_path = config.get('paths', 'inventory_vars_path')
# Check if there is the env var specifying the inventory/vars dir
if 'YAML_INVENTORY_VARS_PATH' in os.environ:
vars_path = os.environ['YAML_INVENTORY_VARS_PATH']
group_vars_path = "%s/group_vars" % cwd
# Check if there is the config var specifying the group_vars dir
if config.has_option('paths', 'group_vars_path'):
group_vars_path = config.get('paths', 'group_vars_path')
# Check if there is the env var specifying the group_vars dir
if 'YAML_INVENTORY_GROUP_VARS_PATH' in os.environ:
group_vars_path = os.environ['YAML_INVENTORY_GROUP_VARS_PATH']
vaults = True
# Check if there is the config var specifying the support_vaults flag
if config.has_option('features', 'support_vaults'):
try:
vaults = config.getboolean('features', 'support_vaults')
except ValueError as e:
log.error("E: Wrong value of the support_vaults option.\n%s" % e)
# Check if there is the env var specifying the support_vaults flag
if (
'YAML_INVENTORY_SUPPORT_VAULTS' in os.environ and
os.environ['YAML_INVENTORY_SUPPORT_VAULTS'].lower() not in TRUE):
vaults = False
symlinks = True
# Check if there is the config var specifying the create_symlinks flag
| |
<gh_stars>0
load("@bazel_gazelle//:deps.bzl", "go_repository")
def go_dependencies():
go_repository(
name = "co_honnef_go_tools",
importpath = "honnef.co/go/tools",
sum = "h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=",
version = "v0.0.0-20190523083050-ea95bdfd59fc",
)
go_repository(
name = "com_github_alecthomas_template",
importpath = "github.com/alecthomas/template",
sum = "h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=",
version = "v0.0.0-20160405071501-a0175ee3bccc",
)
go_repository(
name = "com_github_alecthomas_units",
importpath = "github.com/alecthomas/units",
sum = "h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=",
version = "v0.0.0-20151022065526-2efee857e7cf",
)
go_repository(
name = "com_github_antihax_optional",
importpath = "github.com/antihax/optional",
sum = "h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=",
version = "v1.0.0",
)
go_repository(
name = "com_github_armon_consul_api",
importpath = "github.com/armon/consul-api",
sum = "h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA=",
version = "v0.0.0-20180202201655-eb2c6b5be1b6",
)
go_repository(
name = "com_github_bazelbuild_buildtools",
importpath = "github.com/bazelbuild/buildtools",
sum = "h1:5MiHtWINtvfgTnuI+q53BDvWyoD0WH2R1l6r73V9tCo=",
version = "v0.0.0-20200925145723-e6efbf6df90b",
)
go_repository(
name = "com_github_beorn7_perks",
importpath = "github.com/beorn7/perks",
sum = "h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=",
version = "v1.0.0",
)
go_repository(
name = "com_github_burntsushi_toml",
importpath = "github.com/BurntSushi/toml",
sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
version = "v0.3.1",
)
go_repository(
name = "com_github_burntsushi_xgb",
importpath = "github.com/BurntSushi/xgb",
sum = "h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=",
version = "v0.0.0-20160522181843-27f122750802",
)
go_repository(
name = "com_github_cespare_xxhash",
importpath = "github.com/cespare/xxhash",
sum = "h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=",
version = "v1.1.0",
)
go_repository(
name = "com_github_chzyer_logex",
importpath = "github.com/chzyer/logex",
sum = "h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=",
version = "v1.1.10",
)
go_repository(
name = "com_github_chzyer_readline",
importpath = "github.com/chzyer/readline",
sum = "h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=",
version = "v0.0.0-20180603132655-2972be24d48e",
)
go_repository(
name = "com_github_chzyer_test",
importpath = "github.com/chzyer/test",
sum = "h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=",
version = "v0.0.0-20180213035817-a1ea475d72b1",
)
go_repository(
name = "com_github_client9_misspell",
importpath = "github.com/client9/misspell",
sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=",
version = "v0.3.4",
)
go_repository(
name = "com_github_cncf_udpa_go",
importpath = "github.com/cncf/udpa/go",
sum = "h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M=",
version = "v0.0.0-20201120205902-5459f2c99403",
)
go_repository(
name = "com_github_coreos_bbolt",
importpath = "github.com/coreos/bbolt",
sum = "h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=",
version = "v1.3.2",
)
go_repository(
name = "com_github_coreos_etcd",
importpath = "github.com/coreos/etcd",
sum = "h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=",
version = "v3.3.10+incompatible",
)
go_repository(
name = "com_github_coreos_go_semver",
importpath = "github.com/coreos/go-semver",
sum = "h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=",
version = "v0.2.0",
)
go_repository(
name = "com_github_coreos_go_systemd",
importpath = "github.com/coreos/go-systemd",
sum = "h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=",
version = "v0.0.0-20190321100706-95778dfbb74e",
)
go_repository(
name = "com_github_coreos_pkg",
importpath = "github.com/coreos/pkg",
sum = "h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=",
version = "v0.0.0-20180928190104-399ea9e2e55f",
)
go_repository(
name = "com_github_cpuguy83_go_md2man_v2",
importpath = "github.com/cpuguy83/go-md2man/v2",
sum = "h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=",
version = "v2.0.0",
)
go_repository(
name = "com_github_davecgh_go_spew",
importpath = "github.com/davecgh/go-spew",
sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=",
version = "v1.1.1",
)
go_repository(
name = "com_github_dgrijalva_jwt_go",
importpath = "github.com/dgrijalva/jwt-go",
sum = "h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=",
version = "v3.2.0+incompatible",
)
go_repository(
name = "com_github_dgryski_go_sip13",
importpath = "github.com/dgryski/go-sip13",
sum = "h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=",
version = "v0.0.0-20181026042036-e10d5fee7954",
)
go_repository(
name = "com_github_eapache_go_xerial_snappy",
importpath = "github.com/eapache/go-xerial-snappy",
sum = "h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=",
version = "v0.0.0-20180814174437-776d5712da21",
)
go_repository(
name = "com_github_flimzy_diff",
importpath = "github.com/flimzy/diff",
sum = "h1:DRbd+lN3lY1xVuQrfqvDNsqBwA6RMbClMs6tS5sqWWk=",
version = "v0.1.7",
)
go_repository(
name = "com_github_fsnotify_fsnotify",
importpath = "github.com/fsnotify/fsnotify",
sum = "h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=",
version = "v1.4.7",
)
go_repository(
name = "com_github_ghodss_yaml",
importpath = "github.com/ghodss/yaml",
sum = "h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_gl_glfw",
importpath = "github.com/go-gl/glfw",
sum = "h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=",
version = "v0.0.0-20190409004039-e6da0acd62b1",
)
go_repository(
name = "com_github_go_gl_glfw_v3_3_glfw",
importpath = "github.com/go-gl/glfw/v3.3/glfw",
sum = "h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I=",
version = "v0.0.0-20200222043503-6f7a984d4dc4",
)
go_repository(
name = "com_github_go_kit_kit",
importpath = "github.com/go-kit/kit",
sum = "h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=",
version = "v0.8.0",
)
go_repository(
name = "com_github_go_logfmt_logfmt",
importpath = "github.com/go-logfmt/logfmt",
sum = "h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=",
version = "v0.4.0",
)
go_repository(
name = "com_github_go_sql_driver_mysql",
importpath = "github.com/go-sql-driver/mysql",
sum = "h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=",
version = "v1.5.0",
)
go_repository(
name = "com_github_go_stack_stack",
importpath = "github.com/go-stack/stack",
sum = "h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=",
version = "v1.8.0",
)
go_repository(
name = "com_github_gogo_protobuf",
importpath = "github.com/gogo/protobuf",
sum = "h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=",
version = "v1.2.1",
)
go_repository(
name = "com_github_golang_glog",
importpath = "github.com/golang/glog",
sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=",
version = "v0.0.0-20160126235308-23def4e6c14b",
)
go_repository(
name = "com_github_golang_groupcache",
importpath = "github.com/golang/groupcache",
sum = "h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=",
version = "v0.0.0-20190129154638-5b532d6fd5ef",
)
go_repository(
name = "com_github_golang_mock",
importpath = "github.com/golang/mock",
sum = "h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=",
version = "v1.1.1",
)
go_repository(
name = "com_github_golang_protobuf",
importpath = "github.com/golang/protobuf",
sum = "h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=",
version = "v1.4.2",
)
go_repository(
name = "com_github_golang_snappy",
importpath = "github.com/golang/snappy",
sum = "h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=",
version = "v0.0.1",
)
go_repository(
name = "com_github_google_btree",
importpath = "github.com/google/btree",
sum = "h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=",
version = "v1.0.0",
)
# Go module dependencies, one gazelle `go_repository` rule per module.
# `sum` is the go.sum hash (h1:...) used to verify the downloaded archive;
# `version` pins the exact module version. Entries are kept sorted by name.
go_repository(
    name = "com_github_google_go_cmp",
    importpath = "github.com/google/go-cmp",
    sum = "h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=",
    version = "v0.5.4",
)
go_repository(
    name = "com_github_google_martian",
    importpath = "github.com/google/martian",
    sum = "h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=",
    version = "v2.1.0+incompatible",
)
go_repository(
    name = "com_github_google_martian_v3",
    importpath = "github.com/google/martian/v3",
    sum = "h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs=",
    version = "v3.0.0",
)
go_repository(
    name = "com_github_google_pprof",
    importpath = "github.com/google/pprof",
    sum = "h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM=",
    version = "v0.0.0-20200708004538-1a94d8640e99",
)
go_repository(
    name = "com_github_google_renameio",
    importpath = "github.com/google/renameio",
    sum = "h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=",
    version = "v0.1.0",
)
go_repository(
    name = "com_github_google_uuid",
    importpath = "github.com/google/uuid",
    sum = "h1:kxhtnfFVi+rYdOALN0B3k9UT86zVJKfBimRaciULW4I=",
    version = "v1.1.5",
)
go_repository(
    name = "com_github_googleapis_gax_go_v2",
    importpath = "github.com/googleapis/gax-go/v2",
    sum = "h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=",
    version = "v2.0.5",
)
go_repository(
    name = "com_github_gopherjs_gopherjs",
    importpath = "github.com/gopherjs/gopherjs",
    sum = "h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0=",
    version = "v0.0.0-20200217142428-fce0ec30dd00",
)
go_repository(
    name = "com_github_gorilla_mux",
    importpath = "github.com/gorilla/mux",
    sum = "h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=",
    version = "v1.8.0",
)
go_repository(
    name = "com_github_gorilla_websocket",
    importpath = "github.com/gorilla/websocket",
    sum = "h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=",
    version = "v1.4.0",
)
go_repository(
    name = "com_github_grpc_ecosystem_go_grpc_middleware",
    importpath = "github.com/grpc-ecosystem/go-grpc-middleware",
    sum = "h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=",
    version = "v1.0.0",
)
go_repository(
    name = "com_github_grpc_ecosystem_go_grpc_prometheus",
    importpath = "github.com/grpc-ecosystem/go-grpc-prometheus",
    sum = "h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=",
    version = "v1.2.0",
)
go_repository(
    name = "com_github_grpc_ecosystem_grpc_gateway",
    importpath = "github.com/grpc-ecosystem/grpc-gateway",
    sum = "h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI=",
    version = "v1.9.0",
)
go_repository(
    name = "com_github_grpc_ecosystem_grpc_gateway_v2",
    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2",
    sum = "h1:EhTvIsn53GrBLl45YVHk25cUHQHwlJfq2y8b7W5IpVY=",
    version = "v2.1.0",
)
go_repository(
    name = "com_github_hashicorp_golang_lru",
    importpath = "github.com/hashicorp/golang-lru",
    sum = "h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=",
    version = "v0.5.1",
)
go_repository(
    name = "com_github_hashicorp_hcl",
    importpath = "github.com/hashicorp/hcl",
    sum = "h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=",
    version = "v1.0.0",
)
go_repository(
    name = "com_github_ianlancetaylor_demangle",
    importpath = "github.com/ianlancetaylor/demangle",
    sum = "h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c=",
    version = "v0.0.0-20181102032728-5e5cf60278f6",
)
go_repository(
    name = "com_github_inconshreveable_mousetrap",
    importpath = "github.com/inconshreveable/mousetrap",
    sum = "h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=",
    version = "v1.0.0",
)
go_repository(
    name = "com_github_jmoiron_sqlx",
    importpath = "github.com/jmoiron/sqlx",
    sum = "h1:aLN7YINNZ7cYOPK3QC83dbM6KT0NMqVMw961TqrejlE=",
    version = "v1.3.1",
)
go_repository(
    name = "com_github_jonboulle_clockwork",
    importpath = "github.com/jonboulle/clockwork",
    sum = "h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=",
    version = "v0.1.0",
)
go_repository(
    name = "com_github_jstemmer_go_junit_report",
    importpath = "github.com/jstemmer/go-junit-report",
    sum = "h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=",
    version = "v0.9.1",
)
go_repository(
    name = "com_github_julienschmidt_httprouter",
    importpath = "github.com/julienschmidt/httprouter",
    sum = "h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=",
    version = "v1.2.0",
)
go_repository(
    name = "com_github_kisielk_errcheck",
    importpath = "github.com/kisielk/errcheck",
    sum = "h1:ZqfnKyx9KGpRcW04j5nnPDgRgoXUeLh2YFBeFzphcA0=",
    version = "v1.1.0",
)
go_repository(
    name = "com_github_kisielk_gotool",
    importpath = "github.com/kisielk/gotool",
    sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=",
    version = "v1.0.0",
)
go_repository(
    name = "com_github_klauspost_compress",
    importpath = "github.com/klauspost/compress",
    sum = "h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA=",
    version = "v1.9.8",
)
go_repository(
    name = "com_github_konsorten_go_windows_terminal_sequences",
    importpath = "github.com/konsorten/go-windows-terminal-sequences",
    sum = "h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=",
    version = "v1.0.1",
)
go_repository(
    name = "com_github_kr_logfmt",
    importpath = "github.com/kr/logfmt",
    sum = "h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=",
    version = "v0.0.0-20140226030751-b84e30acd515",
)
go_repository(
    name = "com_github_kr_pretty",
    importpath = "github.com/kr/pretty",
    sum = "h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=",
    version = "v0.1.0",
)
go_repository(
    name = "com_github_kr_pty",
    importpath = "github.com/kr/pty",
    sum = "h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=",
    version = "v1.1.1",
)
go_repository(
    name = "com_github_kr_text",
    importpath = "github.com/kr/text",
    sum = "h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=",
    version = "v0.1.0",
)
go_repository(
    name = "com_github_lann_builder",
    importpath = "github.com/lann/builder",
    sum = "h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=",
    version = "v0.0.0-20180802200727-47ae307949d0",
)
go_repository(
    name = "com_github_lann_ps",
    importpath = "github.com/lann/ps",
    sum = "h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=",
    version = "v0.0.0-20150810152359-62de8c46ede0",
)
go_repository(
    name = "com_github_lib_pq",
    importpath = "github.com/lib/pq",
    sum = "h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8=",
    version = "v1.9.0",
)
go_repository(
    name = "com_github_magiconair_properties",
    importpath = "github.com/magiconair/properties",
    sum = "h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=",
    version = "v1.8.0",
)
go_repository(
    name = "com_github_masterminds_squirrel",
    importpath = "github.com/Masterminds/squirrel",
    sum = "h1:JukIZisrUXadA9pl3rMkjhiamxiB0cXiu+HGp/Y8cY8=",
    version = "v1.5.0",
)
go_repository(
    name = "com_github_mattn_go_sqlite3",
    importpath = "github.com/mattn/go-sqlite3",
    sum = "h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=",
    version = "v1.14.6",
)
go_repository(
    name = "com_github_matttproud_golang_protobuf_extensions",
    importpath = "github.com/matttproud/golang_protobuf_extensions",
    sum = "h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=",
    version = "v1.0.1",
)
go_repository(
    name = "com_github_mitchellh_go_homedir",
    importpath = "github.com/mitchellh/go-homedir",
    sum = "h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=",
    version = "v1.1.0",
)
go_repository(
    name = "com_github_mitchellh_mapstructure",
    importpath = "github.com/mitchellh/mapstructure",
    sum = "h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=",
    version = "v1.1.2",
)
go_repository(
    name = "com_github_mwitkow_go_conntrack",
    importpath = "github.com/mwitkow/go-conntrack",
    sum = "h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc=",
    version = "v0.0.0-20161129095857-cc309e4a2223",
)
go_repository(
    name = "com_github_oklog_ulid",
    importpath = "github.com/oklog/ulid",
    sum = "h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=",
    version = "v1.3.1",
)
go_repository(
    name = "com_github_oneofone_xxhash",
    importpath = "github.com/OneOfOne/xxhash",
    sum = "h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=",
    version = "v1.2.2",
)
go_repository(
    name = "com_github_otiai10_copy",
    importpath = "github.com/otiai10/copy",
    sum = "h1:DDNipYy6RkIkjMwy+AWzgKiNTyj2RUI9yEMeETEpVyc=",
    version = "v1.0.2",
)
go_repository(
    name = "com_github_otiai10_curr",
    importpath = "github.com/otiai10/curr",
    sum = "h1:o59bHXu8Ejas8Kq6pjoVJQ9/neN66SM8AKh6wI42BBs=",
    version = "v0.0.0-20190513014714-f5a3d24e5776",
)
go_repository(
    name = "com_github_otiai10_mint",
    importpath = "github.com/otiai10/mint",
    sum = "h1:Ady6MKVezQwHBkGzLFbrsywyp09Ah7rkmfjV3Bcr5uc=",
    version = "v1.3.0",
)
go_repository(
    name = "com_github_pelletier_go_toml",
    importpath = "github.com/pelletier/go-toml",
    sum = "h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=",
    version = "v1.2.0",
)
go_repository(
    name = "com_github_pierrec_lz4",
    importpath = "github.com/pierrec/lz4",
    sum = "h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=",
    version = "v2.0.5+incompatible",
)
go_repository(
    name = "com_github_pkg_errors",
    importpath = "github.com/pkg/errors",
    sum = "h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=",
    version = "v0.8.0",
)
go_repository(
    name = "com_github_pmezard_go_difflib",
    importpath = "github.com/pmezard/go-difflib",
    sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=",
    version = "v1.0.0",
)
go_repository(
    name = "com_github_prometheus_client_golang",
    importpath = "github.com/prometheus/client_golang",
    sum = "h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=",
    version = "v0.9.3",
)
go_repository(
name = | |
# -*- coding: utf-8 -*-
# Adapted 2016 <NAME>, TU Berlin
# TU cluster specific changes.
# Written (W) 2008-2012 <NAME>
# Written (W) 2008-2010 <NAME>
# Written (W) 2012-2014 <NAME>, <EMAIL>
# Copyright (C) 2008-2012 Max-Planck-Society, 2012-2014 ETS
# This file is part of GridMap.
# GridMap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# GridMap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GridMap. If not, see <http://www.gnu.org/licenses/>.
"""
This module provides wrappers that simplify submission and collection of jobs,
in a more 'pythonic' fashion.
We use pyZMQ to provide a heartbeat feature that allows close monitoring
of submitted jobs and lets us take appropriate action in case of failure.
:author: <NAME>
:author: <NAME>
:author: <NAME> (<EMAIL>)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import inspect
import logging
import multiprocessing
import os
import sys
import traceback
import socket
from datetime import datetime
from importlib import import_module
import zmq
from clustermap.conf import (CHECK_FREQUENCY, DEFAULT_QUEUE, DRMAA_PRESENT, HEARTBEAT_FREQUENCY,
IDLE_THRESHOLD, MAX_IDLE_HEARTBEATS, MAX_TIME_BETWEEN_HEARTBEATS, NUM_RESUBMITS, USE_MEM_FREE)
from clustermap.data import zdumps, zloads
from clustermap.runner import _heart_beat
if DRMAA_PRESENT:
from drmaa import (ExitTimeoutException, InvalidJobException,
JobControlAction, JOB_IDS_SESSION_ALL, Session,
TIMEOUT_NO_WAIT)
# Python 2.x backward compatibility
if sys.version_info < (3, 0):
range = xrange
# Placeholder string, since a job could potentially return None on purpose
_JOB_NOT_FINISHED = '*@#%$*@#___CLUSTERMAP___NOT___DONE___@#%**#*$&*%'
class JobException(Exception):
    """Raised when one of the submitted jobs crashed."""
class Job(object):
    """
    Central entity that wraps a function and its data. Basically, a job consists
    of a function, its argument list, its keyword list and a field "ret" which
    is filled, when the execute method gets called.

    .. note::

       This can only be used to wrap picklable functions (i.e., those that
       are defined at the module or class level).
    """

    # Jobs are pickled and shipped to worker nodes, so the attribute set is
    # fixed via __slots__ (which also keeps per-instance memory low).
    __slots__ = ('_f', 'args', 'id', 'kwlist', 'cleanup', 'ret', 'traceback',
                 'num_slots', 'mem_free', 'white_list', 'path', 'uniq_id',
                 'name', 'queue', 'environment', 'working_dir',
                 'cause_of_death', 'num_resubmits', 'home_address',
                 'log_stderr_fn', 'log_stdout_fn', 'timestamp', 'host_name',
                 'heart_beat', 'track_mem', 'track_cpu', 'mem_max')

    def __init__(self, f, args, kwlist=None, cleanup=True, mem_max="16G",
                 mem_free="8G", name='clustermap_job', num_slots=1,
                 queue=DEFAULT_QUEUE):
        """
        Initializes a new Job.

        :param f: a function, which should be executed.
        :type f: function
        :param args: argument list of function f
        :type args: list
        :param kwlist: dictionary of keyword arguments for f
        :type kwlist: dict
        :param cleanup: flag that determines the cleanup of input and log file
        :type cleanup: boolean
        :param mem_max: hard memory limit for this job (``h_vmem``)
        :type mem_max: str
        :param mem_free: Estimate of how much memory this job will need (for
                         scheduling)
        :type mem_free: str
        :param name: Name to give this job
        :type name: str
        :param num_slots: Number of slots this job should use.
        :type num_slots: int
        :param queue: SGE queue to schedule job on.
        :type queue: str
        """
        # Bookkeeping fields filled in during submission/monitoring.
        self.track_mem = []
        self.track_cpu = []
        self.heart_beat = None
        self.traceback = None
        self.host_name = ''
        self.timestamp = None
        self.log_stdout_fn = ''
        self.log_stderr_fn = ''
        self.home_address = ''
        self.num_resubmits = 0
        self.cause_of_death = ''
        self.path = None
        self._f = None
        # Assigning through the property resolves the real module of f
        # (avoiding __main__); see the `function` setter below.
        self.function = f
        self.args = args
        self.id = -1
        self.kwlist = kwlist if kwlist is not None else {}
        self.cleanup = cleanup
        # Sentinel value -- a job may legitimately return None, so "not done
        # yet" needs a distinguishable placeholder.
        self.ret = _JOB_NOT_FINISHED
        self.num_slots = num_slots
        self.mem_free = mem_free
        self.mem_max = mem_max
        self.white_list = []
        # Spaces are replaced because SGE job names may not contain them.
        self.name = name.replace(' ', '_')
        self.queue = queue
        # Save copy of environment variables (as bytes) to forward to the
        # worker process; variables that cannot be encoded are skipped.
        self.environment = {}
        for env_var, value in os.environ.items():
            try:
                if not isinstance(env_var, bytes):
                    env_var = env_var.encode()
                if not isinstance(value, bytes):
                    value = value.encode()
            except UnicodeEncodeError:
                logger = logging.getLogger(__name__)
                logger.warning('Skipping non-ASCII environment variable.')
            else:
                self.environment[env_var] = value
        self.working_dir = os.getcwd()

    @property
    def function(self):
        ''' Function this job will execute. '''
        return self._f

    @function.setter
    def function(self, f):
        """
        setter for function that carefully takes care of
        namespace, avoiding __main__ as a module
        """
        m = inspect.getmodule(f)
        try:
            self.path = os.path.dirname(os.path.abspath(inspect.getsourcefile(f)))
        except TypeError:
            # built-in or dynamically created functions have no source file
            self.path = ''
        # if module is not __main__, all is good
        if m.__name__ != "__main__":
            self._f = f
        else:
            # determine real module name from the script's file name, so the
            # worker can re-import the function outside of __main__
            mn = os.path.splitext(os.path.basename(m.__file__))[0]
            # make sure module is present
            import_module(mn)
            # get module
            mod = sys.modules[mn]
            # set function from module
            self._f = getattr(mod, f.__name__)

    def execute(self):
        """
        Executes function f with given arguments
        and writes return value to field ret.
        If an exception is encountered during execution, ret will
        contain a pickled version of it.
        Input data is removed after execution to save space.
        """
        try:
            self.ret = self.function(*self.args, **self.kwlist)
        except Exception as exception:
            # Store the exception and its formatted traceback so the monitor
            # can report the failure back to the submitting process.
            self.ret = exception
            self.traceback = traceback.format_exc()
            traceback.print_exc()

    @property
    def native_specification(self):
        """
        define python-style getter
        """
        # Assemble the SGE/DRMAA native specification string from the
        # resource requirements configured on this job.
        ret = "-shell yes -b yes"
        if self.mem_free and USE_MEM_FREE:
            ret += " -l mem_free={}".format(self.mem_free)
        if self.mem_max and USE_MEM_FREE:
            ret += " -l h_vmem={}".format(self.mem_max)
        if self.num_slots and self.num_slots > 1:
            ret += " -pe smp {}".format(self.num_slots)
        if self.white_list:
            ret += " -l h={}".format('|'.join(self.white_list))
        if self.queue:
            ret += " -q {}".format(self.queue)
        return ret
###############################
# Job Submission and Monitoring
###############################
class JobMonitor(object):
"""
Job monitor that communicates with other nodes via 0MQ.
"""
def __init__(self, temp_dir='tmp/'):
    """
    Set up the 0MQ reply socket this monitor listens on and determine
    the address workers should send their heartbeats to.

    :param temp_dir: directory for temporary files
    :type temp_dir: str
    """
    self.logger = logging.getLogger(__name__)
    context = zmq.Context()
    self.temp_dir = temp_dir
    self.socket = context.socket(zmq.REP)
    # Determine the outward-facing IP address by "connecting" a UDP socket
    # to a public address; connecting a UDP socket doesn't send packets.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 0))
        local_ip_address = s.getsockname()[0]
    finally:
        # fix: the helper socket was previously never closed (fd leak)
        s.close()
    self.host_name = socket.gethostname()
    self.ip_address = local_ip_address
    if self.ip_address[:3] == '127':
        self.logger.error("IP address is localhost: {0}".format(self.ip_address))
    self.interface = "tcp://%s" % (self.ip_address)
    # bind to random port and remember it
    self.port = self.socket.bind_to_random_port(self.interface)
    self.home_address = "%s:%i" % (self.interface, self.port)
    self.logger.info("Setting up JobMonitor on %s", self.home_address)
    # uninitialized fields (set in check method)
    self.jobs = []
    self.ids = []
    self.session_id = None
    self.id_to_job = {}
def __enter__(self):
'''
Enable JobMonitor to be used as a context manager.
'''
return self
def __exit__(self, exc_type, exc_value, exc_tb):
    '''
    Gracefully handle exceptions by terminating all jobs, and closing
    sockets.
    '''
    # Always close socket
    self.socket.close()
    # Clean up if we have a valid session
    if self.session_id is not None:
        with Session(self.session_id) as session:
            # If we encounter an exception, kill all jobs
            if exc_type is not None:
                self.logger.info('Encountered %s, so killing all jobs.', exc_type.__name__)
                # try to kill off all old jobs
                try:
                    session.control(JOB_IDS_SESSION_ALL, JobControlAction.TERMINATE)
                except InvalidJobException:
                    # jobs may already be gone; nothing to terminate
                    self.logger.debug("Could not kill all jobs for session.", exc_info=True)
            # Get rid of job info to prevent memory leak
            try:
                # dispose=True tells DRMAA to discard the finished-job records
                session.synchronize([JOB_IDS_SESSION_ALL], TIMEOUT_NO_WAIT, dispose=True)
            except ExitTimeoutException:
                # jobs still running -- fine, we only wanted to reap finished ones
                pass
def check(self, session_id, jobs):
"""
serves input and output data
"""
# save list of jobs
self.jobs = jobs
self.id_to_job = {job.id: job for job in self.jobs}
# keep track of DRMAA session_id (for resubmissions)
self.session_id = session_id
# determines in which interval to check if jobs are alive
self.logger.debug('Starting local hearbeat')
local_heart = multiprocessing.Process(target=_heart_beat,
args=(-1, self.home_address, -1, "", CHECK_FREQUENCY))
local_heart.start()
try:
self.logger.debug("Starting ZMQ event loop")
# main loop
while not self.all_jobs_done():
self.logger.debug('Waiting for message')
msg_str = self.socket.recv()
msg = zloads(msg_str)
self.logger.debug('Received message: %s', msg)
return_msg = ""
job_id = msg["job_id"]
# only if its not the local beat
if job_id != -1:
# If message is from a valid job, process that message
if job_id in self.id_to_job:
job = self.id_to_job[job_id]
if msg["command"] == "fetch_input":
return_msg = self.id_to_job[job_id]
job.timestamp = datetime.now()
self.logger.debug("Received input request from %s", job_id)
if msg["command"] == "store_output":
# be nice
return_msg = "thanks"
# store tmp job object
if isinstance(msg["data"], Job):
tmp_job = msg["data"]
# copy relevant fields
job.ret = tmp_job.ret
job.traceback = tmp_job.traceback
self.logger.info("Received output from %s", job_id)
# Returned exception instead of job, so store that
elif isinstance(msg["data"], tuple):
job.ret, job.traceback = msg["data"]
self.logger.info("Received exception from %s", job_id)
else:
self.logger.error(("Received message with invalid data: %s"), msg)
job.ret = msg["data"]
job.timestamp = datetime.now()
if msg["command"] == "heart_beat":
job.heart_beat = msg["data"]
# keep track of mem and cpu
try:
job.track_mem.append(job.heart_beat["memory"])
job.track_cpu.append(job.heart_beat["cpu_load"])
except (ValueError, TypeError):
self.logger.error("Error decoding heart-beat", exc_info=True)
return_msg = "all good"
job.timestamp = datetime.now()
if msg["command"] == "get_job":
# serve job for display
return_msg = job
| |
# kekraft/contamination_stack -- people_tracking/scripts/people_tracker.py
#!/usr/bin/env python
import rospy
import numpy as np
from std_msgs.msg import *
from geometry_msgs.msg import Point, Quaternion, Pose, Vector3
from visualization_msgs.msg import Marker, MarkerArray
from sensor_msgs.msg import LaserScan
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors
from math import sin, cos, sqrt
from scipy.spatial.distance import pdist
import Queue
from ellipse2d import Ellipse2d
from people_tracking.msg import PersonLocation, PersonLocationArray
class Person:
    """
    A tracked person, represented by a smoothed ellipse fit (center, axes
    a/b, orientation theta) plus a bounded history of previous positions.
    """

    def __init__(self, e, time_stamp, name="uknown", axis_a=0.9, center_a=0.1,
                 prev_positions_maxsize=10):
        """
        :param e: initial ellipse fit for this person (or None if unknown)
        :param time_stamp: time the person was first observed
        :param name: identifier for this person (default value kept as-is,
            including its historical spelling, for backward compatibility)
        :param axis_a: exponential-smoothing weight kept for the old axes
        :param center_a: exponential-smoothing weight kept for the old center
        :param prev_positions_maxsize: max number of past positions to keep
        """
        if e is None:
            self.a = self.center = self.b = self.theta = None
        else:
            self.a = e.a
            self.b = e.b
            self.center = e.center
            self.theta = e.theta
        self.prev_positions = list()
        self.prev_positions_maxsize = prev_positions_maxsize
        self.axis_alpha = axis_a
        self.center_alpha = center_a
        self.name = name
        self.last_update = time_stamp

    def update(self, e, time_stamp):
        ''' Update current position and stores previous position.
        The current position becomes that of the e.
        The previous position gets loaded in the previous positions list
        Stores time the update was performed
        '''
        # store prev position before updating current position
        # older positions are stored at front of list.
        prev_pos = (self.center[0], self.center[1], self.theta, self.a,
                    self.b, self.last_update)
        # BUGFIX: use >= so the history never exceeds maxsize entries
        # (previously the check ran before append with >, allowing
        # maxsize + 1 entries).
        if len(self.prev_positions) >= self.prev_positions_maxsize:
            # pop off the oldest position
            self.prev_positions.pop(0)
        self.prev_positions.append(prev_pos)
        if self.a is not None and self.b is not None and self.center is not None:
            # exponentially smooth towards the new measurement
            # (note: theta is not smoothed, only overwritten below)
            self.center = [self.center[i] * self.center_alpha +
                           e.center[i] * (1 - self.center_alpha) for i in [0, 1]]
            self.a = self.a * self.axis_alpha + e.a * (1 - self.axis_alpha)
            self.b = self.b * self.axis_alpha + e.b * (1 - self.axis_alpha)
        else:
            self.a = e.a
            self.b = e.b
            self.center = e.center
            self.theta = e.theta
        self.last_update = time_stamp
        print("Updated myself")
        print(self)

    def get_last_position(self):
        """Return the most recent (x, y, theta) from the history.

        BUGFIX: the original did ``self.prev_positions(...)`` -- calling the
        list instead of indexing it, which raised TypeError.
        """
        prev_pos = self.prev_positions[-1]
        x = prev_pos[0]
        y = prev_pos[1]
        theta = prev_pos[2]
        return (x, y, theta)

    def get_last_time_updated(self):
        """Return the timestamp of the most recent update."""
        return self.last_update

    def is_match(self, e, e_stamp, match_dist_thresh=0.8, match_time_thresh=0.3):
        ''' Returns true if the person and the ellipse are a match.
        False, otherwise.
        Matches are based on the time stamp differences and the
        location differences. Both within some threshold.
        '''
        # e_stamp is assumed to be a rospy.Time, so the difference is a
        # Duration with .to_sec() -- TODO confirm against caller.
        dur_diff = e_stamp - self.last_update
        if (dur_diff.to_sec() < match_time_thresh):
            pt_matrix = np.zeros((2, 2))
            pt_matrix[[0], ] = self.center
            pt_matrix[[1], ] = e.center
            dist = pdist(pt_matrix, 'euclidean')
            if abs(dist) < match_dist_thresh:
                return True
            else:
                print("Failed dist match")
        else:
            print("Failed time match")
        return False

    def __str__(self):
        person_str = ('Name: {0} \nOld positions:{1} \n Current Position: '
                      'center: {2}, theta: {3}, a: {4}, b:{5}').format(
                          self.name, self.prev_positions,
                          self.center, self.theta, self.a, self.b)
        return person_str
class Multitracker:
potential_names = Queue.Queue()
[potential_names.put(str(x)) for x in xrange(200)]
def __init__ (self):
    """Set up tracking state and ROS publishers/subscribers."""
    self.people = []        # currently tracked Person objects
    self.e_colors = []      # per-person display colors
    self.red = ColorRGBA(1, 0, 0, 1)
    self.green = ColorRGBA(0, 1, 0, 1)
    self.person_tracker = PersonTracker() ### allows us to get some methods from person tracker
    # self.marker_pub = rospy.Publisher("multiperson_markers", MarkerArray, queue_size=10)
    self.people_locations_pub = rospy.Publisher("people_locations", PersonLocationArray, queue_size=10)
    # Subscriber registered last so callbacks never fire before the
    # publisher above exists.
    self.scan_sub = rospy.Subscriber("filtered_scan", LaserScan, self.find_ppl_from_scan)
    # self.contam_array_sub = rospy.Subscriber("contam_array", Float32MultiArray, self.get_colors)
def reset (self, run, pub):
    """Clear all tracked people and publish a marker that deletes the
    on-screen visualization.

    :param run: unused here -- kept for caller compatibility (TODO confirm)
    :param pub: publisher for the MarkerArray used to clear markers
    """
    # action=3 is presumably the delete-all marker action -- confirm against
    # the visualization_msgs/Marker constants for this ROS version.
    m = Marker(header=Header(stamp=rospy.Time.now(), frame_id="laser"), ns="person", id=0, type=3, action=3)
    pub.publish(MarkerArray([m]))
    self.people = []
    self.e_colors = []
def _min_dist(self, dist, ind, sort, ellipse):
    ''' Match indices from knn algorithm to the ellipse data

    Greedy assignment with displacement: tries the nearest neighbours of
    `ellipse` in order; if a slot is already taken by a farther ellipse,
    that ellipse is displaced and recursively re-assigned.

    :param dist: knn distance matrix (row per ellipse)
    :param ind: knn index matrix (row per ellipse, nearest first)
    :param sort: dict mapping slot index -> (ellipse index, distance)
    :param ellipse: index of the ellipse to place
    :returns: the updated `sort` dict
    '''
    print "dist ", dist
    print "ind ", ind
    print "sort ", sort
    print "ellipse ", ellipse
    for i in xrange(len(ind[ellipse])):
        k = ind[ellipse, i]
        if k not in sort: #if the ideal position is available
            sort[k] = (ellipse, dist[ellipse, i])
            break
        elif sort[k][1] > dist[ellipse, i]: #if it's taken, but this ellipse is closer
            # displace the previous occupant and re-place it recursively
            temp = sort[k][0]
            sort[k] = (ellipse, dist[ellipse, i])
            sort = self._min_dist(dist, ind, sort, temp)
            break
    return sort
def match_people(self, new_ellipses, time_stamp):
    ''' Matches people from one timestep to another.
    People are currently represented as ellipses.
    Assumes that from one timestep to another, the
    people will move just a little bit.
    Doesn't not gracefully handle cases when 2 people overlap closely.
    '''
    # NOTE(review): these thresholds are unused here -- the actual
    # per-person thresholds live in Person.is_match.
    dist_thres = 0.05 # distance 1 pt from another pt in timesteps to be considered just one pt
    time_thres = 0.1
    ellipses = range(len(new_ellipses))
    print "\n\n\n"
    old_centers = np.asarray([person.center for person in self.people])
    print "Old c: \n", old_centers
    new_centers = np.asarray([e.center for e in new_ellipses])
    print "New c: \n", new_centers
    new_people = []
    # For each freshly detected ellipse, find the first previously tracked
    # person it matches; unmatched ellipses become new Person objects.
    # People with no matching new ellipse are dropped.
    for n_index, nc in enumerate(new_centers):
        new_is_matched = False
        for o_index, oc in enumerate(old_centers):
            if self.people[o_index].is_match(new_ellipses[n_index], time_stamp):
                # match between old timestep ellipse and the new ellipse
                old_person = self.people[o_index]
                old_person.update(new_ellipses[n_index], time_stamp)
                new_people.append(old_person)
                new_is_matched = True
                break # the new one is matched, move on to another new one
        if not new_is_matched:
            # No match found, add it in as a new person
            # name = (str(time_stamp.to_sec()))
            # take the next unused name off the class-level queue
            name = self.potential_names.get()
            print "Name: ", name
            new_person = Person(new_ellipses[n_index], time_stamp, name)
            new_people.append(new_person)
            print "no match found, adding one in"
            print new_person
            print "length of new ppl after no match ", len(new_people)
    self.people = new_people
    print "all people", len(new_people)
    print "".join([str(per) for per in self.people] )
# def get_colors(self, contamination):
# data = contamination.data
# for c in xrange(len(data)):
# if data[c] < 0.5:
# self.e_colors[c] = self.red
# else:
# self.e_colors[c] = self.green
#data to markers - does not link markers to past marker
def find_ppl_from_scan(self, data):
    """LaserScan callback: detect, match and publish people.

    Converts the scan to (x, y) points, fits ellipses to point clusters,
    matches them against previously tracked people, and publishes the
    updated set of person locations.

    :param data: sensor_msgs/LaserScan message
    """
    # Get x,y points of laser, excluding max distance angles
    points = self.points_from_ranges(data.ranges, data.angle_increment, data.angle_min, data.range_max)
    # Get ellipses from the (x,y) points
    new_ellipses = self.get_ellipses_from_points(points)
    ## if no new ellipses, what to do?
    # add missed timestep to all ppl then return
    if len(new_ellipses) < 1:
        return
    print "New ellipses: ", new_ellipses
    # **********************************
    # ******* troubleshooting **************
    # markers = MarkerArray()
    # for i in xrange(len(new_ellipses)):
    #     e = new_ellipses[i]
    #     m = Marker(ns="person", id=i + 100, type=3, action=0)
    #     m.header = Header(stamp=rospy.Time.now(), frame_id="laser")
    #     m.pose = Pose(Point(e.center[0], e.center[1], .5),
    #                   Quaternion(0.0,0.0,1.0,cos(e.theta/2)))
    #     m.scale = Vector3(e.a*2, e.b*2,1) #scale, in meters
    #     m.color = self.red #clean = red, infected = green
    #     markers.markers.append(m)
    # publisher.publish(markers)
    # *******************************
    self.match_people(new_ellipses, data.header.stamp)
    print "Num people found after matching: ", len(self.people)
    # Publish the current people
    self.pub_ppl_data()
def points_from_ranges(self, ranges, angle_increment, min_angle, max_dist):
    '''
    Convert the raw laser ranges of one scan into Cartesian coordinates.

    Every reading strictly closer than `max_dist` is turned into an
    [x, y] point; readings at the sensor's maximum distance are dropped
    as "no return".

    :returns: numpy array of the valid [x, y] points
    '''
    xy_points = []
    current_angle = min_angle
    for reading in ranges:
        if reading < max_dist:
            xy_points.append([cos(current_angle) * reading,
                              sin(current_angle) * reading])
        current_angle += angle_increment
    return np.asarray(xy_points)
def get_ellipses_from_points(self, points):
    '''
    Returns a list of ellipses that were fit to the points.
    Clusters the points (must be at least 4 points).
    Fits ellipses to each cluster.
    Checks if the these are valid, ppl-sized ellipses.
    If so, appends these to a list of elllipses.
    '''
    if len(points) > 3:
        # eps/min_samples tuned for leg-width scale clusters in meters
        db = DBSCAN(eps=0.5, min_samples=4).fit(points)
    else:
        # no points to cluster, so return
        return []
    labels = db.labels_
    # Number of clusters in labels, ignoring noise if present
    # (DBSCAN marks noise points with the label -1).
    ## Fit an ellipse to each cluster
    n_clusters = len(set(labels)) - (1 if -1 in db.labels_ else 0)
    print "Num clusters: ", n_clusters
    new_ellipses = []
    for n in xrange(n_clusters):
        xy = points[labels==n]
        e = Ellipse2d()
        e.fit(xy)
        print e
        ## check to see if its a valid Ellipse
        if e.is_valid():
            print "valid e : ", e
            new_ellipses.append(e)
    return new_ellipses
def pub_ppl_data(self):
people_data = PersonLocationArray()
# markers = MarkerArray()
for i in xrange(len(self.people)):
person = self.people[i]
# person_data = self.person_tracker.create_person_data(person.center[0], person.center[1], person.theta, person.a, person.b, "p"+ str(i))
person_data = self.person_tracker.create_person_data(person.center[0],
person.center[1],
person.theta,
| |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Callable, Dict, Optional, Sequence, Tuple, Union
import torch
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from monai.engines.utils import CommonKeys as Keys
from monai.engines.utils import GanKeys, default_make_latent, default_prepare_batch
from monai.engines.workflow import Workflow
from monai.inferers import Inferer, SimpleInferer
from monai.transforms import Transform
from monai.utils import exact_version, optional_import
if TYPE_CHECKING:
from ignite.engine import Engine
from ignite.metrics import Metric
else:
Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine")
Metric, _ = optional_import("ignite.metrics", "0.4.2", exact_version, "Metric")
__all__ = ["Trainer", "SupervisedTrainer", "GanTrainer"]
class Trainer(Workflow):
    """
    Common base for all trainer variants; extends ``Workflow``.
    """

    def run(self) -> None:
        """
        Start (or resume) training on the underlying Ignite Engine.
        Repeated calls continue from the engine's previous state.
        """
        # A gradient scaler is only needed when mixed precision is on.
        if self.amp:
            self.scaler = torch.cuda.amp.GradScaler()
        else:
            self.scaler = None
        super().run()

    def get_train_stats(self) -> Dict[str, float]:
        """Summarize progress as total epochs and iterations per epoch."""
        stats = {
            "total_epochs": self.state.max_epochs,
            "total_iterations": self.state.epoch_length,
        }
        return stats
class SupervisedTrainer(Trainer):
"""
Standard supervised training method with image and label, inherits from ``Trainer`` and ``Workflow``.
Args:
device: an object representing the device on which to run.
max_epochs: the total epoch number for trainer to run.
train_data_loader: Ignite engine use data_loader to run, must be torch.DataLoader.
network: to train with this network.
optimizer: the optimizer associated to the network.
loss_function: the loss function associated to the optimizer.
epoch_length: number of iterations for one epoch, default to `len(train_data_loader)`.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function to parse image and label for current iteration.
iteration_update: the callable function for every iteration, expect to accept `engine`
and `batchdata` as input parameters. if not provided, use `self._iteration()` instead.
inferer: inference method that execute model forward on input data, like: SlidingWindow, etc.
post_transform: execute additional transformation for the model output data.
Typically, several Tensor based transforms composed by `Compose`.
key_train_metric: compute metric when every iteration completed, and save average value to
engine.state.metrics when epoch completed. key_train_metric is the main metric to compare and save the
checkpoint into files.
additional_metrics: more Ignite metrics that also attach to Ignite Engine.
train_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:
CheckpointHandler, StatsHandler, SegmentationSaver, etc.
amp: whether to enable auto-mixed-precision training, default is False.
"""
def __init__(
self,
device: torch.device,
max_epochs: int,
train_data_loader: DataLoader,
network: torch.nn.Module,
optimizer: Optimizer,
loss_function: Callable,
epoch_length: Optional[int] = None,
non_blocking: bool = False,
prepare_batch: Callable = default_prepare_batch,
iteration_update: Optional[Callable] = None,
inferer: Optional[Inferer] = None,
post_transform: Optional[Transform] = None,
key_train_metric: Optional[Dict[str, Metric]] = None,
additional_metrics: Optional[Dict[str, Metric]] = None,
train_handlers: Optional[Sequence] = None,
amp: bool = False,
) -> None:
# set up Ignite engine and environments
super().__init__(
device=device,
max_epochs=max_epochs,
data_loader=train_data_loader,
epoch_length=epoch_length,
non_blocking=non_blocking,
prepare_batch=prepare_batch,
iteration_update=iteration_update,
post_transform=post_transform,
key_metric=key_train_metric,
additional_metrics=additional_metrics,
handlers=train_handlers,
amp=amp,
)
self.network = network
self.optimizer = optimizer
self.loss_function = loss_function
self.inferer = SimpleInferer() if inferer is None else inferer
def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]) -> Dict[str, Union[torch.Tensor, float]]:
    """
    Callback function for the Supervised Training processing logic of 1 iteration in Ignite Engine.
    Return below items in a dictionary:
        - IMAGE: image Tensor data for model input, already moved to device.
        - LABEL: label Tensor data corresponding to the image, already moved to device.
        - PRED: prediction result of model.
        - LOSS: loss value computed by loss function.

    Args:
        engine: Ignite Engine, it can be a trainer, validator or evaluator.
        batchdata: input data for this iteration, usually can be dictionary or tuple of Tensor data.

    Raises:
        ValueError: When ``batchdata`` is None.
    """
    if batchdata is None:
        raise ValueError("Must provide batch data for current iteration.")
    # prepare_batch yields either (inputs, targets) or (inputs, targets, args, kwargs);
    # extra args/kwargs, when present, are forwarded to the inferer call below.
    batch = self.prepare_batch(batchdata, engine.state.device, engine.non_blocking)
    if len(batch) == 2:
        inputs, targets = batch
        args: Tuple = ()
        kwargs: Dict = {}
    else:
        inputs, targets, args, kwargs = batch
    # switch modules such as dropout/batchnorm to training-mode behavior
    self.network.train()
    self.optimizer.zero_grad()
    if self.amp and self.scaler is not None:
        # Mixed-precision path: run the forward pass under autocast and scale the
        # loss before backward so float16 gradients do not underflow.
        with torch.cuda.amp.autocast():
            predictions = self.inferer(inputs, self.network, *args, **kwargs)
            # .mean() reduces a possibly per-element (unreduced) loss to a scalar
            loss = self.loss_function(predictions, targets).mean()
        self.scaler.scale(loss).backward()
        self.scaler.step(self.optimizer)  # unscales gradients, then steps the optimizer
        self.scaler.update()
    else:
        predictions = self.inferer(inputs, self.network, *args, **kwargs)
        loss = self.loss_function(predictions, targets).mean()
        loss.backward()
        self.optimizer.step()
    # loss.item() detaches the scalar loss into a plain Python float for reporting
    return {Keys.IMAGE: inputs, Keys.LABEL: targets, Keys.PRED: predictions, Keys.LOSS: loss.item()}
class GanTrainer(Trainer):
"""
Generative adversarial network training based on Goodfellow et al. 2014 https://arxiv.org/abs/1406.2661,
inherits from ``Trainer`` and ``Workflow``.
Training Loop: for each batch of data size `m`
1. Generate `m` fakes from random latent codes.
2. Update discriminator with these fakes and current batch reals, repeated d_train_steps times.
3. If g_update_latents, generate `m` fakes from new random latent codes.
4. Update generator with these fakes using discriminator feedback.
Args:
device: an object representing the device on which to run.
max_epochs: the total epoch number for engine to run.
train_data_loader: Core ignite engines uses `DataLoader` for training loop batchdata.
g_network: generator (G) network architecture.
g_optimizer: G optimizer function.
g_loss_function: G loss function for optimizer.
d_network: discriminator (D) network architecture.
d_optimizer: D optimizer function.
d_loss_function: D loss function for optimizer.
epoch_length: number of iterations for one epoch, default to `len(train_data_loader)`.
g_inferer: inference method to execute G model forward. Defaults to ``SimpleInferer()``.
d_inferer: inference method to execute D model forward. Defaults to ``SimpleInferer()``.
d_train_steps: number of times to update D with real data minibatch. Defaults to ``1``.
latent_shape: size of G input latent code. Defaults to ``64``.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
d_prepare_batch: callback function to prepare batchdata for D inferer.
Defaults to return ``GanKeys.REALS`` in batchdata dict.
g_prepare_batch: callback function to create batch of latent input for G inferer.
Defaults to return random latents.
g_update_latents: Calculate G loss with new latent codes. Defaults to ``True``.
iteration_update: the callable function for every iteration, expect to accept `engine`
and `batchdata` as input parameters. if not provided, use `self._iteration()` instead.
post_transform: execute additional transformation for the model output data.
Typically, several Tensor based transforms composed by `Compose`.
key_train_metric: compute the metric at the end of every iteration, and save the average value to
engine.state.metrics when the epoch completes. key_train_metric is the main metric used to compare
models and save the checkpoint into files.
additional_metrics: more Ignite metrics that also attach to Ignite Engine.
train_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:
CheckpointHandler, StatsHandler, SegmentationSaver, etc.
"""
def __init__(
    self,
    device: torch.device,
    max_epochs: int,
    train_data_loader: DataLoader,
    g_network: torch.nn.Module,
    g_optimizer: Optimizer,
    g_loss_function: Callable,
    d_network: torch.nn.Module,
    d_optimizer: Optimizer,
    d_loss_function: Callable,
    epoch_length: Optional[int] = None,
    g_inferer: Optional[Inferer] = None,
    d_inferer: Optional[Inferer] = None,
    d_train_steps: int = 1,
    latent_shape: int = 64,
    non_blocking: bool = False,
    d_prepare_batch: Callable = default_prepare_batch,
    g_prepare_batch: Callable = default_make_latent,
    g_update_latents: bool = True,
    iteration_update: Optional[Callable] = None,
    post_transform: Optional[Transform] = None,
    key_train_metric: Optional[Dict[str, Metric]] = None,
    additional_metrics: Optional[Dict[str, Metric]] = None,
    train_handlers: Optional[Sequence] = None,
):
    """Wire up the adversarial training workflow and store generator/discriminator components."""
    # The parent class drives the engine loop; the discriminator's batch preparation
    # is used as the workflow-level prepare_batch since real samples come from the loader.
    super().__init__(
        device=device,
        max_epochs=max_epochs,
        data_loader=train_data_loader,
        epoch_length=epoch_length,
        non_blocking=non_blocking,
        prepare_batch=d_prepare_batch,
        iteration_update=iteration_update,
        key_metric=key_train_metric,
        additional_metrics=additional_metrics,
        post_transform=post_transform,
        handlers=train_handlers,
    )
    # generator-side components
    self.g_network = g_network
    self.g_optimizer = g_optimizer
    self.g_loss_function = g_loss_function
    self.g_inferer = g_inferer if g_inferer is not None else SimpleInferer()
    self.g_prepare_batch = g_prepare_batch
    self.g_update_latents = g_update_latents
    self.latent_shape = latent_shape
    # discriminator-side components
    self.d_network = d_network
    self.d_optimizer = d_optimizer
    self.d_loss_function = d_loss_function
    self.d_inferer = d_inferer if d_inferer is not None else SimpleInferer()
    self.d_train_steps = d_train_steps
def _iteration(
self, engine: Engine, batchdata: Union[Dict, Sequence]
) -> Dict[str, Union[torch.Tensor, int, float, bool]]:
"""
Callback function for Adversarial Training processing logic of 1 iteration in Ignite Engine.
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
batchdata: input data for this iteration, usually can |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.