input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
svn_ra_callbacks_invoke_open_tmp_file(self, *args)
def __init__(self, *args):
"""__init__(self) -> svn_ra_callbacks_t"""
this = apply(_ra.new_svn_ra_callbacks_t, args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ra.delete_svn_ra_callbacks_t
__del__ = lambda self : None;
# Register the Python proxy class with the SWIG runtime so C-level
# svn_ra_callbacks_t pointers are wrapped as instances of this class.
svn_ra_callbacks_t_swigregister = _ra.svn_ra_callbacks_t_swigregister
svn_ra_callbacks_t_swigregister(svn_ra_callbacks_t)
def svn_ra_initialize(*args):
    """svn_ra_initialize(apr_pool_t pool) -> svn_error_t"""
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_initialize(*args)
def svn_ra_create_callbacks(*args):
    """svn_ra_create_callbacks(svn_ra_callbacks2_t callbacks, apr_pool_t pool) -> svn_error_t"""
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_create_callbacks(*args)
def svn_ra_open3(*args):
    """
    svn_ra_open3(svn_ra_session_t session_p, char repos_URL, char uuid,
        svn_ra_callbacks2_t callbacks, apr_hash_t config,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_open3(*args)
def svn_ra_open2(*args):
    """
    svn_ra_open2(svn_ra_session_t session_p, char repos_URL, svn_ra_callbacks2_t callbacks,
        apr_hash_t config, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_open2(*args)
def svn_ra_open(*args):
    """
    svn_ra_open(svn_ra_session_t session_p, char repos_URL, svn_ra_callbacks_t callbacks,
        void callback_baton, apr_hash_t config,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_open(*args)
def svn_ra_reparent(*args):
    """svn_ra_reparent(svn_ra_session_t ra_session, char url, apr_pool_t pool) -> svn_error_t"""
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_reparent(*args)
def svn_ra_get_session_url(*args):
    """svn_ra_get_session_url(svn_ra_session_t ra_session, char url, apr_pool_t pool) -> svn_error_t"""
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_session_url(*args)
def svn_ra_get_latest_revnum(*args):
    """
    svn_ra_get_latest_revnum(svn_ra_session_t session, svn_revnum_t latest_revnum,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_latest_revnum(*args)
def svn_ra_get_dated_revision(*args):
    """
    svn_ra_get_dated_revision(svn_ra_session_t session, svn_revnum_t revision, apr_time_t tm,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_dated_revision(*args)
def svn_ra_change_rev_prop(*args):
    """
    svn_ra_change_rev_prop(svn_ra_session_t session, svn_revnum_t rev, char name,
        svn_string_t value, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_change_rev_prop(*args)
def svn_ra_rev_proplist(*args):
    """
    svn_ra_rev_proplist(svn_ra_session_t session, svn_revnum_t rev, apr_hash_t props,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_rev_proplist(*args)
def svn_ra_rev_prop(*args):
    """
    svn_ra_rev_prop(svn_ra_session_t session, svn_revnum_t rev, char name,
        svn_string_t value, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_rev_prop(*args)
def svn_ra_get_commit_editor3(*args):
    """
    svn_ra_get_commit_editor3(svn_ra_session_t session, svn_delta_editor_t editor,
        void edit_baton, apr_hash_t revprop_table,
        svn_commit_callback2_t callback, apr_hash_t lock_tokens,
        svn_boolean_t keep_locks, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_commit_editor3(*args)
def svn_ra_get_commit_editor2(*args):
    """
    svn_ra_get_commit_editor2(svn_ra_session_t session, svn_delta_editor_t editor,
        void edit_baton, char log_msg, svn_commit_callback2_t callback,
        apr_hash_t lock_tokens,
        svn_boolean_t keep_locks, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_commit_editor2(*args)
def svn_ra_get_commit_editor(*args):
    """
    svn_ra_get_commit_editor(svn_ra_session_t session, svn_delta_editor_t editor,
        void edit_baton, char log_msg, svn_commit_callback_t callback,
        apr_hash_t lock_tokens, svn_boolean_t keep_locks,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_commit_editor(*args)
def svn_ra_get_file(*args):
    """
    svn_ra_get_file(svn_ra_session_t session, char path, svn_revnum_t revision,
        svn_stream_t stream, svn_revnum_t fetched_rev,
        apr_hash_t props, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_file(*args)
def svn_ra_get_dir2(*args):
    """
    svn_ra_get_dir2(svn_ra_session_t session, apr_hash_t dirents, svn_revnum_t fetched_rev,
        apr_hash_t props, char path,
        svn_revnum_t revision, apr_uint32_t dirent_fields,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_dir2(*args)
def svn_ra_get_dir(*args):
    """
    svn_ra_get_dir(svn_ra_session_t session, char path, svn_revnum_t revision,
        apr_hash_t dirents, svn_revnum_t fetched_rev,
        apr_hash_t props, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_dir(*args)
def svn_ra_get_mergeinfo(*args):
    """
    svn_ra_get_mergeinfo(svn_ra_session_t session, svn_mergeinfo_catalog_t catalog,
        apr_array_header_t paths, svn_revnum_t revision,
        svn_mergeinfo_inheritance_t inherit,
        svn_boolean_t include_descendants, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_mergeinfo(*args)
def svn_ra_do_update2(*args):
    """
    svn_ra_do_update2(svn_ra_session_t session, svn_ra_reporter3_t reporter,
        void report_baton, svn_revnum_t revision_to_update_to,
        char update_target, svn_depth_t depth,
        svn_boolean_t send_copyfrom_args, svn_delta_editor_t update_editor,
        void update_baton,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_do_update2(*args)
def svn_ra_do_update(*args):
    """
    svn_ra_do_update(svn_ra_session_t session, svn_ra_reporter2_t reporter,
        void report_baton, svn_revnum_t revision_to_update_to,
        char update_target, svn_boolean_t recurse,
        svn_delta_editor_t update_editor,
        void update_baton, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_do_update(*args)
def svn_ra_do_switch2(*args):
    """
    svn_ra_do_switch2(svn_ra_session_t session, svn_ra_reporter3_t reporter,
        void report_baton, svn_revnum_t revision_to_switch_to,
        char switch_target, svn_depth_t depth,
        char switch_url, svn_delta_editor_t switch_editor,
        void switch_baton, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_do_switch2(*args)
def svn_ra_do_switch(*args):
    """
    svn_ra_do_switch(svn_ra_session_t session, svn_ra_reporter2_t reporter,
        void report_baton, svn_revnum_t revision_to_switch_to,
        char switch_target, svn_boolean_t recurse,
        char switch_url, svn_delta_editor_t switch_editor,
        void switch_baton, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_do_switch(*args)
def svn_ra_do_status2(*args):
    """
    svn_ra_do_status2(svn_ra_session_t session, svn_ra_reporter3_t reporter,
        void report_baton, char status_target, svn_revnum_t revision,
        svn_depth_t depth, svn_delta_editor_t status_editor,
        void status_baton,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_do_status2(*args)
def svn_ra_do_status(*args):
    """
    svn_ra_do_status(svn_ra_session_t session, svn_ra_reporter2_t reporter,
        void report_baton, char status_target, svn_revnum_t revision,
        svn_boolean_t recurse, svn_delta_editor_t status_editor,
        void status_baton,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_do_status(*args)
def svn_ra_do_diff3(*args):
    """
    svn_ra_do_diff3(svn_ra_session_t session, svn_ra_reporter3_t reporter,
        void report_baton, svn_revnum_t revision,
        char diff_target, svn_depth_t depth, svn_boolean_t ignore_ancestry,
        svn_boolean_t text_deltas,
        char versus_url, svn_delta_editor_t diff_editor,
        void diff_baton, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_do_diff3(*args)
def svn_ra_do_diff2(*args):
    """
    svn_ra_do_diff2(svn_ra_session_t session, svn_ra_reporter2_t reporter,
        void report_baton, svn_revnum_t revision,
        char diff_target, svn_boolean_t recurse, svn_boolean_t ignore_ancestry,
        svn_boolean_t text_deltas,
        char versus_url, svn_delta_editor_t diff_editor,
        void diff_baton, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_do_diff2(*args)
def svn_ra_do_diff(*args):
    """
    svn_ra_do_diff(svn_ra_session_t session, svn_ra_reporter2_t reporter,
        void report_baton, svn_revnum_t revision,
        char diff_target, svn_boolean_t recurse, svn_boolean_t ignore_ancestry,
        char versus_url,
        svn_delta_editor_t diff_editor, void diff_baton,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_do_diff(*args)
def svn_ra_get_log2(*args):
    """
    svn_ra_get_log2(svn_ra_session_t session, apr_array_header_t paths,
        svn_revnum_t start, svn_revnum_t end, int limit,
        svn_boolean_t discover_changed_paths, svn_boolean_t strict_node_history,
        svn_boolean_t include_merged_revisions,
        apr_array_header_t revprops,
        svn_log_entry_receiver_t receiver,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_log2(*args)
def svn_ra_get_log(*args):
    """
    svn_ra_get_log(svn_ra_session_t session, apr_array_header_t paths,
        svn_revnum_t start, svn_revnum_t end, int limit,
        svn_boolean_t discover_changed_paths, svn_boolean_t strict_node_history,
        svn_log_message_receiver_t receiver,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_log(*args)
def svn_ra_check_path(*args):
    """
    svn_ra_check_path(svn_ra_session_t session, char path, svn_revnum_t revision,
        svn_node_kind_t kind, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_check_path(*args)
def svn_ra_stat(*args):
    """
    svn_ra_stat(svn_ra_session_t session, char path, svn_revnum_t revision,
        svn_dirent_t dirent, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_stat(*args)
def svn_ra_get_uuid2(*args):
    """svn_ra_get_uuid2(svn_ra_session_t session, char uuid, apr_pool_t pool) -> svn_error_t"""
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_uuid2(*args)
def svn_ra_get_uuid(*args):
    """svn_ra_get_uuid(svn_ra_session_t session, char uuid, apr_pool_t pool) -> svn_error_t"""
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_uuid(*args)
def svn_ra_get_repos_root2(*args):
    """svn_ra_get_repos_root2(svn_ra_session_t session, char url, apr_pool_t pool) -> svn_error_t"""
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_repos_root2(*args)
def svn_ra_get_repos_root(*args):
    """svn_ra_get_repos_root(svn_ra_session_t session, char url, apr_pool_t pool) -> svn_error_t"""
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_repos_root(*args)
def svn_ra_get_locations(*args):
    """
    svn_ra_get_locations(svn_ra_session_t session, apr_hash_t locations, char path,
        svn_revnum_t peg_revision, apr_array_header_t location_revisions,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_locations(*args)
def svn_ra_get_location_segments(*args):
    """
    svn_ra_get_location_segments(svn_ra_session_t session, char path, svn_revnum_t peg_revision,
        svn_revnum_t start_rev, svn_revnum_t end_rev,
        svn_location_segment_receiver_t receiver,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_location_segments(*args)
def svn_ra_get_file_revs2(*args):
    """
    svn_ra_get_file_revs2(svn_ra_session_t session, char path, svn_revnum_t start,
        svn_revnum_t end, svn_boolean_t include_merged_revisions,
        svn_file_rev_handler_t handler,
        void handler_baton, apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_file_revs2(*args)
def svn_ra_get_file_revs(*args):
    """
    svn_ra_get_file_revs(svn_ra_session_t session, char path, svn_revnum_t start,
        svn_revnum_t end, svn_ra_file_rev_handler_t handler,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_file_revs(*args)
def svn_ra_lock(*args):
    """
    svn_ra_lock(svn_ra_session_t session, apr_hash_t path_revs, char comment,
        svn_boolean_t steal_lock, svn_ra_lock_callback_t lock_func,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_lock(*args)
def svn_ra_unlock(*args):
    """
    svn_ra_unlock(svn_ra_session_t session, apr_hash_t path_tokens, svn_boolean_t break_lock,
        svn_ra_lock_callback_t lock_func,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_unlock(*args)
def svn_ra_get_lock(*args):
    """
    svn_ra_get_lock(svn_ra_session_t session, svn_lock_t lock, char path,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_lock(*args)
def svn_ra_get_locks(*args):
    """
    svn_ra_get_locks(svn_ra_session_t session, apr_hash_t locks, char path,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_locks(*args)
def svn_ra_replay_range(*args):
    """
    svn_ra_replay_range(svn_ra_session_t session, svn_revnum_t start_revision,
        svn_revnum_t end_revision, svn_revnum_t low_water_mark,
        svn_boolean_t send_deltas, svn_ra_replay_revstart_callback_t revstart_func,
        svn_ra_replay_revfinish_callback_t revfinish_func,
        void replay_baton,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_replay_range(*args)
def svn_ra_replay(*args):
    """
    svn_ra_replay(svn_ra_session_t session, svn_revnum_t revision, svn_revnum_t low_water_mark,
        svn_boolean_t send_deltas,
        svn_delta_editor_t editor, void edit_baton,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_replay(*args)
def svn_ra_has_capability(*args):
    """
    svn_ra_has_capability(svn_ra_session_t session, svn_boolean_t has, char capability,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_has_capability(*args)
def svn_ra_get_deleted_rev(*args):
    """
    svn_ra_get_deleted_rev(svn_ra_session_t session, char path, svn_revnum_t peg_revision,
        svn_revnum_t end_revision, svn_revnum_t revision_deleted,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_get_deleted_rev(*args)
# Re-export the RA capability name constants from the C extension module so
# callers can pass them to svn_ra_has_capability().
SVN_RA_CAPABILITY_DEPTH = _ra.SVN_RA_CAPABILITY_DEPTH
SVN_RA_CAPABILITY_MERGEINFO = _ra.SVN_RA_CAPABILITY_MERGEINFO
SVN_RA_CAPABILITY_LOG_REVPROPS = _ra.SVN_RA_CAPABILITY_LOG_REVPROPS
SVN_RA_CAPABILITY_PARTIAL_REPLAY = _ra.SVN_RA_CAPABILITY_PARTIAL_REPLAY
SVN_RA_CAPABILITY_COMMIT_REVPROPS = _ra.SVN_RA_CAPABILITY_COMMIT_REVPROPS
def svn_ra_print_modules(*args):
    """svn_ra_print_modules(svn_stringbuf_t output, apr_pool_t pool) -> svn_error_t"""
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_print_modules(*args)
def svn_ra_print_ra_libraries(*args):
    """svn_ra_print_ra_libraries(svn_stringbuf_t descriptions, void ra_baton, apr_pool_t pool) -> svn_error_t"""
    # apply() is deprecated (removed in Python 3); unpack args directly.
    return _ra.svn_ra_print_ra_libraries(*args)
class svn_ra_plugin_t:
"""Proxy of C svn_ra_plugin_t struct"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, svn_ra_plugin_t, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, svn_ra_plugin_t, name)
__repr__ = _swig_repr
__swig_setmethods__["name"] = _ra.svn_ra_plugin_t_name_set
__swig_getmethods__["name"] = _ra.svn_ra_plugin_t_name_get
__swig_setmethods__["description"] = _ra.svn_ra_plugin_t_description_set
__swig_getmethods__["description"] = _ra.svn_ra_plugin_t_description_get
__swig_setmethods__["open"] = _ra.svn_ra_plugin_t_open_set
__swig_getmethods__["open"] = _ra.svn_ra_plugin_t_open_get
__swig_setmethods__["get_latest_revnum"] = _ra.svn_ra_plugin_t_get_latest_revnum_set
__swig_getmethods__["get_latest_revnum"] = _ra.svn_ra_plugin_t_get_latest_revnum_get
__swig_setmethods__["get_dated_revision"] = _ra.svn_ra_plugin_t_get_dated_revision_set
__swig_getmethods__["get_dated_revision"] = _ra.svn_ra_plugin_t_get_dated_revision_get
__swig_setmethods__["change_rev_prop"] = _ra.svn_ra_plugin_t_change_rev_prop_set
__swig_getmethods__["change_rev_prop"] = _ra.svn_ra_plugin_t_change_rev_prop_get
__swig_setmethods__["rev_proplist"] = _ra.svn_ra_plugin_t_rev_proplist_set
__swig_getmethods__["rev_proplist"] = _ra.svn_ra_plugin_t_rev_proplist_get
__swig_setmethods__["rev_prop"] = _ra.svn_ra_plugin_t_rev_prop_set
__swig_getmethods__["rev_prop"] = _ra.svn_ra_plugin_t_rev_prop_get
__swig_setmethods__["get_commit_editor"] = _ra.svn_ra_plugin_t_get_commit_editor_set
__swig_getmethods__["get_commit_editor"] = _ra.svn_ra_plugin_t_get_commit_editor_get
__swig_setmethods__["get_file"] = _ra.svn_ra_plugin_t_get_file_set
__swig_getmethods__["get_file"] = _ra.svn_ra_plugin_t_get_file_get
__swig_setmethods__["get_dir"] = _ra.svn_ra_plugin_t_get_dir_set
__swig_getmethods__["get_dir"] = _ra.svn_ra_plugin_t_get_dir_get
__swig_setmethods__["do_update"] = _ra.svn_ra_plugin_t_do_update_set
__swig_getmethods__["do_update"] = _ra.svn_ra_plugin_t_do_update_get
__swig_setmethods__["do_switch"] = _ra.svn_ra_plugin_t_do_switch_set
__swig_getmethods__["do_switch"] = _ra.svn_ra_plugin_t_do_switch_get
__swig_setmethods__["do_status"] = _ra.svn_ra_plugin_t_do_status_set
__swig_getmethods__["do_status"] = _ra.svn_ra_plugin_t_do_status_get
__swig_setmethods__["do_diff"] = _ra.svn_ra_plugin_t_do_diff_set
__swig_getmethods__["do_diff"] = _ra.svn_ra_plugin_t_do_diff_get
__swig_setmethods__["get_log"] = _ra.svn_ra_plugin_t_get_log_set
__swig_getmethods__["get_log"] = _ra.svn_ra_plugin_t_get_log_get
__swig_setmethods__["check_path"] | |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoders for the speech model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import range
from six.moves import zip
import tensorflow as tf
from tensorflow.python.ops import inplace_ops
from lingvo.core import base_encoder
from lingvo.core import base_layer
from lingvo.core import layers
from lingvo.core import plot
from lingvo.core import py_utils
from lingvo.core import rnn_cell
from lingvo.core import rnn_layers
from lingvo.core import summary_utils
from lingvo.core import model_helper
ConvLSTMBlock = collections.namedtuple('ConvLSTMBlock', ('rnn', 'cnn'))
class AsrEncoder(base_encoder.BaseEncoder):
"""Speech encoder version 1."""
  @classmethod
  def Params(cls):
    """Configs for AsrEncoder.

    Returns:
      A Params object with hyperparameters and sub-layer templates for the
      CNN -> conv-LSTM -> bidirectional-LSTM encoder stack.
    """
    p = super(AsrEncoder, cls).Params()
    # Templates for sub-layers; __init__ copies and specializes these per
    # layer instance.
    p.Define('lstm_tpl', rnn_cell.LSTMCellSimple.Params(),
             'Configs template for the RNN layer.')
    p.Define('cnn_tpl', layers.ConvLayer.Params(),
             'Configs template for the conv layer.')
    p.Define('proj_tpl', layers.ProjectionLayer.Params(),
             'Configs template for the projection layer.')
    p.Define(
        'highway_skip', False,
        'If set, residual connections from different layers are gated. '
        'Will only be used if residual_start is enabled.')
    p.Define('highway_skip_tpl', layers.HighwaySkipLayer.Params(),
             'Configs template for the highway skip layer.')
    p.Define('conv_lstm_tpl', rnn_cell.ConvLSTMCell.Params(),
             'Configs template for ConvLSTMCell.')
    p.Define(
        'after_conv_lstm_cnn_tpl', layers.ConvLayer.Params(),
        'Configs template for the cnn layer immediately follow the'
        ' convlstm layer.')
    p.Define('conv_filter_shapes', None, 'Filter shapes for each conv layer.')
    p.Define('conv_filter_strides', None, 'Filter strides for each conv layer.')
    p.Define('input_shape', [None, None, None, None],
             'Shape of the input. This should a TensorShape with rank 4.')
    p.Define('lstm_cell_size', 256, 'LSTM cell size for the RNN layer.')
    p.Define('num_cnn_layers', 2, 'Number of conv layers to create.')
    p.Define('num_conv_lstm_layers', 1, 'Number of conv lstm layers to create.')
    p.Define('num_lstm_layers', 3, 'Number of rnn layers to create')
    p.Define('project_lstm_output', True,
             'Include projection layer after each encoder LSTM layer.')
    p.Define('pad_steps', 6,
             'Extra zero-padded timesteps to add to the input sequence. ')
    p.Define(
        'residual_start', 0, 'Start residual connections from this lstm layer. '
        'Disabled if 0 or greater than num_lstm_layers.')
    p.Define('residual_stride', 1,
             'Number of lstm layers to skip per residual connection.')
    p.Define(
        'bidi_rnn_type', 'func', 'Options: func, native_cudnn. '
        'func: BidirectionalFRNN, '
        'native_cudnn: BidirectionalNativeCuDNNLSTM.')
    # TODO(yonghui): Maybe move those configs to a separate file.
    # Set some reasonable default values.
    #
    # NOTE(yonghui): The default config below assumes the following encoder
    # architecture:
    #
    #   cnn/batch-norm/relu ->
    #   cnn/batch-norm/relu ->
    #   bidirectional conv-lstm ->
    #   cnn/batch-norm/relu
    #   bidirectional lstm ->
    #   projection/batch-norm/relu ->
    #   bidirectional lstm ->
    #   projection/batch-norm/relu ->
    #   bidirectional lstm
    #
    # Default config for the rnn layer.
    p.lstm_tpl.params_init = py_utils.WeightInit.Uniform(0.1)
    # Default config for the convolution layer.
    # NOTE(review): 80 feature bins x 3 channels appears to assume log-mel
    # input with deltas — TODO confirm against the input generator.
    p.input_shape = [None, None, 80, 3]
    p.conv_filter_shapes = [(3, 3, 3, 32), (3, 3, 32, 32)]
    p.conv_filter_strides = [(2, 2), (2, 2)]
    p.cnn_tpl.params_init = py_utils.WeightInit.TruncatedGaussian(0.1)
    # TODO(yonghui): Disable variational noise logic.
    # NOTE(yonghui): Fortunately, variational noise logic is currently not
    # implemented for ConvLayer yet (as of sep 22, 2016).
    # Default config for the projection layer.
    p.proj_tpl.params_init = py_utils.WeightInit.TruncatedGaussian(0.1)
    # TODO(yonghui): Disable variational noise logic.
    # NOTE(yonghui): Fortunately, variational noise logic is currently not
    # implemented for ProjectionLayer yet (as of sep 22, 2016).
    p.conv_lstm_tpl.filter_shape = [1, 3]  # height (time), width (frequency)
    p.conv_lstm_tpl.inputs_shape = [None, None, None, None]
    p.conv_lstm_tpl.cell_shape = [None, None, None, None]
    p.conv_lstm_tpl.params_init = py_utils.WeightInit.TruncatedGaussian(0.1)
    p.after_conv_lstm_cnn_tpl.filter_shape = [3, 3, None, None]
    p.after_conv_lstm_cnn_tpl.params_init = (
        py_utils.WeightInit.TruncatedGaussian(0.1))
    p.after_conv_lstm_cnn_tpl.filter_stride = [1, 1]
    return p
  @base_layer.initializer
  def __init__(self, params):
    """Constructs the encoder: conv stack, conv-LSTM blocks, then LSTM stack.

    Args:
      params: The Params object returned by AsrEncoder.Params(), specialized
        by the caller.
    """
    super(AsrEncoder, self).__init__(params)
    p = self.params
    assert p.packed_input is False, ('Packed inputs are not yet supported for '
                                     'AsrEncoder.')
    name = p.name
    with tf.variable_scope(name):
      # First create the conv layers.
      assert p.num_cnn_layers == len(p.conv_filter_shapes)
      assert p.num_cnn_layers == len(p.conv_filter_strides)
      params_conv_layers = []
      for i in range(p.num_cnn_layers):
        conv_p = p.cnn_tpl.Copy()
        conv_p.name = 'conv_L%d' % (i)
        conv_p.filter_shape = p.conv_filter_shapes[i]
        conv_p.filter_stride = p.conv_filter_strides[i]
        conv_p.is_eval = p.is_eval
        params_conv_layers.append(conv_p)
      self.CreateChildren('conv', params_conv_layers)
      # Propagate the static input shape through the conv stack to learn the
      # (batch, height, width, channel) shape the conv-LSTM layers will see.
      conv_output_shape = tf.TensorShape(p.input_shape)
      for i in range(p.num_cnn_layers):
        conv_output_shape = self.conv[i].OutShape(conv_output_shape)
      conv_output_shape = conv_output_shape.as_list()
      assert len(conv_output_shape) == 4  # batch, height, width, channel.
      params_conv_lstm_rnn = []
      params_conv_lstm_cnn = []
      for i in range(p.num_conv_lstm_layers):
        # NOTE(yonghui): We assume that output from ConvLSTMBlock has the same
        # shape as its input.
        _, _, width, in_channel = conv_output_shape
        f_conv_lstm_p = p.conv_lstm_tpl.Copy()
        f_conv_lstm_p.name = 'f_conv_lstm_%d' % (i)
        f_conv_lstm_p.inputs_shape = [None, 1, width, in_channel]
        f_conv_lstm_p.cell_shape = [None, 1, width, in_channel]
        b_conv_lstm_p = f_conv_lstm_p.Copy()
        b_conv_lstm_p.name = 'b_conv_lstm_%d' % (i)
        conv_lstm_rnn_p = self.CreateConvLstmLayerParams()
        conv_lstm_rnn_p.name = 'conv_lstm_rnn'
        conv_lstm_rnn_p.fwd = f_conv_lstm_p
        conv_lstm_rnn_p.bak = b_conv_lstm_p
        params_conv_lstm_rnn.append(conv_lstm_rnn_p)
        # The CNN after the bidirectional conv-LSTM maps the concatenated
        # fwd+bak channels (2 * in_channel) back down to in_channel.
        cnn_p = p.after_conv_lstm_cnn_tpl.Copy()
        cnn_p.name = 'conv_lstm_cnn_%d' % (i)
        cnn_p.filter_shape[2] = 2 * in_channel
        cnn_p.filter_shape[3] = in_channel
        params_conv_lstm_cnn.append(cnn_p)
      # TODO(yonghui): Refactor ConvLSTMBlock into a layer.
      self.CreateChildren('conv_lstm_rnn', params_conv_lstm_rnn)
      self.CreateChildren('conv_lstm_cnn', params_conv_lstm_cnn)
      (self._first_lstm_input_dim,
       self._first_lstm_input_dim_pad) = self.FirstLstmLayerInputDimAndPadding(
           conv_output_shape, pad_to_multiple=16)
      # Now create all the rnn layers and projection layers.
      # TODO(yonghui): take care of device placement.
      params_rnn_layers = []
      params_proj_layers = []
      params_highway_skip_layers = []
      for i in range(p.num_lstm_layers):
        # The first LSTM consumes the (padded) flattened conv features; later
        # LSTMs consume the concatenated fwd+bak outputs of the previous one.
        if i == 0:
          input_dim = self._first_lstm_input_dim
        else:
          input_dim = 2 * p.lstm_cell_size
        forward_p = p.lstm_tpl.Copy()
        forward_p.name = 'fwd_rnn_L%d' % (i)
        forward_p.num_input_nodes = input_dim
        forward_p.num_output_nodes = p.lstm_cell_size
        backward_p = forward_p.Copy()
        backward_p.name = 'bak_rnn_L%d' % (i)
        rnn_p = self.CreateBidirectionalRNNParams(forward_p, backward_p)
        rnn_p.name = 'brnn_L%d' % (i)
        params_rnn_layers.append(rnn_p)
        if p.project_lstm_output and (i < p.num_lstm_layers - 1):
          proj_p = p.proj_tpl.Copy()
          proj_p.input_dim = 2 * p.lstm_cell_size
          proj_p.output_dim = 2 * p.lstm_cell_size
          proj_p.name = 'proj_L%d' % (i)
          proj_p.is_eval = p.is_eval
          params_proj_layers.append(proj_p)
        # add the skip layers
        residual_index = i - p.residual_start + 1
        if p.residual_start > 0 and residual_index >= 0 and p.highway_skip:
          highway_skip = p.highway_skip_tpl.Copy()
          highway_skip.name = 'enc_hwskip_%d' % len(params_highway_skip_layers)
          highway_skip.input_dim = 2 * p.lstm_cell_size
          params_highway_skip_layers.append(highway_skip)
      self.CreateChildren('rnn', params_rnn_layers)
      self.CreateChildren('proj', params_proj_layers)
      self.CreateChildren('highway_skip', params_highway_skip_layers)
  @property
  def _use_functional(self):
    # Always use the functional (FRNN) implementation; this also triggers the
    # pad-to-multiple-of-16 alignment in FirstLstmLayerInputDimAndPadding.
    return True
  def CreateBidirectionalRNNParams(self, forward_p, backward_p):
    """Builds bidirectional-RNN params from fwd/bak cell params via model_helper."""
    return model_helper.CreateBidirectionalRNNParams(self.params, forward_p,
                                                     backward_p)
  def CreateConvLstmLayerParams(self):
    """Returns the params template used for each bidirectional conv-LSTM block."""
    return rnn_layers.BidirectionalFRNN.Params()
def FirstLstmLayerInputDimAndPadding(self,
conv_output_shape,
pad_to_multiple=16):
lstm_input_shape = conv_output_shape
# Makes sure the lstm input dims is multiple of 16 (alignment
# requirement from FRNN).
first_lstm_input_dim_unpadded = lstm_input_shape[2] * lstm_input_shape[3]
if self._use_functional and (first_lstm_input_dim_unpadded % pad_to_multiple
!= 0):
first_lstm_input_dim = int(
(first_lstm_input_dim_unpadded + pad_to_multiple - 1) /
pad_to_multiple) * pad_to_multiple
else:
first_lstm_input_dim = first_lstm_input_dim_unpadded
first_lstm_input_dim_padding = (
first_lstm_input_dim - first_lstm_input_dim_unpadded)
return first_lstm_input_dim, first_lstm_input_dim_padding
  @property
  def supports_streaming(self):
    # This encoder is bidirectional, so it cannot run incrementally.
    return False
  def zero_state(self, batch_size):
    """Returns an empty NestedMap; this encoder carries no recurrent state."""
    return py_utils.NestedMap()
def FProp(self, theta, batch, state0=None):
"""Encodes source as represented by 'inputs' and 'paddings'.
Args:
theta: A NestedMap object containing weights' values of this
layer and its children layers.
batch: A NestedMap with fields:
src_inputs - The inputs tensor. It is expected to be of shape [batch,
time, feature_dim, channels].
paddings - The paddings tensor. It is expected to be of shape [batch,
time].
state0: Recurrent input state. Not supported/ignored by this encoder.
Returns:
(outputs, out_paddings, state1) tuple. Outputs is of the shape
[time, batch, depth], and out_paddings is of the shape [time, batch]
"""
p = self.params
inputs, paddings = batch.src_inputs, batch.paddings
with tf.name_scope(p.name):
# Add a few extra padded timesteps at the end. This is for ensuring the
# correctness of the conv-layers at the edges.
if p.pad_steps > 0:
# inplace_update() is not supported by TPU for now. Since we have done
# padding on the input_generator, we may avoid this additional padding.
assert not py_utils.use_tpu()
inputs_pad = tf.zeros(
inplace_ops.inplace_update(tf.shape(inputs), 1, p.pad_steps),
inputs.dtype)
paddings_pad = tf.ones(
inplace_ops.inplace_update(tf.shape(paddings), 1, p.pad_steps),
paddings.dtype)
inputs = tf.concat([inputs, inputs_pad], 1, name='inputs')
paddings = tf.concat([paddings, paddings_pad], 1)
def ReshapeForPlot(tensor, padding, name):
"""Transposes and flattens channels to [batch, dim, seq_len] shape."""
# Flatten any dimensions beyond the third into the third.
batch_size = tf.shape(tensor)[0]
max_len = tf.shape(tensor)[1]
plot_tensor = tf.reshape(tensor, [batch_size, max_len, -1])
plot_tensor = tf.transpose(plot_tensor, [0, 2, 1], name=name)
return (plot_tensor, summary_utils.SequenceLength(padding))
plots = [
ReshapeForPlot(
tf.transpose(inputs, [0, 1, 3, 2]), paddings, 'inputs')
]
conv_out = inputs
out_padding = paddings
for i, conv_layer in enumerate(self.conv):
conv_out, out_padding = conv_layer.FProp(theta.conv[i], conv_out,
out_padding)
plots.append(
ReshapeForPlot(
tf.transpose(conv_out, [0, 1, 3, 2]), out_padding,
'conv_%d_out' % i))
def TransposeFirstTwoDims(t):
first_dim = tf.shape(t)[0]
second_dim = tf.shape(t)[1]
t_new = tf.transpose(
tf.reshape(t, [first_dim, second_dim, -1]), | |
import requests
import os
import logging
import pylast as pyl
from typing import List, Tuple, Optional, Dict, Union, Set, Generator
from yaml import safe_load
from fuzzywuzzy.fuzz import UWRatio
from fuzzywuzzy.process import extractOne
from .configuration import config
from .state import GenreDataState
log = logging.getLogger(__name__)
######
# genres.py
# Genre search is inspired by beets' LastGenre plugin (and actually uses its genre tree).
######
# LastFM-extended-scrobbles uses beetbox/beets' genres tree.
# Licensed under MIT
# Thank you Beets <3
# Upstream beets data files (genre tree/list) and the MIT license that covers
# them, fetched from the beets GitHub repository.
BEETS_GENRES_TREE_SRC = "https://raw.githubusercontent.com/beetbox/beets/master/beetsplug/lastgenre/genres-tree.yaml"
BEETS_GENRES_LIST_SRC = "https://raw.githubusercontent.com/beetbox/beets/master/beetsplug/lastgenre/genres.txt"
BEETS_LICENSE_SRC = "https://raw.githubusercontent.com/beetbox/beets/master/LICENSE"
# Local cache paths where the downloaded copies are stored.
BEETS_GENRES_TREE_PATH = os.path.join(config.CACHE_DIR, "genres-tree.yaml")
BEETS_GENRES_LIST_PATH = os.path.join(config.CACHE_DIR, "genres.txt")
BEETS_LICENSE_PATH = os.path.join(config.CACHE_DIR, "LICENSE_BEETS_GENRES.md")
# pylast writes its HTTP response cache to this file.
PYLAST_CACHING_FILE = os.path.join(config.CACHE_DIR, "pylast_cache")
# Shared Last.fm client used module-wide, with response caching and
# rate limiting enabled.
lastfm = pyl.LastFMNetwork(
    api_key=config.LASTFM_API_KEY,
    api_secret=config.LASTFM_API_SECRET,
)
lastfm.enable_caching(PYLAST_CACHING_FILE)
lastfm.enable_rate_limit()
class Genre:
    """A node in the (deprecated, currently unused) genre tree."""
    # DEPRECATED the genre tree is unused
    __slots__ = ("name", "parent_genre", "is_leaf", "tree_depth")

    def __init__(self, genre_name: str, parent_genre: "Genre", is_leaf: bool, tree_depth: int):
        self.name = genre_name
        self.parent_genre = parent_genre
        self.is_leaf = is_leaf
        self.tree_depth = tree_depth

    def __str__(self):
        parent_name = self.parent_genre.name if self.parent_genre is not None else 'None'
        return (
            f"<Genre \"{self.name}\" "
            f"parent=\"{parent_name}\" "
            f"is_leaf={self.is_leaf} "
            f"depth={self.tree_depth}>"
        )

    def parents(self) -> List["Genre"]:
        """
        Return a list of parent nodes, ordered from closest to root.

        Returns:
            A list of Genre instances, which are parents of the passed node, ordered from closest to root.
        """
        chain: List["Genre"] = []
        node: "Genre" = self.parent_genre
        while node is not None:
            chain.append(node)
            node = node.parent_genre
        return chain
def _download_to_file(url: str, destination_path: str) -> None:
    """Stream a single URL into a local file, raising on HTTP errors/timeouts."""
    # Timeout prevents an unresponsive server from hanging startup forever;
    # raise_for_status prevents caching an HTTP error page as genre data.
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    with open(destination_path, "wb") as output_f:
        for chunk in response.iter_content(chunk_size=256):
            output_f.write(chunk)


def download_genre_data() -> None:
    """
    Downloads the genre data from beets' GitHub repository. Saves into the configurable cache directory.

    Raises:
        requests.RequestException: on network failure, timeout, or a non-2xx
            HTTP response for any of the downloaded files.
    """
    log.info("Downloading genre data...")

    log.debug(f"Downloading genre list from {BEETS_GENRES_LIST_SRC}")
    _download_to_file(BEETS_GENRES_LIST_SRC, BEETS_GENRES_LIST_PATH)

    # DEPRECATED the genre tree is unused
    log.debug(f"Downloading genre tree from {BEETS_GENRES_TREE_SRC}")
    _download_to_file(BEETS_GENRES_TREE_SRC, BEETS_GENRES_TREE_PATH)

    log.debug(f"Downloading Beets' MIT LICENSE notice from {BEETS_LICENSE_SRC}")
    _download_to_file(BEETS_LICENSE_SRC, BEETS_LICENSE_PATH)

    log.info("Genre data downloaded.")
# def _load_genre_tree(genre_tree_raw: dict, output_list: List[Genre],
# _parent_node: Genre = None, _depth: int = 0) -> None:
# # TODO this is actually not used yet, make this an option
# # (allow for most-specific results)
# # DEPRECATED the genre tree is unused
# """
# Generate a list of Genre instances.
#
# Args:
# genre_tree_raw:
# Raw genre tree data.
# output_list:
# Due to the nature of lists, this is the list of Genre instances will end up in.
# _parent_node:
# Don't use this directly, it is used for recursion. Contains the parent node for the current subtree.
# """
# if type(genre_tree_raw) is list:
# for element in genre_tree_raw:
# # By checking if we're about to reach the end, we should save some time by directly appending
# # instead of recursively calling for a simple string
# if type(element) is str:
# # This is always a leaf!
# genre_instance = Genre(element, _parent_node, True, _depth)
# output_list.append(genre_instance)
# # Otherwise just recursively call with the subtree
# elif type(element) is dict:
# _load_genre_tree(element, output_list, _parent_node, _depth)
#
# elif type(genre_tree_raw) is dict:
# for sub_tree_key in genre_tree_raw.keys():
# # For each key, instantiate a Genre and pass it down the recursion
# genre_instance = Genre(sub_tree_key, _parent_node, False, _depth)
# output_list.append(genre_instance)
#
# _load_genre_tree(genre_tree_raw[sub_tree_key], output_list, genre_instance, _depth + 1)
def load_genre_data(state: GenreDataState) -> None:
    """
    Load the cached genre list from disk and store it on the given state.

    Args:
        state:
            GenreDataState instance to update in-place.
    """
    log.info("Loading genre data...")
    with open(BEETS_GENRES_LIST_PATH, "r", encoding="utf8") as genre_list_f:
        # One genre per line; title-case for consistent comparisons later on.
        genre_names = [line.title() for line in genre_list_f.read().splitlines()]
    # NOTE: the genre *tree* file is deprecated and intentionally not loaded
    # (see the commented-out _load_genre_tree above).
    log.info("Genre data loaded.")
    # BUG FIX: this previously assigned to the module-level `genre_state`
    # instead of the `state` argument, silently ignoring the parameter.
    state.full_genre_list = genre_names
# On startup:
# If needed, download the genres tree & list from beets' GitHub repository
# (only the list file is checked; the tree is deprecated and unused).
if not os.path.isfile(BEETS_GENRES_LIST_PATH):
    download_genre_data()
# DEPRECATED
# full_genres_list: List[str]
# genres_list: List[Genre]
# genres_name_list: List[str]
# DEPRECATED the genre tree is unused
# full_genres_list, genres_list, genres_name_list = load_genre_data()
# Load the genre data globally
# Shared module-level state holding the genre whitelist used by the
# tag-filtering functions below.
genre_state: GenreDataState = GenreDataState()
load_genre_data(genre_state)
######
# Caching
######
# Cache of previous lookups, keyed by the first three lookup arguments
# (track/album/artist MBIDs or names). A value of None means "no result".
cached_tags: Dict[Tuple[str, str, str], Optional[List[str]]] = {}
# Caching decorator
def cache_tag_results(func):
    """
    Decorator for the fetch_genre_by_mbid/metadata functions.

    Caches results in the module-level cached_tags dictionary to speed up
    subsequent lookups. The cache key is the tuple of the first three
    positional arguments; any additional positional arguments are passed
    through to the wrapped function but are NOT part of the cache key.
    """
    # Imported locally so the decorator is self-contained.
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name and docstring
    def wrapper(arg_1, arg_2, arg_3, *rest):
        cache_tuple = (arg_1, arg_2, arg_3)
        # Return the cached result if possible
        if cache_tuple in cached_tags:
            log.debug(f"{func.__name__}: cache hit")
            return cached_tags[cache_tuple]
        # Otherwise call the function and cache its result
        log.debug(f"{func.__name__}: cache miss")
        result = func(arg_1, arg_2, arg_3, *rest)
        cached_tags[cache_tuple] = result
        return result
    return wrapper
# def _get_tag_depth(tag: str) -> int:
# # DEPRECATED this function was meant for the genre tree system, but is unused
# if tag not in genres_name_list:
# return -1
#
# return genres_list[genres_name_list.index(tag)].tree_depth
def _filter_top_tags(tag_list: List[pyl.TopItem]) -> List[str]:
    """
    Given a list of pylast.TopItem tags, filter them by min_genre_weight,
    deduplicate them and sort them by weight (most popular first).
    Keeps only valid genres.

    Args:
        tag_list:
            A list of pylast.TopItem tags.

    Returns:
        Sorted and deduplicated list of title-cased genre names,
        at most config.MAX_GENRE_COUNT entries.
    """
    # Drop tags at or below the minimum weight
    # noinspection PyUnresolvedReferences
    weighted_tags: List[Tuple[str, int]] = [
        (a.item.name, int(a.weight)) for a in
        tag_list
        if int(a.weight) > config.MIN_TAG_WEIGHT
    ]
    # Sort by popularity, highest weight first.
    # BUG FIX: the previous implementation converted the sorted list into a
    # set, which destroyed the ordering it had just computed (and sorted
    # ascending, putting the least popular tags first before truncation).
    sorted_names = [name for name, _ in sorted(weighted_tags, key=lambda e: e[1], reverse=True)]
    # Deduplicate while preserving the sorted order
    unique_names = list(dict.fromkeys(sorted_names))
    # Now filter with the genre whitelist
    filtered_tags: List[str] = [
        tag.title() for tag in unique_names if tag.title() in genre_state.full_genre_list
    ]
    # Shorten the list to max_genre_count (see config.toml)
    return filtered_tags[:config.MAX_GENRE_COUNT]
def _parse_lastfm_track_genre(
        track: Optional[pyl.Track],
        album: Optional[pyl.Album],
        artist: pyl.Artist,
) -> List[str]:
    """
    Extract genre tags from Last.fm's tag system.

    Sources are consulted from most to least specific: track, then album,
    then artist. Less specific sources are only queried while we are still
    short of config.MAX_GENRE_COUNT genres, saving API calls (if the track
    and album already provide enough tags, artist tags are never fetched).

    Args:
        track:
            pylast.Track instance to search for tags on
        album:
            pylast.Album instance to search for tags on
        artist:
            pylast.Artist instance to search for tags on

    Returns:
        List of strings, representing the best choices for genres.
        Length depends on max_genre_count config value.
    """
    limit = config.MAX_GENRE_COUNT
    collected: List[str] = []
    if track is not None:
        collected += _filter_top_tags(track.get_top_tags(limit=limit))
    if album is not None and len(collected) < limit:
        collected += _filter_top_tags(album.get_top_tags(limit=limit))
    if len(collected) < limit:
        collected += _filter_top_tags(artist.get_top_tags(limit=limit))
    return collected[:limit]
def _search_page_gen(
        pylast_search: Union[pyl.AlbumSearch, pyl.TrackSearch, pyl.ArtistSearch],
        page_limit: int = config.MAX_LASTFM_PAGES
) -> Generator[List[Union[pyl.Album, pyl.Track, pyl.Artist]], None, None]:
    """
    Fetch results from a pylast AlbumSearch/TrackSearch/ArtistSearch object.

    Args:
        pylast_search:
            pylast.*Search object to fetch pages for
        page_limit:
            Hard page limit.

    Returns:
        A generator, yields the next page (list) of corresponding pylast
        results (pylast.Album for pylast.AlbumSearch, ...) until an empty
        page is returned or page_limit pages have been yielded.
    """
    # BUG FIX: the original fetched one page into `last`, never yielded it
    # and never refreshed it, so the first page was dropped and the loop's
    # emptiness check always tested the same stale value.
    pages_yielded = 0
    page = pylast_search.get_next_page()
    while len(page) > 0 and pages_yielded < page_limit:
        yield page
        pages_yielded += 1
        page = pylast_search.get_next_page()
@cache_tag_results
def fetch_genre_by_mbid(track_mbid: str, album_mbid: str, artist_mbid: str) -> Optional[List[str]]:
    """
    Given a track, album and artist MBID, find the corresponding Last.fm entries.
    This function is cached using a combination of all three arguments.

    Args:
        track_mbid:
            String with the track's MBID.
        album_mbid:
            String with the album's MBID.
        artist_mbid:
            String with the artist's MBID.

    Returns:
        List of strings containing title-cased genre names.
        None if no result.
    """
    try:
        # Resolve all three MBIDs to pylast objects, then extract the tags.
        resolved_track: pyl.Track = lastfm.get_track_by_mbid(track_mbid)
        resolved_album: pyl.Album = lastfm.get_album_by_mbid(album_mbid)
        resolved_artist: pyl.Artist = lastfm.get_artist_by_mbid(artist_mbid)
        return _parse_lastfm_track_genre(resolved_track, resolved_album, resolved_artist)
    except pyl.WSError:
        # Last.fm reported an error for one of the lookups: treat as no result
        return None
@cache_tag_results
def fetch_genre_by_metadata(track_title: str, album_title: str, artist_name: str) -> Optional[List[str]]:
"""
Given a track, album and artist name, find the corresponding Last.fm entries.
This function is cached using a combination of all three arguments.
Args:
track_title:
String with the track's title.
album_title:
String with the album's title.
artist_name:
String with the artist's name.
Returns:
List of strings containing title-cased genre names.
None if no result.
"""
try:
# Fetch just one page, we don't need more
# TODO can this cause problems when an artist has multiple tracks with the same title?
# Can we even solve this - pylast.Track has no album data?
track: Optional[pyl.Track] = None
if track_title is not None and len(track_title) > 0:
try:
track_search: | |
# Input Optimization Algorithm
# ReverseLearning, 2017
# Import dependencies
import tensorflow as tf
import numpy as np
from time import time
import pandas
# Suppress warnings
from warnings import filterwarnings
filterwarnings("ignore")
class IOA:
    def __init__(self, model, ins, tensorBoardPath = None):
        """
        Args:
            model: callable applied to the optimized input list to produce
                   the model output tensor (see optimize()).
            ins: number of model inputs.
            tensorBoardPath: optional directory for TensorBoard summaries;
                             None disables summary writing.
        """
        # The model to calculate outputs with
        self.model = model
        # Number of inputs
        self.ins = ins
        # Path to save tensorboard files (optional)
        self.tbPath = tensorBoardPath
    def clean(self, inp):
        """
        Clean / normalize a candidate input before optimization.

        Currently a no-op placeholder that returns `inp` unchanged.
        TODO: Implement
        """
        return inp
def optimize(self, target, epochs = 1000, learn_rate = .01, debug = False,
loss_function="absolute_distance", restrictions = {}, debug_interval = -1,
error_tolerance = None, rangeGradientScalar = 10e10, gradientTolerance = 0.0,
startPreset = [], returnDigest = False, digestInterval = 1, title = ""):
"""
Applies the Input Backprop Algorithm and returns an input with
a target output
Parameters:
target : target value
- "max" for maximum value
- "min" for minimum value
epochs : number of epochs to run (DEFAULT 1000)
- (-1) runs until error is below learning rate
loss_function : loss function to use (DEFAULT absolute distance)
- "absolute_distance" : absolute difference between label and output
- "cross_entropy" : cross entropy function
- "quadratic_distance" : absolute distance squared
debug : on / off debug mode
restrictions : a dictionary of range and type restrictions for the optimal
- Format: {index0 : restriction0, ..., indexN : restrictionN}
- For constant value: restrictionX = value
- For range: restricionX = (lower, upper)
debug_interval : number of epochs between each debug statement
- Use a negative number to only print ending statement
error_tolerance : the largest acceptable error before auto-breaking
(default is learn_rate)
rangeGradientScalar : scalar for the gradients of the range-restricted vars
"""
# Start a tensorflow session
sess = tf.Session()
# Reason for breaking the loop (zero grad, finished epocs, etc.)
breakReason = None
# Initialize returnDigest
digest = []
# Clean inputs
# Ensure thar epochs is an integer
epochs = int(epochs)
# Ensure that learning rate is a float
learn_rate = float(learn_rate)
# If the target us a string (ie. max or min), make sure all lowercase and valid
# Otherwise clean input
if type(target) == str:
target = target.lower()
if target not in ["max", "min"]: raise ValueError("'{0}' is not a valid target".format(target))
else:
try: target = self.clean(target)
except: raise ValueError("'{0}' is not a valid target".format(target))
# If the error tolerance wasn't set, set it to the learning rate
if error_tolerance == None: error_tolerance = learn_rate
# Chck for valid starting preset
if len(startPreset) != self.ins and startPreset != []:
raise ValueError("{0} is not a valid starting preset".format(startPreset))
# Get and format the range-restricted restrictions
rangeRestrictedIndexes = []
for i in restrictions.keys():
if type(restrictions[i]) in [list, tuple]:
rangeRestrictedIndexes.append(i)
# - DEFINE PARAMETERS -
# Input
# Start with mode set by startMode
if startPreset == []:
startOptimal = [[tf.Variable(0.0, name = "StartOptimal-{0}".format(i))
for i in range(self.ins)]]
else:
startOptimal = [[tf.Variable(float(i), name = "StartOptimal-{0}".format(i))
for i in startPreset]]
# Apply constant restrictions to startOptimal and collect restricted vars
rangeRestrictedVars = []
for k in restrictions.keys():
if type(restrictions[k]) in [float, int]:
# Apply constant
startOptimal[0][k] = tf.constant(float(restrictions[k]))
elif type(restrictions[k]) in [list, tuple]:
rangeRestrictedVars.append(startOptimal[0][k])
# Get the range-restriction vectors for startOptimal
rangeRestrictedVectors = self._getRestrictionVectors(restrictions, startOptimal)
# Tensor of the lowest gradients for use when checking if max / min / best is found
lowGrad = self._getLowGrad(startOptimal, gradientTolerance)
# Finalize optimal
optimal = self._applyRestrictionVector(startOptimal, rangeRestrictedVectors)
if self.tbPath != None:
for i in range(len(optimal[0])):
tf.summary.scalar("Optimal-{0}".format(i), optimal[0][i])
# Calculate output from the model (restrictions applied)
out = self.model(optimal)
if self.tbPath != None: tf.summary.histogram("Output", out)
# Target label
# If the target is max or min, don't set label
if target in ["max", "min"]: label = None
else: label = tf.constant(target)
# Loss function
loss = self._getLossFunction(loss_function, target, label, out)
if self.tbPath != None: tf.summary.histogram("Loss", loss)
# Get variables (exclude constants)
vlist = self._getVarList(startOptimal)
# End if there are no variables to optimize
if len(vlist) == 0:
final = self._evalOptimal(optimal, sess)
sess.close()
return final
# Create an optimizer of the given learning rate
optimizer = tf.train.ProximalGradientDescentOptimizer(learn_rate)
# Get the gradients from the loss function for each variable
gradients = optimizer.compute_gradients(loss, var_list = vlist)
if self.tbPath != None: tf.summary.histogram("OriginalGradients", gradients)
# Raise range-restricted variables
newGrads = [self._raiseGrad(g, rangeGradientScalar)
if g[1] in rangeRestrictedVars else g
for g in gradients]
if self.tbPath != None: tf.summary.histogram("NewGradients", newGrads)
# Gradient application
applyNewGrads = optimizer.apply_gradients(newGrads, name = "ApplyGradients")
# Get the absolute error
if target in ["max", "min"]:
absoluteError = tf.constant(0.0)
else:
absoluteError = tf.abs(tf.subtract(label, out), name = "AbsoluteError")
# Summarize the error
if self.tbPath != None: tf.summary.scalar("error", absoluteError)
# Merge and create FileWriter for TensorBoard
if self.tbPath != None:
mergeTensorBoard = tf.summary.merge_all()
writer = tf.summary.FileWriter(self.tbPath, sess.graph)
# Initialize the computation graph
sess.run(tf.global_variables_initializer())
# - TRAIN -
# A counter for counting epochs
counter = 0
# If debug is on, print intial debug report
if debug and debug_interval > 0:
self._printDebugStatus(sess, epochs = counter, startOptimal = startOptimal, optimal = optimal)
# The main traing loop
while True:
# Start timer (for DEBUG profiling)
time0 = time()
# Break if error is 0 or within learning rate of zero
absoluteErrorEvaluated = absoluteError.eval(session = sess)
# If absolute error is a single number, put it in a list
if type(absoluteErrorEvaluated) not in [list, tuple, np.ndarray]:
absoluteErrorEvaluated = [absoluteErrorEvaluated]
if sum(absoluteErrorEvaluated) <= error_tolerance \
and target not in ["max", "min"]:
breakReason = "Beat Error"
break
# Break if gradients are all zero
gradCheck = self._checkGradients(newGrads, lowGrad, sess)
if gradCheck:
breakReason = gradCheck
break
# Break if epochs limit reached
if counter >= epochs and epochs != -1:
breakReason = "Epoch Limit Reached"
break
# Apply training step to find optimal
sess.run(applyNewGrads)
# Increment counter
counter += 1
# Write summaries
if self.tbPath != None:
writer.add_summary(sess.run(mergeTensorBoard), counter)
# Debug printing
if counter % debug_interval == 0 and debug and debug_interval > 0:
# Dont show error for max and min
if target == "max" or target == "min": absErrorDebug = None
else: absErrorDebug = absoluteErrorEvaluated
self._printDebugStatus(sess, epochs = counter, startOptimal = startOptimal,
optimal = optimal, absoluteError = absErrorDebug,
timer = time() - time0, gradients = newGrads)
if counter % digestInterval == 0:
if target == "max" or target == "min": absErrorDebug = None
else: absErrorDebug = absoluteErrorEvaluated
# Add to digest
digest = self._addDigest(digest, sess, epochs = counter, startOptimal = startOptimal,
optimal = optimal, absoluteError = absErrorDebug,
timer = time() - time0, gradients = newGrads)
# Print final digest
if debug:
# Print title
print("\n{0}".format(title))
# Print final optimal (remove list endings if a single number)
evalOpt = [i.eval(session = sess) for i in optimal[0]]
if len(evalOpt) > 1:
print("OPTIMAL INPUT :: {0}".format(evalOpt))
else:
print("OPTIMAL INPUT :: {0}".format(evalOpt[0]))
# Print the calculated output (remove list endings if a single number)
calcOut = self.model(optimal).eval(session = sess)
if type(calcOut) in [list, tuple, np.ndarray, np.array]:
if len(calcOut) > 1:
print("CALCULATED OUT :: {0}".format(calcOut[0]))
else:
if type(calcOut[0]) in [list, tuple, np.ndarray, np.array]:
print("CALCULATED OUT :: {0}".format(calcOut[0][0]))
else:
print("CALCULATED OUT :: {0}".format(calcOut[0]))
else:
print("CALCULATED OUT :: {0}".format(calcOut))
# Print target
if label != None:
print("TARGET OUT :: {0}".format(label.eval(session = sess)))
err = absoluteError.eval(session=sess)
if type(err) in [list, tuple, np.ndarray]:
if len(err) > 1:
print("ERROR :: {0}".format(err))
print("TOTAL ERROR :: {0}".format(sum(err)))
else:
print("ERROR :: {0}".format(err[0]))
elif target in ["min", "max"]:
print("TARGET OUT :: {0}".format(target))
else:
print("ERROR :: {0}".format(err))
print("EPOCHS :: {0} ({1})".format(counter, breakReason))
# Don't repeat final data point it digest
if counter % debug_interval != 0:
# Dont show error for max and min
if target == "max" or target == "min": absErrorDebug = None
else:
absErrorDebug = absoluteErrorEvaluated
# Add to digest
digest = self._addDigest(digest, sess, epochs=counter, startOptimal=startOptimal,
optimal=optimal, absoluteError=absErrorDebug,
timer=time() - time0, gradients=newGrads)
# Finalize the optimal solution
final = self._evalOptimal(optimal, sess)
# Close the session, free the memory
sess.close()
# Return the final solution
if returnDigest:
return | |
# <gh_stars>1-10
import re
import collections
import collections.abc
from copy import copy
from pathlib import Path
from numbers import Number
from operator import truediv
from itertools import chain, repeat, accumulate
from typing import Any, Dict, List, Tuple, Optional, Sequence, Hashable, Iterator, Union, Type, Set, Callable
from coba.backports import Literal
from coba.contexts import CobaContext
from coba.exceptions import CobaException
from coba.utilities import PackageChecker
from coba.pipes import Pipes, Sink, Source, JsonEncode, JsonDecode, DiskSource, DiskSink, ListSource, ListSink, Foreach
class Table:
    """A container class for storing tabular data."""
    def __init__(self, name:str, primary_cols: Sequence[str], rows: Sequence[Dict[str,Any]], preferred_cols: Sequence[str] = []):
        """Instantiate a Table.
        Args:
            name: The name of the table. Used for display purposes.
            primary_cols: Table columns used to make each row's tuple "key".
            rows: The actual rows that should be stored in the table. Each row is required to contain the given primary_cols.
            preferred_cols: A list of columns that we prefer be displayed immediately after primary columns. All remaining
                columns (i.e., neither primary nor preferred) will be ordered alphabetically.
        """
        self._name    = name
        self._primary = primary_cols
        # Every row must carry all primary-key columns.
        for row in rows:
            assert len(row.keys() & primary_cols) == len(primary_cols), 'A Table row was provided without a primary key.'
        # Collect the complete set of column names. A row may carry a special
        # '_packed' dict (column -> list of values); rows with packed data also
        # get a synthetic 'index' column (filled in below).
        all_columns: Set[str] = set()
        for row in rows:
            all_columns |= {'index'} if '_packed' in row else set()
            all_columns |= row.keys()-{'_packed'}
            all_columns |= all_columns.union(row.get('_packed',{}).keys())
        # Display order: primary cols, then 'index', then preferred cols,
        # then everything else alphabetically.
        col_priority = list(chain(primary_cols + ['index'] + preferred_cols + sorted(all_columns)))
        self._columns = sorted(all_columns, key=col_priority.index)
        # Row storage: keys kept sorted; flat (scalar) values and packed
        # (list-per-column) values stored in separate dicts per key.
        self._rows_keys: List[Hashable] = []
        self._rows_flat: Dict[Hashable, Dict[str,Any]] = {}
        self._rows_pack: Dict[Hashable, Dict[str,Any]] = {}
        for row in rows:
            row_key = row[primary_cols[0]] if len(primary_cols) == 1 else tuple(row[col] for col in primary_cols)
            row_pack = row.pop('_packed',{})
            row_flat = row
            if row_pack:
                # 1-based position of each packed entry within its row.
                row_pack['index'] = list(range(1,len(list(row_pack.values())[0])+1))
            self._rows_keys.append(row_key)
            self._rows_pack[row_key] = row_pack
            self._rows_flat[row_key] = row_flat
        self._rows_keys = sorted(self._rows_keys)
    @property
    def name(self) -> str:
        """The name of the table."""
        return self._name
    @property
    def keys(self) -> Sequence[Hashable]:
        """Keys for accessing data in the table."""
        return self._rows_keys
    @property
    def columns(self) -> Sequence[str]:
        """The columns in the table."""
        return self._columns
    @property
    def dtypes(self) -> Sequence[Type[Union[int,float,bool,object]]]:
        """The dtypes for the columns in the table."""
        flats = self._rows_flat
        packs = self._rows_pack
        # A column counts as "packed" if any row stores it in its packed dict.
        columns_packed = [ any([ col in packs[key] for key in self.keys]) for col in self.columns ]
        # Per-column list of values across all rows (flat value wins, then
        # packed value, then the column default).
        columns_values = [ [flats[key].get(col, packs[key].get(col, self._default(col))) for key in self.keys] for col in self.columns ]
        return [ self._infer_type(column_packed, column_values) for column_packed, column_values in zip(columns_packed,columns_values)]
    def filter(self, row_pred:Callable[[Dict[str,Any]],bool] = None, **kwargs) -> 'Table':
        """Filter to specific rows.
        Args:
            row_pred: A predicate that returns true for row dictionaries that should be kept.
            kwargs: key value pairs where the key is the column and the value indicates what
                value a row should have in that column to be kept. Keeping logic depends on
                the row value type and the kwargs value type. If kwarg value == row value keep
                the row. If kwarg value is callable pass the row value to the predicate. If
                kwarg value is a collection keep the row if the row value is in the collection.
                If kwarg value is a string apply a regular expression match to the row value.
        """
        # NOTE: the misspelled helper name below is kept as-is (doc-only pass).
        def satisifies_filter(col_filter,col_value):
            if col_filter == col_value:
                return True
            # A number matched against a string: require it as a standalone
            # token (no digit immediately before or after).
            if isinstance(col_filter,Number) and isinstance(col_value,str):
                return re.search(f'(\D|^){col_filter}(\D|$)', col_value)
            if isinstance(col_filter,str) and isinstance(col_value,str):
                return re.search(col_filter, col_value)
            if callable(col_filter):
                return col_filter(col_value)
            return False
        def satisfies_all_filters(key):
            row = self[key]
            row_filter_results = [ row_pred is None or row_pred(row) ]
            col_filter_results = [ ]
            for col,col_filter in kwargs.items():
                # Non-string containers: keep the row if the value is in the
                # container or matches any element of it.
                if isinstance(col_filter,collections.abc.Container) and not isinstance(col_filter,str):
                    col_filter_results.append(row[col] in col_filter or any([satisifies_filter(cf,row[col]) for cf in col_filter]))
                else:
                    col_filter_results.append(satisifies_filter(col_filter,row.get(col,self._default(col)) ))
            return all(row_filter_results+col_filter_results)
        # Shallow-copy the table and narrow its key list; row storage is shared.
        new_result = copy(self)
        new_result._rows_keys = list(filter(satisfies_all_filters,self.keys))
        return new_result
    def to_pandas(self) -> Any:
        """Turn the Table into a Pandas data frame."""
        PackageChecker.pandas("Table.to_pandas")
        import pandas as pd #type: ignore
        import numpy as np  #type: ignore #pandas installs numpy so if we have pandas we have numpy
        # Pre-allocate one numpy column per table column, then fill row-by-row
        # (a packed row expands into pack_size consecutive frame rows).
        col_numpy = { col: np.empty(len(self), dtype=dtype) for col,dtype in zip(self.columns,self.dtypes)}
        row_index = 0
        for key in self.keys:
            flat = self._rows_flat[key]
            pack = self._rows_pack[key]
            pack_size = 1 if not pack else len(pack['index'])
            for col in self.columns:
                if col in pack:
                    val = pack[col]
                elif col in flat:
                    # Wrap sequences so numpy assigns them as a single cell
                    # repeated over the packed slice, not element-wise.
                    if isinstance(flat[col], (tuple,list)):
                        val = [flat[col]]
                    else:
                        val = flat[col]
                else:
                    val = self._default(col)
                col_numpy[col][row_index:(row_index+pack_size)] = val
            row_index += pack_size
        return pd.DataFrame(col_numpy, columns=self.columns)
    def to_tuples(self) -> Sequence[Tuple[Any,...]]:
        """Turn the Table into a sequence of tuples."""
        tuples = []
        for key in self.keys:
            flat = self._rows_flat[key]
            pack = self._rows_pack[key]
            if not pack:
                tuples.append(tuple(flat.get(col,self._default(col)) for col in self.columns))
            else:
                # Flat values repeat for every packed entry of the row.
                tuples.extend(list(zip(*[pack.get(col,repeat(flat.get(col,self._default(col)))) for col in self.columns])))
        return tuples
    def _default(self, column:str) -> Any:
        # 'index' defaults to [1] (a one-entry packed row); all else to None.
        return [1] if column == "index" else None
    def _infer_type(self, is_packed: bool, values: Sequence[Any]) -> Type[Union[int,float,bool,object]]:
        """Infer a column dtype from its values (flattening packed lists)."""
        types: List[Optional[Type[Any]]] = []
        to_type = lambda value: None if value is None else type(value)
        for value in values:
            if is_packed and isinstance(value, (list,tuple)):
                types.extend([to_type(v) for v in value])
            else:
                types.append(to_type(value))
        return self._resolve_types(types)
    def _resolve_types(self, types: Sequence[Optional[Type[Any]]]) -> Type[Union[int,float,bool,object]]:
        """Collapse a set of observed value types into one numpy-friendly dtype."""
        types = list(set(types))
        if len(types) == 1 and types[0] in [dict,str]:
            return object
        if len(types) == 1 and types[0] in [int,float,bool]:
            return types[0]
        # None mixes with numeric types as float (NaN-able).
        if all(t in [None,int,float] for t in types):
            return float
        return object
    def __iter__(self) -> Iterator[Dict[str,Any]]:
        for key in self.keys:
            yield self[key]
    def __contains__(self, key: Union[Hashable, Sequence[Hashable]]) -> bool:
        return key in self.keys
    def __str__(self) -> str:
        return str({"Table": self.name, "Columns": self.columns, "Rows": len(self)})
    def _ipython_display_(self):
        #pretty print in jupyter notebook (https://ipython.readthedocs.io/en/stable/config/integrating.html)
        print(str(self))
    def __len__(self) -> int:
        # Length counts packed entries: each row contributes the size of its
        # packed 'index' list (or 1 if unpacked).
        return sum([ len(self._rows_pack[key].get('index',[None])) for key in self.keys ])
    def __getitem__(self, key: Union[Hashable, Sequence[Hashable]]) -> Dict[str,Any]:
        if key not in self.keys: raise KeyError(key)
        # Merge flat and packed values into a single row dict.
        return dict(**self._rows_flat[key], **self._rows_pack[key])
class InteractionsTable(Table):
def to_progressive_lists(self, span: int = None, each: bool = False, ord_col: str = "index", val_col: str = "reward"):
"""Return expanding or exponential averages for col grouped by learner and possibly environment.
Args:
span: If span is None return an expanding average (i.e., progressive validation). If span is not none
calculate a simple moving average with window size of span (window will be smaller than span initially).
each: If true the group by learner and environment (as in each environment). If each is false
then only group by learner.
ord_col: The column which indicates the order in which averaging is calculated on the val_col.
val_col: The column we wish to calculate the progressive average values for.
Returns:
Either [[learner_id, col progressive means...],...] or [[learner_id, environment_id, col progressive means...],...].
"""
lrn_sim_rows = []
for interactions in self:
values = interactions[val_col]
orders = interactions[ord_col]
values = [ v[1] for v in sorted(zip(orders,values)) ]
if span is None or span >= len(values):
cumwindow = list(accumulate(values))
cumdivisor = list(range(1,len(cumwindow)+1))
elif span == 1:
cumwindow = list(values)
cumdivisor = [1]*len(cumwindow)
else:
moving_sum = 0
cumwindow = []
cumdivisor = []
for index in range(len(values)):
sub_i = index-span
add_i = index
moving_sum += values[add_i]
moving_sum -= values[sub_i] if sub_i >= 0 else 0
cumwindow.append(moving_sum)
cumdivisor.append(min(span,add_i+1))
#highly-performant way to calucate exponential moving average identical to Pandas df.ewm(span=span).mean()
#alpha = 2/(1+span)
#cumwindow = list(accumulate(values , lambda a,c: c + (1-alpha)*a))
#cumdivisor = list(accumulate([1.]*len(values), lambda a,c: c + (1-alpha)*a)) #type: ignore
lrn_sim_rows.append([interactions["learner_id"], interactions["environment_id"], *list(map(truediv, cumwindow, cumdivisor))])
if each:
return lrn_sim_rows
else:
grouped_lrn_sim_rows = collections.defaultdict(list)
for row in lrn_sim_rows:
grouped_lrn_sim_rows[row[0]].append(row[2:])
lrn_rows = []
for learner_id in grouped_lrn_sim_rows.keys():
Z = list(zip(*grouped_lrn_sim_rows[learner_id]))
if not Z: continue
Y = [ sum(z)/len(z) for z in Z ]
lrn_rows.append([learner_id, *Y])
return lrn_rows
def to_progressive_pandas(self, span: int = None, each: bool = False, ord_col="index", val_col: str = "reward"):
"""Return expanding or exponential averages for yaxis grouped by learner and possibly environment.
Args:
span: If span is None return an expanding average (i.e., progressive validation). If span is not none
calculate a simple moving average with window size of span (window will be smaller than span initially).
each: If true the group by learner and environment (as in each environment). If each is false
then only group by learner.
ord_col: The column which indicates the order in which averaging is calculated on the val_col.
val_col: The column we wish to calculate the progressive average values for.
Returns:
A data frame whose columns are (learner_id, [environment_id], interaction | |
string representation of my class
(package.module.Class); normally this is adequate, but
you may override this to change it.
"""
return reflect.qual(self.__class__).encode('utf-8')
def getTypeToCopyFor(self, perspective):
"""Determine what type tag to send for me.
By default, defer to self.L{getTypeToCopy}() normally this is
adequate, but you may override this to change it.
"""
return self.getTypeToCopy()
def jellyFor(self, jellier):
"""Assemble type tag and state to copy for this broker.
This will call L{getTypeToCopyFor} and L{getStateToCopy}, and
return an appropriate s-expression to represent me.
"""
if jellier.invoker is None:
return getInstanceState(self, jellier)
p = jellier.invoker.serializingPerspective
t = self.getTypeToCopyFor(p)
state = self.getStateToCopyFor(p)
sxp = jellier.prepare(self)
sxp.extend([t, jellier.jelly(state)])
return jellier.preserve(self, sxp)
class Cacheable(Copyable):
    """A cached instance.
    This means that it's copied; but there is some logic to make sure
    that it's only copied once. Additionally, when state is retrieved,
    it is passed a "proto-reference" to the state as it will exist on
    the client.
    XXX: The documentation for this class needs work, but it's the most
    complex part of PB and it is inherently difficult to explain.
    """
    def getStateToCacheAndObserveFor(self, perspective, observer):
        """
        Get state to cache on the client and client-cache reference
        to observe locally.
        This is similar to getStateToCopyFor, but it additionally
        passes in a reference to the client-side RemoteCache instance
        that will be created when it is unserialized. This allows
        Cacheable instances to keep their RemoteCaches up to date when
        they change, such that no changes can occur between the point
        at which the state is initially copied and the client receives
        it that are not propagated.
        """
        # Default: ignore the observer and behave like a plain Copyable.
        return self.getStateToCopyFor(perspective)
    def jellyFor(self, jellier):
        """Return an appropriate tuple to serialize me.
        Depending on whether this broker has cached me or not, this may
        return either a full state or a reference to an existing cache.
        """
        # Not going over the wire: serialize plain instance state instead.
        if jellier.invoker is None:
            return getInstanceState(self, jellier)
        luid = jellier.invoker.cachedRemotelyAs(self, 1)
        if luid is None:
            # Not yet cached by this broker: register the cache entry, then
            # send the full state together with its new luid.
            luid = jellier.invoker.cacheRemotely(self)
            p = jellier.invoker.serializingPerspective
            type_ = self.getTypeToCopyFor(p)
            # The observer lets this instance push updates to the remote cache.
            observer = RemoteCacheObserver(jellier.invoker, self, p)
            state = self.getStateToCacheAndObserveFor(p, observer)
            l = jellier.prepare(self)
            jstate = jellier.jelly(state)
            l.extend([type_, luid, jstate])
            return jellier.preserve(self, l)
        else:
            # Already cached remotely: send only a reference to it.
            return cached_atom, luid
    def stoppedObserving(self, perspective, observer):
        """This method is called when a client has stopped observing me.
        The 'observer' argument is the same as that passed in to
        getStateToCacheAndObserveFor.
        """
        # Default implementation does nothing; subclasses may override.
class RemoteCopy(Unjellyable):
    """I am a remote copy of a Copyable object.
    When the state from a L{Copyable} object is received, an instance will
    be created based on the copy tags table (see setUnjellyableForClass) and
    sent the L{setCopyableState} message. I provide a reasonable default
    implementation of that message; subclass me if you wish to serve as
    a copier for remote data.
    NOTE: copiers are invoked with no arguments. Do not implement a
    constructor which requires args in a subclass of L{RemoteCopy}!
    """
    def setCopyableState(self, state):
        """I will be invoked with the state to copy locally.
        'state' is the data returned from the remote object's
        'getStateToCopyFor' method, which will often be the remote
        object's dictionary (or a filtered approximation of it depending
        on my peer's perspective).
        """
        if _PY3:
            # Attribute names may arrive as bytes; normalize them to str
            # so they are usable as __dict__ keys on Python 3.
            decoded = {}
            for key, value in state.items():
                if isinstance(key, bytes):
                    key = key.decode('utf8')
                decoded[key] = value
            state = decoded
        self.__dict__ = state
    def unjellyFor(self, unjellier, jellyList):
        # Not coming over the wire: restore plain instance state instead.
        if unjellier.invoker is None:
            return setInstanceState(self, unjellier, jellyList)
        self.setCopyableState(unjellier.unjelly(jellyList[1]))
        return self
class RemoteCache(RemoteCopy, Serializable):
    """A cache is a local representation of a remote L{Cacheable} object.
    This represents the last known state of this object. It may
    also have methods invoked on it -- in order to update caches,
    the cached class generates a L{pb.RemoteReference} to this object as
    it is originally sent.
    Much like copy, I will be invoked with no arguments. Do not
    implement a constructor that requires arguments in one of my
    subclasses.
    """
    def remoteMessageReceived(self, broker, message, args, kw):
        """A remote message has been received. Dispatch it appropriately.
        The default implementation is to dispatch to a method called
        'C{observe_messagename}' and call it on my with the same arguments.
        """
        # The method name may cross the wire as bytes; normalize to str so
        # the "observe_%s" attribute lookup below works on Python 3.
        if not isinstance(message, str):
            message = message.decode('utf8')
        args = broker.unserialize(args)
        kw = broker.unserialize(kw)
        method = getattr(self, "observe_%s" % message)
        try:
            state = method(*args, **kw)
        except TypeError:
            # Log the mismatched call before re-raising so the failure can be
            # traced to the observer method's signature.
            log.msg("%s didn't accept %s and %s" % (method, args, kw))
            raise
        return broker.serialize(state, None, method, args, kw)
    def jellyFor(self, jellier):
        """serialize me (only for the broker I'm for) as the original cached reference
        """
        if jellier.invoker is None:
            # Not being sent over a connection: serialize as plain state.
            return getInstanceState(self, jellier)
        assert jellier.invoker is self.broker, "You cannot exchange cached proxies between brokers."
        return b'lcache', self.luid
    def unjellyFor(self, unjellier, jellyList):
        """Reconstitute the cache from jellyList, returning a fresh borg copy.

        The sequence of operations below is order-sensitive; see the inline
        comments.
        """
        if unjellier.invoker is None:
            return setInstanceState(self, unjellier, jellyList)
        self.broker = unjellier.invoker
        self.luid = jellyList[1]
        # Hand out a separate "borg" instance (shared __dict__, distinct
        # identity) so per-reference lifecycle tracking via __del__ works.
        borgCopy = self._borgify()
        # XXX questionable whether this was a good design idea...
        init = getattr(borgCopy, "__init__", None)
        if init:
            init()
        unjellier.invoker.cacheLocally(jellyList[1], self)
        borgCopy.setCopyableState(unjellier.unjelly(jellyList[2]))
        # Might have changed due to setCopyableState method; we'll assume that
        # it's bad form to do so afterwards.
        self.__dict__ = borgCopy.__dict__
        # chomp, chomp -- some existing code uses "self.__dict__ =", some uses
        # "__dict__.update". This is here in order to handle both cases.
        self.broker = unjellier.invoker
        self.luid = jellyList[1]
        return borgCopy
##    def __really_del__(self):
##        """Final finalization call, made after all remote references have been lost.
##        """
    def __cmp__(self, other):
        """Compare me to another RemoteCache.
        """
        # NOTE(review): cmp() only exists on Python 2; under Python 3 the
        # interpreter never consults __cmp__, so this body is Py2-only.
        if isinstance(other, self.__class__):
            return cmp(id(self.__dict__), id(other.__dict__))
        else:
            return cmp(id(self.__dict__), other)
    def __hash__(self):
        """Hash me.
        """
        # Hash on the shared state dict's id so every borg copy of the same
        # cache hashes alike; the modulo keeps the value in int range.
        return int(id(self.__dict__) % sys.maxsize)
    # Connection bookkeeping, filled in by unjellyFor: the broker this cache
    # belongs to and its remote locally-unique id.
    broker = None
    luid = None
    def __del__(self):
        """Do distributed reference counting on finalize.
        """
        try:
            # log.msg( ' --- decache: %s %s' % (self, self.luid) )
            if self.broker:
                self.broker.decCacheRef(self.luid)
        except:
            # Never let finalizer errors propagate; just record them.
            log.deferr()
    def _borgify(self):
        """
        Create a new object that shares its state (i.e. its C{__dict__}) and
        type with this object, but does not share its identity.
        This is an instance of U{the Borg design pattern
        <https://code.activestate.com/recipes/66531/>} originally described by
        <NAME>, but unlike the example given there, this is not a
        replacement for a Singleton. Instead, it is for lifecycle tracking
        (and distributed garbage collection). The purpose of these separate
        objects is to have a separate object tracking each application-level
        reference to the root L{RemoteCache} object being tracked by the
        broker, and to have their C{__del__} methods be invoked.
        This may be achievable via a weak value dictionary to track the root
        L{RemoteCache} instances instead, but this implementation strategy
        predates the availability of weak references in Python.
        @return: The new instance.
        @rtype: C{self.__class__}
        """
        blank = _createBlank(self.__class__)
        blank.__dict__ = self.__dict__
        return blank
def unjellyCached(unjellier, unjellyList):
    """Unjelly a 'cached' token: return a fresh borg of the locally cached object."""
    luid = unjellyList[1]
    cached = unjellier.invoker.cachedLocallyAs(luid)
    return cached._borgify()
setUnjellyableForClass("cached", unjellyCached)
def unjellyLCache(unjellier, unjellyList):
    """Unjelly an 'lcache' token: resolve the LUID to the remotely cached object."""
    return unjellier.invoker.remotelyCachedForLUID(unjellyList[1])
setUnjellyableForClass("lcache", unjellyLCache)
def unjellyLocal(unjellier, unjellyList):
    """Unjelly a 'local' token: look the object up by its local identifier."""
    return unjellier.invoker.localObjectForID(unjellyList[1])
setUnjellyableForClass("local", unjellyLocal)
@comparable
class RemoteCacheMethod:
    """A callable proxy for one method on a reference to a L{RemoteCache}.
    """
    def __init__(self, name, broker, cached, perspective):
        """(internal) initialize.

        Record the method name, owning broker, the cached object and the
        observing perspective for later dispatch.
        """
        self.name = name
        self.broker = broker
        self.perspective = perspective
        self.cached = cached

    def __cmp__(self, other):
        identity = (self.name, self.broker, self.perspective, self.cached)
        return cmp(identity, other)

    def __hash__(self):
        identity = (self.name, self.broker, self.perspective, self.cached)
        return hash(identity)

    def __call__(self, *args, **kw):
        """(internal) action method.

        Send the message over the wire; the cached object must already have
        been given to the peer.
        """
        remoteID = self.broker.cachedRemotelyAs(self.cached)
        if remoteID is None:
            from pb import ProtocolError
            raise ProtocolError("You can't call a cached method when the object hasn't been given to the peer yet.")
        return self.broker._sendMessage(b'cache', self.perspective, remoteID,
                                        self.name, args, kw)
@comparable
class RemoteCacheObserver:
"""I am a reverse-reference to the peer's L{RemoteCache}.
I am generated automatically when a cache is serialized. I
represent a reference to the client's L{RemoteCache} object that
will represent a particular L{Cacheable}; I am the additional
object passed to getStateToCacheAndObserveFor.
"""
def __init__(self, broker, cached, perspective):
    """(internal) Initialize me.

    @param broker: a L{pb.Broker} instance.
    @param cached: a L{Cacheable} instance that this L{RemoteCacheObserver}
        corresponds to.
    @param perspective: a reference to the perspective who is observing this.
    """
    self.broker = broker
    self.cached = cached
    self.perspective = perspective
def __repr__(self):
    """Render a debugging string naming my broker, cached object, perspective and id."""
    details = (self.broker, self.cached, self.perspective, id(self))
    return "<RemoteCacheObserver(%s, %s, %s) at %s>" % details
def __hash__(self):
    """Generate a hash unique to all L{RemoteCacheObserver}s for this
    broker/perspective/cached triplet.
    """
    bucket = 2 ** 10
    return (hash(self.broker) % bucket
            + hash(self.perspective) % bucket
            + hash(self.cached) % bucket)
def __cmp__(self, other):
"""Compare me to another L{RemoteCacheObserver}.
"""
return cmp((self.broker, self.perspective, self.cached), | |
Ul, S, Sl = (None if U is None else U[valid_gene_checker, :]), \
(None if Ul is None else Ul[valid_gene_checker, :]), \
(None if S is None else S[valid_gene_checker, :]), \
(None if Sl is None else Sl[valid_gene_checker, :])
subset_adata = subset_adata[:, valid_gene_checker]
adata.var[kin_param_pre + 'sanity_check'] = valid_bools_
if assumption_mRNA.lower() == 'auto': assumption_mRNA = assump_mRNA
if experiment_type == 'conventional': assumption_mRNA = 'ss'
if model.lower() == "stochastic" and experiment_type.lower() not in ["conventional", "kinetics", "degradation", "kin", "deg", "one-shot"]:
"""
# temporially convert to deterministic model as moment model for mix_std_stm
and other types of labeling experiment is ongoing."""
model = "deterministic"
if assumption_mRNA.lower() == "ss" or (experiment_type.lower() in ["one-shot", "mix_std_stm"]):
if est_method.lower() == "auto": est_method = "gmm" if model == 'stochastic' else 'ols'
if experiment_type.lower() == "one_shot":
beta = subset_adata.var.beta if "beta" in subset_adata.var.keys() else None
gamma = subset_adata.var.gamma if "gamma" in subset_adata.var.keys() else None
ss_estimation_kwargs = {"beta": beta, "gamma": gamma}
else:
ss_estimation_kwargs = {}
est = ss_estimation(
U=U.copy() if U is not None else None,
Ul=Ul.copy() if Ul is not None else None,
S=S.copy() if S is not None else None,
Sl=Sl.copy() if Sl is not None else None,
P=P.copy() if P is not None else None,
US=US.copy() if US is not None else None,
S2=S2.copy() if S2 is not None else None,
conn=subset_adata.obsp['moments_con'],
t=t,
ind_for_proteins=ind_for_proteins,
model=model,
est_method=est_method,
experiment_type=experiment_type,
assumption_mRNA=assumption_mRNA,
assumption_protein=assumption_protein,
concat_data=concat_data,
cores=cores,
**ss_estimation_kwargs
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if experiment_type in ["one-shot", "one_shot"]:
est.fit(one_shot_method=one_shot_method, **est_kwargs)
else:
est.fit(**est_kwargs)
alpha, beta, gamma, eta, delta = est.parameters.values()
U, S = get_U_S_for_velocity_estimation(
subset_adata,
use_smoothed,
has_splicing,
has_labeling,
log_unnormalized,
NTR_vel,
)
vel = velocity(estimation=est)
vel_U = vel.vel_u(U)
if exp_type == 'one-shot':
vel_S = vel.vel_s(U, U + S)
else:
vel_S = vel.vel_s(U, S)
vel_P = vel.vel_p(S, P)
adata = set_velocity(
adata,
vel_U,
vel_S,
vel_P,
_group,
cur_grp,
cur_cells_bools,
valid_bools_,
ind_for_proteins,
)
adata = set_param_ss(
adata,
est,
alpha,
beta,
gamma,
eta,
delta,
experiment_type,
_group,
cur_grp,
kin_param_pre,
valid_bools_,
ind_for_proteins,
)
elif assumption_mRNA.lower() == "kinetic":
if model_was_auto and experiment_type.lower() == "kin": model = "mixture"
data_type = 'smoothed' if use_smoothed else 'sfs'
params, half_life, cost, logLL, param_ranges, cur_X_data, cur_X_fit_data = kinetic_model(subset_adata, tkey, model, est_method, experiment_type, has_splicing,
has_switch=True, param_rngs={}, data_type=data_type, **est_kwargs)
len_t, len_g = len(np.unique(t)), len(_group)
if cur_grp == _group[0]:
if len_g == 1:
X_data, X_fit_data = np.array((adata.n_vars, len_t)), np.array((adata.n_vars, len_t))
else:
X_data, X_fit_data = np.array((len_g, adata.n_vars, len_t)), np.array((len_g, adata.n_vars, len_t))
if len(_group) == 1:
X_data, X_fit_data = cur_X_data, cur_X_fit_data
else:
X_data[cur_grp_i, :, :], X_fit_data[cur_grp_i, :, :] = cur_X_data, cur_X_fit_data
a, b, alpha_a, alpha_i, alpha, beta, gamma = (
params.loc[:, 'a'].values if 'a' in params.columns else None,
params.loc[:, 'b'].values if 'b' in params.columns else None,
params.loc[:, 'alpha_a'].values if 'alpha_a' in params.columns else None,
params.loc[:, 'alpha_i'].values if 'alpha_i' in params.columns else None,
params.loc[:, 'alpha'].values if 'alpha' in params.columns else None,
params.loc[:, 'beta'].values if 'beta' in params.columns else None,
params.loc[:, 'gamma'].values if 'gamma' in params.columns else None,
)
if alpha is None:
alpha = fbar(a, b, alpha_a, 0) if alpha_i is None else fbar(a, b, alpha_a, alpha_i)
all_kinetic_params = ['a', 'b', 'alpha_a', 'alpha_i', 'alpha', 'beta', 'gamma']
extra_params = params.loc[:, params.columns.difference(all_kinetic_params)]
# if alpha = None, set alpha to be U; N - gamma R
params = {"alpha": alpha, "beta": beta, "gamma": gamma, "t": t}
vel = velocity(**params)
U, S = get_U_S_for_velocity_estimation(
subset_adata,
use_smoothed,
has_splicing,
has_labeling,
log_unnormalized,
NTR_vel,
)
vel_U = vel.vel_u(U)
vel_S = vel.vel_u(S)
vel_P = vel.vel_p(S, P)
adata = set_velocity(
adata,
vel_U,
vel_S,
vel_P,
_group,
cur_grp,
cur_cells_bools,
valid_bools_,
ind_for_proteins,
)
adata = set_param_kinetic(
adata,
alpha,
a,
b,
alpha_a,
alpha_i,
beta,
gamma,
cost,
logLL,
kin_param_pre,
extra_params,
_group,
cur_grp,
valid_bools_,
)
# add protein related parameters in the moment model below:
elif model.lower() == "model_selection":
warnings.warn("Not implemented yet.")
if group is not None and group in adata.obs[group]:
uns_key = group + "_dynamics"
else:
uns_key = "dynamics"
if sanity_check and experiment_type in ['kin', 'deg']:
sanity_check_cols = adata.var.columns.str.endswith('sanity_check')
adata.var['use_for_dynamics'] = adata.var.loc[:, sanity_check_cols].sum(1).astype(bool)
else:
adata.var['use_for_dynamics'] = adata.var['use_for_pca'].copy()
adata.uns[uns_key] = {
"filter_gene_mode": filter_gene_mode,
"t": t,
"group": group,
"X_data": X_data,
"X_fit_data": X_fit_data,
"asspt_mRNA": assumption_mRNA,
"experiment_type": experiment_type,
"normalized": normalized,
"model": model,
"has_splicing": has_splicing,
"has_labeling": has_labeling,
"has_protein": has_protein,
"use_smoothed": use_smoothed,
"NTR_vel": NTR_vel,
"log_unnormalized": log_unnormalized,
}
return adata
def kinetic_model(subset_adata, tkey, model, est_method, experiment_type, has_splicing, has_switch, param_rngs,
data_type='sfs', **est_kwargs):
"""est_method is not used. data_type can either 'sfs' or 'smoothed'."""
time = subset_adata.obs[tkey].astype('float')
if experiment_type.lower() == 'kin':
if has_splicing:
layers = ['M_ul', 'M_sl', 'M_uu', 'M_su'] if (
'M_ul' in subset_adata.layers.keys() and data_type == 'smoothed') \
else ['X_ul', 'X_sl', 'X_uu', 'X_su']
if model in ['deterministic', 'stochastic']:
layer_u = 'M_ul' if ('M_ul' in subset_adata.layers.keys() and data_type == 'smoothed') else 'X_ul'
layer_s = 'M_sl' if ('M_ul' in subset_adata.layers.keys() and data_type == 'smoothed') else 'X_sl'
X, X_raw = prepare_data_has_splicing(subset_adata, subset_adata.var.index, time,
layer_u=layer_u, layer_s=layer_s, total_layers=layers)
elif model.startswith('mixture'):
X, _, X_raw = prepare_data_deterministic(subset_adata, subset_adata.var.index, time,
layers=layers, total_layers=layers)
if model == 'deterministic':
X = [X[i][[0, 1], :] for i in range(len(X))]
_param_ranges = {'alpha': [0, 1000], 'beta': [0, 1000], 'gamma': [0, 1000]}
x0 = {'u0': [0, 1000], 's0': [0, 1000]}
Est, _ = Estimation_DeterministicKin, Deterministic
elif model == 'stochastic':
x0 = {'u0': [0, 1000], 's0': [0, 1000],
'uu0': [0, 1000], 'ss0': [0, 1000],
'us0': [0, 1000]}
if has_switch:
_param_ranges = {'a': [0, 1000], 'b': [0, 1000],
'alpha_a': [0, 1000], 'alpha_i': 0,
'beta': [0, 1000], 'gamma': [0, 1000], }
Est, _ = Estimation_MomentKin, Moments
else:
_param_ranges = {'alpha': [0, 1000], 'beta': [0, 1000], 'gamma': [0, 1000], }
Est, _ = Estimation_MomentKinNoSwitch, Moments_NoSwitching
elif model == 'mixture':
_param_ranges = {'alpha': [0, 1000], 'alpha_2': [0, 0], 'beta': [0, 1000], 'gamma': [0, 1000], }
x0 = {'ul0': [0, 0], 'sl0': [0, 0], 'uu0': [0, 1000], 'su0': [0, 1000]}
Est = Mixture_KinDeg_NoSwitching(Deterministic(), Deterministic())
elif model == 'mixture_deterministic_stochastic':
X, X_raw = prepare_data_mix_has_splicing(subset_adata, subset_adata.var.index, time, layer_u=layers[2],
layer_s=layers[3], layer_ul=layers[0], layer_sl=layers[1],
total_layers=layers, mix_model_indices=[0, 1, 5, 6, 7, 8, 9])
_param_ranges = {'alpha': [0, 1000], 'alpha_2': [0, 0], 'beta': [0, 1000], 'gamma': [0, 1000], }
x0 = {'ul0': [0, 0], 'sl0': [0, 0],
'u0': [0, 1000], 's0': [0, 1000],
'uu0': [0, 1000], 'ss0': [0, 1000],
'us0': [0, 1000], }
Est = Mixture_KinDeg_NoSwitching(Deterministic(), Moments_NoSwitching())
elif model == 'mixture_stochastic_stochastic':
_param_ranges = {'alpha': [0, 1000], 'alpha_2': [0, 0], 'beta': [0, 1000], 'gamma': [0, 1000], }
X, X_raw = prepare_data_mix_has_splicing(subset_adata, subset_adata.var.index, time, layer_u=layers[2],
layer_s=layers[3], layer_ul=layers[0], layer_sl=layers[1],
total_layers=layers, mix_model_indices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
x0 = {'ul0': [0, 1000], 'sl0': [0, 1000],
'ul_ul0': [0, 1000], 'sl_sl0': [0, 1000],
'ul_sl0': [0, 1000],
'u0': [0, 1000], 's0': [0, 1000],
'uu0': [0, 1000], 'ss0': [0, 1000],
'us0': [0, 1000], }
Est = Mixture_KinDeg_NoSwitching(Moments_NoSwitching(), Moments_NoSwitching())
else:
raise NotImplementedError(f'model {model} with kinetic assumption is not implemented. '
f'current supported models for kinetics experiments include: stochastic, deterministic, mixture,'
f'mixture_deterministic_stochastic or mixture_stochastic_stochastic')
else:
total_layer = 'M_t' if ('M_t' in subset_adata.layers.keys() and data_type == 'smoothed') else 'X_total'
if model in ['deterministic', 'stochastic']:
layer = 'M_n' if ('M_n' in subset_adata.layers.keys() and data_type == 'smoothed') else 'X_new'
X, X_raw = prepare_data_no_splicing(subset_adata, subset_adata.var.index, time, layer=layer,
total_layer=total_layer)
elif model.startswith('mixture'):
layers = ['M_n', 'M_t'] if ('M_n' in subset_adata.layers.keys() and data_type == 'smoothed') \
else ['X_new', 'X_total']
X, _, X_raw = prepare_data_deterministic(subset_adata, subset_adata.var.index, time, layers=layers,
total_layers=total_layer)
if model == 'deterministic':
X = [X[i][0, :] for i in range(len(X))]
_param_ranges = {'alpha': [0, 1000], 'gamma': [0, 1000], }
x0 = {'u0': [0, 1000]}
Est, _ = Estimation_DeterministicKinNosp, Deterministic_NoSplicing
elif model == 'stochastic':
x0 = {'u0': [0, 1000], 'uu0': [0, 1000], }
if has_switch:
_param_ranges = {'a': [0, 1000], 'b': [0, 1000],
'alpha_a': [0, 1000], 'alpha_i': 0,
'gamma': [0, 1000], }
Est, _ = Estimation_MomentKinNosp, Moments_Nosplicing
else:
_param_ranges = {'alpha': [0, 1000], 'gamma': [0, 1000], }
Est, _ = Estimation_MomentKinNoSwitchNoSplicing, Moments_NoSwitchingNoSplicing
elif model == 'mixture':
_param_ranges = {'alpha': [0, 1000], 'alpha_2': [0, 0], 'gamma': [0, 1000], }
x0 = {'u0': [0, 0], 'o0': [0, 1000]}
Est = Mixture_KinDeg_NoSwitching(Deterministic_NoSplicing(), Deterministic_NoSplicing())
elif model == 'mixture_deterministic_stochastic':
X, X_raw = prepare_data_mix_no_splicing(subset_adata, subset_adata.var.index, time,
layer_n=layers[0], layer_t=layers[1], total_layer=total_layer,
mix_model_indices=[0, 2, 3])
_param_ranges = {'alpha': [0, 1000], 'alpha_2': [0, 0], 'gamma': [0, 1000], }
x0 = {'u0': | |
"""
Test functions used to create k8s objects
"""
from kubespawner.objects import make_pod, make_pvc, make_ingress
from kubernetes.client import ApiClient
api_client = ApiClient()
def test_make_simplest_pod():
    """
    Test the minimal make_pod invocation: only name, image, command, port and
    pull policy are given, so every other field in the serialized pod should
    be an empty default.
    """
    assert api_client.sanitize_for_serialization(make_pod(
        name='test',
        image='jupyter/singleuser:latest',
        cmd=['jupyterhub-singleuser'],
        port=8888,
        image_pull_policy='IfNotPresent'
    )) == {
        "metadata": {
            "name": "test",
            "labels": {},
            "annotations": {}
        },
        "spec": {
            'automountServiceAccountToken': False,
            "containers": [
                {
                    "env": [],
                    "name": "notebook",
                    "image": "jupyter/singleuser:latest",
                    "imagePullPolicy": "IfNotPresent",
                    "args": ["jupyterhub-singleuser"],
                    "ports": [{
                        "name": "notebook-port",
                        "containerPort": 8888
                    }],
                    'volumeMounts': [],
                    "resources": {
                        "limits": {},
                        "requests": {}
                    }
                }
            ],
            'restartPolicy': 'OnFailure',
            'volumes': [],
        },
        "kind": "Pod",
        "apiVersion": "v1"
    }
def test_make_labeled_pod():
    """
    Test that labels passed to make_pod end up in the pod's metadata.labels.
    """
    assert api_client.sanitize_for_serialization(make_pod(
        name='test',
        image='jupyter/singleuser:latest',
        cmd=['jupyterhub-singleuser'],
        port=8888,
        image_pull_policy='IfNotPresent',
        labels={"test": "true"}
    )) == {
        "metadata": {
            "name": "test",
            "labels": {"test": "true"},
            "annotations": {}
        },
        "spec": {
            'automountServiceAccountToken': False,
            "containers": [
                {
                    "env": [],
                    "name": "notebook",
                    "image": "jupyter/singleuser:latest",
                    "imagePullPolicy": "IfNotPresent",
                    "args": ["jupyterhub-singleuser"],
                    "ports": [{
                        "name": "notebook-port",
                        "containerPort": 8888
                    }],
                    'volumeMounts': [],
                    "resources": {
                        "limits": {},
                        "requests": {}
                    }
                }
            ],
            'restartPolicy': 'OnFailure',
            'volumes': [],
        },
        "kind": "Pod",
        "apiVersion": "v1"
    }
def test_make_annotated_pod():
    """
    Test that annotations passed to make_pod end up in the pod's
    metadata.annotations.
    """
    assert api_client.sanitize_for_serialization(make_pod(
        name='test',
        image='jupyter/singleuser:latest',
        cmd=['jupyterhub-singleuser'],
        port=8888,
        image_pull_policy='IfNotPresent',
        annotations={"test": "true"}
    )) == {
        "metadata": {
            "name": "test",
            "annotations": {"test": "true"},
            "labels": {},
        },
        "spec": {
            'automountServiceAccountToken': False,
            "containers": [
                {
                    "env": [],
                    "name": "notebook",
                    "image": "jupyter/singleuser:latest",
                    "imagePullPolicy": "IfNotPresent",
                    "args": ["jupyterhub-singleuser"],
                    "ports": [{
                        "name": "notebook-port",
                        "containerPort": 8888
                    }],
                    'volumeMounts': [],
                    "resources": {
                        "limits": {},
                        "requests": {}
                    }
                }
            ],
            'restartPolicy': 'OnFailure',
            'volumes': [],
        },
        "kind": "Pod",
        "apiVersion": "v1"
    }
def test_make_pod_with_image_pull_secrets():
    """
    Test that image_pull_secret is turned into the pod's
    spec.imagePullSecrets entry.
    """
    assert api_client.sanitize_for_serialization(make_pod(
        name='test',
        image='jupyter/singleuser:latest',
        cmd=['jupyterhub-singleuser'],
        port=8888,
        image_pull_policy='IfNotPresent',
        image_pull_secret='super-sekrit'
    )) == {
        "metadata": {
            "name": "test",
            "annotations": {},
            "labels": {},
        },
        "spec": {
            'automountServiceAccountToken': False,
            "imagePullSecrets": [
                {'name': 'super-sekrit'}
            ],
            "containers": [
                {
                    "env": [],
                    "name": "notebook",
                    "image": "jupyter/singleuser:latest",
                    "imagePullPolicy": "IfNotPresent",
                    "args": ["jupyterhub-singleuser"],
                    "ports": [{
                        "name": "notebook-port",
                        "containerPort": 8888
                    }],
                    'volumeMounts': [],
                    "resources": {
                        "limits": {},
                        "requests": {}
                    }
                }
            ],
            'restartPolicy': 'OnFailure',
            'volumes': [],
        },
        "kind": "Pod",
        "apiVersion": "v1"
    }
def test_set_container_uid_and_gid():
    """
    Test that run_as_uid and run_as_gid become the container-level
    securityContext runAsUser / runAsGroup.
    """
    assert api_client.sanitize_for_serialization(make_pod(
        name='test',
        image='jupyter/singleuser:latest',
        cmd=['jupyterhub-singleuser'],
        port=8888,
        run_as_uid=1000,
        run_as_gid=2000,
        image_pull_policy='IfNotPresent'
    )) == {
        "metadata": {
            "name": "test",
            "annotations": {},
            "labels": {},
        },
        "spec": {
            'automountServiceAccountToken': False,
            "containers": [
                {
                    "securityContext": {
                        "runAsUser": 1000,
                        "runAsGroup": 2000
                    },
                    "env": [],
                    "name": "notebook",
                    "image": "jupyter/singleuser:latest",
                    "imagePullPolicy": "IfNotPresent",
                    "args": ["jupyterhub-singleuser"],
                    "ports": [{
                        "name": "notebook-port",
                        "containerPort": 8888
                    }],
                    'volumeMounts': [],
                    "resources": {
                        "limits": {},
                        "requests": {}
                    }
                }
            ],
            'restartPolicy': 'OnFailure',
            'volumes': [],
        },
        "kind": "Pod",
        "apiVersion": "v1"
    }
def test_set_container_uid_and_pod_fs_gid():
    """
    Test that run_as_uid becomes the container-level runAsUser while fs_gid
    becomes the pod-level securityContext fsGroup.
    """
    assert api_client.sanitize_for_serialization(make_pod(
        name='test',
        image='jupyter/singleuser:latest',
        cmd=['jupyterhub-singleuser'],
        port=8888,
        run_as_uid=1000,
        fs_gid=1000,
        image_pull_policy='IfNotPresent'
    )) == {
        "metadata": {
            "name": "test",
            "annotations": {},
            "labels": {},
        },
        "spec": {
            'automountServiceAccountToken': False,
            "containers": [
                {
                    "securityContext": {
                        "runAsUser": 1000,
                    },
                    "env": [],
                    "name": "notebook",
                    "image": "jupyter/singleuser:latest",
                    "imagePullPolicy": "IfNotPresent",
                    "args": ["jupyterhub-singleuser"],
                    "ports": [{
                        "name": "notebook-port",
                        "containerPort": 8888
                    }],
                    'volumeMounts': [],
                    "resources": {
                        "limits": {},
                        "requests": {}
                    }
                }
            ],
            'restartPolicy': 'OnFailure',
            'securityContext': {
                'fsGroup': 1000,
            },
            'volumes': [],
        },
        "kind": "Pod",
        "apiVersion": "v1"
    }
def test_set_pod_supplemental_gids():
    """
    Test that supplemental_gids becomes the pod-level securityContext
    supplementalGroups.
    """
    assert api_client.sanitize_for_serialization(make_pod(
        name='test',
        image='jupyter/singleuser:latest',
        cmd=['jupyterhub-singleuser'],
        port=8888,
        run_as_uid=1000,
        supplemental_gids=[100],
        image_pull_policy='IfNotPresent'
    )) == {
        "metadata": {
            "name": "test",
            "annotations": {},
            "labels": {},
        },
        "spec": {
            'automountServiceAccountToken': False,
            "containers": [
                {
                    "securityContext": {
                        "runAsUser": 1000,
                    },
                    "env": [],
                    "name": "notebook",
                    "image": "jupyter/singleuser:latest",
                    "imagePullPolicy": "IfNotPresent",
                    "args": ["jupyterhub-singleuser"],
                    "ports": [{
                        "name": "notebook-port",
                        "containerPort": 8888
                    }],
                    'volumeMounts': [],
                    "resources": {
                        "limits": {},
                        "requests": {}
                    }
                }
            ],
            'restartPolicy': 'OnFailure',
            'securityContext': {
                'supplementalGroups': [100],
            },
            'volumes': [],
        },
        "kind": "Pod",
        "apiVersion": "v1"
    }
def test_run_privileged_container():
    """
    Test that run_privileged=True sets privileged in the container's
    securityContext.
    """
    assert api_client.sanitize_for_serialization(make_pod(
        name='test',
        image='jupyter/singleuser:latest',
        cmd=['jupyterhub-singleuser'],
        port=8888,
        run_privileged=True,
        image_pull_policy='IfNotPresent'
    )) == {
        "metadata": {
            "name": "test",
            "annotations": {},
            "labels": {},
        },
        "spec": {
            'automountServiceAccountToken': False,
            "containers": [
                {
                    "env": [],
                    "name": "notebook",
                    "image": "jupyter/singleuser:latest",
                    "imagePullPolicy": "IfNotPresent",
                    "args": ["jupyterhub-singleuser"],
                    "ports": [{
                        "name": "notebook-port",
                        "containerPort": 8888
                    }],
                    "resources": {
                        "limits": {},
                        "requests": {}
                    },
                    "securityContext": {
                        "privileged": True,
                    },
                    'volumeMounts': [],
                }
            ],
            'restartPolicy': 'OnFailure',
            'volumes': [],
        },
        "kind": "Pod",
        "apiVersion": "v1"
    }
def test_make_pod_resources_all():
    """
    Test specifying all possible resource limits & guarantees, together with
    an image pull secret and a node selector.
    """
    assert api_client.sanitize_for_serialization(make_pod(
        name='test',
        image='jupyter/singleuser:latest',
        cpu_limit=2,
        cpu_guarantee=1,
        cmd=['jupyterhub-singleuser'],
        port=8888,
        mem_limit='1Gi',
        mem_guarantee='512Mi',
        image_pull_policy='IfNotPresent',
        image_pull_secret="myregistrykey",
        node_selector={"disk": "ssd"}
    )) == {
        "metadata": {
            "name": "test",
            "annotations": {},
            "labels": {},
        },
        "spec": {
            'automountServiceAccountToken': False,
            "imagePullSecrets": [{"name": "myregistrykey"}],
            "nodeSelector": {"disk": "ssd"},
            "containers": [
                {
                    "env": [],
                    "name": "notebook",
                    "image": "jupyter/singleuser:latest",
                    "imagePullPolicy": "IfNotPresent",
                    "args": ["jupyterhub-singleuser"],
                    "ports": [{
                        "name": "notebook-port",
                        "containerPort": 8888
                    }],
                    'volumeMounts': [],
                    "resources": {
                        "limits": {
                            "cpu": 2,
                            "memory": '1Gi'
                        },
                        "requests": {
                            "cpu": 1,
                            "memory": '512Mi'
                        }
                    }
                }
            ],
            'restartPolicy': 'OnFailure',
            'volumes': [],
        },
        "kind": "Pod",
        "apiVersion": "v1"
    }
def test_make_pod_with_env():
    """
    Test specification of a pod with custom environment variables
    """
    assert api_client.sanitize_for_serialization(make_pod(
        name='test',
        image='jupyter/singleuser:latest',
        env={
            'TEST_KEY': 'TEST_VALUE'
        },
        cmd=['jupyterhub-singleuser'],
        port=8888,
        image_pull_policy='IfNotPresent'
    )) == {
        "metadata": {
            "name": "test",
            "annotations": {},
            "labels": {},
        },
        "spec": {
            'automountServiceAccountToken': False,
            "containers": [
                {
                    "env": [{'name': 'TEST_KEY', 'value': 'TEST_VALUE'}],
                    "name": "notebook",
                    "image": "jupyter/singleuser:latest",
                    "imagePullPolicy": "IfNotPresent",
                    "args": ["jupyterhub-singleuser"],
                    "ports": [{
                        "name": "notebook-port",
                        "containerPort": 8888
                    }],
                    'volumeMounts': [],
                    "resources": {
                        "limits": {
                        },
                        "requests": {
                        }
                    }
                }
            ],
            'restartPolicy': 'OnFailure',
            'volumes': [],
        },
        "kind": "Pod",
        "apiVersion": "v1"
    }
def test_make_pod_with_lifecycle():
    """
    Test that lifecycle_hooks are copied into the container's lifecycle
    field.
    """
    assert api_client.sanitize_for_serialization(make_pod(
        name='test',
        image='jupyter/singleuser:latest',
        cmd=['jupyterhub-singleuser'],
        port=8888,
        image_pull_policy='IfNotPresent',
        lifecycle_hooks={
            'preStop': {
                'exec': {
                    'command': ['/bin/sh', 'test']
                }
            }
        }
    )) == {
        "metadata": {
            "name": "test",
            "annotations": {},
            "labels": {},
        },
        "spec": {
            'automountServiceAccountToken': False,
            "containers": [
                {
                    "env": [],
                    "name": "notebook",
                    "image": "jupyter/singleuser:latest",
                    "imagePullPolicy": "IfNotPresent",
                    "args": ["jupyterhub-singleuser"],
                    "ports": [{
                        "name": "notebook-port",
                        "containerPort": 8888
                    }],
                    'volumeMounts': [],
                    "resources": {
                        "limits": {
                        },
                        "requests": {
                        }
                    },
                    "lifecycle": {
                        "preStop": {
                            "exec": {
                                "command": ["/bin/sh", "test"]
                            }
                        }
                    }
                }
            ],
            'restartPolicy': 'OnFailure',
            'volumes': [],
        },
        "kind": "Pod",
        "apiVersion": "v1"
    }
def test_make_pod_with_init_containers():
    """
    Test specification of a pod with initContainers
    """
    assert api_client.sanitize_for_serialization(make_pod(
        name='test',
        image='jupyter/singleuser:latest',
        cmd=['jupyterhub-singleuser'],
        port=8888,
        image_pull_policy='IfNotPresent',
        init_containers=[
            {
                'name': 'init-myservice',
                'image': 'busybox',
                'command': ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
            },
            {
                'name': 'init-mydb',
                'image': 'busybox',
                'command': ['sh', '-c', 'until nslookup mydb; do echo waiting for mydb; sleep 2; done;']
            }
        ]
    )) == {
        "metadata": {
            "name": "test",
            "annotations": {},
            "labels": {},
        },
        "spec": {
            'automountServiceAccountToken': False,
            "containers": [
                {
                    "env": [],
                    "name": "notebook",
                    "image": "jupyter/singleuser:latest",
                    "imagePullPolicy": "IfNotPresent",
                    "args": ["jupyterhub-singleuser"],
                    "ports": [{
                        "name": "notebook-port",
                        "containerPort": 8888
                    }],
                    'volumeMounts': [],
                    "resources": {
                        "limits": {
                        },
                        "requests": {
                        }
                    },
                }
            ],
            "initContainers": [
                {
                    "name": "init-myservice",
                    "image": "busybox",
                    "command": ["sh", "-c",
                                "until nslookup myservice; do echo waiting for myservice; sleep 2; done;"]
                },
                {
                    "name": "init-mydb",
                    "image": "busybox",
                    "command": ["sh", "-c", "until nslookup mydb; do echo waiting for mydb; sleep 2; done;"]
                }
            ],
            'restartPolicy': 'OnFailure',
            'volumes': [],
        },
        "kind": "Pod",
        "apiVersion": "v1"
    }
def test_make_pod_with_extra_container_config():
    """
    Test that extra_container_config entries are merged into the notebook
    container's spec.
    """
    assert api_client.sanitize_for_serialization(make_pod(
        name='test',
        image='jupyter/singleuser:latest',
        cmd=['jupyterhub-singleuser'],
        port=8888,
        image_pull_policy='IfNotPresent',
        extra_container_config={
            'envFrom': [
                {
                    'configMapRef': {
                        'name': 'special-config'
                    }
                }
            ]
        }
    )) == {
        "metadata": {
            "name": "test",
            "annotations": {},
            "labels": {},
        },
        "spec": {
            'automountServiceAccountToken': False,
            "containers": [
                {
                    "env": [],
                    "name": "notebook",
                    "image": "jupyter/singleuser:latest",
                    "imagePullPolicy": "IfNotPresent",
                    "args": ["jupyterhub-singleuser"],
                    "ports": [{
                        "name": "notebook-port",
                        "containerPort": 8888
                    }],
                    'volumeMounts': [],
                    "resources": {
                        "limits": {
                        },
                        "requests": {
                        }
                    },
                    'envFrom': [
                        {
                            'configMapRef': {
                                'name': 'special-config'
                            }
                        }
                    ]
                }
            ],
            'restartPolicy': 'OnFailure',
            'volumes': [],
        },
        "kind": "Pod",
        "apiVersion": "v1"
    }
def test_make_pod_with_extra_pod_config():
"""
Test specification of a pod with initContainers
"""
assert api_client.sanitize_for_serialization(make_pod(
name='test',
image='jupyter/singleuser:latest',
cmd=['jupyterhub-singleuser'],
port=8888,
image_pull_policy='IfNotPresent',
tolerations=[{
'key': 'wrong_toleration',
'operator': 'Equal',
'value': 'wrong_value'
}],
extra_pod_config={
'dns_policy': 'ClusterFirstWithHostNet',
'restartPolicy': 'Always',
'tolerations': [{
'key': 'correct_toleration',
'operator': 'Equal',
'value': 'correct_value'
}],
},
)) == {
"metadata": {
"name": "test",
"annotations": {},
"labels": {},
},
"spec": {
'automountServiceAccountToken': False,
"containers": [
{
"env": [],
"name": "notebook",
"image": "jupyter/singleuser:latest",
"imagePullPolicy": | |
# Repository: saullocastro/structmanager
import os
import cPickle as pickle
from pprint import pformat
from collections import Iterable
from .output_codes import OUTC, get_output_code
from .cards_opt import *
from .cards_solver import *
class Genesis(object):
"""GENESIS optimization model
This class corresponds to an optimization model.
================== ======================================================
Attribute Description
================== ======================================================
`dvprops` `dict` of :class:`.DVPROP3` objects
`deqatns` `dict` of :class:`.DEQATN` objects
`dtables` `dict` of :class:`.DTABLE`
`dresps` `dict` of :class:`.DRESP1` or :class:`.DRESP23`
objects
`dcons` `dict` of :class:`.DCONS` objects
`dvars` `dict` of :class:`.DVAR` objects
`dvar_codes` `dict` classifying the :class:`.DVAR` objects by
their unique codes
`dlinks` `dict` of :class:`.DLINK` objects
`newprops` `dict` with different NASTRAN cards, see
:meth:`.reset_newprops`
`nodal_displ` `dict` with the nodal displacements constraints as
detailed in :meth:`.nodal_displ_from_excel`
`loads_list` `list` containing the load cases' ids
`spcs_list` `list` containing the single point constraint id
for each load case
`num_cycles` `int` indicating the number of design cycles
`outputdir` `str` path to the output directory
`genesisfile` `file` handler to GENESIS's output file
`nastranfile` `file` handler to NASTRAN's output file
================== ======================================================
"""
def __init__(self):
    """Create an empty GENESIS optimization model.

    All card dictionaries start out empty and all bookkeeping attributes
    start as ``None``; they are filled in later by the model-building
    methods.
    """
    self.dvprops = {}
    self.reset_newprops()
    # Optimization cards, each keyed by card id.
    for attr in ('deqatns', 'dtables', 'dresps', 'dcons', 'dvars',
                 'dvar_codes', 'dlinks'):
        setattr(self, attr, {})
    #TODO future implementation
    #self.externalDRESP3 = {}
    #                  Description
    # `externalDRESP3` `dict` of :class:`.DRESP23` objects, containing
    #                  external design responses to be considered
    self.topocheck = False
    # Run/bookkeeping attributes, populated during model setup.
    for attr in ('nodal_displ', 'loads_list', 'spcs_list', 'num_cycles',
                 'outputdir', 'genesisfile', 'nastranfile', 'datname'):
        setattr(self, attr, None)
def nodal_displ_from_excel(self, xls_file):
"""Create nodal displacement constraints based on an Excel file.
This function reads the xls file with nodal displacement constraints
and returns a dictionary with the format::
nd = {load_case_id1 : {grid_1 : {output : [minvalue, maxvalue],
grid_2 : output : [minvalue, maxvalue],
grid_3 : output : [minvalue, maxvalue]}}
load_case_id2 : {grid_1 : {output : [minvalue, maxvalue],
grid_3 : outout : [minvalue, maxvalue]}}}
where `minvalue` and `maxvalue` are the minimum and maximum
displacement values.
Parameters
----------
xls_file : str
The full path to the Excel file.
Returns
-------
output : str
A string with one of the values:
- `'Translation X'`
- `'Translation Y'`
- `'Translation Z'`
- `'Rotation X'`
- `'Rotation Y'`
- `'Rotation Z'`
- `'Translation Total'`
- `'Absolute X'`
- `'Absolute Y'`
- `'Absolute Z'`
"""
from structmanager.excel import Excel
if not xls_file:
return None
ex = Excel(xls_file)
found = False
for row in range(1, 100):
for col in range(1, 23):
refvalue = str(ex.get_cell(1, row, col))
if refvalue.find('DISPLACEMENT CONSTRAINTS') > -1:
found = True
break
if found:
break
nd = {}
irow = row + 2
#TODO this try/except block is mainly to avoid Excel from being
# a defunct
try:
for row in range(irow, 65536):
load_id = ex.get_cell(1, row, col)
if load_id.__class__.__name__ == 'NoneType':
break
#
load_id = int(load_id)
node_id = ex.get_cell(1, row, col + 1)
node_id = int(node_id)
output = ex.get_cell(1, row, col + 2)
output = str(output)
minvalue = ex.get_cell(1, row, col + 3)
minvalue = float(minvalue)
maxvalue = ex.get_cell(1, row, col + 4)
maxvalue = float(maxvalue)
if not load_id in nd.keys():
nd[load_id] = {}
if not node_id in nd[load_id].keys():
nd[load_id][node_id] = {}
nd[load_id][node_id][output] = [minvalue, maxvalue]
ex.close()
self.nodal_displ = nd
except:
ex.close()
print('nodal_displ_from_excel() failed!')
return None
for load_id in nd.keys():
for node_id in nd[load_id].keys():
for con_name in nd[load_id][node_id].keys():
con_minvalue = nd[load_id][node_id][con_name][0]
con_maxvalue = nd[load_id][node_id][con_name][1]
for k, v in OUTC['DISP'].iteritems():
if v.find(con_name) > -1:
code = k
break
if code == 7:
#DRESP1 x
labelx = 'x' + str(node_id)
dresp1 = DRESP1(labelx, 'DISP', '', '', 1, [node_id])
self.dresps[dresp1.id] = dresp1
dresp1xid = dresp1.id
#DRESP1 y
labely = 'y' + str(node_id)
dresp1 = DRESP1(labely, 'DISP', '', '', 2, [node_id])
self.dresps[dresp1.id] = dresp1
dresp1yid = dresp1.id
#DRESP1 z
labelz = 'z' + str(node_id)
dresp1 = DRESP1(labelz, 'DISP', '', '', 3, [node_id])
self.dresps[dresp1.id] = dresp1
dresp1zid = dresp1.id
#DEQATN
eq = ('T(%s,%s,%s)=SQRT(%s**2+%s**2+%s**2)' %
(labelx, labely, labelz, labelx, labely, labelz))
deqatn = DEQATN(eq)
self.deqatns[deqatn.id] = deqatn
#DRESP23
label = 'r' + str(node_id)
dresp23 = DRESP23(label, deqatn.id)
dresp23.add_dresp1(dresp1xid)
dresp23.add_dresp1(dresp1yid)
dresp23.add_dresp1(dresp1zid)
self.dresps[dresp23.id] = dresp23
#DCONS
stress_type = 'positive'
lid_lb_ub = [str(load_id), con_minvalue, con_maxvalue]
self.dcons[dresp23.id] = DCONS(dresp23.id, lid_lb_ub,
stress_type)
elif (code == 8 or code == 9 or code == 10):
#DRESP1
label = 'a' + str(node_id)
dresp1 = DRESP1(label, 'DISP', '', '', code, [node_id])
self.dresps[dresp1.id] = dresp1
#DEQATN
eq = 'D(%s)=ABS(%s)' % (label, label)
deqatn = DEQATN(eq)
self.deqatns[deqatn.id] = deqatn
#DRESP23
label = 'r' + str(node_id)
dresp23 = DRESP23(label, deqatn.id)
dresp23.add_dresp1(dresp23.id)
self.dresps[dresp23.id] = dresp23
#DCONS
stress_type = 'positive'
lid_lb_ub = [str(load_id), con_minvalue, con_maxvalue]
self.dcons[dresp23.id] = DCONS(dresp23.id, lid_lb_ub,
stress_type)
else:
#DRESP1
label = 'r' + str(node_id)
dresp1 = DRESP1(label, 'DISP', '', '', code, [node_id])
self.dresps[dresp1.id] = dresp1
#DCONS
stress_type = 'both'
lid_lb_ub = [str(load_id), con_minvalue, con_maxvalue]
self.dcons[dresp1.id] = DCONS(dresp1.id, lid_lb_ub,
stress_type)
def dlinks_from_excel(self, xls_file):
"""Read links between variables from an Excel file.
The Excel file should have the following format:
======= ====== ======= ======= ======= ======= ======= ======= =======
. col j col j+1 col j+2 col j+3 col j+4 col j+5 col j+6 col j+7
======= ====== ======= ======= ======= ======= ======= ======= =======
row i DLINK
row i+1
row i+2 dvar c0 c ivar1 c1 ivar2 c2 ...
row i+3 LP.1.1 0. 1. LS.1.1 0.4 LS.1.2 0.6 ...
======= ====== ======= ======= ======= ======= ======= ======= =======
where the relative position between the cell with `DLINK` and the
others must be held.
Parameters
----------
xls_file : str
The full path of the Excel file.
"""
from structmanager.excel import Excel
ex = Excel(xls_file)
found = False
print('Reading Excel File %s...' % xls_file)
for row in range(1, 101):
for col in range(1, 256):
rvalue = ex.get_cell(1, row, col)
if 'DLINK' == rvalue:
found = True
break
if found:
break
dlinks = {}
count = -1
irow = row + 3
for row in range(irow, 65536):
dvar_code = ex.get_cell(1, row, col)
if dvar_code is None:
break
print(' creating DLINK for variable: %s' % dvar_code)
c0 = ex.get_cell(1, row, col + 1)
c = ex.get_cell(1, row, col + 2)
ivar_code_1 = ex.get_cell(1, row, col + 3)
c_1 = ex.get_cell(1, row, col + 4)
ivar_code_2 = ex.get_cell(1, row, col + 5)
c_2 = ex.get_cell(1, row, col + 6)
ivar_code_3 = ex.get_cell(1, row, col + 7)
c_3 = ex.get_cell(1, row, col + 8)
dvar = self.dvar_codes[dvar_code]
ivar_1 = self.dvar_codes[ivar_code_1]
try:
dvi_ci = [ivar_1.id, float(c_1)]
except:
dvi_ci = [ivar_1.id, str(c_1)]
if ivar_code_2.__class__.__name__ <> 'NoneType':
ivar_2 = self.dvar_codes[ivar_code_2]
dvi_ci = dvi_ci + [ivar_2.id, float(c_2)]
if ivar_code_3.__class__.__name__ <> 'NoneType':
ivar_3 = self.dvar_codes[ivar_code_3]
dvi_ci = dvi_ci + [ivar_3.id, float(c_3)]
count += 1
self.dlinks[count] = DLINK(dvar.id, dvi_ci, c0=c0, cmult=c)
ex.close()
def set_output_file(self, path):
"""Define the data related to the output file.
The output directory is estimated based on `path`.
Parameters
----------
path : str
The full path of the output file.
"""
self.outputdir = os.path.dirname(path)
self.datname = path
tmppath = os.path.join(self.outputdir, 'genesis.temp')
self.genesisfile = open(tmppath, 'w')
tmppath = os.path.join(self.outputdir, 'nastran.temp')
self.nastranfile = open(tmppath, 'w')
def _read_inputs(self, topocheck=False, topo_max_elem=1,
manufact_cons_input=[], manufact_cons_coord=0):
#TODO probably remove this method
self.topocheck = topocheck
self.topo_max_elem = topo_max_elem
self.manufact_cons_input = manufact_cons_input
self.manufact_cons_coord = manufact_cons_coord
def add_dvprop(self, *args):
dvprop = DVPROP3(*args)
self.dvprops[dvprop.id] = dvprop
def print_model(self):
"""Print the whole model.
"""
self._print_dvprops()
self._print_dvars()
self._print_dresps()
self._print_deqatns()
self._print_dcons()
self._print_dobj()
self._print_dlinks()
self._print_newprops()
self.merge_temp_files()
def reset_newprops(self):
"""Reset the dictionary `newprops`.
This dictionary contains NASTRAN property cards that should be created
along with the optimization model.
The supported cards are those corresponding to the classes defined in
:mod:`.cards_solver`.
"""
self.newprops = {}
self.newprops['PSHELL'] = {}
self.newprops['PBAR'] = {}
self.newprops['PBARL'] = {}
self.newprops['PBEAM'] = {}
self.newprops['PBEAML'] = {}
self.newprops['PCOMP'] = {}
def create_dvars(self):
"""Create the design variables.
The method :meth:`.DVPROP3.create_dvar` of each property contained in
the dictionary `dvprops` is called.
"""
self.dvars = {}
self.dvar_codes = {}
if len(self.dvprops) == 0:
raise RuntimeError('No DVPROPs defined!')
for dvprop in self.dvprops.values():
dvprop.create_dvars()
for dvar in dvprop.dvars:
self.dvars[dvar.id] = dvar
self.dvar_codes[dvar.code] = dvar
def constrain_pshell(self, pid, cname, rtype, allow_C, allow_T):
"""Add constraints to the bottom | |
# fuzzy_modeling/tests/models/test_set_model.py
# -*- coding: utf-8 -*-
import mock
from django.test import TestCase
from fuzzy_modeling.tests.utils import ResetMock
from fuzzy_modeling.models.sets import SetModel
from fuzzy.set.Set import Set
from fuzzy.set.Polygon import Polygon
from fuzzy.set.Triangle import Triangle
from fuzzy.set.Singleton import Singleton
from fuzzy.set.Trapez import Trapez
from fuzzy.set.Function import Function
from fuzzy.set.SFunction import SFunction
from fuzzy.set.ZFunction import ZFunction
from fuzzy.set.PiFunction import PiFunction
class SetModelTest(TestCase, ResetMock):
# def setUp(self):
# pass
    def tearDown(self):
        # Undo every monkey-patch applied through set_pre_mock() so that
        # SetModel is pristine for the next test.
        self.reset_all_pre_mocks(SetModel)
def _parameters_mock(self, name, value):
"""
mock a parameter
"""
param = mock.Mock()
param.name = name
param.get_value = lambda : value
return param
    def _mock_setModel(self, set_choice):
        """Create a SetModel whose `set` field is the dotted path `set_choice`."""
        # keep references on self so individual tests can inspect them
        self.set_choice = set_choice
        self.set = SetModel(set=set_choice)
        return self.set
def test_set_get_pyfuzzy_for_set_type(self):
" shoud return the correct corresponding pyfuzzy object for the Set type "
new_set = self._mock_setModel('fuzzy.set.Set.Set')
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = Set()
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
def test_set_get_pyfuzzy_for_polygon_type(self):
" shoud return the correct corresponding pyfuzzy object for the Polygon type "
new_set = self._mock_setModel('fuzzy.set.Polygon.Polygon')
points = [(0.,0.),(30.,1.),(60.,0.)]
points_value = str(points)
self.parameters_mock = [
self._parameters_mock(name="points", value=points_value)
]
# mocking parameters (queryset)
parameters_queryset = mock.Mock()
parameters_queryset.all = lambda : self.parameters_mock
self.set_pre_mock(SetModel,'parameters')
SetModel.parameters = parameters_queryset
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = Polygon(points=points)
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
# have the same points
self.assertEquals(pyfuzzy_set_expected.points, new_pyfuzzy_set.points)
def test_set_get_pyfuzzy_for_triangle_type(self):
" shoud return the correct corresponding pyfuzzy object for the Triangle type "
new_set = self._mock_setModel('fuzzy.set.Triangle.Triangle')
m = 1.2
alpha = 2.3
beta = 3.4
y_max = 4.5
y_min = 5.4
self.parameters_mock = [
self._parameters_mock(name="m", value=m),
self._parameters_mock(name="alpha", value=alpha),
self._parameters_mock(name="beta", value=beta),
self._parameters_mock(name="y_max", value=y_max),
self._parameters_mock(name="y_min", value=y_min)
]
# mocking parameters (queryset)
parameters_queryset = mock.Mock()
parameters_queryset.all = lambda : self.parameters_mock
self.set_pre_mock(SetModel,'parameters')
SetModel.parameters = parameters_queryset
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = Triangle(m = m, alpha = alpha, beta = beta, y_max = y_max, y_min = y_min)
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
# have the same args
self.assertEquals(pyfuzzy_set_expected.m, new_pyfuzzy_set.m)
self.assertEquals(pyfuzzy_set_expected.alpha, new_pyfuzzy_set.alpha)
self.assertEquals(pyfuzzy_set_expected.beta, new_pyfuzzy_set.beta)
self.assertEquals(pyfuzzy_set_expected.y_max, new_pyfuzzy_set.y_max)
self.assertEquals(pyfuzzy_set_expected.y_min, new_pyfuzzy_set.y_min)
def test_set_get_pyfuzzy_for_singleton_type(self):
" shoud return the correct corresponding pyfuzzy object for the Singleton type "
new_set = self._mock_setModel('fuzzy.set.Singleton.Singleton')
x = 1.2
self.parameters_mock = [
self._parameters_mock(name="x", value=x),
]
# mocking parameters (queryset)
parameters_queryset = mock.Mock()
parameters_queryset.all = lambda : self.parameters_mock
self.set_pre_mock(SetModel,'parameters')
SetModel.parameters = parameters_queryset
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = Singleton(x=x)
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
# have the same args
self.assertEquals(pyfuzzy_set_expected.x, new_pyfuzzy_set.x)
def test_set_get_pyfuzzy_for_trapez_type(self):
" shoud return the correct corresponding pyfuzzy object for the Trapez type "
new_set = self._mock_setModel('fuzzy.set.Trapez.Trapez')
m1 = 1.2
m2 = 1.3
alpha = 2.3
beta = 3.4
y_max = 4.5
y_min = 5.4
self.parameters_mock = [
self._parameters_mock(name="m1", value=m1),
self._parameters_mock(name="m2", value=m2),
self._parameters_mock(name="alpha", value=alpha),
self._parameters_mock(name="beta", value=beta),
self._parameters_mock(name="y_max", value=y_max),
self._parameters_mock(name="y_min", value=y_min)
]
# mocking parameters (queryset)
parameters_queryset = mock.Mock()
parameters_queryset.all = lambda : self.parameters_mock
self.set_pre_mock(SetModel,'parameters')
SetModel.parameters = parameters_queryset
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = Trapez(m1 = m1, m2 = m2, alpha = alpha, beta = beta, y_max = y_max, y_min = y_min)
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
# have the same args
self.assertEquals(pyfuzzy_set_expected.m1, new_pyfuzzy_set.m1)
self.assertEquals(pyfuzzy_set_expected.m2, new_pyfuzzy_set.m2)
self.assertEquals(pyfuzzy_set_expected.alpha, new_pyfuzzy_set.alpha)
self.assertEquals(pyfuzzy_set_expected.beta, new_pyfuzzy_set.beta)
self.assertEquals(pyfuzzy_set_expected.y_max, new_pyfuzzy_set.y_max)
self.assertEquals(pyfuzzy_set_expected.y_min, new_pyfuzzy_set.y_min)
def test_set_get_pyfuzzy_for_function_type(self):
" shoud return the correct corresponding pyfuzzy object for the Function type "
new_set = self._mock_setModel('fuzzy.set.Function.Function')
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = Function()
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
def test_set_get_pyfuzzy_for_sfunction_type(self):
" shoud return the correct corresponding pyfuzzy object for the SFunction type "
new_set = self._mock_setModel('fuzzy.set.SFunction.SFunction')
a = 1.2
delta = 2.3
self.parameters_mock = [
self._parameters_mock(name="a", value=a),
self._parameters_mock(name="delta", value=delta),
]
# mocking parameters (queryset)
parameters_queryset = mock.Mock()
parameters_queryset.all = lambda : self.parameters_mock
self.set_pre_mock(SetModel,'parameters')
SetModel.parameters = parameters_queryset
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = SFunction(a = a, delta = delta)
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
# have the same args
self.assertEquals(pyfuzzy_set_expected.a, new_pyfuzzy_set.a)
self.assertEquals(pyfuzzy_set_expected.delta, new_pyfuzzy_set.delta)
def test_set_get_pyfuzzy_for_zfunction_type(self):
" shoud return the correct corresponding pyfuzzy object for the ZFunction type "
new_set = self._mock_setModel('fuzzy.set.ZFunction.ZFunction')
a = 1.2
delta = 2.3
self.parameters_mock = [
self._parameters_mock(name="a", value=a),
self._parameters_mock(name="delta", value=delta),
]
# mocking parameters (queryset)
parameters_queryset = mock.Mock()
parameters_queryset.all = lambda : self.parameters_mock
self.set_pre_mock(SetModel,'parameters')
SetModel.parameters = parameters_queryset
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = ZFunction(a = a, delta = delta)
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
# have the same args
self.assertEquals(pyfuzzy_set_expected.a, new_pyfuzzy_set.a)
self.assertEquals(pyfuzzy_set_expected.delta, new_pyfuzzy_set.delta)
def test_set_get_pyfuzzy_for_pifunction_type(self):
" shoud return the correct corresponding pyfuzzy object for the PiFunction type "
new_set = self._mock_setModel('fuzzy.set.PiFunction.PiFunction')
a = 1.2
delta = 2.3
self.parameters_mock = [
self._parameters_mock(name="a", value=a),
self._parameters_mock(name="delta", value=delta),
]
# mocking parameters (queryset)
parameters_queryset = mock.Mock()
parameters_queryset.all = lambda : self.parameters_mock
self.set_pre_mock(SetModel,'parameters')
SetModel.parameters = parameters_queryset
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = PiFunction(a = a, delta = delta)
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
# have the same args
self.assertEquals(pyfuzzy_set_expected.a, new_pyfuzzy_set.a)
self.assertEquals(pyfuzzy_set_expected.delta, new_pyfuzzy_set.delta)
def test_set_from_pyfuzzy_for_set_type(self):
" shoud return the correct corresponding SetModel for the Set pyfuzzy object "
pyfuzzy_set = Set()
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
def test_set_from_pyfuzzy_for_polygon_type(self):
" shoud return the correct corresponding SetModel for the Polygon pyfuzzy object "
points = [(0.,0.),(30.,1.),(60.,0.)]
pyfuzzy_set = Polygon(points=points)
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
# have the same args
self.assertEquals(1,new_set.parameters.all().count())
points_param = new_set.parameters.all()[0]
self.assertEquals("points",points_param.name)
self.assertEquals(str(points),points_param.get_value())
def test_set_from_pyfuzzy_for_triangle_type(self):
" shoud return the correct corresponding SetModel for the Triangle pyfuzzy object "
m = 1.2
alpha = 2.3
beta = 3.4
y_max = 4.5
y_min = 5.4
pyfuzzy_set = Triangle(m = m, alpha = alpha, beta = beta, y_max = y_max, y_min = y_min)
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
# have the same args
self.assertEquals(5,new_set.parameters.all().count())
m_param = new_set.parameters.get(name="m")
alpha_param = new_set.parameters.get(name="alpha")
beta_param = new_set.parameters.get(name="beta")
y_max_param = new_set.parameters.get(name="y_max")
y_min_param = new_set.parameters.get(name="y_min")
self.assertEquals(pyfuzzy_set.m, m_param.get_value())
self.assertEquals(pyfuzzy_set.alpha, alpha_param.get_value())
self.assertEquals(pyfuzzy_set.beta, beta_param.get_value())
self.assertEquals(pyfuzzy_set.y_max, y_max_param.get_value())
self.assertEquals(pyfuzzy_set.y_min, y_min_param.get_value())
def test_set_from_pyfuzzy_for_trapez_type(self):
" shoud return the correct corresponding SetModel for the Trapez pyfuzzy object "
m1= 1.2
m2= 1.3
alpha = 2.3
beta = 3.4
y_max = 4.5
y_min = 5.4
pyfuzzy_set = Trapez(m1 = m1, m2 = m2, alpha = alpha, beta = beta, y_max = y_max, y_min = y_min)
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
# have the same args
self.assertEquals(6,new_set.parameters.all().count())
m1_param = new_set.parameters.get(name="m1")
m2_param = new_set.parameters.get(name="m2")
alpha_param = new_set.parameters.get(name="alpha")
beta_param = new_set.parameters.get(name="beta")
y_max_param = new_set.parameters.get(name="y_max")
y_min_param = new_set.parameters.get(name="y_min")
self.assertEquals(pyfuzzy_set.m1, m1_param.get_value())
self.assertEquals(pyfuzzy_set.m2, m2_param.get_value())
self.assertEquals(pyfuzzy_set.alpha, alpha_param.get_value())
self.assertEquals(pyfuzzy_set.beta, beta_param.get_value())
self.assertEquals(pyfuzzy_set.y_max, y_max_param.get_value())
self.assertEquals(pyfuzzy_set.y_min, y_min_param.get_value())
def test_set_from_pyfuzzy_for_function_type(self):
" shoud return the correct corresponding SetModel for the Function pyfuzzy object "
pyfuzzy_set = Function()
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
def test_set_from_pyfuzzy_for_sfunction_type(self):
" shoud return the correct corresponding SetModel for the SFunction pyfuzzy object "
a = 1.2
delta = 2.3
pyfuzzy_set = SFunction(a = a, delta = delta)
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
# have the same args
self.assertEquals(2,new_set.parameters.all().count())
a_param = new_set.parameters.get(name="a")
delta_param = new_set.parameters.get(name="delta")
self.assertEquals(pyfuzzy_set.a, a_param.get_value())
self.assertEquals(pyfuzzy_set.delta, delta_param.get_value())
def test_set_from_pyfuzzy_for_zfunction_type(self):
" shoud return the correct corresponding SetModel for the ZFunction pyfuzzy object "
a = 1.2
delta = 2.3
pyfuzzy_set = ZFunction(a = a, delta = delta)
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
# have the same args
self.assertEquals(2,new_set.parameters.all().count())
a_param = new_set.parameters.get(name="a")
delta_param = new_set.parameters.get(name="delta")
self.assertEquals(pyfuzzy_set.a, a_param.get_value())
self.assertEquals(pyfuzzy_set.delta, delta_param.get_value())
def test_set_from_pyfuzzy_for_pifunction_type(self):
" shoud return the correct corresponding SetModel for the PiFunction pyfuzzy object "
a = 1.2
delta = 2.3
pyfuzzy_set = PiFunction(a = a, delta = delta)
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
# have the same args
self.assertEquals(2,new_set.parameters.all().count())
a_param = new_set.parameters.get(name="a")
| |
# -*- coding: utf-8 -*-
from common.base_test import BaseTest
from project import TESTRPC_URL
import lemoncheesecake.api as lcc
import requests
from lemoncheesecake.matching import (
check_that, equal_to, greater_than, has_length, is_false, is_integer, is_list, is_none, is_true, not_equal_to,
require_that, require_that_in
)
# lemoncheesecake suite metadata (shown in the test reports)
# NOTE(review): "TestPRC" in the description looks like a typo for
# "TestRPC" — confirm before changing the user-visible text.
SUITE = {
    "description": "Run ECHO test node and check TestPRC methods"
}
@lcc.prop("main", "type")
@lcc.tags("test_rpc")
@lcc.suite("Check TestPRC methods of ECHO test node")
class TestRPC(BaseTest):
    def __init__(self):
        """Initialize shared fixtures; concrete values are set in setup_suite()."""
        super().__init__()
        self.passphrase = None           # passphrase for accounts created by the suite
        self.account_address = None      # sender account used by transfer()/create_contract()
        self.time = None                 # hex-encoded value assigned in setup_suite()
        self.SHA3_trx_hash = None        # hex-encoded sample payload
        self.contract_address = None     # fixed contract address constant
        self.null_trx_hash = None        # all-zero transaction hash constant
        self.contract = None             # byte code loaded in setup_suite()
        self.new_account_address = None  # filled by personal_new_account()
        self.value = None                # hex-encoded transfer amount
def rpc_call(self, method, params):
payload = {
"method": method,
"params": params,
"jsonrpc": "2.0",
"id": 0,
}
return payload
def get_response(self, payload):
response = requests.post(TESTRPC_URL, json=payload).json()
lcc.log_debug(str(response))
if require_that("json-rpc response", response, has_length(3)):
require_that_in(response, "id", is_integer(), "jsonrpc", equal_to("2.0"))
return response
def transfer(self):
payload = self.rpc_call(
"personal_sendTransaction", [{
"from": self.account_address,
"to": self.new_account_address,
"value": self.value
}, ""]
)
trx_hash = self.get_response(payload)["result"]
return trx_hash
def create_contract(self):
payload = self.rpc_call(
"personal_sendTransaction", [{
"from": self.account_address,
"data": self.contract,
}, ""]
)
trx_hash = self.get_response(payload)["result"]
return trx_hash
def validate_transaction(self, transaction):
if require_that("transactions", transaction, has_length(14)):
if not self.type_validator.is_SHA3_256(transaction["blockHash"]):
lcc.log_error("Wrong format of 'blockHash', got: '{}'".format(transaction["blockHash"]))
else:
lcc.log_info("'blockHash' has correct format: eth_hash")
if not self.type_validator.is_eth_hash(transaction["blockNumber"]):
lcc.log_error("Wrong format of 'blockNumber', got: '{}'".format(transaction["blockNumber"]))
else:
lcc.log_info("'blockNumber' has correct format: eth_hash")
if not self.type_validator.is_eth_hash(transaction["gas"]):
lcc.log_error("Wrong format of 'gas', got: '{}'".format(transaction["gas"]))
else:
lcc.log_info("'gas' has correct format: eth_hash")
if not self.type_validator.is_eth_hash(transaction["gasPrice"]):
lcc.log_error("Wrong format of 'gasPrice', got: '{}'".format(transaction["gasPrice"]))
else:
lcc.log_info("'gasPrice' has correct format: eth_hash")
if not self.type_validator.is_SHA3_256(transaction["hash"]):
lcc.log_error("Wrong format of 'hash', got: '{}'".format(transaction["hash"]))
else:
lcc.log_info("'hash' has correct format: eth_hash")
if not self.type_validator.is_eth_hash(transaction["nonce"]):
lcc.log_error("Wrong format of 'nonce', got: '{}'".format(transaction["nonce"]))
else:
lcc.log_info("'nonce' has correct format: eth_hash")
if not self.type_validator.is_eth_hash(transaction["to"]):
lcc.log_error("Wrong format of 'to', got: '{}'".format(transaction["to"]))
else:
lcc.log_info("'to' has correct format: eth_hash")
if not self.type_validator.is_eth_hash(transaction["transactionIndex"]):
lcc.log_error("Wrong format of 'transactionIndex', got: '{}'".format(transaction["transactionIndex"]))
else:
lcc.log_info("'transactionIndex' has correct format: eth_hash")
check_that("value", transaction["value"], equal_to(self.value))
if not self.type_validator.is_eth_hash(transaction["v"]):
lcc.log_error("Wrong format of 'v', got: '{}'".format(transaction["v"]))
else:
lcc.log_info("'v' has correct format: eth_hash")
if not self.type_validator.is_eth_hash(transaction["r"]):
lcc.log_error("Wrong format of 'r', got: '{}'".format(transaction["r"]))
else:
lcc.log_info("'r' has correct format: eth_hash")
if not self.type_validator.is_eth_hash(transaction["s"]):
lcc.log_error("Wrong format of 's', got: '{}'".format(transaction["s"]))
else:
lcc.log_info("'s' has correct format: eth_hash")
def validate_block(self, result):
if require_that("'result'", result, has_length(19)):
if not self.type_validator.is_eth_hash(result["number"]):
lcc.log_error("Wrong format of 'number', got: '{}'".format(result["number"]))
else:
lcc.log_info("'number' has correct format: hash")
if not self.type_validator.is_eth_hash(result["hash"]):
lcc.log_error("Wrong format of 'hash', got: '{}'".format(result["hash"]))
else:
lcc.log_info("'hash' has correct format: hash")
if not self.type_validator.is_eth_hash(result["parentHash"]):
lcc.log_error("Wrong format of 'parentHash', got: '{}'".format(result["parentHash"]))
else:
lcc.log_info("'parentHash' has correct format: hash")
if not self.type_validator.is_eth_hash(result["nonce"]):
lcc.log_error("Wrong format of 'nonce', got: '{}'".format(result["nonce"]))
else:
lcc.log_info("'nonce' has correct format: hash")
if not self.type_validator.is_eth_hash(result["sha3Uncles"]):
lcc.log_error("Wrong format of 'sha3Uncles', got: '{}'".format(result["sha3Uncles"]))
else:
lcc.log_info("'result' has correct format: hash")
if not self.type_validator.is_eth_hash(result["logsBloom"]):
lcc.log_error("Wrong format of 'logsBloom', got: '{}'".format(result["logsBloom"]))
else:
lcc.log_info("'result' has correct format: hash")
if not self.type_validator.is_eth_hash(result["transactionsRoot"]):
lcc.log_error("Wrong format of 'transactionsRoot', got: '{}'".format(result["transactionsRoot"]))
else:
lcc.log_info("'transactionsRoot' has correct format: hash")
if not self.type_validator.is_eth_hash(result["stateRoot"]):
lcc.log_error("Wrong format of 'stateRoot', got: '{}'".format(result["stateRoot"]))
else:
lcc.log_info("'stateRoot' has correct format: hash")
if not self.type_validator.is_eth_hash(result["receiptsRoot"]):
lcc.log_error("Wrong format of 'receiptsRoot', got: '{}'".format(result["receiptsRoot"]))
else:
lcc.log_info("'receiptsRoot' has correct format: hash")
if not self.type_validator.is_eth_hash(result["miner"]):
lcc.log_error("Wrong format of 'miner', got: '{}'".format(result["miner"]))
else:
lcc.log_info("'miner' has correct format: hash")
if not self.type_validator.is_eth_hash(result["difficulty"]):
lcc.log_error("Wrong format of 'difficulty', got: '{}'".format(result["difficulty"]))
else:
lcc.log_info("'difficulty' has correct format: hash")
if not self.type_validator.is_eth_hash(result["totalDifficulty"]):
lcc.log_error("Wrong format of 'totalDifficulty', got: '{}'".format(result["totalDifficulty"]))
else:
lcc.log_info("'totalDifficulty' has correct format: hash")
if not self.type_validator.is_eth_hash(result["extraData"]):
lcc.log_error("Wrong format of 'extraData', got: '{}'".format(result["extraData"]))
else:
lcc.log_info("'extraData' has correct format: hash")
if not self.type_validator.is_eth_hash(result["size"]):
lcc.log_error("Wrong format of 'size', got: '{}'".format(result["size"]))
else:
lcc.log_info("'size' has correct format: hash")
if not self.type_validator.is_eth_hash(result["gasLimit"]):
lcc.log_error("Wrong format of 'gasLimit', got: '{}'".format(result["gasLimit"]))
else:
lcc.log_info("'gasLimit' has correct format: hash")
if not self.type_validator.is_eth_hash(result["gasUsed"]):
lcc.log_error("Wrong format of 'gasUsed', got: '{}'".format(result["gasUsed"]))
else:
lcc.log_info("'gasUsed' has correct format: hash")
if not self.type_validator.is_eth_hash(result["timestamp"]):
lcc.log_error("Wrong format of 'timestamp', got: '{}'".format(result["timestamp"]))
else:
lcc.log_info("'timestamp' has correct format: hash")
if check_that("uncles", result["uncles"], is_list()):
if len(result["transactions"]) > 0:
for transaction in result["transactions"]:
self.validate_transaction(transaction)
    def setup_suite(self):
        """Assign the constant fixtures shared by all tests of the suite."""
        # NOTE(review): placeholder left by credential scrubbing — restore
        # the real passphrase before running.
        self.passphrase = "<PASSWORD>"
        self.null_trx_hash = "0x0000000000000000000000000000000000000000000000000000000000000000"
        self.account_address = "0x000000000000000000000000000000000000000a"
        self.contract_address = "0x0100000000000000000000000000000000000001"
        self.time = "0xffff"
        # hex encoding of the ASCII string "hello world"
        self.SHA3_trx_hash = "0x68656c6c6f20776f726c64"
        self.value = "0xffff"
        # compiled byte code of the Callee test contract
        self.contract = self.get_byte_code("code_contract_Callee", "code")
        self.initial_account = "0x000000000000000000000000000000000000000b"
        # deliberately malformed signature used by negative tests
        self.invalid_hex_encoded_signature = \
            "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
    def teardown_suite(self):
        # nothing to clean up: the suite keeps no external resources open
        pass
@lcc.test("Check connection to ECHO test node")
def main_check(self):
message = {
'code': -32600,
'message': 'Missing or invalid method'
}
payload = self.rpc_call("", "")
response = requests.post(TESTRPC_URL, json=payload).json()
if require_that("json-rpc response", response, has_length(3)):
require_that_in(response, "id", is_none(), "jsonrpc", equal_to("2.0"), "error", equal_to(message))
@lcc.test("Check method 'miner_stop'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def miner_stop(self):
payload = self.rpc_call("miner_stop", ["0x0"])
response = self.get_response(payload)
require_that("'result'", response["result"], is_none())
@lcc.test("Check method 'miner_start'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.miner_stop")
def miner_start(self):
payload = self.rpc_call("miner_start", ["0x0"])
response = self.get_response(payload)
require_that("'result'", response["result"], is_none())
@lcc.test("Check method 'eth_mining'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.miner_start")
def eth_mining(self):
payload = self.rpc_call("eth_mining", [])
response = self.get_response(payload)
require_that("'result'", response["result"], is_true())
@lcc.test("Check method 'personal_newAccount'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def personal_new_account(self):
payload = self.rpc_call("personal_newAccount", [self.passphrase])
response = self.get_response(payload)
if not self.type_validator.is_eth_address(response["result"]):
lcc.log_error("Wrong format of 'result', got: {}".format(response["result"]))
else:
lcc.log_info("'result' has correct format: address")
self.new_account_address = response["result"]
return self.new_account_address
@lcc.test("Check method 'personal_listAccounts'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def personal_list_accounts(self):
payload = self.rpc_call("personal_listAccounts", [])
results = self.get_response(payload)["result"]
for result in results:
if not self.type_validator.is_eth_address(result):
lcc.log_error("Wrong format of 'result', got: {}".format(result))
else:
lcc.log_info("'result' has correct format: address")
@lcc.test("Check method 'personal_listRawAccounts'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def personal_list_raw_accounts(self):
payload = self.rpc_call("personal_listRawAccounts", [])
results = self.get_response(payload)["result"]
lcc.set_step("Check initialized accounts")
for result in results[:2]:
if not self.type_validator.is_eth_address(result["address"]):
lcc.log_error("Wrong format of 'address', got: {}".format(result["address"]))
else:
lcc.log_info("'result' has correct format: address")
if not self.type_validator.is_privkey(result["privkey"]):
lcc.log_error("Wrong format of 'privkey', got: {}".format(result["privkey"]))
else:
lcc.log_info("'result' has correct format: privkey")
check_that("passphrase", result["passphrase"], equal_to(""))
lcc.set_step("Check created accounts")
for result in results[2:]:
if not self.type_validator.is_eth_address(result["address"]):
lcc.log_error("Wrong format of 'address', got: {}".format(result["address"]))
else:
lcc.log_info("'result' has correct format: address")
if not self.type_validator.is_privkey(result["privkey"]):
lcc.log_error("Wrong format of 'privkey', got: {}".format(result["privkey"]))
else:
lcc.log_info("'result' has correct format: privkey")
check_that("passphrase", result["passphrase"], equal_to(self.passphrase))
@lcc.test("Check method 'eth_accounts'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def eth_accounts(self):
payload = self.rpc_call("eth_accounts", [])
results = self.get_response(payload)["result"]
for account_address in results:
if not self.type_validator.is_eth_address(account_address):
lcc.log_error("Wrong format of 'address', got: {}".format(account_address))
else:
lcc.log_info("'result' has correct format: address")
@lcc.test("Check method 'personal_lockAccount'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def personal_lock_account(self):
payload = self.rpc_call("personal_lockAccount", [self.account_address])
response = self.get_response(payload)
require_that("'result'", response["result"], is_true())
@lcc.test("Check method 'personal_unlockAccount'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.personal_lock_account")
def personal_unlock_account(self):
payload = self.rpc_call("personal_unlockAccount", [self.account_address, self.passphrase, self.time])
response = self.get_response(payload)
require_that("'result'", response["result"], is_false())
@lcc.test("Check method 'personal_sendTransaction'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.personal_new_account")
def personal_send_transaction(self):
payload = self.rpc_call(
"personal_sendTransaction", [{
"from": self.account_address,
"to": self.new_account_address,
"value": self.value
}, ""]
)
response = self.get_response(payload)
if not self.type_validator.is_hex(response["result"]):
lcc.log_error("Wrong format of 'result', got: '{}'".format(response["result"]))
else:
lcc.log_info("'result' has correct format: hex")
@lcc.test("Check method 'eth_sign'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def eth_sign(self):
payload = self.rpc_call("eth_sign", [self.initial_account, "0xdeadbaaf"])
response = self.get_response(payload)
require_that("'result'", response["result"], not_equal_to(self.invalid_hex_encoded_signature))
if not self.type_validator.is_hex(response["result"]):
lcc.log_error("Wrong format of 'result', got: '{}'".format(response["result"]))
else:
lcc.log_info("'result' has correct format: hex")
@lcc.disabled()
@lcc.test("Check method 'eth_sendTransaction'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def eth_send_transaction(self):
payload = self.rpc_call(
"eth_sendTransaction", [{
"from": "0x0000000000000000000000000000000000000006",
"to": "0x0000000000000000000000000000000000000007",
"value": "0xfff"
}]
)
response = self.get_response(payload)
require_that("'result'", response["result"], not_equal_to(self.null_trx_hash))
if not self.type_validator.is_hex(response["result"]):
lcc.log_error("Wrong format of 'result', got: '{}'".format(response["result"]))
else:
lcc.log_info("'result' has correct format: hex")
@lcc.test("Check method 'web3_clientVersion'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def web3_client_version(self):
payload = self.rpc_call("web3_clientVersion", [])
response = self.get_response(payload)
client_version_parts = response["result"].split("/")
require_that("'first part of web3 client version", client_version_parts[0], equal_to("ECHO"))
echo_version_parts = client_version_parts[1].split(".")
require_that("'version of echo splitted by dot have length", len(echo_version_parts), greater_than(2))
require_that("'third part of web3 client version", client_version_parts[2], equal_to("Linux.64-bit"))
@lcc.test("Check method 'web3_sha3'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def web3_sha3(self):
payload = self.rpc_call("web3_sha3", [self.SHA3_trx_hash])
response = self.get_response(payload)
if not self.type_validator.is_SHA3_256(response["result"]):
lcc.log_error("Wrong format of 'result', got: '{}'".format(response["result"]))
else:
lcc.log_info("'result' has correct format: hex")
@lcc.test("Check method 'net_version'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def net_version(self):
payload = self.rpc_call("net_version", [])
response = self.get_response(payload)
require_that("'result'", response["result"], equal_to("255"))
@lcc.test("Check method 'net_listening'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def net_listening(self):
payload = self.rpc_call("net_listening", [])
response = self.get_response(payload)
require_that("'result'", response["result"], is_true())
@lcc.test("Check method 'net_peerCount'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def net_peer_count(self):
payload = self.rpc_call("net_peerCount", [])
response = self.get_response(payload)
require_that("'result'", response["result"], equal_to("0x00"))
@lcc.test("Check method 'eth_protocolVersion'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def eth_protocol_version(self):
payload = self.rpc_call("eth_protocolVersion", [])
response = self.get_response(payload)
require_that("'result'", response["result"], equal_to("0x3f"))
@lcc.test("Check method 'eth_syncing'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def eth_syncing(self):
payload = self.rpc_call("eth_syncing", [])
response = self.get_response(payload)
require_that("'result'", response["result"], is_false())
@lcc.test("Check method 'eth_coinbase'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def eth_coinbase(self):
payload = self.rpc_call("eth_coinbase", [])
response = self.get_response(payload)
require_that("'result'", response["result"], equal_to(self.account_address))
@lcc.test("Check method 'eth_gasPrice'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def eth_gas_price(self):
payload = self.rpc_call("eth_gasPrice", [])
response = self.get_response(payload)
require_that("'result'", response["result"], equal_to("0x01"))
@lcc.test("Check method 'eth_block_number'")
@lcc.depends_on("TestRPC.TestRPC.TestRPC.main_check")
def eth_block_number(self):
payload = self.rpc_call("eth_blockNumber", [])
response = self.get_response(payload)
if not self.type_validator.is_eth_block_number(response["result"]):
lcc.log_error("Wrong format of 'eth_blockNumber', got: '{}'".format(response["result"]))
else:
lcc.log_info("'result' has correct format: eth_blockNumber")
@lcc.test("Check | |
import numpy as np
import openmdao.api as om
from openmdao.utils.general_utils import warn_deprecation
from ..transcription_base import TranscriptionBase
from .components import RungeKuttaStepsizeComp, RungeKuttaStateContinuityIterGroup, \
RungeKuttaTimeseriesOutputComp, RungeKuttaControlContinuityComp
from ..common import TimeComp, PathConstraintComp
from ...utils.rk_methods import rk_methods
from ...utils.misc import CoerceDesvar, get_rate_units, get_source_metadata
from ...utils.introspection import get_targets
from ...utils.constants import INF_BOUND
from ...utils.indexing import get_src_indices_by_row
from ..grid_data import GridData
from fnmatch import filter
class RungeKutta(TranscriptionBase):
"""
The RungeKutta Transcription class.
RungeKutta transcription in Dymos uses the RungeKutta-based shooting method which propagates
the states from the phase initial time to the phase final time.
"""
def __init__(self, **kwargs):
super(RungeKutta, self).__init__(**kwargs)
self._rhs_source = 'ode'
msg = 'The RungeKutta transcription is deprecated and will be removed in Dymos v1.0.0.\n' \
'For equivalent behavior, users should switch to ' \
'GaussLobatto(order=3, solve_segments=True)'
warn_deprecation(msg)
def initialize(self):
self.options.declare('method', default='RK4', values=('RK4',),
desc='The integrator used within the explicit phase.')
self.options.declare('k_solver_class', default=om.NonlinearBlockGS,
values=(om.NonlinearBlockGS, om.NewtonSolver, om.NonlinearRunOnce),
allow_none=True,
desc='The nonlinear solver class used to converge the numerical '
'integration across each segment.')
self.options.declare('k_solver_options', default={'iprint': -1}, types=(dict,),
desc='The options passed to the nonlinear solver used to converge the'
'Runge-Kutta propagation across each step.')
def init_grid(self):
self.grid_data = GridData(num_segments=self.options['num_segments'],
transcription='runge-kutta',
transcription_order=self.options['method'],
segment_ends=self.options['segment_ends'],
compressed=self.options['compressed'])
def setup_solvers(self, phase):
"""
Add a NewtonSolver to converge continuity errors in the state between steps.
Parameters
----------
phase
The phase to which this transcription instance applies.
Returns
-------
"""
phase.nonlinear_solver = om.NewtonSolver()
phase.nonlinear_solver.options['iprint'] = -1
phase.nonlinear_solver.options['solve_subsystems'] = True
phase.nonlinear_solver.options['err_on_non_converge'] = True
phase.nonlinear_solver.linesearch = om.BoundsEnforceLS()
    def configure_solvers(self, phase):
        """No-op: the Newton solver is fully configured during setup_solvers."""
        pass
def setup_time(self, phase):
time_units = phase.time_options['units']
num_seg = self.options['num_segments']
rk_data = rk_methods[self.options['method']]
num_nodes = num_seg * rk_data['num_stages']
grid_data = self.grid_data
super(RungeKutta, self).setup_time(phase)
time_comp = TimeComp(num_nodes=num_nodes, node_ptau=grid_data.node_ptau,
node_dptau_dstau=grid_data.node_dptau_dstau, units=time_units)
phase.add_subsystem('time', time_comp, promotes_outputs=['*'], promotes_inputs=['*'])
h_comp = RungeKuttaStepsizeComp(num_segments=num_seg,
seg_rel_lengths=np.diff(grid_data.segment_ends),
time_units=time_units)
phase.add_subsystem('stepsize_comp',
subsys=h_comp,
promotes_inputs=['t_duration'],
promotes_outputs=['h'])
    def configure_time(self, phase):
        """Configure time-related I/O and connect time variables to both ODE instances."""
        super(RungeKutta, self).configure_time(phase)
        phase.time.configure_io()
        phase.stepsize_comp.configure_io()
        options = phase.time_options
        # The tuples here are (name, user_specified_targets, dynamic)
        for name, usr_tgts, dynamic in [('time', options['targets'], True),
                                        ('time_phase', options['time_phase_targets'], True),
                                        ('t_initial', options['t_initial_targets'], False),
                                        ('t_duration', options['t_duration_targets'], False)]:
            targets = get_targets(phase.ode, name=name, user_targets=usr_tgts)
            if targets:
                # Dynamic variables vary across nodes; static ones broadcast
                # (src_indices=None).
                all_src_idxs = self.grid_data.subset_node_indices['all'] if dynamic else None
                end_src_idxs = self.grid_data.subset_node_indices['segment_ends'] if dynamic else None
                # The solve-group ODE sees every stage node; the check ODE sees
                # only segment-end nodes.
                phase.connect(name, ['rk_solve_group.ode.{0}'.format(t) for t in targets],
                              src_indices=all_src_idxs)
                phase.connect(name, ['ode.{0}'.format(t) for t in targets],
                              src_indices=end_src_idxs)
def setup_ode(self, phase):
phase.add_subsystem('rk_solve_group',
RungeKuttaStateContinuityIterGroup(
num_segments=self.options['num_segments'],
method=self.options['method'],
state_options=phase.state_options,
time_units=phase.time_options['units'],
ode_class=phase.options['ode_class'],
ode_init_kwargs=phase.options['ode_init_kwargs'],
k_solver_class=self.options['k_solver_class'],
k_solver_options=self.options['k_solver_options']))
# Since the RK Solve group evaluates the ODE at *predicted* state values, we need
# to instantiate a second ODE group that will call the ODE at the actual integrated
# state values so that we can accurate evaluate path and boundary constraints and
# obtain timeseries for ODE outputs.
phase.add_subsystem('ode',
phase.options['ode_class'](num_nodes=2*self.options['num_segments'],
**phase.options['ode_init_kwargs']))
def configure_ode(self, phase):
phase.rk_solve_group.configure_io()
num_connected = len([s for s in phase.state_options
if phase.state_options[s]['connected_initial']])
phase.promotes('rk_solve_group',
inputs=['h'] if num_connected == 0 else ['h', 'initial_states:*'],
outputs=['states:*', 'state_predict_comp.predicted_states:*'])
def _get_rate_source_path(self, state_name, phase, nodes=None, **kwargs):
try:
var = phase.state_options[state_name]['rate_source']
except RuntimeError:
raise ValueError('state \'{0}\' in phase \'{1}\' was not given a '
'rate_source'.format(state_name, phase.name))
shape = phase.state_options[state_name]['shape']
state_size = np.prod(shape)
var_type = phase.classify_var(var)
num_segments = self.options['num_segments']
num_stages = rk_methods[self.options['method']]['num_stages']
# Determine the path to the variable
if var_type == 'time':
rate_path = 'time'
src_idxs = None
elif var_type == 'time_phase':
rate_path = 'time_phase'
src_idxs = None
elif var_type == 'state':
rate_path = 'state_predict_comp.predicted_states:{0}'.format(var)
size = num_segments * num_stages * state_size
src_idxs = np.arange(size, dtype=int).reshape((num_segments, num_stages, state_size))
elif var_type == 'indep_control':
rate_path = 'control_values:{0}'.format(var)
size = num_segments * num_stages * state_size
src_idxs = np.arange(size, dtype=int).reshape((num_segments, num_stages, state_size))
elif var_type == 'input_control':
rate_path = 'control_values:{0}'.format(var)
size = num_segments * num_stages * state_size
src_idxs = np.arange(size, dtype=int).reshape((num_segments, num_stages, state_size))
elif var_type == 'control_rate':
rate_path = 'control_rates:{0}'.format(var)
size = num_segments * num_stages * state_size
src_idxs = np.arange(size, dtype=int).reshape((num_segments, num_stages, state_size))
elif var_type == 'control_rate2':
rate_path = 'control_rates:{0}'.format(var)
size = num_segments * num_stages * state_size
src_idxs = np.arange(size, dtype=int).reshape((num_segments, num_stages, state_size))
elif var_type == 'indep_polynomial_control':
rate_path = 'polynomial_control_values:{0}'.format(var)
size = num_segments * num_stages * state_size
src_idxs = np.arange(size, dtype=int).reshape((num_segments, num_stages, state_size))
elif var_type == 'input_polynomial_control':
rate_path = 'polynomial_control_values:{0}'.format(var)
size = num_segments * num_stages * state_size
src_idxs = np.arange(size, dtype=int).reshape((num_segments, num_stages, state_size))
elif var_type == 'polynomial_control_rate':
control_name = var[:-5]
rate_path = 'polynomial_control_rates:{0}_rate'.format(control_name)
size = num_segments * num_stages * state_size
src_idxs = np.arange(size, dtype=int).reshape((num_segments, num_stages, state_size))
elif var_type == 'polynomial_control_rate2':
control_name = var[:-6]
rate_path = 'polynomial_control_rates:{0}_rate2'.format(control_name)
size = num_segments * num_stages * state_size
src_idxs = np.arange(size, dtype=int).reshape((num_segments, num_stages, state_size))
elif var_type == 'parameter':
rate_path = 'parameters:{0}'.format(var)
size = np.prod(phase.parameter_options[var]['shape'])
src_idxs = np.zeros(num_segments * num_stages * size, dtype=int).reshape((num_segments,
num_stages,
state_size))
else:
# Failed to find variable, assume it is in the ODE
rate_path = 'rk_solve_group.ode.{0}'.format(var)
state_size = np.prod(shape)
size = num_segments * num_stages * state_size
src_idxs = np.arange(size, dtype=int).reshape((num_segments, num_stages, state_size))
return rate_path, src_idxs
    def setup_states(self, phase):
        """No-op: state variables are created by the RK solve group; see configure_states."""
        pass
    def configure_states(self, phase):
        """
        Configure state design variables and wire states/rate sources.

        For optimized states, declares a design variable over the segment-boundary
        state values (omitting the first node when fix_initial).  Then connects
        the segment-end state values to the check ODE instance and each state's
        rate source to the k-computation component.
        """
        num_seg = self.options['num_segments']
        # One state input value per segment boundary.
        num_state_input_nodes = num_seg + 1
        for state_name, options in phase.state_options.items():
            self._configure_state_introspection(state_name, options, phase)
            size = np.prod(options['shape'])
            if options['opt']:
                # Set the desvar indices accordingly
                desvar_indices = list(range(size))
                if options['fix_initial']:
                    if options['initial_bounds'] is not None:
                        raise ValueError('Cannot specify \'fix_initial=True\' and specify '
                                         'initial_bounds for state {0}'.format(state_name))
                    if options['connected_initial']:
                        raise ValueError('Cannot specify \'fix_initial=True\' and specify '
                                         '\'connected_initial=True\' for state {0} in '
                                         'phase {1}.'.format(state_name, phase.name))
                    # Drop the first node's entries from the design variable.
                    del desvar_indices[:size]
                if options['fix_final']:
                    # NOTE(review): this message has no '{0}' placeholder, so the
                    # .format(state_name) call is a no-op — consider fixing.
                    raise ValueError('Cannot specify \'fix_final=True\' in '
                                     'RungeKuttaPhase'.format(state_name))
                if options['final_bounds'] is not None:
                    raise ValueError('Cannot specify \'final_bounds\' in RungeKuttaPhase '
                                     '(state {0})'.format(state_name))
                if len(desvar_indices) > 0:
                    coerce_desvar_option = CoerceDesvar(num_state_input_nodes, desvar_indices,
                                                        options)
                    # Default unbounded sides to +/- INF_BOUND.
                    lb = np.zeros_like(desvar_indices, dtype=float)
                    lb[:] = -INF_BOUND if coerce_desvar_option('lower') is None else \
                        coerce_desvar_option('lower')
                    ub = np.zeros_like(desvar_indices, dtype=float)
                    ub[:] = INF_BOUND if coerce_desvar_option('upper') is None else \
                        coerce_desvar_option('upper')
                    if options['initial_bounds'] is not None:
                        lb[0] = options['initial_bounds'][0]
                        ub[0] = options['initial_bounds'][-1]
                    phase.add_design_var(name='states:{0}'.format(state_name),
                                         lower=lb,
                                         upper=ub,
                                         scaler=coerce_desvar_option('scaler'),
                                         adder=coerce_desvar_option('adder'),
                                         ref0=coerce_desvar_option('ref0'),
                                         ref=coerce_desvar_option('ref'),
                                         indices=desvar_indices)
        num_seg = self.options['num_segments']
        for state_name, options in phase.state_options.items():
            shape = options['shape']
            # Connect the states at the segment ends to the final ODE instance.
            # Interior boundaries are repeated (shared by adjacent segments).
            row_idxs = np.repeat(np.arange(1, num_seg, dtype=int), repeats=2)
            row_idxs = np.concatenate(([0], row_idxs, [num_seg]))
            src_idxs = get_src_indices_by_row(row_idxs, shape)
            if shape == (1,):
                src_idxs = src_idxs.ravel()
            targets = get_targets(ode=phase.ode, name=state_name, user_targets=options['targets'])
            if targets:
                phase.connect('states:{0}'.format(state_name),
                              ['ode.{0}'.format(tgt) for tgt in targets],
                              src_indices=src_idxs, flat_src_indices=True)
            # Connect the state rate source to the k comp
            rate_path, src_idxs = self._get_rate_source_path(state_name, phase)
            phase.connect(rate_path,
                          'rk_solve_group.k_comp.f:{0}'.format(state_name),
                          src_indices=src_idxs,
                          flat_src_indices=True)
def setup_controls(self, phase):
super(RungeKutta, self).setup_controls(phase)
def configure_controls(self, phase):
super(RungeKutta, self).configure_controls(phase)
grid_data = self.grid_data
for name, options in phase.control_options.items():
shape = options['shape']
segment_end_idxs = grid_data.subset_node_indices['segment_ends']
all_idxs = grid_data.subset_node_indices['all']
segend_src_idxs = get_src_indices_by_row(segment_end_idxs, shape=shape)
all_src_idxs = get_src_indices_by_row(all_idxs, shape=shape)
if shape == (1,):
segend_src_idxs = segend_src_idxs.ravel()
all_src_idxs = all_src_idxs.ravel()
targets = get_targets(ode=phase.ode, name=name, user_targets=options['targets'])
if targets:
src_name = 'control_values:{0}'.format(name)
phase.connect(src_name,
['ode.{0}'.format(t) for t in targets],
src_indices=segend_src_idxs, flat_src_indices=True)
phase.connect(src_name,
['rk_solve_group.ode.{0}'.format(t) for t in targets],
src_indices=all_src_idxs, flat_src_indices=True)
targets = get_targets(ode=phase.ode, name=f'{name}_rate',
user_targets=options['rate_targets'])
if targets:
src_name = 'control_rates:{0}_rate'.format(name)
phase.connect(src_name,
['ode.{0}'.format(t) for t in targets],
src_indices=segend_src_idxs, flat_src_indices=True)
phase.connect(src_name,
['rk_solve_group.ode.{0}'.format(t) for t in targets],
src_indices=all_src_idxs, flat_src_indices=True)
targets = get_targets(ode=phase.ode, name=f'{name}_rate2',
user_targets=options['rate2_targets'])
if targets:
src_name = 'control_rates:{0}_rate2'.format(name)
phase.connect(src_name,
['ode.{0}'.format(t) for t in targets],
src_indices=segend_src_idxs, flat_src_indices=True)
phase.connect(src_name,
['rk_solve_group.ode.{0}'.format(t) for t in targets],
src_indices=all_src_idxs, flat_src_indices=True)
def setup_polynomial_controls(self, phase):
super(RungeKutta, self).setup_polynomial_controls(phase)
def configure_polynomial_controls(self, phase):
super(RungeKutta, self).configure_polynomial_controls(phase)
grid_data = self.grid_data
for name, options in phase.polynomial_control_options.items():
shape = options['shape']
segment_end_idxs = grid_data.subset_node_indices['segment_ends']
all_idxs = grid_data.subset_node_indices['all']
segend_src_idxs = get_src_indices_by_row(segment_end_idxs, shape=shape)
all_src_idxs = get_src_indices_by_row(all_idxs, shape=shape)
if shape == (1,):
segend_src_idxs = segend_src_idxs.ravel()
all_src_idxs = all_src_idxs.ravel()
targets = get_targets(phase.ode, name=name, user_targets=options['targets'])
if targets:
src_name = 'polynomial_control_values:{0}'.format(name)
phase.connect(src_name,
['ode.{0}'.format(t) for t in targets],
src_indices=segend_src_idxs, flat_src_indices=True)
phase.connect(src_name,
['rk_solve_group.ode.{0}'.format(t) for t in targets],
src_indices=all_src_idxs, flat_src_indices=True)
targets = get_targets(ode=phase.ode, name=f'{name}_rate',
user_targets=options['rate_targets'])
if targets:
phase.connect(f'polynomial_control_rates:{name}_rate',
['ode.{0}'.format(t) for t in targets],
src_indices=segend_src_idxs, flat_src_indices=True)
phase.connect(f'polynomial_control_rates:{name}_rate',
['rk_solve_group.ode.{0}'.format(t) for t in targets],
src_indices=all_src_idxs, flat_src_indices=True)
targets = get_targets(ode=phase.ode, name=f'{name}_rate2',
user_targets=options['rate2_targets'])
if targets:
phase.connect(f'polynomial_control_rates:{name}_rate2',
[f'ode.{t}' for t in targets],
src_indices=segend_src_idxs, flat_src_indices=True)
phase.connect(f'polynomial_control_rates:{name}_rate2',
[f'rk_solve_group.ode.{t}' for t in targets],
src_indices=all_src_idxs, flat_src_indices=True)
def setup_defects(self, phase):
"""
Setup the Continuity component as necessary.
"""
"""
Setup the Collocation and Continuity components as necessary.
"""
grid_data = self.grid_data
num_seg = grid_data.num_segments
# Add the continuity constraint component if necessary
if num_seg > 1 and phase.control_options:
time_units = phase.time_options['units']
phase.add_subsystem('continuity_comp',
RungeKuttaControlContinuityComp(grid_data=grid_data,
state_options=phase.state_options,
control_options=phase.control_options,
time_units=time_units),
promotes_inputs=['t_duration'])
def configure_defects(self, phase):
grid_data = self.grid_data
num_seg = grid_data.num_segments
# Add the continuity constraint component if necessary
if num_seg > 1 and phase.control_options:
phase.continuity_comp.configure_io()
for | |
address_sorting, upb, ...]
The result of the algorithm:
end2end_tests -> [grpc_test_util]
grpc_test_util -> [grpc]
grpc -> [gpr, address_sorting, upb, ...]
"""
bazel_rule = bazel_rules[rule_name]
direct_deps = _extract_deps(bazel_rule, bazel_rules)
transitive_deps = set()
collapsed_deps = set()
exclude_deps = set()
collapsed_srcs = set(_extract_sources(bazel_rule))
collapsed_public_headers = set(_extract_public_headers(bazel_rule))
collapsed_headers = set(_extract_nonpublic_headers(bazel_rule))
for dep in direct_deps:
external_dep_name_maybe = _external_dep_name_from_bazel_dependency(dep)
if dep in bazel_rules:
# Descend recursively, but no need to do that for external deps
if external_dep_name_maybe is None:
if "_PROCESSING_DONE" not in bazel_rules[dep]:
# This item is not processed before, compute now
_compute_transitive_metadata(dep, bazel_rules,
bazel_label_to_dep_name)
transitive_deps.update(bazel_rules[dep].get(
'_TRANSITIVE_DEPS', []))
collapsed_deps.update(
collapsed_deps, bazel_rules[dep].get('_COLLAPSED_DEPS', []))
exclude_deps.update(bazel_rules[dep].get('_EXCLUDE_DEPS', []))
# This dep is a public target, add it as a dependency
if dep in bazel_label_to_dep_name:
transitive_deps.update([bazel_label_to_dep_name[dep]])
collapsed_deps.update(collapsed_deps,
[bazel_label_to_dep_name[dep]])
# Add all the transitive deps of our every public dep to exclude
# list since we want to avoid building sources that are already
# built by our dependencies
exclude_deps.update(bazel_rules[dep]['_TRANSITIVE_DEPS'])
continue
# This dep is an external target, add it as a dependency
if external_dep_name_maybe is not None:
transitive_deps.update([external_dep_name_maybe])
collapsed_deps.update(collapsed_deps, [external_dep_name_maybe])
continue
# Direct dependencies are part of transitive dependencies
transitive_deps.update(direct_deps)
# Calculate transitive public deps (needed for collapsing sources)
transitive_public_deps = set(
[x for x in transitive_deps if x in bazel_label_to_dep_name])
# Remove intermediate targets that our public dependencies already depend
# on. This is the step that further shorten the deps list.
collapsed_deps = set([x for x in collapsed_deps if x not in exclude_deps])
# Compute the final source files and headers for this build target whose
# name is `rule_name` (input argument of this function).
#
# Imaging a public target PX has transitive deps [IA, IB, PY, IC, PZ]. PX,
# PY and PZ are public build targets. And IA, IB, IC are intermediate
# targets. In addition, PY depends on IC.
#
# Translate the condition into dependency graph:
# PX -> [IA, IB, PY, IC, PZ]
# PY -> [IC]
# Public targets: [PX, PY, PZ]
#
# The collapsed dependencies of PX: [PY, PZ].
# The excluded dependencies of X: [PY, IC, PZ].
# (IC is excluded as a dependency of PX. It is already included in PY, hence
# it would be redundant to include it again.)
#
# Target PX should include source files and headers of [PX, IA, IB] as final
# build metadata.
for dep in transitive_deps:
if dep not in exclude_deps and dep not in transitive_public_deps:
if dep in bazel_rules:
collapsed_srcs.update(_extract_sources(bazel_rules[dep]))
collapsed_public_headers.update(
_extract_public_headers(bazel_rules[dep]))
collapsed_headers.update(
_extract_nonpublic_headers(bazel_rules[dep]))
# This item is a "visited" flag
bazel_rule['_PROCESSING_DONE'] = True
# Following items are described in the docstinrg.
bazel_rule['_TRANSITIVE_DEPS'] = list(sorted(transitive_deps))
bazel_rule['_COLLAPSED_DEPS'] = list(sorted(collapsed_deps))
bazel_rule['_COLLAPSED_SRCS'] = list(sorted(collapsed_srcs))
bazel_rule['_COLLAPSED_PUBLIC_HEADERS'] = list(
sorted(collapsed_public_headers))
bazel_rule['_COLLAPSED_HEADERS'] = list(sorted(collapsed_headers))
bazel_rule['_EXCLUDE_DEPS'] = list(sorted(exclude_deps))
# TODO(jtattermusch): deduplicate with transitive_dependencies.py (which has a
# slightly different logic)
# TODO(jtattermusch): This is done to avoid introducing too many intermediate
# libraries into the build.yaml-based builds (which might cause issues
# building language-specific artifacts) and also because the libraries in
# build.yaml-based build are generally considered units of distributions (=
# public libraries that are visible to the user and are installable), while in
# bazel builds it is customary to define larger number of smaller
# "sublibraries". The need for elision (and expansion) of intermediate libraries
# can be re-evaluated in the future.
def _populate_transitive_metadata(bazel_rules: Any,
                                  public_dep_names: Iterable[str]) -> None:
    """Add 'transitive_deps' field for each of the rules"""
    # Map each public dependency's Bazel label back to its public name.
    bazel_label_to_dep_name = {
        _get_bazel_label(dep_name): dep_name for dep_name in public_dep_names
    }
    # Make sure we reach every Bazel rule; already-visited rules carry the
    # '_PROCESSING_DONE' marker and are skipped.
    # TODO(lidiz) potentially we could only update a subset of rules
    for rule_name in bazel_rules:
        if '_PROCESSING_DONE' not in bazel_rules[rule_name]:
            _compute_transitive_metadata(rule_name, bazel_rules,
                                         bazel_label_to_dep_name)
def update_test_metadata_with_transitive_metadata(
        all_extra_metadata: BuildDict, bazel_rules: BuildDict) -> None:
    """Patches test build metadata with transitive metadata."""
    for lib_name, lib_dict in list(all_extra_metadata.items()):
        # Only 'test'-type build targets are patched here.
        is_test_target = (lib_dict.get('build') == 'test'
                          and lib_dict.get('_TYPE') == 'target')
        if not is_test_target:
            continue
        transitive_deps = bazel_rules[_get_bazel_label(lib_name)]['_TRANSITIVE_DEPS']
        if '//external:benchmark' in transitive_deps:
            lib_dict['benchmark'] = True
            lib_dict['defaults'] = 'benchmark'
        if '//external:gtest' in transitive_deps:
            lib_dict['gtest'] = True
            lib_dict['language'] = 'c++'
def _get_transitive_protos(bazel_rules, t):
que = [
t,
]
visited = set()
ret = []
while que:
name = que.pop(0)
rule = bazel_rules.get(name, None)
if rule:
for dep in rule['deps']:
if dep not in visited:
visited.add(dep)
que.append(dep)
for src in rule['srcs']:
if src.endswith('.proto'):
ret.append(src)
return list(set(ret))
def _expand_upb_proto_library_rules(bazel_rules):
    """Rewrite upb proto-library rules in place to reference pre-generated sources.

    For each rule produced by the grpc_upb_proto_library /
    grpc_upb_proto_reflection_library generators, replaces its deps with the
    fixed upb support libraries and replaces srcs/hdrs with the paths of the
    pre-generated .upb(.c/.h) or .upbdefs(.c/.h) files derived from the
    transitive .proto inputs.
    """
    # Expand the .proto files from UPB proto library rules into the pre-generated
    # upb.h and upb.c files.
    GEN_UPB_ROOT = '//:src/core/ext/upb-generated/'
    GEN_UPBDEFS_ROOT = '//:src/core/ext/upbdefs-generated/'
    # (workspace prefix to strip, extra leading path segment to strip with it)
    EXTERNAL_LINKS = [('@com_google_protobuf//', ':src/'),
                      ('@com_google_googleapis//', ''),
                      ('@com_github_cncf_udpa//', ''),
                      ('@com_envoyproxy_protoc_gen_validate//', ''),
                      ('@envoy_api//', ''), ('@opencensus_proto//', '')]
    for name, bazel_rule in bazel_rules.items():
        gen_func = bazel_rule.get('generator_function', None)
        if gen_func in ('grpc_upb_proto_library',
                        'grpc_upb_proto_reflection_library'):
            # get proto dependency
            deps = bazel_rule['deps']
            if len(deps) != 1:
                raise Exception(
                    'upb rule "{0}" should have 1 proto dependency but has "{1}"'
                    .format(name, deps))
            # deps is not properly fetched from bazel query for upb_proto_library target
            # so add the upb dependency manually
            bazel_rule['deps'] = [
                '//external:upb_lib', '//external:upb_lib_descriptor',
                '//external:upb_generated_code_support__only_for_generated_code_do_not_use__i_give_permission_to_break_me'
            ]
            # populate the upb_proto_library rule with pre-generated upb headers
            # and sources using proto_rule
            protos = _get_transitive_protos(bazel_rules, deps[0])
            if len(protos) == 0:
                raise Exception(
                    'upb rule "{0}" should have at least one proto file.'.
                    format(name))
            srcs = []
            hdrs = []
            for proto_src in protos:
                # Strip the known external-workspace prefix (plus its extra
                # path segment) so the path is repo-relative.
                for external_link in EXTERNAL_LINKS:
                    if proto_src.startswith(external_link[0]):
                        proto_src = proto_src[len(external_link[0]) +
                                              len(external_link[1]):]
                        break
                # Any remaining '@...' prefix means an unrecognized workspace.
                if proto_src.startswith('@'):
                    raise Exception('"{0}" is unknown workspace.'.format(name))
                proto_src = _extract_source_file_path(proto_src)
                # Reflection rules map to .upbdefs files; plain rules to .upb.
                ext = '.upb' if gen_func == 'grpc_upb_proto_library' else '.upbdefs'
                root = GEN_UPB_ROOT if gen_func == 'grpc_upb_proto_library' else GEN_UPBDEFS_ROOT
                srcs.append(root + proto_src.replace('.proto', ext + '.c'))
                hdrs.append(root + proto_src.replace('.proto', ext + '.h'))
            bazel_rule['srcs'] = srcs
            bazel_rule['hdrs'] = hdrs
def _generate_build_metadata(build_extra_metadata: BuildDict,
                             bazel_rules: BuildDict) -> BuildDict:
    """Generate build metadata in build.yaml-like format bazel build metadata and build.yaml-specific "extra metadata"."""
    lib_names = list(build_extra_metadata.keys())
    result = {}

    for lib_name in lib_names:
        # start from the bazel rule, then overlay the hand-written extras
        entry = _create_target_from_bazel_rule(lib_name, bazel_rules)
        entry.update(build_extra_metadata.get(lib_name, {}))
        result[lib_name] = entry

    # Rename targets marked with "_RENAME" extra metadata.
    # This is mostly a cosmetic change to ensure that we end up with build.yaml target
    # names we're used to from the past (and also to avoid too long target names).
    # The rename step needs to be made after we're done with most of processing logic
    # otherwise the already-renamed libraries will have different names than expected
    for lib_name in lib_names:
        new_name = build_extra_metadata.get(lib_name, {}).get('_RENAME', None)
        if not new_name:
            continue
        # store lib under the new name and also change its 'name' property
        if new_name in result:
            raise Exception('Cannot rename target ' + str(lib_name) + ', ' +
                            str(new_name) + ' already exists.')
        entry = result.pop(lib_name)
        entry['name'] = new_name
        result[new_name] = entry

        # dep names need to be updated as well
        for other in result.values():
            other['deps'] = [
                new_name if dep == lib_name else dep for dep in other['deps']
            ]

    return result
def _convert_to_build_yaml_like(lib_dict: BuildMetadata) -> BuildYaml:
lib_names = [
lib_name for lib_name in list(lib_dict.keys())
if lib_dict[lib_name].get('_TYPE', 'library') == 'library'
]
target_names = [
lib_name for lib_name in list(lib_dict.keys())
if lib_dict[lib_name].get('_TYPE', 'library') == 'target'
]
test_names = [
lib_name for lib_name in list(lib_dict.keys())
if lib_dict[lib_name].get('_TYPE', 'library') == 'test'
]
# list libraries and targets in predefined order
lib_list = [lib_dict[lib_name] for lib_name in lib_names]
target_list = [lib_dict[lib_name] for lib_name in target_names]
test_list = [lib_dict[lib_name] for lib_name in test_names]
# get rid of temporary private fields prefixed with "_" and some other useless fields
for lib in lib_list:
for field_to_remove in [
k for k in list(lib.keys()) if k.startswith('_')
]:
lib.pop(field_to_remove, None)
for target in target_list:
for field_to_remove in [
k for k in list(target.keys()) if k.startswith('_')
]:
target.pop(field_to_remove, None)
target.pop('public_headers',
None) # public headers make no sense for targets
for test in test_list:
for field_to_remove in [
k for k in list(test.keys()) if k.startswith('_')
]:
test.pop(field_to_remove, None)
test.pop('public_headers',
None) # public headers make no sense for tests
build_yaml_like = {
'libs': lib_list,
'filegroups': | |
<filename>CHRLINE/services/ShopService.py
# -*- coding: utf-8 -*-
class ShopService(object):
ShopService_REQ_TYPE = 3
ShopService_RES_TYPE = 3
    def __init__(self):
        # No per-instance state. The helpers this service calls
        # (postPackDataAndGetUnpackRespData, getStringBytes, getIntBytes,
        # generateDummyProtocol, LINE_UNIFIED_SHOP_ENDPOINT) are not defined
        # in this class -- presumably provided by the host/mixin class; verify.
        pass
def getProduct(self, shopId, productId, language="zh-TW", country="TW"):
sqrd = [128, 1, 0, 1, 0, 0, 0, 10, 103, 101, 116,
80, 114, 111, 100, 117, 99, 116, 0, 0, 0, 0]
sqrd += [11, 0, 2, 0, 0, 0, len(shopId)] # e.g. stickershop
for value in shopId:
sqrd.append(ord(value))
sqrd += [11, 0, 3, 0, 0, 0, len(productId)]
for value in productId:
sqrd.append(ord(value))
sqrd += [12, 0, 4]
sqrd += [11, 0, 1, 0, 0, 0, len(language)]
for value in language:
sqrd.append(ord(value))
sqrd += [11, 0, 2, 0, 0, 0, len(country)]
for value in country:
sqrd.append(ord(value))
sqrd += [0, 0]
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd)
def getProductsByAuthor(self, authorId, productType=1):
sqrd = [128, 1, 0, 1] + \
self.getStringBytes('getProductsByAuthor') + [0, 0, 0, 0]
sqrd += [12, 0, 2]
sqrd += [8, 0, 1] + self.getIntBytes(productType)
sqrd += [11, 0, 2] + self.getStringBytes(authorId)
sqrd += [8, 0, 3] + self.getIntBytes(0)
sqrd += [8, 0, 4] + self.getIntBytes(100)
sqrd += [2, 0, 6, int(True)]
sqrd += [0, 0]
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd)
def getStudentInformation(self):
sqrd = [128, 1, 0, 1] + \
self.getStringBytes('getStudentInformation') + [0, 0, 0, 0]
sqrd += [12, 0, 2]
sqrd += [0, 0]
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd)
def canReceivePresent(self, shopId, productId, recipientMid):
sqrd = [128, 1, 0, 1] + \
self.getStringBytes('canReceivePresent') + [0, 0, 0, 0]
sqrd += [11, 0, 2] + self.getStringBytes(shopId)
sqrd += [11, 0, 3] + self.getStringBytes(productId)
sqrd += [12, 0, 4]
sqrd += [11, 0, 1] + self.getStringBytes('zh_TW') # language
sqrd += [11, 0, 2] + self.getStringBytes('TW') # country
sqrd += [0]
sqrd += [11, 0, 5] + self.getStringBytes(recipientMid)
sqrd += [0]
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd)
def getOwnedProductSummaries(self, shopId, offset=0, limit=200, language='zh_TW', country='TW'):
sqrd = [128, 1, 0, 1] + \
self.getStringBytes('getOwnedProductSummaries') + [0, 0, 0, 0]
sqrd += [11, 0, 2] + self.getStringBytes(shopId)
sqrd += [8, 0, 3] + self.getIntBytes(offset)
sqrd += [8, 0, 4] + self.getIntBytes(limit)
sqrd += [12, 0, 5]
sqrd += [11, 0, 1] + self.getStringBytes(language)
sqrd += [11, 0, 2] + self.getStringBytes(country)
sqrd += [0, 0]
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd)
def getShowcaseV3(self, productType: int, showcaseType: int, subType: int, continuationToken: str = None, limit: int = 20):
"""
- productType
STICKER(1),
THEME(2),
STICON(3);
- showcaseType
UNPURCHASED(1),
SUBSCRIPTION(2);
- subType
GENERAL(0),
CREATORS(1),
STICON(2);
"""
params = [
[12, 1, [ # Shop_ShowcaseRequest
[8, 1, productType],
[8, 2, showcaseType],
[8, 3, subType],
[11, 4, continuationToken],
[8, 5, limit],
]]
]
sqrd = self.generateDummyProtocol(
'getShowcaseV3', params, self.ShopService_REQ_TYPE)
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd, self.ShopService_RES_TYPE)
def getProductV2(self, productType: int, productId: str, carrierCode: str = "", saveBrowsingHistory: bool = True):
params = [
[12, 2, [
[8, 1, productType],
[11, 2, productId],
[11, 3, carrierCode],
[2, 4, saveBrowsingHistory],
]]
]
sqrd = self.generateDummyProtocol(
'getProductV2', params, self.ShopService_REQ_TYPE)
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd, self.ShopService_RES_TYPE)
def getProductByVersion(self, shopId: str, productId: str, productVersion: int, language: str = 'zh_TW', country: str = 'TW'):
params = [
[11, 2, shopId],
[11, 3, productId],
[10, 4, productVersion],
[12, 5, [
[11, 1, language],
[11, 2, country],
]],
]
sqrd = self.generateDummyProtocol(
'getProductByVersion', params, self.ShopService_REQ_TYPE)
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd, self.ShopService_RES_TYPE)
def placePurchaseOrderForFreeProduct(self, shopId: str, productId: str, recipientMid: str, price: str, amount: str, priceString: str, enableLinePointAutoExchange: bool = True, language: str = 'zh_TW', country: str = 'TW', presentAttributes: dict = {}):
params = [
[12, 2, [
[11, 1, shopId],
[11, 2, productId],
[11, 5, recipientMid],
[12, 11, [
[11, 1, price],
[11, 2, amount],
[11, 5, priceString],
]],
[2, 12, enableLinePointAutoExchange],
[12, 21, [
[11, 1, language],
[11, 2, country],
]],
[13, 31, [11, 11, presentAttributes]],
]],
]
sqrd = self.generateDummyProtocol(
'placePurchaseOrderForFreeProduct', params, self.ShopService_REQ_TYPE)
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd, self.ShopService_RES_TYPE)
def placePurchaseOrderWithLineCoin(self, shopId: str, productId: str, recipientMid: str, price: str, amount: str, priceString: str, enableLinePointAutoExchange: bool = True, language: str = 'zh_TW', country: str = 'TW', presentAttributes: dict = {}):
params = [
[12, 2, [
[11, 1, shopId],
[11, 2, productId],
[11, 5, recipientMid],
[12, 11, [
[11, 1, price],
[11, 2, amount],
[11, 5, priceString],
]],
[2, 12, enableLinePointAutoExchange],
[12, 21, [
[11, 1, language],
[11, 2, country],
]],
[13, 31, [11, 11, presentAttributes]],
]],
]
sqrd = self.generateDummyProtocol(
'placePurchaseOrderWithLineCoin', params, self.ShopService_REQ_TYPE)
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd, self.ShopService_RES_TYPE)
def placePurchaseOrderWithIAP(self, shopId: str, productId: str, recipientMid: str, price: str, amount: str, priceString: str, enableLinePointAutoExchange: bool = True, language: str = 'zh_TW', country: str = 'TW', presentAttributes: dict = {}):
params = [
[12, 2, [
[11, 1, shopId],
[11, 2, productId],
[11, 5, recipientMid],
[12, 11, [
[11, 1, price],
[11, 2, amount],
[11, 5, priceString],
]],
[2, 12, enableLinePointAutoExchange],
[12, 21, [
[11, 1, language],
[11, 2, country],
]],
[13, 31, [11, 11, presentAttributes]],
]],
]
sqrd = self.generateDummyProtocol(
'placePurchaseOrderWithIAP', params, self.ShopService_REQ_TYPE)
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd, self.ShopService_RES_TYPE)
def getOwnedProducts(self, shopId: str, offset: int = 0, limit: int = 20, language: str = 'zh_TW', country: str = 'TW'):
params = [
[11, 2, shopId],
[8, 3, offset],
[8, 4, limit],
[12, 5, [
[11, 1, language],
[11, 2, country],
]],
]
sqrd = self.generateDummyProtocol(
'getOwnedProducts', params, self.ShopService_REQ_TYPE)
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd, self.ShopService_RES_TYPE)
def getPurchasedProducts(self, shopId: str, offset: int = 0, limit: int = 20, language: str = 'zh_TW', country: str = 'TW'):
params = [
[11, 2, shopId],
[8, 3, offset],
[8, 4, limit],
[12, 5, [
[11, 1, language],
[11, 2, country],
]],
]
sqrd = self.generateDummyProtocol(
'getPurchasedProducts', params, self.ShopService_REQ_TYPE)
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd, self.ShopService_RES_TYPE)
def getReceivedPresents(self, shopId: str, offset: int = 0, limit: int = 20, language: str = 'zh_TW', country: str = 'TW'):
params = [
[11, 2, shopId],
[8, 3, offset],
[8, 4, limit],
[12, 5, [
[11, 1, language],
[11, 2, country],
]],
]
sqrd = self.generateDummyProtocol(
'getReceivedPresents', params, self.ShopService_REQ_TYPE)
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd, self.ShopService_RES_TYPE)
def getSentPresents(self, shopId: str, offset: int = 0, limit: int = 20, language: str = 'zh_TW', country: str = 'TW'):
params = [
[11, 2, shopId],
[8, 3, offset],
[8, 4, limit],
[12, 5, [
[11, 1, language],
[11, 2, country],
]],
]
sqrd = self.generateDummyProtocol(
'getSentPresents', params, self.ShopService_REQ_TYPE)
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd, self.ShopService_RES_TYPE)
def notifyProductEvent(self, shopId: str, productId: str, productVersion: int, productEvent: int):
params = [
[11, 2, shopId], # sticonshop
[11, 3, productId], # 1
[10, 4, productVersion], # 3
[10, 5, productEvent], # 16
]
sqrd = self.generateDummyProtocol(
'notifyProductEvent', params, self.ShopService_REQ_TYPE)
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd, self.ShopService_RES_TYPE)
def getProductValidationScheme(self, shopId: str, productId: str, productVersion: int):
params = [
[11, 2, shopId],
[11, 3, productId],
[10, 4, productVersion],
]
sqrd = self.generateDummyProtocol(
'getProductValidationScheme', params, self.ShopService_REQ_TYPE)
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd, self.ShopService_RES_TYPE)
def validateProduct(self, shopId: str, productId: str, productVersion: int, key: str, offset: int, size: int, authCode: str):
params = [
[11, 2, shopId],
[11, 3, productId],
[10, 4, productVersion],
[12, 5, [
[12, 1, [
[11, 10, key],
[10, 11, offset],
[10, 12, size],
]],
[11, 10, authCode]
]],
]
sqrd = self.generateDummyProtocol(
'validateProduct', params, self.ShopService_REQ_TYPE)
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd, self.ShopService_RES_TYPE)
def getProductsByBillingItemId(self, shopId: str, billingItemIds: list, language: str = 'zh_TW', country: str = 'TW'):
params = [
[11, 2, shopId],
[15, 3, [11, billingItemIds]]
[12, 4, [
[11, 1, language],
[11, 2, country],
]],
]
sqrd = self.generateDummyProtocol(
'getProductsByBillingItemId', params, self.ShopService_REQ_TYPE)
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd, self.ShopService_RES_TYPE)
def getUpdates(self, shopId: str, language: str = 'zh_TW', country: str = 'TW'):
params = [
[11, 2, shopId],
[12, 3, [
[11, 1, language],
[11, 2, country],
]],
]
sqrd = self.generateDummyProtocol(
'getUpdates', params, self.ShopService_REQ_TYPE)
return self.postPackDataAndGetUnpackRespData(self.LINE_UNIFIED_SHOP_ENDPOINT, sqrd, self.ShopService_RES_TYPE)
def searchProductsV2(self, query: str, productTypes: list, subtypes: list, priceTiers: list, stickerResourceTypes: list, productResourceTypes: list, continuationToken: str, limit: int = 10, deprecatedOffsetForLineSearchServer: int = 0, sortType: int = 0, enableSearchSuggestKeywords: bool = False):
params = [
[12, 2, [
[11, 1, query],
[14, 2, [8, productTypes]],
[14, 3, [8, subtypes]],
[11, 4, continuationToken],
[8, 5, limit],
[8, 6, deprecatedOffsetForLineSearchServer],
[14, 7, [8, priceTiers]],
[14, 8, [8, stickerResourceTypes]],
[8, 9, sortType],
[10, 14, [8, productResourceTypes]],
[2, 100, enableSearchSuggestKeywords],
# [12, | |
one dataframe
MAST_LIST = []
for SES, CSV in MAST:
path = f"{self.DATA_DIR}/IMAGEN_RAW/2.7/{SES}/psytools/{CSV}"
DF = pd.read_csv(path, low_memory=False)
DF['ID'] = DF['User code'] if SES=='FU3' else DF['User code'].apply(lambda x : int(x[:12]))
DF['Session'] = SES
# Renmae the values
DF['MAST total'] = DF['mast_total']
DF['MAST Alcohol dependency symptoms'] = DF['mast_dsm']
DF['MAST sum'] = DF['mast_sum']
DF['MAST flag'] = DF['mast_sum'].apply(flag)
DF2 = DF[ROI]
MAST_LIST.append(DF2)
MAST = pd.concat(MAST_LIST)
DF3 = MAST
if DATA == "FTND":
# Set the files with session and roi columns
FTND = [
('FU3','IMAGEN-IMGN_ESPAD_FU3.csv'),
('FU2','IMAGEN-IMGN_ESPAD_CHILD_FU2-IMAGEN_DIGEST.csv'),
('FU1','IMAGEN-IMGN_ESPAD_CHILD_FU_RC5-IMAGEN_DIGEST.csv'),
('BL','IMAGEN-IMGN_ESPAD_CHILD_RC5-IMAGEN_DIGEST.csv')
]
ROI = ['ID','Session','Likelihood of nicotine dependence child','FTND Sum']
# Generate the columns
def test(x):
if (7<=x and x <=10): return 'highly dependent'
elif (4<=x and x <=6): return 'moderately dependent'
elif (x<4): return 'less dependent'
else: return np.NaN
# Generate the instrument files in one dataframe
FTND_LIST = []
for SES, CSV in FTND:
path = f"{self.DATA_DIR}/IMAGEN_RAW/2.7/{SES}/psytools/{CSV}"
DF = pd.read_csv(path, low_memory=False)
DF['ID'] = DF['User code'] if SES=='FU3' else DF['User code'].apply(lambda x : int(x[:12]))
DF['Session'] = SES
# Rename the values
DF['Likelihood of nicotine dependence child'] = DF['ftnd_sum'].apply(test)
DF['FTND Sum'] = DF['ftnd_sum']
DF2 = DF[ROI]
FTND_LIST.append(DF2)
FTND = pd.concat(FTND_LIST)
DF3 = FTND
if DATA == "DAST":
# Set the files with session and roi columns
DAST = [
]
# Generate the columns
# Generate the instrument files in one dataframe
# if SESSION == 'FU3':
# Variables = ['sex', 'site', 'class']
# DATA_DF = self.NEW_DF[Variables]
# return Variables, DATA_DF
# if 'DAST' == self.DATA: # 'DAST'
# self.VARIABLES, self.NEW_DF2 = DAST_SESSION(self.SESSION)
pass
if DATA == "SCID":
# Set the files with session and roi columns
# Generate the columns
# Generate the instrument files in one dataframe
# def SCID_SESSION(SESSION):
# if SESSION == 'FU3':
# Variables = ['sex', 'site', 'class']
# DATA_DF = self.NEW_DF[Variables]
# return Variables, DATA_DF
# if 'SCID' == self.DATA: # 'SCID'
# self.VARIABLES, self.NEW_DF2 = SCID_SESSION(self.SESSION)
pass
if DATA == "DMQ":
# Set the files with session and roi columns
# Generate the columns
# Generate the instrument files in one dataframe
# if SESSION == 'FU3':
# Variables = ['sex', 'site', 'class']
# DATA_DF = self.NEW_DF[Variables]
# return Variables, DATA_DF
# if 'DMQ' == self.DATA: # 'DMQ'
# self.VARIABLES, self.NEW_DF2 = DMQ_SESSION(self.SESSION)
pass
if DATA == "BULLY":
pass
if DATA == "ESPAD":
pass
if DATA == "TLFB":
pass
if DATA == "AUDIT":
# Set the files with session and roi columns
# Generate the columns
# Generate the instrument files in one dataframe
# if SESSION == 'FU3':
# Variables = ['sex', 'site', 'class']
# DATA_DF = self.NEW_DF[Variables]
# return Variables, DATA_DF
# elif 'AUDIT' == self.DATA: # 'AUDIT'
# self.VARIABLES, self.NEW_DF2 = AUDIT_SESSION(self.SESSION)
pass
if save == True:
save_path = f"{self.DATA_DIR}/posthoc/all_{DATA}.csv"
# set the save option
if not os.path.isdir(os.path.dirname(save_path)):
os.makedirs(os.path.dirname(save_path))
DF3.to_csv(save_path, index=None)
return DF3
def get_INSTRUMENT(self, instrument_file):
""" Load the INSTRUMENT file
Parameters
----------
instrument_file : string
The IMAGEN's instrument file (*.csv)
Returns
-------
DF : pandas.dataframe
The Instrument dataframe
Notes
-----
This function select the ROI:
Demographic profile - LEQ, PBQ, GEN, BMI,
Psychological profile - NEO, SURPS,
Social profile - CTQ, CTS,
Substance use profile - FTND, DAST, SCID, DMQ, BSI, AUDIT, MAST
Examples
--------
>>> from imagen_posthocloader import *
>>> DATA = IMAGEN_INSTRUMENT()
>>> DF = DATA.get_INSTRUMENT(
... instrument_file) # INSTRUMENT
>>> DF_FU3 = DF.groupby('Session').get_group('FU3')
"""
# Load the instrument file
instrument_path = f"{self.DATA_DIR}/posthoc/{instrument_file}"
DF = pd.read_csv(instrument_path, low_memory=False)
return DF
# def __str__(self):
# """ Print the instrument loader steps """
# return "Step 1. load the instrument: " \
# + "\n File = " + str(self.instrument_path) \
# + "\n The dataset contains " + str(self.DF.shape[0]) \
# + " samples and " + str(self.DF.shape[1]) + " columns" \
# + "\n Variables = " + str(self.VARIABLES)
# print(f"{'-'*83} \n{self.__str__()} \n{'-'*83}")
# print(f"{self.NEW_DF.info(), self.NEW_DF.describe()}")
class HDF5_loader:
    def __init__(self, DATA_DIR="/ritter/share/data/IMAGEN"):
        """ Set up path

        Parameters
        ----------
        DATA_DIR : string, optional
            Directory IMAGEN absolute path; all loaders below resolve
            h5files/ and posthoc/ relative to this root.

        """
        # Set the directory path: IMAGEN
        self.DATA_DIR = DATA_DIR
def set_HDF5(self, DATA, save=False):
""" Save all session y in one file
Parameters
----------
DATA : string,
y name
save : boolean,
save the pandas.dataframe to .csv file
Returns
-------
DF3 : pandas.dataframe
instrument in all session (BL, FU1, FU2, FU3)
Examples
--------
>>> from imagen_posthocloader import *
>>> DATA = HDF5_loader()
>>> DF3 = DATA.set_HDF5(
... DATA, # HDF5
... save = True) # save
>>> DF_FU3 = DF3.groupby('Session').get_group('FU3')
Notes
-----
There are no session in FU2 for imaging file
y = {Binge} # Other y can be added as if
"""
if DATA == "Binge":
# Set the files with session and roi columns
BINGE = [
('FU3','Training','newlbls-clean-fu3-espad-fu3-19a-binge-n650.h5'),
('FU3','Holdout', 'newholdout-clean-fu3-espad-fu3-19a-binge-n102.h5'),
('FU2','Training','newlbls-clean-fu2-espad-fu3-19a-binge-n634.h5'),
('FU2','Holdout', 'newholdout-clean-fu2-espad-fu3-19a-binge-n102.h5'),
('BL', 'Training','newlbls-clean-bl-espad-fu3-19a-binge-n620.h5'),
('BL', 'Holdout', 'newholdout-clean-bl-espad-fu3-19a-binge-n102.h5')
]
ROI = ['ID','Session','y','Dataset','Sex','Site','Class']
# Generate the instrument files in one dataframe
BINGE_LIST = []
for SES, DATASET, HDF5 in BINGE:
path = f"{self.DATA_DIR}/h5files/{HDF5}"
# Convert HDF5 to List
d = h5.File(path,'r')
# Set All, HC, and AAM
b_list = list(np.array(d[list(d.keys())[0]]))
ALL = list(np.array(d['i']))
HC = [ALL[i] for i, j in enumerate(b_list) if j%2==0]
AAM = [ALL[i] for i, j in enumerate(b_list) if j%2==1]
# Set Sex
sex = list(np.array(d['sex']))
SEX = ['Male' if i==0 else 'Female' for i in sex]
# Set Site
sites = list(np.array(d['site']))
center = {0: 'Paris', 1: 'Nottingham', 2:'Mannheim', 3:'London',
4: 'Hamburg', 5: 'Dublin', 6:'Dresden', 7:'Berlin'}
SITE = [center[i] for i in sites]
# Set Class
target = list(np.array(d[list(d.keys())[0]]))
CLASS = ['HC' if i==0 else 'AAM' for i in target]
# Generate the DF
DF2 = pd.DataFrame(
{"ID" : ALL,
"Session" : SES,
"y" : list(d.keys())[0],
"Dataset" : DATASET,
"Sex" : SEX,
"Site" : SITE,
"Class" : CLASS}
)
BINGE_LIST.append(DF2)
DF3 = pd.concat(BINGE_LIST)
if save == True:
save_path = f"{self.DATA_DIR}/posthoc/all_{DATA}.csv"
# set the save option
if not os.path.isdir(os.path.dirname(save_path)):
os.makedirs(os.path.dirname(save_path))
DF3.to_csv(save_path, index=None)
return DF3
def get_HDF5(self, hdf5_file):
""" Select the ROI y as file
Parameters
----------
h5df_file : string
The IMAGEN's instrument file (*.csv)
Returns
-------
DF : pandas.dataframe
The Instrument dataframe
Notes
-----
There are no session in FU2 for imaging file
y = {Binge} # Other y can be added as if
Examples
--------
>>> from imagen_posthocloader import *
>>> DATA = HDF5_loader()
>>> DF = DATA.get_HDF5(
... hdf5_file) # HDF5
>>> DF_FU3 = DF.groupby('Session').get_group('FU3')
"""
# Load the hdf5 file
hdf5_path = f"{self.DATA_DIR}/posthoc/{hdf5_file}"
DF = pd.read_csv(hdf5_path, low_memory=False)
return DF
def get_train_data(self, H5_DIR, group=False):
""" Load the train data
Parameters
----------
H5_DIR : string
Directory saved File path
group : boolean
If True then generate the gorup_mask
Returns
-------
self.tr_X : numpy.ndarray
Data, hdf5 file
self.tr_X_col_names : numpy.ndarray
X features name list
self.tr_Other : list
at least contain y, ID, numpy.ndarray or other Group mask
Examples
--------
>>> from imagen_posthocloader import *
>>> DATA = HDF5_loader()
>>> tr_X, tr_X_col_names, tr_Other = DATA.get_train_data(
... 'H5_DIR') # DATA
"""
data = h5.File(self.DATA_DIR+"/h5files/"+H5_DIR, 'r')
print(data.keys(), data.attrs.keys())
X = data['X'][()]
X_col = data.attrs['X_col_names']
X_col_names = np.array([i.replace(")","") for i in X_col])
self.tr_X = X
self.tr_X_col_names = X_col_names
y = data[data.attrs['labels'][0]][()]
ID = data['i'][()]
if group == True:
sex_mask = data['sex'].astype(bool)[()]
class_mask = data['Binge'][()].astype(bool)
self.tr_Other = [y, ID, sex_mask, class_mask]
else:
self.tr_Other = [y, ID]
X.shape, len(X_col_names)
return self.tr_X, self.tr_X_col_names, self.tr_Other
def get_holdout_data(self, H5_DIR, group=False):
""" Load the holdout data
Parameters
----------
H5_DIR : string
Directory saved File path
group : boolean
If True then generate the gorup_mask
Returns
-------
self.ho_X : numpy.ndarray
Data, hdf5 file
self.ho_X_col_names : numpy.ndarray
X features name list
self.ho_Other : list
at least contain y, ID, numpy.ndarray or other Group mask
Examples
--------
>>> from imagen_posthocloader import *
>>> DATA = HDF5_loader()
>>> ho_X, ho_X_col_names, ho_Other = DATA.get_train_data(
... 'H5_DIR') # DATA
"""
data = h5.File(self.DATA_DIR+"/h5files/"+H5_DIR, 'r')
# print(data.keys(), data.attrs.keys())
X = data['X'][()]
X_col = data.attrs['X_col_names']
X_col_names = np.array([i.replace(")","") for i in X_col])
self.ho_X = X
self.ho_X_col_names = X_col_names
y = data[data.attrs['labels'][0]][()]
ID | |
[0-9]+ hold"
)
error_mapping = {
"The patron does not have the book on hold" : NotOnHold,
"The patron has no eBooks checked out" : NotCheckedOut,
}
    def process_all(self, string):
        """Turn an error document into a single exception object.

        :param string: The body of an error response (bytes or text).
        :return: The first error parsed out of the document, or a
            RemoteInitiatedServerError if the document could not be
            interpreted as an error at all.
        """
        try:
            # The superclass yields one parsed error per //Error tag;
            # we only care about the first one.
            for i in super(ErrorParser, self).process_all(
                string, "//Error"):
                return i
        except Exception as e:
            # The server sent us an error with an incorrect or
            # nonstandard syntax.
            return RemoteInitiatedServerError(
                string, BibliothecaAPI.SERVICE_NAME
            )

        # We were not able to interpret the result as an error.
        # The most likely cause is that the Bibliotheca app server is down.
        return RemoteInitiatedServerError(
            "Unknown error", BibliothecaAPI.SERVICE_NAME,
        )
    def process_one(self, error_tag, namespaces):
        """Map a single <Error> tag to the most specific exception we know.

        :return: An exception instance -- one of the mapped circulation
            exceptions, a RemoteInitiatedServerError, or a generic
            BibliothecaException as the fallback.
        """
        message = self.text_of_optional_subtag(error_tag, "Message")
        if not message:
            return RemoteInitiatedServerError(
                "Unknown error", BibliothecaAPI.SERVICE_NAME,
            )

        # Exact matches for known messages come first.
        if message in self.error_mapping:
            return self.error_mapping[message](message)
        if message in ('Authentication failed', 'Unknown error'):
            # 'Unknown error' is an unknown error on the Bibliotheca side.
            #
            # 'Authentication failed' could _in theory_ be an error on
            # our side, but if authentication is set up improperly we
            # actually get a 401 and no body. When we get a real error
            # document with 'Authentication failed', it's always a
            # transient error on the Bibliotheca side. Possibly some
            # authentication internal to Bibliotheca has failed? Anyway, it
            # happens relatively frequently.
            return RemoteInitiatedServerError(
                message, BibliothecaAPI.SERVICE_NAME
            )

        # Then regex-based matches against the class-level patterns.
        m = self.loan_limit_reached.search(message)
        if m:
            return PatronLoanLimitReached(message)

        m = self.hold_limit_reached.search(message)
        if m:
            return PatronHoldLimitReached(message)

        m = self.wrong_status.search(message)
        if not m:
            return BibliothecaException(message)
        actual, expected = m.groups()
        expected = expected.split(",")

        # Map the (actual state, expected states) combination onto a
        # specific exception. The order of these checks matters.
        if actual == 'CAN_WISH':
            return NoLicenses(message)

        if 'CAN_LOAN' in expected and actual == 'CAN_HOLD':
            return NoAvailableCopies(message)

        if 'CAN_LOAN' in expected and actual == 'HOLD':
            return AlreadyOnHold(message)

        if 'CAN_LOAN' in expected and actual == 'LOAN':
            return AlreadyCheckedOut(message)

        if 'CAN_HOLD' in expected and actual == 'CAN_LOAN':
            return CurrentlyAvailable(message)

        if 'CAN_HOLD' in expected and actual == 'HOLD':
            return AlreadyOnHold(message)

        if 'CAN_HOLD' in expected:
            return CannotHold(message)

        if 'CAN_LOAN' in expected:
            return CannotLoan(message)

        return BibliothecaException(message)
class PatronCirculationParser(BibliothecaParser):

    """Parse Bibliotheca's patron circulation status document into a list of
    LoanInfo and HoldInfo objects.
    """

    # Every identifier in this document is Bibliotheca's own ID.
    id_type = Identifier.BIBLIOTHECA_ID

    def __init__(self, collection, *args, **kwargs):
        """:param collection: The Collection the loans and holds belong to."""
        super(PatronCirculationParser, self).__init__(*args, **kwargs)
        self.collection = collection

    def process_all(self, string):
        """Parse a full circulation document.

        :return: A list of LoanInfo and HoldInfo objects. Reserves are
            represented as HoldInfo with hold_position 0.
        """
        parser = etree.XMLParser()
        # If the data is an HTTP response, it is a bytestring and
        # must be converted before it is parsed.
        if isinstance(string, bytes):
            string = string.decode("utf-8")
        root = etree.parse(StringIO(string), parser)
        sup = super(PatronCirculationParser, self)
        loans = sup.process_all(
            root, "//Checkouts/Item", handler=self.process_one_loan)
        holds = sup.process_all(
            root, "//Holds/Item", handler=self.process_one_hold)
        reserves = sup.process_all(
            root, "//Reserves/Item", handler=self.process_one_reserve)

        everything = itertools.chain(loans, holds, reserves)
        # process_one returns None for items no longer in the collection;
        # filter those out.
        return [x for x in everything if x]

    def process_one_loan(self, tag, namespaces):
        return self.process_one(tag, namespaces, LoanInfo)

    def process_one_hold(self, tag, namespaces):
        return self.process_one(tag, namespaces, HoldInfo)

    def process_one_reserve(self, tag, namespaces):
        # A reserve is modeled as a hold at position 0.
        hold_info = self.process_one(tag, namespaces, HoldInfo)
        hold_info.hold_position = 0
        return hold_info

    def process_one(self, tag, namespaces, source_class):
        """Build one LoanInfo or HoldInfo from a single <Item> tag."""
        if not tag.xpath("ItemId"):
            # This happens for events associated with books
            # no longer in our collection.
            return None

        def datevalue(key):
            # All dates in this document are expressed in UTC.
            value = self.text_of_subtag(tag, key)
            return strptime_utc(
                value, BibliothecaAPI.ARGUMENT_TIME_FORMAT
            )

        identifier = self.text_of_subtag(tag, "ItemId")
        start_date = datevalue("EventStartDateInUTC")
        end_date = datevalue("EventEndDateInUTC")
        a = [self.collection, DataSource.BIBLIOTHECA, self.id_type, identifier,
             start_date, end_date]
        if source_class is HoldInfo:
            hold_position = self.int_of_subtag(tag, "Position")
            a.append(hold_position)
        else:
            # Fulfillment info -- not available from this API
            a.append(None)
        return source_class(*a)
class DateResponseParser(BibliothecaParser):
    """Extract a single date from a response document.

    Subclasses set RESULT_TAG_NAME (the document root) and DATE_TAG_NAME
    (the child element containing the date).
    """

    RESULT_TAG_NAME = None
    DATE_TAG_NAME = None

    def process_all(self, string):
        # HTTP response bodies arrive as bytestrings; decode before parsing.
        if isinstance(string, bytes):
            string = string.decode("utf-8")
        root = etree.parse(StringIO(string), etree.XMLParser())

        matches = root.xpath(
            "/%s/%s" % (self.RESULT_TAG_NAME, self.DATE_TAG_NAME))
        if not matches:
            return None
        date_text = matches[0].text
        if not date_text:
            return None
        return strptime_utc(date_text, EventParser.INPUT_TIME_FORMAT)
class CheckoutResponseParser(DateResponseParser):

    """Extract due date from a checkout response."""
    RESULT_TAG_NAME = "CheckoutResult"  # document root element
    DATE_TAG_NAME = "DueDateInUTC"      # element holding the due date
class HoldResponseParser(DateResponseParser):

    """Extract availability date from a hold response."""
    RESULT_TAG_NAME = "PlaceHoldResult"     # document root element
    DATE_TAG_NAME = "AvailabilityDateInUTC" # element holding the date
class EventParser(BibliothecaParser):

    """Parse Bibliotheca's event file format into our native event objects."""

    EVENT_SOURCE = "Bibliotheca"

    # The delivery mechanism is fixed at borrow time.
    SET_DELIVERY_MECHANISM_AT = BaseCirculationAPI.BORROW_STEP

    # Map Bibliotheca's event names to our names.
    EVENT_NAMES = {
        "CHECKOUT" : CirculationEvent.DISTRIBUTOR_CHECKOUT,
        "CHECKIN" : CirculationEvent.DISTRIBUTOR_CHECKIN,
        "HOLD" : CirculationEvent.DISTRIBUTOR_HOLD_PLACE,
        "RESERVED" : CirculationEvent.DISTRIBUTOR_AVAILABILITY_NOTIFY,
        "PURCHASE" : CirculationEvent.DISTRIBUTOR_LICENSE_ADD,
        "REMOVED" : CirculationEvent.DISTRIBUTOR_LICENSE_REMOVE,
    }

    def process_all(self, string, no_events_error=False):
        """Yield one parsed event per <CloudLibraryEvent> tag.

        :param no_events_error: If True, raise when the document contains
            no events at all.
        :raise RemoteInitiatedServerError: If no events were found and
            no_events_error is True.
        """
        has_events = False
        for i in super(EventParser, self).process_all(
                string, "//CloudLibraryEvent"):
            yield i
            has_events = True

        # If we are catching up on events and we expect to have a time
        # period where there are no events, we don't want to consider that
        # action as an error. By default, not having events is not
        # considered to be an error.
        if not has_events and no_events_error:
            # An empty list of events may mean nothing happened, or it
            # may indicate an unreported server-side error. To be
            # safe, we'll treat this as a server-initiated error
            # condition. If this is just a slow day, normal behavior
            # will resume as soon as something happens.
            raise RemoteInitiatedServerError(
                "No events returned from server. This may not be an error, but treating it as one to be safe.",
                BibliothecaAPI.SERVICE_NAME
            )

    def process_one(self, tag, namespaces):
        """Parse one event tag into a plain tuple.

        :return: (bibliotheca_id, isbn, patron_id, start_time, end_time,
            internal_event_type)
        """
        isbn = self.text_of_subtag(tag, "ISBN")
        bibliotheca_id = self.text_of_subtag(tag, "ItemId")
        patron_id = self.text_of_optional_subtag(tag, "PatronId")

        start_time = self.date_from_subtag(tag, "EventStartDateTimeInUTC")
        end_time = self.date_from_subtag(
            tag, "EventEndDateTimeInUTC", required=False
        )

        # NOTE(review): an EventType missing from EVENT_NAMES raises
        # KeyError here -- confirm that is intended for new event types.
        bibliotheca_event_type = self.text_of_subtag(tag, "EventType")
        internal_event_type = self.EVENT_NAMES[bibliotheca_event_type]

        return (bibliotheca_id, isbn, patron_id, start_time, end_time,
                internal_event_type)
class BibliothecaCirculationSweep(IdentifierSweepMonitor):
"""Check on the current circulation status of each Bibliotheca book in our
collection.
In some cases this will lead to duplicate events being logged,
because this monitor and the main Bibliotheca circulation monitor will
count the same event. However it will greatly improve our current
view of our Bibliotheca circulation, which is more important.
If Bibliotheca has updated its metadata for a book, that update will
also take effect during the circulation sweep.
If a Bibliotheca license has expired, and we didn't hear about it for
whatever reason, we'll find out about it here, because Bibliotheca
will act like they never heard of it.
"""
SERVICE_NAME = "Bibliotheca Circulation Sweep"
DEFAULT_BATCH_SIZE = 25
PROTOCOL = ExternalIntegration.BIBLIOTHECA
    def __init__(self, _db, collection, api_class=BibliothecaAPI, **kwargs):
        """Constructor.

        :param _db: Ignored -- immediately replaced by the session derived
            from `collection`. NOTE(review): the parameter is kept only for
            signature compatibility; confirm callers expect this.
        :param collection: Sweep the identifiers of this Collection.
        :param api_class: Either the BibliothecaAPI class (it will be
            instantiated) or an already-built instance -- presumably for
            testing; verify.
        """
        _db = Session.object_session(collection)
        super(BibliothecaCirculationSweep, self).__init__(
            _db, collection, **kwargs
        )
        if isinstance(api_class, BibliothecaAPI):
            # A ready-made instance was passed in.
            self.api = api_class
        else:
            self.api = api_class(_db, collection)
        self.replacement_policy = BibliothecaAPI.replacement_policy(_db)
        self.analytics = self.replacement_policy.analytics
def process_items(self, identifiers):
identifiers_by_bibliotheca_id = dict()
bibliotheca_ids = set()
for identifier in identifiers:
bibliotheca_ids.add(identifier.identifier)
identifiers_by_bibliotheca_id[identifier.identifier] = identifier
identifiers_not_mentioned_by_bibliotheca = set(identifiers)
now = utc_now()
for metadata in self.api.bibliographic_lookup(bibliotheca_ids):
self._process_metadata(
metadata, identifiers_by_bibliotheca_id,
identifiers_not_mentioned_by_bibliotheca,
)
# At this point there may be some license pools left over
# that Bibliotheca doesn't know about. This is a pretty reliable
# indication that we no longer own any licenses to the
# book.
for identifier in identifiers_not_mentioned_by_bibliotheca:
pools = [lp for lp in identifier.licensed_through
if lp.data_source.name==DataSource.BIBLIOTHECA
and lp.collection == self.collection]
if pools:
[pool] = pools
else:
continue
if pool.licenses_owned > 0:
self.log.warn(
"Removing %s from circulation.",
identifier.identifier
)
pool.update_availability(0, 0, 0, 0, self.analytics, as_of=now)
def _process_metadata(
self, metadata, identifiers_by_bibliotheca_id,
identifiers_not_mentioned_by_bibliotheca
):
"""Process a single Metadata object (containing CirculationData)
retrieved from Bibliotheca.
"""
bibliotheca_id = metadata.primary_identifier.identifier
identifier = identifiers_by_bibliotheca_id[bibliotheca_id]
if identifier in identifiers_not_mentioned_by_bibliotheca:
# Bibliotheca mentioned this identifier. Remove it from
# this list so we know the title is still in the collection.
identifiers_not_mentioned_by_bibliotheca.remove(identifier)
edition, is_new = metadata.edition(self._db)
pool, is_new = metadata.circulation.license_pool(self._db, self.collection)
if is_new:
# We didn't have a license pool for this work. That
# shouldn't happen--how did we know about the
# identifier?--but now we do.
for library in self.collection.libraries:
self.analytics.collect_event(
library, pool, CirculationEvent.DISTRIBUTOR_TITLE_ADD,
utc_now()
)
edition, ignore = metadata.apply(edition, collection=self.collection,
replace=self.replacement_policy)
class BibliothecaTimelineMonitor(CollectionMonitor, TimelineMonitor):
"""Common superclass for our two TimelineMonitors."""
PROTOCOL = ExternalIntegration.BIBLIOTHECA
LOG_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
def __init__(self, _db, collection, api_class=BibliothecaAPI,
analytics=None):
"""Initializer.
:param _db: Database session object.
:param collection: Collection for which this monitor operates.
:param api_class: API class or an instance thereof for this monitor.
:type api_class: Union[Type[BibliothecaAPI], BibliothecaAPI]
:param analytics: An optional Analytics object.
:type analytics: Optional[Analytics]
"""
self.analytics = analytics or Analytics(_db)
super(BibliothecaTimelineMonitor, self).__init__(_db, collection)
if isinstance(api_class, BibliothecaAPI):
# We were given an actual API object. Just | |
# <gh_stars>0
# -*- coding: utf-8 -*-
import numpy as np
import time
import hashlib
import glob
import os
import progressbar
import cv2
from auto_pose.renderer import renderer
from .pysixd_stuff import transform
from .pysixd_stuff import view_sampler
from .utils import lazy_property
class Dataset(object):
def __init__(self, dataset_path, **kw):
    """Allocate buffers for training/background images and build the renderer.

    Args:
        dataset_path (str): directory where cached .npz/.npy files live.
        **kw: flat config mapping (h, w, c, noof_training_imgs,
            background_images_glob, noof_bg_imgs, render_dims, k,
            clip_near, clip_far, model_path, realistic_occlusion, ...).

    SECURITY NOTE: several config values are eval()'d; the config file
    must come from a trusted source.
    """
    self.shape = (int(kw['h']), int(kw['w']), int(kw['c']))
    self.noof_training_imgs = int(kw['noof_training_imgs'])
    self.dataset_path = dataset_path
    self.bg_img_paths = glob.glob(kw['background_images_glob'])
    self.noof_bg_imgs = min(int(kw['noof_bg_imgs']), len(self.bg_img_paths))
    self._kw = kw
    self.train_x = np.empty((self.noof_training_imgs,) + self.shape, dtype=np.uint8)
    self.mask_x = np.empty((self.noof_training_imgs,) + self.shape[:2], dtype=bool)
    # NOTE(review): this is overwritten with integer pixel counts in
    # get_training_images; bool dtype here looks like a copy/paste slip.
    self.noof_obj_pixels = np.empty((self.noof_training_imgs,), dtype=bool)
    self.train_y = np.empty((self.noof_training_imgs,) + self.shape, dtype=np.uint8)
    self.bg_imgs = np.empty((self.noof_bg_imgs,) + self.shape, dtype=np.uint8)

    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    if float(eval(self._kw['realistic_occlusion'])):
        # Touch the lazy property so the occlusion masks are preloaded.
        self.random_syn_masks

    # initialize renderer
    render_dims = eval(self._kw['render_dims'])
    K = np.array(eval(self._kw['k'])).reshape(3, 3)
    clip_near = float(kw['clip_near']) / 1000.0
    clip_far = float(kw['clip_far']) / 1000.0
    self.renderer = renderer.Renderer(kw['model_path'], render_dims[0], render_dims[1],
                                      K[0, 0], K[1, 1], K[0, 2], K[1, 2],
                                      clip_near, clip_far)
@lazy_property
def viewsphere_for_embedding(self):
    """Rotation matrices for the embedding: sampled view-sphere poses,
    each combined with `num_cyclo` in-plane rotations."""
    cfg = self._kw
    n_cyclo = int(cfg['num_cyclo'])
    views, _ = view_sampler.sample_views(
        int(cfg['min_n_views']),
        float(cfg['radius']),
        (0, 2 * np.pi),             # azimuth range
        (-0.5 * np.pi, 0.5 * np.pi) # elevation range
    )
    rotations = np.empty((len(views) * n_cyclo, 3, 3))
    idx = 0
    for view in views:
        for angle in np.linspace(0, 2. * np.pi, n_cyclo):
            c = np.cos(-angle)
            s = np.sin(-angle)
            in_plane = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
            rotations[idx, :, :] = in_plane.dot(view['R'])
            idx += 1
    return rotations
def get_training_images(self, dataset_path, args):
    """Load cached training data if present, otherwise render and cache it.

    The cache file name is an MD5 of the 'Dataset' and 'Paths' config
    sections, so any config change invalidates the cache.
    """
    config_repr = str(args.items('Dataset') + args.items('Paths'))
    cache_name = hashlib.md5(config_repr.encode('utf-8')).hexdigest() + '.npz'
    cache_file = os.path.join(dataset_path, cache_name)
    if os.path.exists(cache_file):
        cached = np.load(cache_file)
        self.train_x = cached['train_x'].astype(np.uint8)
        self.mask_x = cached['mask_x']
        self.train_y = cached['train_y'].astype(np.uint8)
    else:
        self.render_training_images()
        np.savez(cache_file, train_x=self.train_x, mask_x=self.mask_x,
                 train_y=self.train_y)
    # mask_x is True on background, so object pixels are the zeros.
    self.noof_obj_pixels = np.count_nonzero(self.mask_x == 0, axis=(1, 2))
    print('loaded %s training images' % len(self.train_x))
def get_sprite_training_images(self, train_args):
    """Build (input, canonical target) pairs from a sprites archive.

    Loads the archive at Paths/MODEL_PATH, takes the first 245760 latent
    rows (presumably the heart shape -- TODO confirm), and pairs each
    image with its pose-normalized counterpart.
    """
    archive_path = train_args.get('Paths', 'MODEL_PATH')
    archive = np.load(archive_path)
    images = archive['imgs']
    latents_values = archive['latents_values']
    latents_classes = archive['latents_classes']
    meta = archive['metadata'][()]
    sizes = meta['latents_sizes']
    # Mixed-radix bases that convert a latent-class vector to a flat index.
    bases = np.concatenate((sizes[::-1].cumprod()[::-1][1:],
                            np.array([1, ])))

    def to_index(latents):
        return np.dot(latents, bases).astype(int)

    hearts = latents_classes[:245760]
    canonical = hearts.copy()
    # Fix color, shape, scale and position to one canonical setting,
    # leaving only orientation free.
    canonical[:, 0] = 0
    canonical[:, 1] = 0
    canonical[:, 2] = 5
    canonical[:, 4] = 16
    canonical[:, 5] = 16
    target_imgs = images[to_index(canonical)]
    input_imgs = images[to_index(hearts)]
    self.train_x = np.expand_dims(input_imgs, 3) * 255
    self.train_y = np.expand_dims(target_imgs, 3) * 255
# def get_embedding_images(self, dataset_path, args):
# current_config_hash = hashlib.md5(str(args.items('Embedding') + args.items('Dataset')+args.items('Paths'))).hexdigest()
# current_file_name = os.path.join(dataset_path, current_config_hash + '.npz')
# if os.path.exists(current_file_name):
# embedding_data = np.load(current_file_name)
# self.embedding_data = embedding_data.astype(np.uint8)
# else:
# self.render_embedding_images()
# np.savez(current_file_name, train_x = self.train_x, mask_x = self.mask_x, train_y = self.train_y)
# print 'loaded %s training images' % len(self.train_x)
def load_bg_images(self, dataset_path):
    """Load (or build and cache) the pool of random background crops.

    Args:
        dataset_path (str): directory holding the cached .npy file.
    """
    # Cache key covers image shape, count, and the glob pattern.
    current_config_hash = hashlib.md5((str(self.shape) + str(self.noof_bg_imgs) + str(self._kw['background_images_glob'])).encode('utf-8')).hexdigest()
    current_file_name = os.path.join(dataset_path, current_config_hash +'.npy')
    if os.path.exists(current_file_name):
        self.bg_imgs = np.load(current_file_name)
    else:
        file_list = self.bg_img_paths[:self.noof_bg_imgs]
        from random import shuffle
        shuffle(file_list)

        for j,fname in enumerate(file_list):
            print('loading bg img %s/%s' % (j,self.noof_bg_imgs))
            bgr = cv2.imread(fname)
            H,W = bgr.shape[:2]
            # Pick a random crop window of the training image size.
            y_anchor = int(np.random.rand() * (H-self.shape[0]))
            x_anchor = int(np.random.rand() * (W-self.shape[1]))
            # bgr = cv2.resize(bgr, self.shape[:2])
            bgr = bgr[y_anchor:y_anchor+self.shape[0],x_anchor:x_anchor+self.shape[1],:]
            if bgr.shape[0]!=self.shape[0] or bgr.shape[1]!=self.shape[1]:
                # NOTE(review): skipping leaves self.bg_imgs[j] as
                # uninitialized np.empty garbage -- confirm acceptable.
                continue
            if self.shape[2] == 1:
                # Grayscale training: collapse channels but keep 3-D shape.
                bgr = cv2.cvtColor(np.uint8(bgr), cv2.COLOR_BGR2GRAY)[:,:,np.newaxis]
            self.bg_imgs[j] = bgr
        np.save(current_file_name,self.bg_imgs)
    print('loaded %s bg images' % self.noof_bg_imgs)
def render_rot(self, R, t=None, downSample=1):
    """Render the object under rotation R and return a square BGR crop
    resized to the training image size.

    Args:
        R: 3x3 rotation matrix.
        t: ignored -- the translation is always rebuilt from the
           configured radius (NOTE(review): confirm intended).
        downSample: integer factor applied to render size and intrinsics.
    """
    kw = self._kw
    render_dims = eval(kw['render_dims'])
    K = np.array(eval(kw['k'])).reshape(3, 3)
    # Scale the intrinsics along with the render resolution.
    K[:2, :] = K[:2, :] / downSample
    clip_near = float(kw['clip_near'])
    clip_far = float(kw['clip_far'])
    pad_factor = float(kw['pad_factor'])
    t = np.array([0, 0, float(kw['radius'])])

    bgr_y, depth_y = self.renderer.render(
        obj_id=0,
        W=render_dims[0] / downSample,
        H=render_dims[1] / downSample,
        K=K.copy(),
        R=R,
        t=t,
        near=clip_near,
        far=clip_far,
        random_light=False
    )

    ys, xs = np.nonzero(depth_y > 0)
    obj_bb = view_sampler.calc_2d_bbox(xs, ys, render_dims)
    x, y, w, h = np.array(obj_bb).astype(np.int32)

    # Square crop of side max(w, h) * pad_factor, clamped to the image.
    size = int(np.maximum(h, w) * pad_factor)
    left = int(np.maximum(x + w / 2 - size / 2, 0))
    right = int(np.minimum(x + w / 2 + size / 2, bgr_y.shape[1]))
    top = int(np.maximum(y + h / 2 - size / 2, 0))
    bottom = int(np.minimum(y + h / 2 + size / 2, bgr_y.shape[0]))
    bgr_y = bgr_y[top:bottom, left:right]

    # cv2.resize expects dsize as (width, height); self.shape[:2] is
    # (height, width), which silently transposed non-square targets.
    return cv2.resize(bgr_y, (self.shape[1], self.shape[0]))
def render_training_images(self):
    """Render all (augmented input, clean target) training pairs in place.

    Fills self.train_x, self.mask_x and self.train_y.
    """
    kw = self._kw
    H, W = int(kw['h']), int(kw['w'])
    render_dims = eval(kw['render_dims'])
    K = eval(kw['k'])
    K = np.array(K).reshape(3,3)
    clip_near = float(kw['clip_near'])
    clip_far = float(kw['clip_far'])
    pad_factor = float(kw['pad_factor'])
    max_rel_offset = float(kw['max_rel_offset'])
    t = np.array([0, 0, float(kw['radius'])])

    widgets = ['Render training images: ', progressbar.Percentage(),
         ' ', progressbar.Bar(),
         ' ', progressbar.Counter(), ' / %s' % self.noof_training_imgs,
         ' ', progressbar.ETA(), ' ']
    bar = progressbar.ProgressBar(maxval=self.noof_training_imgs,widgets=widgets)
    bar.start()
    for i in np.arange(self.noof_training_imgs):
        bar.update(i)
        print("Image " + str(i) + " / " + str(self.noof_training_imgs))
        # start_time = time.time()
        z = - float(kw['radius']) / 1000.0
        R = transform.random_rotation_matrix()[:3,:3]
        # NOTE(review): positional (R, z) call differs from the keyword
        # signature used elsewhere (obj_id/W/H/K/...); confirm the
        # renderer supports both.
        bgr_x, depth_x = self.renderer.render(R, z, random_light = True)
        bgr_y, depth_y = self.renderer.render(R, z, random_light = False)

        ys, xs = np.nonzero(depth_x > 0)
        try:
            obj_bb = view_sampler.calc_2d_bbox(xs, ys, render_dims)
        except ValueError as e:
            # NOTE(review): `break` abandons the remaining images, leaving
            # them uninitialized (np.empty garbage) -- confirm intended.
            print('Object in Rendering not visible.')
            break
        x, y, w, h = obj_bb

        # Jitter the crop so the object is not always perfectly centered.
        rand_trans_x = np.random.uniform(-max_rel_offset, max_rel_offset) * w
        rand_trans_y = np.random.uniform(-max_rel_offset, max_rel_offset) * h
        obj_bb_off = obj_bb + np.array([rand_trans_x,rand_trans_y,0,0])

        bgr_x = self.extract_square_patch(bgr_x, obj_bb_off, pad_factor,resize=(W,H),interpolation = cv2.INTER_NEAREST)
        depth_x = self.extract_square_patch(depth_x, obj_bb_off, pad_factor,resize=(W,H),interpolation = cv2.INTER_NEAREST)
        # True where there is no object (background pixels).
        mask_x = depth_x == 0.

        # The target crop is centered on the un-jittered bounding box.
        ys, xs = np.nonzero(depth_y > 0)
        obj_bb = view_sampler.calc_2d_bbox(xs, ys, render_dims)
        bgr_y = self.extract_square_patch(bgr_y, obj_bb, pad_factor,resize=(W,H),interpolation = cv2.INTER_NEAREST)

        if self.shape[2] == 1:
            bgr_x = cv2.cvtColor(np.uint8(bgr_x), cv2.COLOR_BGR2GRAY)[:,:,np.newaxis]
            bgr_y = cv2.cvtColor(np.uint8(bgr_y), cv2.COLOR_BGR2GRAY)[:,:,np.newaxis]

        self.train_x[i] = bgr_x.astype(np.uint8)
        self.mask_x[i] = mask_x
        self.train_y[i] = bgr_y.astype(np.uint8)
        #print 'rendertime ', render_time, 'processing ', time.time() - start_time
    bar.finish()
def render_embedding_image_batch(self, start, end):
    """Render clean views [start, end) of the embedding view sphere.

    Returns:
        tuple: (batch, obj_bbs) -- images scaled to [0, 1] and the 2-D
        bounding box each patch was cropped from.
    """
    kw = self._kw
    h, w = self.shape[:2]
    # NOTE(review): azimuth_range, elev_range and radius are unused here;
    # the poses come from viewsphere_for_embedding.
    azimuth_range = (0, 2 * np.pi)
    elev_range = (-0.5 * np.pi, 0.5 * np.pi)
    radius = float(kw['radius'])
    render_dims = eval(kw['render_dims'])
    K = eval(kw['k'])
    K = np.array(K).reshape(3,3)

    clip_near = float(kw['clip_near'])
    clip_far = float(kw['clip_far'])
    pad_factor = float(kw['pad_factor'])

    t = np.array([0, 0, float(kw['radius'])])
    batch = np.empty( (end-start,)+ self.shape)
    obj_bbs = np.empty( (end-start,)+ (4,))
    for i, R in enumerate(self.viewsphere_for_embedding[start:end]):
        bgr_y, depth_y = self.renderer.render(
            obj_id=0,
            W=render_dims[0],
            H=render_dims[1],
            K=K.copy(),
            R=R,
            t=t,
            near=clip_near,
            far=clip_far,
            random_light=False
        )
        ys, xs = np.nonzero(depth_y > 0)
        obj_bb = view_sampler.calc_2d_bbox(xs, ys, render_dims)
        obj_bbs[i] = obj_bb

        # NOTE(review): resize=self.shape[:2] passes (height, width) where
        # cv2.resize expects (width, height) -- harmless only for square
        # training images; confirm.
        resized_bgr_y = self.extract_square_patch(bgr_y, obj_bb, pad_factor,resize=self.shape[:2],interpolation = cv2.INTER_NEAREST)
        if self.shape[2] == 1:
            resized_bgr_y = cv2.cvtColor(resized_bgr_y, cv2.COLOR_BGR2GRAY)[:,:,np.newaxis]
        batch[i] = resized_bgr_y / 255.
    return (batch, obj_bbs)
def extract_square_patch(self, scene_img, bb_xywh, pad_factor, resize=(128, 128), interpolation=cv2.INTER_NEAREST, black_borders=False):
    """Cut a square, padded patch around a bounding box and resize it.

    Args:
        scene_img: source image, H x W (x C).
        bb_xywh: bounding box as (x, y, w, h).
        pad_factor: square side is max(w, h) * pad_factor.
        resize: output size for cv2.resize.
        interpolation: cv2 interpolation flag.
        black_borders: zero out pixels outside the original box.

    Returns:
        The cropped and resized patch.
    """
    x, y, w, h = np.array(bb_xywh).astype(np.int32)
    side = int(np.maximum(h, w) * pad_factor)

    # Center the square on the box center, clamped to the image bounds.
    cx = x + w / 2
    cy = y + h / 2
    left = int(np.maximum(cx - side / 2, 0))
    right = int(np.minimum(cx + side / 2, scene_img.shape[1]))
    top = int(np.maximum(cy - side / 2, 0))
    bottom = int(np.minimum(cy + side / 2, scene_img.shape[0]))

    patch = scene_img[top:bottom, left:right].copy()
    if black_borders:
        patch[:(y - top), :] = 0
        patch[(y + h - top):, :] = 0
        patch[:, :(x - left)] = 0
        patch[:, (x + w - left):] = 0
    return cv2.resize(patch, resize, interpolation=interpolation)
@property
def embedding_size(self):
    # Number of discrete viewpoints in the embedding codebook.
    return len(self.viewsphere_for_embedding)
@lazy_property
def _aug(self):
    """Image augmenter built by evaluating the 'code' config string.

    SECURITY NOTE: the config string is eval()'d, so the training config
    must come from a trusted source.
    """
    # All augmenter names are imported locally so the eval'd config
    # expression can reference any of them unqualified.
    from imgaug.augmenters import Sequential,SomeOf,OneOf,Sometimes,WithColorspace,WithChannels, \
        Noop,Lambda,AssertLambda,AssertShape,Scale,CropAndPad, \
        Pad,Crop,Fliplr,Flipud,Superpixels,ChangeColorspace, PerspectiveTransform, \
        Grayscale,GaussianBlur,AverageBlur,MedianBlur,Convolve, \
        Sharpen,Emboss,EdgeDetect,DirectedEdgeDetect,Add,AddElementwise, \
        AdditiveGaussianNoise,Multiply,MultiplyElementwise,Dropout, \
        CoarseDropout,Invert,ContrastNormalization,Affine,PiecewiseAffine, \
        ElasticTransformation
    return eval(self._kw['code'])
@lazy_property
def _aug_occl(self):
    """Augmenter that cuts coarse random rectangles out of masks
    (used for square-occlusion augmentation).

    Only the three augmenters actually used are imported; the previous
    wildcard-style import list was dead weight.
    """
    from imgaug.augmenters import Sequential, Sometimes, CoarseDropout
    return Sequential([Sometimes(0.7, CoarseDropout(p=0.4, size_percent=0.01))])
@lazy_property
def random_syn_masks(self):
    """Load pre-rendered binary occlusion masks from the workspace.

    Returns:
        np.ndarray: float32 masks resized to the training image size.
    """
    import bitarray
    workspace_path = os.environ.get('AE_WORKSPACE_PATH')
    packed = bitarray.bitarray()
    # The mask file is binary data: it must be opened in 'rb' mode
    # (text mode breaks bitarray.fromfile under Python 3).
    with open(os.path.join(workspace_path, 'random_tless_masks/arbitrary_syn_masks_1000.bin'), 'rb') as fh:
        packed.fromfile(fh)
    # np.fromstring and the np.bool alias are deprecated/removed; use
    # np.frombuffer and the builtin bool, which behave identically here.
    occlusion_masks = np.frombuffer(packed.unpack(), dtype=bool)
    occlusion_masks = occlusion_masks.reshape(-1, 224, 224, 1).astype(np.float32)
    print(occlusion_masks.shape)
    # NOTE(review): cv2.resize expects (width, height); (shape[0], shape[1])
    # is (height, width) -- harmless only for square training images.
    occlusion_masks = np.array([
        cv2.resize(mask, (self.shape[0], self.shape[1]), interpolation=cv2.INTER_NEAREST)
        for mask in occlusion_masks
    ])
    return occlusion_masks
def augment_occlusion_mask(self, masks, verbose=False, min_trans=0.2, max_trans=0.7, max_occl=0.25, min_occl=0.0):
    """Occlude each object mask with a randomly translated synthetic mask.

    Args:
        masks: boolean masks; in this codebase True marks background
            (object pixels are the zeros).
        verbose: print the achieved occlusion ratio per mask.
        min_trans/max_trans: translation magnitude as a fraction of size.
        max_occl/min_occl: accepted occlusion-ratio bounds (exclusive).

    Returns:
        np.ndarray: boolean masks with occlusions XOR-ed in.
    """
    # The np.bool alias was removed from NumPy; builtin bool is identical.
    new_masks = np.zeros_like(masks, dtype=bool)
    occl_masks_batch = self.random_syn_masks[np.random.choice(len(self.random_syn_masks), len(masks))]
    for idx, mask in enumerate(masks):
        occl_mask = occl_masks_batch[idx]
        # Rejection-sample translations until the occlusion ratio falls in
        # (min_occl, max_occl). NOTE(review): loops forever if the bounds
        # can never be satisfied for a mask.
        while True:
            trans_x = int(np.random.choice([-1, 1]) * (np.random.rand() * (max_trans - min_trans) + min_trans) * occl_mask.shape[0])
            trans_y = int(np.random.choice([-1, 1]) * (np.random.rand() * (max_trans - min_trans) + min_trans) * occl_mask.shape[1])
            M = np.float32([[1, 0, trans_x], [0, 1, trans_y]])
            transl_occl_mask = cv2.warpAffine(occl_mask, M, (occl_mask.shape[0], occl_mask.shape[1]))

            # Occluded object pixels: object area AND translated occluder.
            overlap_matrix = np.invert(mask.astype(bool)) * transl_occl_mask.astype(bool)
            overlap = len(overlap_matrix[overlap_matrix == True]) / float(len(mask[mask == 0]))

            if overlap < max_occl and overlap > min_occl:
                new_masks[idx, ...] = np.logical_xor(mask.astype(bool), overlap_matrix)
                if verbose:
                    print('overlap is ', overlap)
                break
    return new_masks
def augment_squares(self, masks, rand_idcs, max_occl=0.25):
    """Apply coarse-dropout occlusion to the masks, re-sampling until every
    mask keeps at least (1 - max_occl) of its object pixels.

    Args:
        masks: boolean masks (True = background).
        rand_idcs: indices into self.noof_obj_pixels for the batch.
        max_occl: maximum allowed fraction of occluded object pixels.

    Returns:
        np.ndarray: augmented boolean masks (same convention as input).
    """
    # Work on inverted masks so nonzero counts are object pixels.
    new_masks = np.invert(masks)
    pending = np.arange(len(masks))
    while len(pending) > 0:
        new_masks[pending] = self._aug_occl.augment_images(np.invert(masks[pending]))
        remaining_obj_pixels = np.count_nonzero(new_masks, axis=(1, 2))
        # Re-augment any mask that lost more than max_occl of its object.
        pending = np.where(remaining_obj_pixels / self.noof_obj_pixels[rand_idcs].astype(np.float32) < 1 - max_occl)[0]
        # (debug print of the pending indices removed)
    return np.invert(new_masks)
def batch(self, batch_size):
# batch_x = np.empty( (batch_size,) + self.shape, dtype=np.uint8 )
# batch_y = np.empty( (batch_size,) + self.shape, dtype=np.uint8 )
rand_idcs = np.random.choice(self.noof_training_imgs, batch_size, replace=False)
assert self.noof_bg_imgs > 0
rand_idcs_bg = np.random.choice(self.noof_bg_imgs, batch_size, replace=False)
batch_x, masks, batch_y = self.train_x[rand_idcs], self.mask_x[rand_idcs], self.train_y[rand_idcs]
rand_vocs = self.bg_imgs[rand_idcs_bg]
if eval(self._kw['realistic_occlusion']):
masks = self.augment_occlusion_mask(masks.copy(),max_occl=np.float(self._kw['realistic_occlusion']))
if eval(self._kw['square_occlusion']):
masks = self.augment_squares(masks.copy(),rand_idcs,max_occl=np.float(self._kw['square_occlusion']))
batch_x[masks] = rand_vocs[masks]
# random in-plane rotation, not necessary
# for i in range(batch_size):
# rot_angle= np.random.rand()*360
# cent = int(self.shape[0]/2)
# M = cv2.getRotationMatrix2D((cent,cent),rot_angle,1)
# batch_x[i] = cv2.warpAffine(batch_x[i],M,self.shape[:2])[:,:,np.newaxis]
# batch_y[i] = cv2.warpAffine(batch_y[i],M,self.shape[:2])[:,:,np.newaxis]
#needs uint8
batch_x = self._aug.augment_images(batch_x)
#slow...
batch_x = batch_x / 255.
batch_y = | |
True
def hangup(self):
    """Close the connection.

    Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
    """
    if self._server:
        # Log with the client port so interleaved connections can be
        # told apart in the server log.
        self._server._log('\t%d\thangup' % self.client_port)
    self._client.shutdown(socket.SHUT_RDWR)
    return True

hangs_up = hangup
"""Synonym for `.hangup`."""
def _matches_docs(self, docs, other_docs):
    """Overridable method."""
    # Pairwise-match every pattern document against the client document.
    return all(
        self._match_map(pattern, actual)
        for pattern, actual in zip(docs, other_docs)
    )
def _match_map(self, doc, other_doc):
    """Match a pattern mapping against a client mapping."""
    for key, val in doc.items():
        if val is absent:
            # The sentinel `absent` asserts the key must NOT be present.
            if key in other_doc:
                return False
        elif not self._match_val(val, other_doc.get(key, None)):
            return False

    if isinstance(doc, (OrderedDict, bson.SON)):
        # An ordered pattern also requires an ordered document whose key
        # order matches (keys marked `absent` are ignored).
        if not isinstance(other_doc, (OrderedDict, bson.SON)):
            raise TypeError(
                "Can't compare ordered and unordered document types:"
                " %r, %r" % (doc, other_doc))
        keys = [key for key, val in doc.items()
                if val is not absent]
        if not seq_match(keys, list(other_doc.keys())):
            return False

    return True
def _match_list(self, lst, other_lst):
    """Lists match when lengths are equal and values match pairwise."""
    if len(lst) != len(other_lst):
        return False
    return all(
        self._match_val(expected, actual)
        for expected, actual in zip(lst, other_lst)
    )
def _match_val(self, val, other_val):
    """Match two leaf values, recursing into maps and lists; datetimes
    are compared after rounding via _dt_rounded."""
    if _ismap(val) and _ismap(other_val):
        return self._match_map(val, other_val)
    if _islist(val) and _islist(other_val):
        return self._match_list(val, other_val)
    both_dt = (isinstance(val, datetime.datetime)
               and isinstance(other_val, datetime.datetime))
    if both_dt:
        return _dt_rounded(val) == _dt_rounded(other_val)
    return val == other_val
def _replies(self, *args, **kwargs):
    """Overridable method."""
    reply_msg = make_reply(*args, **kwargs)
    if self._server:
        # Mirror outgoing traffic in the server log for debugging.
        self._server._log('\t%d\t<-- %r' % (self.client_port, reply_msg))
    reply_bytes = reply_msg.reply_bytes(self)
    self._client.sendall(reply_bytes)
def __contains__(self, item):
    """True if item is one of the documents, or -- for a single-document
    request and a string item -- a key of that document."""
    if item in self.docs:
        return True
    is_single_doc = len(self.docs) == 1
    if is_single_doc and isinstance(item, (string_type, text_type)):
        return item in self.doc
    return False
def __getitem__(self, item):
    # A single-document request indexes into the document by key;
    # otherwise index into the document list.
    if len(self.docs) == 1:
        return self.doc[item]
    return self.docs[item]
def __str__(self):
    # Human-readable form: just the request's documents.
    return docs_repr(*self.docs)
def __repr__(self):
    """Debug form like ``OpMsg({...}, flags=..., namespace="...")``."""
    pieces = []
    if self.docs:
        pieces.append(docs_repr(*self.docs))
    if self._flags:
        if self._flags_map:
            flag_names = '|'.join(
                flag_name for flag_name, bit in self._flags_map.items()
                if self._flags & bit)
            pieces.append('flags=%s' % flag_names)
        else:
            # No symbolic names known; show the raw bit field.
            pieces.append('flags=%d' % self._flags)
    if self._namespace:
        pieces.append('namespace="%s"' % self._namespace)
    joined = ', '.join(str(piece) for piece in pieces)
    return '%s(%s)' % (self.__class__.__name__, joined)
class CommandBase(Request):
    """A command the client executes on the server."""
    is_command = True

    # Check command name case-insensitively.
    _non_matched_attrs = Request._non_matched_attrs + ('command_name',)

    @property
    def command_name(self):
        """The command name or None.

        >>> OpMsg({'count': 'collection'}).command_name
        'count'
        >>> OpMsg('aggregate', 'collection', cursor=absent).command_name
        'aggregate'
        """
        # The command name is the first key of the first document.
        if self.docs and self.docs[0]:
            return list(self.docs[0])[0]

    def _matches_docs(self, docs, other_docs):
        """Match a one-document command pattern against a command."""
        assert len(docs) == len(other_docs) == 1
        doc, = docs
        other_doc, = other_docs
        items = list(doc.items())
        other_items = list(other_doc.items())

        # Compare command name case-insensitively.
        if items and other_items:
            if items[0][0].lower() != other_items[0][0].lower():
                return False
            # The command's target (first value) must match exactly.
            if items[0][1] != other_items[0][1]:
                return False
        # Delegate the remaining fields to the generic matcher.
        return super(CommandBase, self)._matches_docs(
            [OrderedDict(items[1:])],
            [OrderedDict(other_items[1:])])
class OpMsg(CommandBase):
    """An OP_MSG request the client executes on the server."""
    opcode = OP_MSG
    is_command = True
    _flags_map = OP_MSG_FLAGS

    @classmethod
    def unpack(cls, msg, client, server, request_id):
        """Parse message and return an `OpMsg`.

        Takes the client message as bytes, the client and server socket objects,
        and the client request id.
        """
        payload_document = OrderedDict()
        flags, = _UNPACK_UINT(msg[:4])
        pos = 4
        # Only flag value 2 (moreToCome) is supported besides 0.
        if flags != 0 and flags != 2:
            raise ValueError('OP_MSG flag must be 0 or 2 not %r' % (flags,))
        while pos < len(msg):
            payload_type, = _UNPACK_BYTE(msg[pos:pos + 1])
            pos += 1
            payload_size, = _UNPACK_INT(msg[pos:pos + 4])
            if payload_type == 0:
                # Type 0: one BSON document.  A BSON document begins with
                # its own int32 length, so decode starting at `pos`.
                doc = bson.decode_all(msg[pos:pos + payload_size],
                                      CODEC_OPTIONS)[0]
                payload_document.update(doc)
                pos += payload_size
            elif payload_type == 1:
                # Type 1: a document sequence -- int32 size, C-string
                # identifier, then packed BSON documents.
                section_size, = _UNPACK_INT(msg[pos:pos + 4])
                pos += 4
                identifier, pos = _get_c_string(msg, pos)
                # Section starts w/ 4-byte size prefix, identifier ends w/ nil.
                documents_len = section_size - len(identifier) - 1 - 4
                documents = bson.decode_all(msg[pos:pos + documents_len],
                                            CODEC_OPTIONS)
                payload_document[identifier] = documents
                pos += documents_len
        # OP_MSG carries the database name in the mandatory $db field.
        database = payload_document['$db']
        return OpMsg(payload_document, namespace=database, flags=flags,
                     _client=client, request_id=request_id,
                     _server=server)

    def __init__(self, *args, **kwargs):
        super(OpMsg, self).__init__(*args, **kwargs)
        # OP_MSG requests are a single merged document.
        if len(self._docs) > 1:
            raise_args_err('OpMsg too many documents', ValueError)

    @property
    def slave_ok(self):
        """True if this OpMsg can read from a secondary."""
        read_preference = self.doc.get('$readPreference')
        return read_preference and read_preference.get('mode') != 'primary'

    slave_okay = slave_ok
    """Synonym for `.slave_ok`."""

    @property
    def command_name(self):
        """The command name or None.

        >>> OpMsg({'count': 'collection'}).command_name
        'count'
        >>> OpMsg('aggregate', 'collection', cursor=absent).command_name
        'aggregate'
        """
        if self.docs and self.docs[0]:
            return list(self.docs[0])[0]

    def _replies(self, *args, **kwargs):
        # A request flagged moreToCome expects no reply at all.
        if self.flags & OP_MSG_FLAGS['moreToCome']:
            assert False, "Cannot reply to OpMsg with moreToCome: %r" % (self,)
        reply = make_op_msg_reply(*args, **kwargs)
        if not reply.docs:
            # Default reply is a bare success document.
            reply.docs = [{'ok': 1}]
        else:
            if len(reply.docs) > 1:
                raise ValueError('OP_MSG reply with multiple documents: %s'
                                 % (reply.docs,))
            reply.doc.setdefault('ok', 1)
        super(OpMsg, self)._replies(reply)
class OpQuery(Request):
    """A query (besides a command) the client executes on the server.

    >>> OpQuery({'i': {'$gt': 2}}, fields={'j': False})
    OpQuery({"i": {"$gt": 2}}, fields={"j": false})
    """
    opcode = OP_QUERY
    is_command = False
    _flags_map = QUERY_FLAGS

    @classmethod
    def unpack(cls, msg, client, server, request_id):
        """Parse message and return an `OpQuery` or `Command`.

        Takes the client message as bytes, the client and server socket objects,
        and the client request id.
        """
        flags, = _UNPACK_INT(msg[:4])
        namespace, pos = _get_c_string(msg, 4)
        # Queries against the virtual "$cmd" collection are commands.
        is_command = namespace.endswith('.$cmd')
        num_to_skip, = _UNPACK_INT(msg[pos:pos + 4])
        pos += 4
        num_to_return, = _UNPACK_INT(msg[pos:pos + 4])
        pos += 4
        docs = bson.decode_all(msg[pos:], CODEC_OPTIONS)
        if is_command:
            assert len(docs) == 1
            command_ns = namespace[:-len('.$cmd')]
            return Command(docs, namespace=command_ns, flags=flags,
                           _client=client, request_id=request_id,
                           _server=server)
        else:
            # A second document, when present, is the field projection.
            if len(docs) == 1:
                fields = None
            else:
                assert len(docs) == 2
                fields = docs[1]
            return OpQuery(docs[0], fields=fields, namespace=namespace,
                           flags=flags, num_to_skip=num_to_skip,
                           num_to_return=num_to_return, _client=client,
                           request_id=request_id, _server=server)

    def __init__(self, *args, **kwargs):
        fields = kwargs.pop('fields', None)
        # The projection, when given, must be a mapping.
        if fields is not None and not _ismap(fields):
            raise_args_err()
        self._fields = fields
        self._num_to_skip = kwargs.pop('num_to_skip', None)
        self._num_to_return = kwargs.pop('num_to_return', None)
        super(OpQuery, self).__init__(*args, **kwargs)
        if not self._docs:
            self._docs = [{}]  # Default query filter.
        elif len(self._docs) > 1:
            raise_args_err('OpQuery too many documents', ValueError)

    @property
    def num_to_skip(self):
        """Client query's numToSkip or None."""
        return self._num_to_skip

    @property
    def num_to_return(self):
        """Client query's numToReturn or None."""
        return self._num_to_return

    @property
    def fields(self):
        """Client query's fields selector or None."""
        return self._fields

    def __repr__(self):
        # Extend the base repr with query-specific attributes.
        rep = super(OpQuery, self).__repr__().rstrip(')')
        if self._fields:
            rep += ', fields=%s' % docs_repr(self._fields)
        if self._num_to_skip is not None:
            rep += ', numToSkip=%d' % self._num_to_skip
        if self._num_to_return is not None:
            rep += ', numToReturn=%d' % self._num_to_return
        return rep + ')'
class Command(CommandBase, OpQuery):
    """A command the client executes on the server."""

    def _replies(self, *args, **kwargs):
        """Build an OP_REPLY, defaulting 'ok' to 1, and send it."""
        reply = make_reply(*args, **kwargs)
        if not reply.docs:
            # No documents given: reply with a bare success document.
            reply.docs = [{'ok': 1}]
        elif len(reply.docs) > 1:
            raise ValueError('Command reply with multiple documents: %s'
                             % (reply.docs,))
        else:
            reply.doc.setdefault('ok', 1)
        super(Command, self)._replies(reply)

    def replies_to_gle(self, **kwargs):
        """Send a getlasterror response.

        Defaults to ``{ok: 1, err: null}``. Add or override values by passing
        keyword arguments.

        Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
        """
        kwargs.setdefault('err', None)
        return self.replies(**kwargs)
class OpGetMore(Request):
    """An OP_GET_MORE the client executes on the server."""

    @classmethod
    def unpack(cls, msg, client, server, request_id):
        """Parse message and return an `OpGetMore`.

        Takes the client message as bytes, the client and server socket objects,
        and the client request id.
        """
        # Wire layout: int32 (reserved; read here as flags), cstring
        # namespace, int32 numberToReturn, int64 cursorID.
        flags, = _UNPACK_INT(msg[:4])
        namespace, pos = _get_c_string(msg, 4)
        num_to_return, = _UNPACK_INT(msg[pos:pos + 4])
        pos += 4
        cursor_id, = _UNPACK_LONG(msg[pos:pos + 8])
        return OpGetMore(namespace=namespace, flags=flags, _client=client,
                         num_to_return=num_to_return, cursor_id=cursor_id,
                         request_id=request_id, _server=server)

    def __init__(self, **kwargs):
        self._num_to_return = kwargs.pop('num_to_return', None)
        self._cursor_id = kwargs.pop('cursor_id', None)
        super(OpGetMore, self).__init__(**kwargs)

    @property
    def num_to_return(self):
        """The client message's numToReturn field."""
        return self._num_to_return

    @property
    def cursor_id(self):
        """The client message's cursorId field."""
        return self._cursor_id
class OpKillCursors(Request):
    """An OP_KILL_CURSORS the client executes on the server."""

    @classmethod
    def unpack(cls, msg, client, server, _):
        """Parse message and return an `OpKillCursors`.

        Takes the client message as bytes, the client and server socket objects,
        and the client request id.
        """
        # Leading 4 bytes are reserved.
        num_of_cursor_ids, = _UNPACK_INT(msg[4:8])
        cursor_ids = []
        pos = 8
        for _ in range(num_of_cursor_ids):
            # Cursor ids are int64 on the wire (OP_KILL_CURSORS spec);
            # reading them as 4-byte ints truncated every id and
            # misaligned all ids after the first.
            cursor_ids.append(_UNPACK_LONG(msg[pos:pos + 8])[0])
            pos += 8
        return OpKillCursors(_client=client, cursor_ids=cursor_ids,
                             _server=server)

    def __init__(self, **kwargs):
        self._cursor_ids = kwargs.pop('cursor_ids', None)
        super(OpKillCursors, self).__init__(**kwargs)

    @property
    def cursor_ids(self):
        """List of cursor ids the client wants to kill."""
        return self._cursor_ids

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self._cursor_ids)
class _LegacyWrite(Request):
    # Base class for pre-write-command wire ops (OP_INSERT, etc.);
    # legacy writes are not commands.
    is_command = False
class OpInsert(_LegacyWrite):
"""A legacy OP_INSERT the client executes on the server."""
opcode = OP_INSERT
_flags_map = INSERT_FLAGS
@classmethod
def unpack(cls, msg, client, server, request_id):
"""Parse message and return an `OpInsert`.
Takes the client message as bytes, the client and server socket objects,
and the client request id.
"""
flags, = _UNPACK_INT(msg[:4])
namespace, pos = _get_c_string(msg, 4)
docs = bson.decode_all(msg[pos:], CODEC_OPTIONS)
return cls(*docs, namespace=namespace, | |
import re
import datetime
import pkgutil
import inspect
import pathlib
# catch block start
# Section headers that begin a list-style docstring block.
# ex. Args:, Returns:
_BLOCKSTART_LIST = re.compile(
    r"(Args:|Arg:|Arguments:|Parameters:|Kwargs:|Attributes:|Returns:|Yields:|Raises:)",
    re.IGNORECASE,
)
# (the duplicated "Kwargs:" alternative was removed -- alternation
# semantics are unchanged)
# Section headers whose body is rendered verbatim (code-like) text.
_BLOCKSTART_TEXT = re.compile(r"(Examples:|Example:|Todo:)", re.IGNORECASE)
# Section headers rendered as a markdown blockquote.
_QUOTE_TEXT = re.compile(r"(Notes:|Note:)", re.IGNORECASE)
# catch value context
# "name (type): description" argument lines.
_TYPED_ARGSTART = re.compile(r"([\w\[\]_]{1,}?)\s*?\((.*?)\):(.{2,})", re.IGNORECASE)
# "name: description" argument lines (untyped).
_ARGSTART = re.compile(r"(.{1,}?):(.{2,})", re.IGNORECASE)

# Markdown skeletons for a module, a function, and a class entry.
_MODULE_TEMPLATE = """
{header}
{doc}{global_vars}{functions}{classes}
"""
_FUNC_TEMPLATE = """
### <kbd>function</kbd> `{header}`
```python
{definition}
```
{doc}
"""
_CLASS_TEMPLATE = """
### <kbd>class</kbd> `{header}`
{class_doc}
{init}{variables}{handlers}{methods}
"""
def generate_markdown(path):
    """generate markdown from package dir path

    Walks all importable modules under `path` and writes the combined
    API document to <parent of path>/docs/doc.md.

    Args:
        path (str): package root dir path
    """
    output = f"# API\n\nUpdate: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M')}"
    ignored_modules = []
    for loader, name, _ in pkgutil.walk_packages([path]):
        if _is_module_ignored(name, ignored_modules):
            # Add module to ignore list, so submodule will also be ignored
            ignored_modules.append(name)
            continue
        try:
            # NOTE(review): loader.find_module/.load_module are deprecated
            # and removed in Python 3.12; consider the importlib
            # find_spec/module_from_spec equivalents.
            module = loader.find_module(name).load_module(name)
        except Exception as e:
            # Importing arbitrary user code can fail for any reason;
            # report and skip the module.
            print(f"Error: Can't generate {name} doc. {e}")
            continue
        markdown = module_to_markdown(module)
        if not markdown:
            # Module md is empty -> ignore module and all submodules
            # Add module to ignore list, so submodule will also be ignored
            ignored_modules.append(name)
            continue
        output += markdown
    output = validate_output(output)
    output_path = pathlib.Path(path).parent / "docs"
    output_path.mkdir(parents=True, exist_ok=True)
    with open(output_path / "doc.md", "w") as f:
        f.write(output)
def _is_module_ignored(module_name, ignored_modules) -> bool:
    """Checks if a given module is ignored."""
    # Private modules (leading underscore on the last path part)
    # are always skipped.
    if module_name.split(".")[-1].startswith("_"):
        return True
    # Ignored if listed explicitly, or a subpackage of a listed package.
    return any(
        module_name == ignored or module_name.startswith(ignored + ".")
        for ignored in ignored_modules
    )
def validate_output(output):
    """markdown validator

    Collapses any run of newlines before a heading to exactly one blank
    line, and trims trailing newlines down to a single one.

    Args:
        output (str): input markdown text

    Returns:
        str: validated markdown
    """
    normalized_headings = re.sub("\n+#", "\n\n#", output)
    return re.sub("\n+$", "\n", normalized_headings)
def get_docstring(module):
    """Return the cleaned docstring of *module* (any object), or "" when absent.

    Args:
        module (obj): object that may or may not carry a docstring
    Returns:
        str: the docstring, or "" if there is none
    """
    # inspect.getdoc returns None for undocumented objects; map that to "".
    return inspect.getdoc(module) or ""
def docstring_to_markdown(docstring):
    """Parse a Google-format docstring into markdown.

    Line-by-line state machine. Tracks four mutually interacting states:
    an argument-list section ("Args:"-style), a literal block introduced
    by a trailing "::", a fenced markdown code snippet (```/`), and a
    quote block. Section-header regexes (_BLOCKSTART_LIST, _BLOCKSTART_TEXT,
    _QUOTE_TEXT) are module-level constants defined elsewhere in this file.

    Args:
        docstring (str): google format docstring
    Returns:
        str: markdown text
    """
    block_indent = 0  # indent depth of the current section header
    arg_indent = 0  # indent depth of the most recent argument line
    markdown_output = []
    args = False  # inside an argument-list section
    literal_block = False  # inside a "::" literal block (emitted as fenced code)
    md_code_snippet = False  # inside a backtick-fenced snippet copied verbatim
    quote_block = False  # inside a markdown quote ("> ...") block
    for row in docstring.split("\n"):
        indent_depth = len(row) - len(row.lstrip())
        # Keep original indentation only inside verbatim blocks.
        if not md_code_snippet and not literal_block:
            row = row.lstrip()
        if row.startswith(">>>"):
            # support for doctest
            row = row.replace(">>>", ">")
        if _BLOCKSTART_LIST.match(row) or _BLOCKSTART_TEXT.match(row) or _QUOTE_TEXT.match(row):
            # A new section header closes whatever block was open.
            block_indent = indent_depth
            if quote_block:
                quote_block = False
            if literal_block:
                # Close the still-open fenced block before the new section.
                markdown_output.append("```\n")
                literal_block = False
            markdown_output.append(f"#### {row.strip()}\n")
            # Only list-style sections ("Args:" etc.) get argument formatting.
            args = _BLOCKSTART_LIST.match(row)
            if _QUOTE_TEXT.match(row):
                quote_block = True
                markdown_output.append("\n>")
        elif row.strip().startswith("```") or row.strip().startswith("`"):
            # Toggle verbatim copying of a fenced markdown snippet.
            md_code_snippet = not md_code_snippet
            markdown_output.append("\n" + row)
        elif row.strip().endswith("::"):
            # reST-style literal block: open a fenced code block.
            literal_block = True
            markdown_output.append(row.replace("::", ":\n```"))
        elif quote_block:
            markdown_output.append(row.strip())
        elif row.strip().startswith("-"):
            # Bullet list item: keep its indentation.
            markdown_output.append("\n" + (" " * indent_depth) + row)
        elif indent_depth > block_indent:
            # Indented continuation of the current section.
            if args and not literal_block:
                if _TYPED_ARGSTART.match(row):
                    # "name (type): desc" -> bold name, type in parens.
                    markdown_output.append(
                        "\n" + " " * block_indent + " - " + _TYPED_ARGSTART.sub(r"<b>`\1`</b> (\2): \3", row)
                    )
                elif _ARGSTART.match(row):
                    # "name: desc" -> italic name.
                    markdown_output.append("\n" + " " * block_indent + "- " + _ARGSTART.sub(r"*`\1`*: \2", row))
                arg_indent = indent_depth
            elif indent_depth > arg_indent:
                # Continuation line of the previous argument's description.
                markdown_output.append(f" {row}")
            else:
                markdown_output.append("\n" + row)
        else:
            if row.strip() and literal_block:
                # First non-indented line after a literal block closes it.
                row = "```\n" + row
                literal_block = False
            markdown_output.append(row)
            if md_code_snippet:
                markdown_output.append("\n")
            elif not row and not quote_block:
                markdown_output.append("\n\n")
            elif not row and quote_block:
                markdown_output.append("\n>")
    return "".join(markdown_output)
def _get_function_signature(
function,
owner_class=None,
wrap_arguments=False,
remove_package=False,
):
"""get function definition
example:
```python
def hoge(a: int) -> int
```
Args:
function (obj): function which you want definition.
owner_class (class, optional): Input parent class when input function is class method. Defaults to None.
wrap_arguments (bool, optional): Limited line num at 119 charactors. Defaults to False.
remove_package (bool, optional): hoge.func -> func. Defaults to False.
Returns:
string: markdown code block about function definition.
"""
isclass = inspect.isclass(function)
# Get base name.
name_parts = []
if owner_class:
name_parts.append(owner_class.__name__)
if hasattr(function, "__name__"):
name_parts.append(function.__name__)
else:
name_parts.append(type(function).__name__)
name_parts.append("__call__")
function = function.__call__ # type: ignore
name = ".".join(name_parts)
if isclass:
function = getattr(function, "__init__", None)
arguments = []
return_type = ""
if hasattr(inspect, "signature"):
parameters = inspect.signature(function).parameters
if inspect.signature(function).return_annotation != inspect.Signature.empty:
return_type = str(inspect.signature(function).return_annotation)
if return_type.startswith("<class"):
# Base class -> get real name
try:
return_type = inspect.signature(function).return_annotation.__name__
except Exception:
pass
# Remove all typing path prefixes
return_type = return_type.replace("typing.", "")
if remove_package:
# Remove all package path return type
return_type = re.sub(r"([a-zA-Z0-9_]*?\.)", "", return_type)
for parameter in parameters:
argument = str(parameters[parameter])
# Reintroduce Optionals
argument = re.sub(r"Union\[(.*?), NoneType\]", r"Optional[\1]", argument)
# Remove package
if remove_package:
# Remove all package path from parameter signature
if "=" not in argument:
argument = re.sub(r"([a-zA-Z0-9_]*?\.)", "", argument)
else:
# Remove only from part before the first =
argument_split = argument.split("=")
argument_split[0] = re.sub(r"([a-zA-Z0-9_]*?\.)", "", argument_split[0])
argument = "=".join(argument_split)
arguments.append(argument)
else:
print("Seems like function " + name + " does not have any signature")
signature = name + "("
if wrap_arguments:
for i, arg in enumerate(arguments):
signature += "\n " + arg
signature += "," if i is not len(arguments) - 1 else "\n"
else:
signature += ", ".join(arguments)
signature += ")" + ((" → " + return_type) if return_type else "")
return signature
def function_to_markdown(func, clsname="") -> str:
    """Render one function (or method) as a markdown section.

    Args:
        func (obj): function
        clsname (str, optional): owning class name for methods; prefixes the
            heading. Defaults to "".
    Returns:
        str: markdown text for the function.
    """
    name = func.__name__
    doc = docstring_to_markdown(get_docstring(func))
    # Qualified heading: "Class.func" for methods, bare name otherwise.
    header = f"{clsname}.{name}" if clsname else name
    definition = _get_function_signature(func)
    # Re-render one argument per line when the single-line form is too wide.
    if len(definition) > 119:
        definition = _get_function_signature(func, wrap_arguments=True)
    return _FUNC_TEMPLATE.format(header=header, definition=definition, doc=doc)
def class_to_markdown(cls) -> str:
    """Render a class (docstring, constructor, properties, descriptors,
    methods) as markdown.

    Only members defined in the class's own module are documented.

    Returns:
        str: markdown about class
    """
    clsname = cls.__name__
    modname = cls.__module__
    header = clsname
    doc = docstring_to_markdown(get_docstring(cls))
    try:
        # object module should be the same as the calling module
        if hasattr(cls.__init__, "__module__") and cls.__init__.__module__ == modname:
            # BUG FIX: the docstring used to be passed as an extra positional
            # argument, colliding with `clsname` and raising a TypeError that
            # the except clause below silently swallowed -- so constructor
            # documentation was never emitted.
            init = function_to_markdown(cls.__init__, clsname=clsname)
        else:
            init = ""
    except (ValueError, TypeError):
        # this happens if __init__ is outside the repo
        init = ""
    variables = []
    for name, obj in inspect.getmembers(cls, lambda a: not (inspect.isroutine(a) or inspect.ismethod(a))):
        # isinstance (rather than `type(obj) == property`) also accepts
        # property subclasses.
        if not name.startswith("_") and isinstance(obj, property):
            comments = docstring_to_markdown(get_docstring(obj)) or inspect.getcomments(obj)
            comments = "\n\n%s" % comments if comments else ""
            property_name = f"{clsname}.{name}"
            variables.append(f"\n#### {property_name}{comments}\n")
    handlers = []
    handler_names = []
    for name, obj in inspect.getmembers(cls, inspect.ismethoddescriptor):
        if (
            not name.startswith("_")
            and hasattr(obj, "__module__")
            # object module should be the same as the calling module
            and obj.__module__ == modname
        ):
            handler_names.append(name)
            handlers.append(f"\n#### {clsname}.{name}\n")
    methods = []
    for name, obj in inspect.getmembers(cls, lambda a: inspect.ismethod(a) or inspect.isfunction(a)):
        if (
            not name.startswith("_")
            and hasattr(obj, "__module__")
            # BUG FIX: compare against the collected descriptor *names*; the
            # old check tested membership in the formatted markdown strings
            # ("\n#### Cls.name\n") and therefore never matched.
            and name not in handler_names
            # object module should be the same as the calling module
            and obj.__module__ == modname
        ):
            function_md = function_to_markdown(obj, clsname=clsname)
            methods.append(function_md)
    markdown = _CLASS_TEMPLATE.format(
        header=header,
        class_doc=doc if doc else "",
        init=init,
        variables="".join(variables),
        handlers="".join(handlers),
        methods="".join(methods),
    )
    return markdown
def module_to_markdown(module) -> str:
"""get markdown from module
Args:
module (obj): module
Returns:
str: markdown about module
"""
def _order_by_line_nos(objs, line_nos):
"""Orders the set of `objs` by `line_nos`."""
ordering = sorted(range(len(line_nos)), key=line_nos.__getitem__)
return [objs[i] for i in ordering]
def _get_line_no(_obj):
"""Gets the source line number of this object. None if `obj` code cannot be found."""
try:
return inspect.getsourcelines(_obj)[1]
except Exception:
# no code found
return None
modname = module.__name__
doc = get_docstring(module)
found = []
classes = []
line_nos = []
for name, obj in inspect.getmembers(module, inspect.isclass):
# handle classes
found.append(name)
if not name.startswith("_") and hasattr(obj, "__module__") and obj.__module__ == modname:
class_markdown = class_to_markdown(obj)
if class_markdown:
classes.append(class_markdown)
line_nos.append(_get_line_no(obj) or 0)
classes = _order_by_line_nos(classes, line_nos)
functions = []
line_nos = []
for name, obj in inspect.getmembers(module, inspect.isfunction):
# handle functions
found.append(name)
if not name.startswith("_") and hasattr(obj, "__module__") and obj.__module__ == modname:
function_md = function_to_markdown(obj, clsname=modname.split(".")[0])
if function_md:
functions.append(function_md)
line_nos.append(_get_line_no(obj) or 0)
functions = _order_by_line_nos(functions, line_nos)
variables = []
line_nos = []
for name, obj in module.__dict__.items():
if not name.startswith("_") and name not in found:
if hasattr(obj, "__module__") | |
<filename>counter_attack/cli/options.py<gh_stars>0
import datetime
import functools
import logging
import sys
import click
import foolbox
import numpy as np
import torch
from counter_attack import detectors, loaders, model_tools, rejectors, utils
from counter_attack.cli import parsing, definitions
logger = logging.getLogger(__name__)
def global_options(func):
    """Decorator: attach the CLI options shared by every command.

    Adds the DATASET argument plus batching/shuffling/config/logging options,
    parses them into a dict and passes that dict to ``func`` as its first
    positional argument.
    """
    @click.argument('dataset', type=click.Choice(definitions.datasets))
    @click.option('--batch-size', default=5, show_default=True, type=click.IntRange(1, None),
                  help='The size of each batch.')
    @click.option('--max-model-batch-size', type=click.IntRange(0, None), default=0,
                  help='The maximum number of images passed in the same batch. 0 disables batch limiting (default).')
    @click.option('--max-batches', type=click.IntRange(1, None), default=None,
                  help='The maximum number of batches. If unspecified, no batch limiting is applied.')
    @click.option('--shuffle', type=bool, default=True, show_default=True,
                  help='Whether to shuffle the dataset.')
    @click.option('--config-path', default='./config.ini', type=click.Path(file_okay=True, exists=True),
                  help='The path to the configuration file.')
    @click.option('--no-cuda', is_flag=True)
    @click.option('--no-shuffle-warning', is_flag=True,
                  help='Disables the warning for limiting batches without shuffling.')
    @click.option('--log-level', default='info', show_default=True, type=click.Choice(['debug', 'info', 'warning', 'error', 'critical']),
                  help='Sets the logging level.')
    @functools.wraps(func)
    def _parse_global_options(dataset, batch_size, max_model_batch_size, max_batches, shuffle, config_path, no_cuda, no_shuffle_warning, log_level, *args, **kwargs):
        # Recorded so results files can state when and how the run started.
        start_time = datetime.datetime.now()
        command = ' '.join(sys.argv[1:])
        if max_batches is not None:
            # Limiting batches without shuffling silently drops the tail of
            # the dataset -- warn unless explicitly suppressed.
            if (not shuffle) and (not no_shuffle_warning):
                logger.warning('You are limiting the number of batches, but you aren\'t applying any shuffling. '
                               'This means that the last parts of your dataset will be never used. You can disable this '
                               'warning by passing \'--no-shuffle-warning\'.')
        num_classes = parsing.get_num_classes(dataset)
        logger.debug('CUDA is supported: {}'.format(torch.cuda.is_available()))
        # CUDA is used when available unless explicitly disabled.
        cuda = torch.cuda.is_available() and not no_cuda
        device = torch.cuda.current_device() if cuda else 'cpu'
        logging.getLogger('counter_attack').setLevel(log_level.upper())
        logger.info('Batch size: {}'.format(batch_size))
        # The shared options dict passed down the decorator chain.
        global_options = {
            'batch_size': batch_size,
            'command': command,
            'config_path': config_path,
            'cuda': cuda,
            'device': device,
            'dataset': dataset,
            'max_batches': max_batches,
            'max_model_batch_size' : max_model_batch_size,
            'no_shuffle_warning': no_shuffle_warning,
            'num_classes': num_classes,
            'shuffle': shuffle,
            'start_time': start_time
        }
        return func(global_options, *args, **kwargs)
    return _parse_global_options
def standard_model_options(func):
    """Decorator: resolve the default torch architecture for the chosen
    dataset and store it in the options dict as 'base_model'."""
    @functools.wraps(func)
    def _parse_standard_model_options(options, *args, **kwargs):
        enriched = dict(options)
        enriched['base_model'] = parsing.get_torch_model(enriched['dataset'])
        return func(enriched, *args, **kwargs)
    return _parse_standard_model_options
def pretrained_model_options(func):
    """
    Loads the pretrained weights and saves
    the model in foolbox and torch format.

    Requires (in the options dict):
        base_model
        cuda
        dataset
        device
        max_model_batch_size
        num_classes
    Adds:
        foolbox_model
        torch_model
    """
    @click.option('--weights-path', type=click.Path(file_okay=True, dir_okay=False), default=None)
    @click.option('--download-model', is_flag=True,
                  help='If the model file does not exist, download the pretrained model for the corresponding dataset.')
    @functools.wraps(func)
    def _parse_pretrained_model_options(options, weights_path, download_model, *args, **kwargs):
        base_model = options['base_model']
        cuda = options['cuda']
        dataset = options['dataset']
        device = options['device']
        max_model_batch_size = options['max_model_batch_size']
        num_classes = options['num_classes']
        # Default location is keyed on the dataset name.
        if weights_path is None:
            weights_path = './pretrained_models/' + dataset + '.pth.tar'
        logger.debug('Loading pretrained weights from {}'.format(weights_path))
        torch_model = parsing.get_pretrained_torch_model(
            dataset, base_model, weights_path, download_model)
        # Prepend the per-dataset input normalisation so callers can feed
        # raw [0, 1] images.
        torch_model = torch.nn.Sequential(
            parsing.get_normalisation_by_name(dataset), torch_model)
        # Inference mode (disables dropout/batch-norm updates).
        torch_model.eval()
        if cuda:
            torch_model.cuda()
        foolbox_model = foolbox.models.PyTorchModel(
            torch_model, (0, 1), num_classes, channel_axis=3, device=device, preprocessing=(0, 1))
        # Optionally cap the number of images per forward pass.
        if max_model_batch_size > 0:
            foolbox_model = model_tools.MaxBatchModel(foolbox_model, max_model_batch_size)
        options = dict(options)
        options['foolbox_model'] = foolbox_model
        options['torch_model'] = torch_model
        return func(options, *args, **kwargs)
    return _parse_pretrained_model_options
def custom_model_options(func):
    """Decorator: load a user-supplied model, either from a weights file
    (default architecture) or from a full serialized model file.

    Exactly one of --custom-weights-path / --custom-model-path must be given.

    Requires (in the options dict): cuda, dataset, device,
    max_model_batch_size, num_classes.
    Adds: custom_foolbox_model, custom_torch_model.
    """
    @click.option('--custom-weights-path', type=click.Path(exists=True, file_okay=True, dir_okay=False), default=None)
    @click.option('--custom-model-path', type=click.Path(exists=True, file_okay=True, dir_okay=False), default=None)
    @click.option('--custom-model-normalisation', default=None,
                  help='The normalisation that will be applied by the custom model. Supports both dataset names ({}) and '
                  'channel stds-means (format: "red_mean green_mean blue_mean red_stdev green_stdev blue_stdev" including quotes).'.format(', '.join(definitions.datasets)))
    @functools.wraps(func)
    def _parse_custom_model_options(options, custom_weights_path, custom_model_path, custom_model_normalisation, *args, **kwargs):
        cuda = options['cuda']
        dataset = options['dataset']
        device = options['device']
        max_model_batch_size = options['max_model_batch_size']
        num_classes = options['num_classes']
        # NXOR between custom_weights_path and custom_model_path:
        # exactly one of the two sources must be provided.
        if (custom_weights_path is None) == (custom_model_path is None):
            raise click.BadOptionUsage('--custom-weights-path',
                                       'You must pass either \'--custom-weights-path [PATH]\' or \'--custom-model-path [PATH]\' (but not both).')
        if custom_model_path is None:
            # Weights-only: instantiate the default architecture and load
            # the state dict into it.
            logger.info('No custom architecture path passed. Using default architecture {}'.format(
                definitions.default_architecture_names[dataset]))
            custom_torch_model = parsing.get_torch_model(dataset)
            logger.debug('Loading weights from {}'.format(custom_weights_path))
            model_tools.load_state_dict(
                custom_torch_model, custom_weights_path, False, False)
        else:
            # Whole-model file: deserialize architecture and weights together.
            logger.debug('Loading model from {}'.format(custom_model_path))
            custom_torch_model = torch.load(custom_model_path)
        custom_torch_model = parsing.apply_normalisation(custom_torch_model, custom_model_normalisation, 'custom model', '--custom-model-normalisation')
        # Inference mode (disables dropout/batch-norm updates).
        custom_torch_model.eval()
        if cuda:
            custom_torch_model.cuda()
        custom_foolbox_model = foolbox.models.PyTorchModel(
            custom_torch_model, (0, 1), num_classes, channel_axis=3, device=device, preprocessing=(0, 1))
        # Optionally cap the number of images per forward pass.
        if max_model_batch_size > 0:
            logger.debug('Applying model batch limiting: {}'.format(max_model_batch_size))
            custom_foolbox_model = model_tools.MaxBatchModel(custom_foolbox_model, max_model_batch_size)
        options = dict(options)
        options['custom_foolbox_model'] = custom_foolbox_model
        options['custom_torch_model'] = custom_torch_model
        return func(options, *args, **kwargs)
    return _parse_custom_model_options
def dataset_options(default_dataset, recommended=None):
    """Decorator factory: add dataset-loading options and build the loader.

    Args:
        default_dataset: default value for --dataset-type ('train' or 'test').
        recommended: dataset type recommended for the command; a warning is
            logged when the user picks a different one. Defaults to None.

    Requires (in the options dict): batch_size, config_path, dataset,
    max_batches, shuffle.
    Adds: dataset_type, loader.
    """
    def _dataset_options(func):
        @click.option('--data-folder', default=None, type=click.Path(file_okay=False, dir_okay=True),
                      help='The path to the folder where the dataset is stored (or will be downloaded). '
                      'If unspecified, it defaults to \'./data/genuine/$dataset\'.')
        @click.option('--dataset-type', default=default_dataset, show_default=True, type=click.Choice(['train', 'test']),
                      help='Sets the dataset (train or test) that will be used. For ImageNet, we use the validation set '
                      'as test dataset.')
        @click.option('--download-dataset', is_flag=True,
                      help='If the dataset files do not exist, download them.')
        @click.option('--loader-workers', default=2, show_default=True, type=click.IntRange(0, None),
                      help='The number of parallel workers that will load the samples from the dataset. '
                      '0 disables parallelization.')
        @functools.wraps(func)
        def _parse_dataset_options(options, data_folder, dataset_type, download_dataset, loader_workers, *args, **kwargs):
            batch_size = options['batch_size']
            config_path = options['config_path']
            dataset = options['dataset']
            max_batches = options['max_batches']
            shuffle = options['shuffle']
            if data_folder is None:
                data_folder = './data/genuine/' + dataset
            # Warn when the user deviates from the recommended split.
            if recommended is not None and dataset_type != recommended:
                logger.warning('You are using the {} dataset. We recommend using the {} dataset for this command.'.format(
                    dataset_type, recommended))
            logger.info('Using {} {} dataset.'.format(dataset, dataset_type))
            train_loader, test_loader = parsing.get_genuine_loaders(
                dataset, data_folder, batch_size, shuffle, loader_workers, download_dataset, config_path)
            if dataset_type == 'train':
                loader = train_loader
            else:
                loader = test_loader
            # Optionally truncate the loader to the first max_batches batches.
            if max_batches is not None:
                loader = loaders.MaxBatchLoader(loader, max_batches)
            options = dict(options)
            options['dataset_type'] = dataset_type
            options['loader'] = loader
            return func(options, *args, **kwargs)
        return _parse_dataset_options
    return _dataset_options
def train_options(func):
    """Decorator: add the EPOCHS argument and the optimiser hyperparameter
    options, then copy them into the options dict."""
    @click.argument('epochs', type=click.IntRange(1, None))
    @click.option('--optimiser', type=click.Choice(['adam', 'sgd']), default='adam', show_default=True,
                  help='The optimiser that will be used for training.')
    @click.option('--learning_rate', type=float, default=1e-3, show_default=True,
                  help='The learning rate for the optimiser.')
    @click.option('--weight-decay', type=float, default=0, show_default=True,
                  help='The weight decay for the optimiser.')
    @click.option('--adam-betas', nargs=2, type=click.Tuple([float, float]), default=(0.9, 0.999), show_default=True,
                  help='The two beta values. Ignored if the optimiser is not \'adam\'')
    @click.option('--adam-epsilon', type=float, default=1e-8, show_default=True,
                  help='The value of epsilon. Ignored if the optimiser is not \'adam\'')
    @click.option('--adam-amsgrad', is_flag=True,
                  help='Enables AMSGrad. Ignored if the optimiser is not \'adam\'')
    @click.option('--sgd-momentum', type=float, default=0, show_default=True,
                  help='The intensity of momentum. Ignored if the optimiser is not \'sgd\'')
    @click.option('--sgd-dampening', type=float, default=0, show_default=True,
                  help='The intensity of dampening. Ignored if the optimiser is not \'sgd\'')
    @click.option('--sgd-nesterov', is_flag=True,
                  help='Enables Nesterov Accelerated Gradient. Ignored if the optimiser is not \'adam\'')
    @functools.wraps(func)
    def _parse_train_options(options, epochs, optimiser, learning_rate, weight_decay, adam_betas, adam_epsilon, adam_amsgrad, sgd_momentum, sgd_dampening, sgd_nesterov, *args, **kwargs):
        # Copy-on-write: leave the caller's dict untouched.
        merged = dict(options)
        merged.update(
            adam_amsgrad=adam_amsgrad,
            adam_betas=adam_betas,
            adam_epsilon=adam_epsilon,
            epochs=epochs,
            learning_rate=learning_rate,
            optimiser_name=optimiser,
            sgd_dampening=sgd_dampening,
            sgd_momentum=sgd_momentum,
            sgd_nesterov=sgd_nesterov,
            weight_decay=weight_decay,
        )
        return func(merged, *args, **kwargs)
    return _parse_train_options
def test_options(test_name):
    """Decorator factory: add --results-path, defaulting to a path derived
    from *test_name*, the dataset and the run start time."""
    def _test_options(func):
        @click.option('--results-path', default=None, type=click.Path(file_okay=True, dir_okay=False),
                      help='The path to the CSV file where the results will be saved. If unspecified '
                      'it defaults to \'./results/{}/$dataset $start_time.csv\''.format(test_name))
        @functools.wraps(func)
        def _parse_test_options(options, results_path, *args, **kwargs):
            # Fall back to the conventional results location when not given.
            if results_path is None:
                results_path = parsing.get_results_default_path(
                    test_name, options['dataset'], options['start_time'])
            return func(dict(options, results_path=results_path), *args, **kwargs)
        return _parse_test_options
    return _test_options
def attack_options(attacks, mandatory_parallelization=False):
    """Decorator factory: add the ATTACK and ATTACK_P arguments plus the
    attack-worker parallelization option.

    Args:
        attacks: the attack names accepted by this command.
        mandatory_parallelization: when True, ATTACK_WORKERS becomes a
            required positional argument (minimum 1) instead of the optional
            --attack-workers flag. Defaults to False.

    Adds to the options dict: attack_name, attack_p, attack_workers.
    """
    def _attack_options(func):
        @click.argument('attack', type=click.Choice(attacks))
        @click.argument('attack_p', type=click.Choice(definitions.supported_ps))
        @functools.wraps(func)
        def _parse_attack_options(options, attack, attack_p, attack_workers, *args, **kwargs):
            attack_p = float(attack_p)
            logger.info('Attack p: {}'.format(attack_p))
            if attack in definitions.parallelizable_attacks:
                logger.debug('Attack supports parallelization.')
            else:
                logger.debug('Attack does not support parallelization.')
                # Requesting workers for a non-parallelizable attack is an error.
                if attack_workers > 0:
                    raise click.BadOptionUsage('--attack-workers', 'The chosen attack \'{}\' does not support parallelization.'.format(attack))
            logger.info('Attack workers: {}.'.format(attack_workers))
            options = dict(options)
            # We don't immediately parse 'attack' because every test needs a specific configuration
            options['attack_name'] = attack
            options['attack_p'] = attack_p
            options['attack_workers'] = attack_workers
            return func(options, *args, **kwargs)
        parse_func = _parse_attack_options
        # ATTACK_WORKERS is wired up last so it can be either a required
        # positional argument or an optional flag, depending on the command.
        if mandatory_parallelization:
            parse_func = click.argument('attack_workers', type=click.IntRange(1, None))(parse_func)
        else:
            parse_func = click.option('--attack-workers', type=click.IntRange(0, None), default=0, show_default=True,
                                      help='The number of parallel workers that will be used to speed up the attack. 0 disables parallelization.')(parse_func)
        return parse_func
    return _attack_options
def distance_options(func):
    """Decorator: add the mandatory DEFENSE_P argument (the Lp norm used by
    the defense) and store it, as a float, in the options dict."""
    @click.argument('defense_p', type=click.Choice(definitions.supported_ps))
    @functools.wraps(func)
    def _parse_distance_options(options, defense_p, *args, **kwargs):
        lp = float(defense_p)
        logger.info('Defense Lp: {}'.format(lp))
        enriched = dict(options)
        enriched['defense_p'] = lp
        return func(enriched, *args, **kwargs)
    return _parse_distance_options
def counter_attack_options(required):
def _counter_attack_options(func):
@click.option('--counter-attack-workers', type=click.IntRange(0, None), default=0, show_default=True,
help='The number of attack workers of the counter attack.')
@functools.wraps(func)
def _parse_counter_attack_options(options, counter_attack, counter_attack_workers, *args, **kwargs):
defense_p = options['defense_p']
max_model_batch_size = options['max_model_batch_size']
if counter_attack in definitions.parallelizable_attacks:
logger.debug('Counter attack supports parallelization.')
else:
logger.debug('Counter attack does not support parallelization.')
if counter_attack_workers is not None and counter_attack_workers > 0:
raise click.BadOptionUsage('--counter-attack-workers', 'The chosen counter-attack \'{}\' does not support parallelization.'.format(counter_attack))
counter_attack_workers = 0
logger.info('Counter attack workers: {}.'.format(counter_attack_workers))
if max_model_batch_size > 0 and counter_attack_workers > max_model_batch_size:
raise click.BadOptionUsage('--counter-attack-workers',
'The number of counter attack workers must be at most the maximum model batch size. '
'Either increase the maximum model batch size, decrease the number of '
'counter attack workers, | |
<filename>regression/module_NN_ens.py
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import tensorflow as tf
import datetime
from scipy.special import erf
import importlib
import utils
importlib.reload(utils)
from utils import *
class NN():
    def __init__(self,
        activation_fn, x_dim, y_dim, hidden_size,
        init_stddev_1_w, init_stddev_1_b, init_stddev_2_w, optimiser_in, n, learning_rate, decay_rate=1.0,
        drop_out=False, deep_NN=False):
        '''Set up one single-layer NN (TF1 graph + optimiser).

        activation_fn: one of 'tanh', 'relu', 'Lrelu', 'erf', 'softplus',
            'rbf' ('rbf' builds a radial-basis network instead of a Dense one).
        x_dim / y_dim: input / output dimensionality.
        hidden_size: number of hidden units.
        init_stddev_*: stddevs of the Gaussian weight/bias initialisers
            (these also define the anchoring priors used by anchor()).
        optimiser_in: 'adam', 'SGD', 'AdaDel', 'RMS' or 'AdaGrad'.
        n: training-set size (kept for reference; the loss normalises by the
            actual batch size).
        decay_rate: exponential learning-rate decay factor (adam only).
        '''
        ''' unless deep_NN = True, in which case add two layers'''
        # Keep hyperparameters on the instance so later methods
        # (anchor, get_weights, ...) can inspect them.
        self.activation_fn = activation_fn
        self.x_dim = x_dim
        self.y_dim = y_dim
        self.hidden_size = hidden_size
        self.optimiser_in = optimiser_in
        self.n = n
        self.learning_rate = learning_rate
        self.decay_rate = decay_rate
        self.drop_out = drop_out
        self.deep_NN = deep_NN
        # Resolve the activation name to a TF op.
        if activation_fn == 'tanh':
            fn_use = tf.nn.tanh
        elif activation_fn == 'relu':
            fn_use = tf.nn.relu
        elif activation_fn == 'Lrelu':
            def tp_Lrelu(x):
                # Hand-rolled leaky ReLU with slope 0.2.
                a=0.2
                return tf.maximum(a*x,x)
            # not sure why but this didn't work
            # fn_use = tf.nn.leaky_relu # alpha=0.2 by default
            fn_use = tp_Lrelu
        elif activation_fn == 'erf':
            fn_use = tf.erf
        elif activation_fn == 'softplus':
            def tp_softplus(x):
                # manually adjust so it is more similar to relu
                c=3. # if this is bigger -> relu behaviour, but less 'soft'
                return tf.divide(tf.log(tf.exp(tf.multiply(x,c)) + 1),c)
                # https://stackoverflow.com/questions/44230635/avoid-overflow-with-softplus-function-in-python
                # to avoid overflow we could do something like if x>30/c, return x
                # return tf.cond(x>30/c, lambda: tf.divide(tf.log(tf.exp(tf.multiply(x,c)) + 1),c), lambda: x)
                # def f1(): return tf.divide(tf.log(tf.exp(tf.multiply(x,c)) + 1),c)
                # def f2(): return x
                # return tf.cond(tf.less(x,30/c), f1, f2)
            fn_use = tp_softplus
        # used to have these as float32
        self.inputs = tf.placeholder(tf.float64, [None, x_dim], name='inputs')
        self.y_target = tf.placeholder(tf.float64, [None, y_dim], name='target')
        anchor_factor = 1 # could inflate the init. dist. by this
        if activation_fn != 'rbf':
            # we use 'Dense' instead of 'dense' - so can access weights more easily
            self.layer_1_w = tf.layers.Dense(hidden_size,
                activation=fn_use, #trainable=False,
                kernel_initializer=tf.random_normal_initializer(mean=0.,stddev=anchor_factor*init_stddev_1_w),
                bias_initializer=tf.random_normal_initializer(mean=0.,stddev=anchor_factor*init_stddev_1_b))
            self.layer_1 = self.layer_1_w.apply(self.inputs)
            if self.drop_out:
                self.layer_1 = tf.layers.dropout(self.layer_1,rate=0.4,training=True)
            # Linear read-out layer, no bias.
            self.output_w = tf.layers.Dense(y_dim,
                activation=None, use_bias=False,
                kernel_initializer=tf.random_normal_initializer(mean=0.,stddev=anchor_factor*init_stddev_2_w))
            if self.deep_NN:
                print('going deep...')
                # add an extra hidden layer
                self.layer_2_w = tf.layers.Dense(hidden_size,
                    activation=fn_use, #trainable=False,
                    kernel_initializer=tf.random_normal_initializer(mean=0.,stddev=anchor_factor*init_stddev_2_w),
                    bias_initializer=tf.random_normal_initializer(mean=0.,stddev=anchor_factor*init_stddev_2_w))
                self.layer_2 = self.layer_2_w.apply(self.layer_1)
                if self.drop_out:
                    self.layer_2 = tf.layers.dropout(self.layer_2,rate=0.4,training=True)
                self.output = self.output_w.apply(self.layer_2)
            else:
                self.output = self.output_w.apply(self.layer_1)
        else:
            # RBF network: centres c (trainable), fixed width beta, and a
            # linear output layer w2_rbf.
            self.c = tf.Variable(tf.random_normal([x_dim,hidden_size],mean=0.,stddev=anchor_factor*init_stddev_1_w, dtype=tf.float64),trainable=True, dtype=tf.float64)
            self.beta = tf.Variable(initial_value=[init_stddev_1_b],trainable=False, dtype=tf.float64)
            self.beta_2 = tf.pow(2*tf.square(self.beta),-1)
            self.w2_rbf = tf.Variable(tf.random_normal([hidden_size,y_dim],mean=0.,stddev=anchor_factor*init_stddev_2_w, dtype=tf.float64), dtype=tf.float64)
            self.layer_1_rbf = tf.exp(-self.beta_2*tf.square(self.inputs - self.c))
            if self.drop_out:
                self.layer_1_rbf = tf.layers.dropout(self.layer_1_rbf,rate=0.4,training=True)
            self.output = tf.matmul(self.layer_1_rbf,self.w2_rbf)
            # self.output = tf.matmul(tf.exp(-tf.multiply(self.beta_2,tf.square(self.inputs - self.c))),self.w2_rbf)
        # Global step counter, incremented by the optimiser.
        self.batch = tf.Variable(0, trainable=False)
        # decayed_learning_rate = learning_rate *
        # decay_rate ^ (global_step / decay_steps)
        self.l_rate_decay = tf.train.exponential_decay(
            self.learning_rate, # Base learning rate.
            global_step=self.batch, # Current index into the dataset.
            decay_steps=20, # Decay step.
            decay_rate=self.decay_rate, # Decay rate.
            staircase=True)
        # NOTE(review): only adam uses the decayed rate; the other optimisers
        # get the constant learning_rate.
        if optimiser_in == 'adam':
            # self.opt_method = tf.train.AdamOptimizer(learning_rate)
            self.opt_method = tf.train.AdamOptimizer(self.l_rate_decay)
        elif optimiser_in == 'SGD':
            self.opt_method = tf.train.GradientDescentOptimizer(learning_rate)
        elif optimiser_in == 'AdaDel':
            self.opt_method = tf.train.AdadeltaOptimizer(learning_rate)
        elif optimiser_in == 'RMS':
            self.opt_method = tf.train.RMSPropOptimizer(learning_rate)
        elif optimiser_in == 'AdaGrad':
            self.opt_method = tf.train.AdagradOptimizer(learning_rate)
        # Mean squared error, normalised by the actual batch size (the
        # leading dim of the inputs), not by the fixed n.
        # self.loss_ = tf.reduce_mean(tf.square(self.y_target - self.output))
        # self.loss_ = 1/self.n * tf.reduce_sum(tf.square(self.y_target - self.output))
        self.loss_ = 1/tf.shape(self.inputs, out_type=tf.int64)[0] * tf.reduce_sum(tf.square(self.y_target - self.output))
        # self.mse_loss = 1/self.n * tf.reduce_sum(tf.square(self.y_target - self.output)) # useful for val
        self.mse_loss = 1/tf.shape(self.inputs, out_type=tf.int64)[0] * tf.reduce_sum(tf.square(self.y_target - self.output)) # useful for val
        # self.loss_ = 1/tf.cast(tf.size(self.inputs),tf.float32) * tf.reduce_sum(tf.square(self.y_target - self.output))
        self.optimizer = self.opt_method.minimize(self.loss_, global_step=self.batch)
        return
def get_weights(self, sess):
'''method to return current params - yes it rly does seem this hard..'''
if self.activation_fn != 'rbf':
ops = [self.layer_1_w.kernel, self.layer_1_w.bias, self.output_w.kernel]
else:
ops = [self.c, self.beta, self.w2_rbf]
w1, b1, w2 = sess.run(ops)
# b2 = sess.run(self.output_w.bias)
return w1, b1, w2
    def anchor(self, sess, lambda_anchor, regularise=False, unconstrain=False):
        '''Add the anchoring term to the loss and rebuild the optimiser.

        Captures the current (initial) weights and penalises the squared
        distance from them, scaled per-parameter-group by lambda_anchor
        = [w1, b1, w2] coefficients.

        regularise: anchor to zero instead (plain L2 regularisation).
        unconstrain: zero out lambda_anchor, i.e. disable the prior.
        '''
        # lambda_anchor=[0,0,0] # hardcode for testing effect of anchoring
        # regularise = True ### hardcode for testing effect of anchoring
        # lambda_anchor = lambda_anchor*0.01
        # print('\nlambda_anchor',lambda_anchor)
        if unconstrain:
            # turn off effect of prior
            lambda_anchor=[0,0,0]
            print('unconstraining!!!')
        if regularise:
            # to do normal regularisation
            print('regularising!!!')
            w1, b1, w2 = self.get_weights(sess)
            self.w1_init, self.b1_init, self.w2_init = np.zeros_like(w1),np.zeros_like(b1),np.zeros_like(w2) # overwrite for normal regulariser
        else:
            # get weights
            w1, b1, w2 = self.get_weights(sess)
            # set around initial params
            self.w1_init, self.b1_init, self.w2_init = w1, b1, w2
        # print('w1_init',self.w1_init)
        # print('b1_init',self.b1_init)
        # print('w2_init',self.w2_init)
        if self.activation_fn != 'rbf':
            # set squared loss around it
            loss_anchor = lambda_anchor[0]*tf.reduce_sum(tf.square(self.w1_init - self.layer_1_w.kernel))
            loss_anchor += lambda_anchor[1]*tf.reduce_sum(tf.square(self.b1_init - self.layer_1_w.bias))
            loss_anchor += lambda_anchor[2]*tf.reduce_sum(tf.square(self.w2_init - self.output_w.kernel))
        else:
            # rbf network: anchor centres, widths and output weights instead
            loss_anchor = lambda_anchor[0]*tf.reduce_sum(tf.square(self.w1_init - self.c))
            loss_anchor += lambda_anchor[1]*tf.reduce_sum(tf.square(self.b1_init - self.beta))
            loss_anchor += lambda_anchor[2]*tf.reduce_sum(tf.square(self.w2_init - self.w2_rbf))
        # combine with original loss, normalised by the batch size
        self.loss_ = self.loss_ + 1/tf.shape(self.inputs, out_type=tf.int64)[0] * loss_anchor
        # I spent a long time analysing if we need to divide this by n
        # although we should in the eqn, actually tf doesn't repeat loss_anchor
        # n times, so no need!
        # 25 aug, actually I got this wrong - do need to. cost me a lot of time...
        # reset optimiser so it minimises the anchored loss
        self.optimizer = self.opt_method.minimize(self.loss_, global_step=self.batch)
        return
def get_weights_deep(self, sess):
'''method to return current params - yes it rly does seem this hard..'''
ops = [self.layer_1_w.kernel, self.layer_1_w.bias, self.layer_2_w.kernel, self.layer_2_w.bias, self.output_w.kernel]
w1, b1, w2, b2, w3 = sess.run(ops)
# b2 = sess.run(self.output_w.bias)
return w1, b1, w2, b2, w3
    def anchor_deep(self, sess, lambda_anchor, regularise=False, unconstrain=False):
        '''Augment self.loss_ with an anchoring penalty for the deep (two-hidden-layer) net.

        Same contract as anchor(), but anchors five parameter groups
        (w1, b1, w2, b2, w3).  Raises for rbf activations, which the deep
        architecture does not support.

        Side effects: sets self.{w1,b1,w2,b2,w3}_init, replaces self.loss_
        and rebuilds self.optimizer.
        '''
        if unconstrain:
            # turn off effect of prior
            lambda_anchor=[0,0,0,0,0]
            print('unconstraining!!!')
        if regularise:
            # to do normal regularisation
            print('regularising!!!')
            w1, b1, w2, b2, w3 = self.get_weights_deep(sess)
            self.w1_init, self.b1_init, self.w2_init, self.b2_init, self.w3_init = np.zeros_like(w1),np.zeros_like(b1),np.zeros_like(w2),np.zeros_like(b2),np.zeros_like(w3)
        else:
            # get weights
            w1, b1, w2, b2, w3 = self.get_weights_deep(sess)
            # set around initial params
            self.w1_init, self.b1_init, self.w2_init, self.b2_init, self.w3_init = w1, b1, w2, b2, w3
            # print('w1_init',self.w1_init)
        if self.activation_fn == 'rbf':
            raise Exception('tp: deep NN not set up for rbf activations')
        # set squared loss around it
        loss_anchor = lambda_anchor[0]*tf.reduce_sum(tf.square(self.w1_init - self.layer_1_w.kernel))
        loss_anchor += lambda_anchor[1]*tf.reduce_sum(tf.square(self.b1_init - self.layer_1_w.bias))
        loss_anchor += lambda_anchor[2]*tf.reduce_sum(tf.square(self.w2_init - self.layer_2_w.kernel))
        # NOTE(review): lambda_anchor[2] is reused for the next two terms.
        # Callers (train) supply only three coefficients, so this looks
        # deliberate, but confirm b2/w3 should share the layer-2 coefficient.
        loss_anchor += lambda_anchor[2]*tf.reduce_sum(tf.square(self.b2_init - self.layer_2_w.bias))
        loss_anchor += lambda_anchor[2]*tf.reduce_sum(tf.square(self.w3_init - self.output_w.kernel))
        # combine with original loss, scaled by 1/n
        self.loss_ = self.loss_ + 1/tf.shape(self.inputs, out_type=tf.int64)[0] * loss_anchor
        # I spent a long time analysing if we need to divide this by n
        # although we should in the eqn, actually tf doesn't repeat loss_anchor
        # n times, so no need!
        # 25 aug, actually I got this wrong - do need to. cost me a lot of time...
        # reset optimiser
        self.optimizer = self.opt_method.minimize(self.loss_, global_step=self.batch)
        return
def predict(self, x, sess):
feed = {self.inputs: x}
y_pred = sess.run(self.output, feed_dict=feed)
return y_pred
class NN_ens:
def __init__(self,
activation_fn,
data_noise,
b_0_var=1.0, w_0_var=1.0, u_var=1.0, g_var=1.0,
optimiser_in = 'adam',
learning_rate = 0.001,
hidden_size = 100,
n_epochs = 100,
cycle_print = 10,
n_ensembles = 3,
regularise = False,
unconstrain = False,
drop_out = False,
deep_NN = False,
batch_size = 32,
total_trained=0,
decay_rate=1.0
):
''' create object that will be an ensemble of NNs '''
self.activation_fn = activation_fn
self.data_noise = data_noise
self.optimiser_in = optimiser_in
self.learning_rate = learning_rate
self.decay_rate = decay_rate
self.hidden_size = hidden_size
self.n_epochs = n_epochs
self.cycle_print = cycle_print
self.n_ensembles = n_ensembles
self.regularise = regularise # regularise around zero, not anchor
self.unconstrain = unconstrain # set regularisation lambdas to zero
self.drop_out = drop_out # use dropout for training and test time
self.deep_NN = deep_NN # use more than one hidden layer
self.total_trained = total_trained
self.batch_size = batch_size
self.drop_out = drop_out
if self.drop_out:
self.name_ = 'NN_drop_h' + str(hidden_size) + '_ens' + str(n_ensembles+total_trained)
elif self.regularise:
self.name_ = 'NN_regular_h' + str(hidden_size) + '_ens' + str(n_ensembles+total_trained)
elif self.unconstrain:
self.name_ = 'NN_uncons_h' + str(hidden_size) + '_ens' + str(n_ensembles+total_trained)
elif self.deep_NN:
self.name_ = 'NN_deepanch_h' + str(hidden_size) + '_ens' + str(n_ensembles+total_trained)
else:
self.name_ = 'NN_anch_h' + str(hidden_size) + '_ens' + str(n_ensembles+total_trained)
# variance for step fn, relu, erf
self.b_0_var = b_0_var # first layer bias variance
self.w_0_var = w_0_var # first layer weight variance
# variance for rbf - we use williams 1996 notation
# i.e. node = exp(-(x-u)^2 / 2*var_g)
self.g_var = g_var # param of rbf fn (fixed)
self.u_var = u_var # var of centers, as -> inf, goes to stationary cov dist
# place holders
self.mse_unnorm = 0.
self.rmse = 0.
self.nll = 0.
return
def train(self, X_train, y_train, X_val=None, y_val=None, is_print=True):
''' train an ensemble of NNs '''
# note we use different notation in this file,
# so b_1 is first bias - elsewhere we call this b_0
if self.activation_fn == 'relu' or self.activation_fn == 'softplus' or self.activation_fn == 'Lrelu':
init_stddev_1_w = np.sqrt(self.w_0_var) # /np.sqrt(self.hidden_size)
init_stddev_1_b = np.sqrt(self.b_0_var) # /np.sqrt(self.hidden_size)
init_stddev_2_w = 1.0/np.sqrt(self.hidden_size)#*np.sqrt(10) # 2nd layer init. dist
# init_stddev_2_w = np.sqrt(10.0)/np.sqrt(self.hidden_size)#*np.sqrt(10) # 2nd layer init. dist
lambda_anchor = self.data_noise/(np.array([init_stddev_1_w,init_stddev_1_b,init_stddev_2_w*1])**2)#/X_train.shape[0]
# lambda_anchor = [0.,0.,0.]
elif self.activation_fn == 'tanh' or self.activation_fn == 'erf':
init_stddev_1_w = np.sqrt(self.w_0_var) # 1st layer init. dist for weights
init_stddev_1_b = np.sqrt(self.b_0_var) # for bias
init_stddev_2_w = 1.0/np.sqrt(self.hidden_size) # 2nd layer init. dist
# lambda_anchor = [0.,0.,0.] # lambda for weight layer 1, bias layer 1, weight layer 2
lambda_anchor = self.data_noise/(np.array([init_stddev_1_w,init_stddev_1_b,init_stddev_2_w])**2)
elif self.activation_fn == 'rbf':
init_stddev_1_w = np.sqrt(self.u_var) # centres = sig_u
init_stddev_1_b = np.sqrt(self.g_var) # fixed /beta
init_stddev_2_w = 1.0/np.sqrt(self.hidden_size) # 2nd layer init. dist
lambda_anchor = self.data_noise/(np.array([init_stddev_1_w,init_stddev_1_b,init_stddev_2_w])**2)
n = X_train.shape[0]
X_dim = X_train.shape[1]
y_dim = 1 #y_train.shape[1]
# batch_size = n
# --- ensembles w proper anchoring! ---
NNs=[]
y_pred=[]
y_prior=[]
tf.reset_default_graph()
sess = tf.Session()
for ens in range(0,self.n_ensembles):
if is_print:
print('\n\n-- working on ensemble number '+ str(self.total_trained + ens) + ' ---')
else:
print('-- working on ensemble number '+ str(self.total_trained + ens) + ' ---', end='\r')
# create a NN
NNs.append(NN(self.activation_fn, X_dim, y_dim, self.hidden_size,
init_stddev_1_w, init_stddev_1_b, init_stddev_2_w,
self.optimiser_in, n, self.learning_rate, decay_rate=self.decay_rate, drop_out=self.drop_out, deep_NN=self.deep_NN))
# sess.run(tf.global_variables_initializer()) # must do this after NN created
# sess.run(tf.initialize_variables([NNs[ens].layer_1_w.kernel, NNs[ens].layer_1_w.bias, NNs[ens].output_w.kernel]))
# initialise only unitialized variables
global_vars = tf.global_variables()
is_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])
not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]
if len(not_initialized_vars):
sess.run(tf.variables_initializer(not_initialized_vars))
# set anchoring
if self.deep_NN == False:
NNs[ens].anchor(sess, lambda_anchor, regularise=self.regularise, unconstrain=self.unconstrain)
else:
NNs[ens].anchor_deep(sess, lambda_anchor, regularise=self.regularise, unconstrain=self.unconstrain)
# prior
# if X_val != None:
y_prior.append(NNs[ens].predict(X_val, sess))
# do training
feed = {}
feed[NNs[ens].inputs] = X_train
feed[NNs[ens].y_target] = y_train
# feed[NNs[ens].l_rate_in] = 0.1
# print('\n\nhhhhh\n\n\n')
# print(self.learning_rate)
# if | |
<filename>tests/unit/test_connection.py<gh_stars>0
# -*- coding: utf-8 -*-
###
# (C) Copyright [2020] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import json
import ssl
import unittest
import mmap
import os
import shutil
import os.path
from mock import patch, call, Mock, ANY
from http.client import HTTPSConnection, BadStatusLine, HTTPException
from hpeOneView.connection import connection
from hpeOneView.exceptions import HPEOneViewException
class ConnectionTest(unittest.TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host, 800)
self.accept_language_header = {
'Accept-Language': 'en_US'
}
self.default_headers = {
'X-API-Version': 800,
'Accept': 'application/json',
'Content-Type': 'application/json'
}
self.default_headers_with_etag_validation_off = {
'X-API-Version': 800,
'Accept': 'application/json',
'Content-Type': 'application/json',
'If-Match': '*'
}
self.merged_headers = {
'X-API-Version': 800,
'Accept': 'application/json',
'Content-Type': 'application/json',
'Accept-Language': 'en_US'
}
self.request_body = {"request body": "content"}
self.response_body = {"response body": "content",
"message": "An error occurred."}
self.dumped_request_body = json.dumps(self.request_body.copy())
self.expected_response_body = self.response_body.copy()
def __make_http_response(self, status):
mock_response = Mock(status=status)
mock_response.read.return_value = json.dumps(self.response_body).encode('utf-8')
if status == 200 or status == 202:
mock_response.getheader.return_value = '/task/uri'
return mock_response
def __create_fake_mapped_file(self):
mock_mapped_file = Mock()
mock_mapped_file.tell.side_effect = [0, 1048576, 2097152, 2621440] # 0, 1MB, 2MB 2.5MB
mock_mapped_file.size.return_value = 2621440 # 2.5MB
mock_mapped_file.read.side_effect = ['data chunck 1', 'data chunck 2', 'data chunck 3']
return mock_mapped_file
def __prepare_connection_to_post_multipart(self, response_status=200):
fake_connection = Mock()
fake_connection.getresponse.return_value.read.return_value = json.dumps(self.response_body).encode('utf-8')
fake_connection.getresponse.return_value.status = response_status
self.connection.get_connection = Mock()
self.connection.get_connection.return_value = fake_connection
self.connection._open = Mock()
self.connection._headers['auth'] = '<KEY>'
encode_multipart = "multipart/form-data; boundary=----------ThIs_Is_tHe_bouNdaRY_$"
self.connection.encode_multipart_formdata = Mock()
self.connection.encode_multipart_formdata.return_value = encode_multipart
def test_default_headers(self):
self.assertEqual(self.default_headers, self.connection._headers)
def test_default_headers_when_etag_validation_is_disabled(self):
self.connection.disable_etag_validation()
self.assertEqual(self.default_headers_with_etag_validation_off, self.connection._headers)
def test_default_headers_when_etag_validation_is_enabled(self):
self.connection.enable_etag_validation()
self.assertEqual(self.default_headers, self.connection._headers)
def test_default_headers_when_etag_validation_is_disabled_and_enabled(self):
self.connection.disable_etag_validation()
self.connection.enable_etag_validation()
self.assertEqual(self.default_headers, self.connection._headers)
def test_default_headers_when_etag_validation_is_enabled_and_disabled(self):
self.connection.enable_etag_validation()
self.connection.disable_etag_validation()
self.assertEqual(self.default_headers_with_etag_validation_off, self.connection._headers)
def test_headers_with_api_version_800(self):
self.connection = connection(self.host, 800)
expected_headers = self.default_headers.copy()
expected_headers['X-API-Version'] = 800
self.assertEqual(expected_headers, self.connection._headers)
@patch.object(connection, 'get')
def test_headers_with_default_api_version_800(self, mock_get):
self.connection = connection(self.host)
self.connection._apiVersion = None
mock_get.side_effect = [{'minimumVersion': 400, 'currentVersion': 1800}]
expected_version = self.connection.get_default_api_version()
self.assertEqual(expected_version, 1800)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_post_when_status_is_202_and_task_contains_taskState(self, mock_response, mock_request):
mock_request.return_value = {}
fake_task = {"taskState": "Completed"}
response = Mock(status=202)
response.read.return_value = json.dumps(fake_task).encode('utf-8')
response.getheader.return_value = ''
mock_response.return_value = response
task, body = self.connection.post('/path', self.request_body)
self.assertEqual(task, fake_task)
self.assertEqual(body, fake_task)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_post_when_status_is_202_and_response_is_not_a_task(self, mock_response, mock_request):
mock_request.return_value = {}
response = Mock(status=202)
response.read.return_value = json.dumps(self.response_body).encode('utf-8')
response.getheader.return_value = ''
mock_response.return_value = response
task, body = self.connection.post('/path', self.request_body)
self.assertEqual(task, None)
self.assertEqual(body, self.response_body)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_post_should_do_rest_call_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
self.connection.post('/path', self.request_body)
mock_request.assert_called_once_with('POST', '/path', self.dumped_request_body, self.default_headers)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_post_should_do_rest_calls_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.post('/path', self.request_body)
expected_calls = [call('POST', '/path', self.dumped_request_body, self.default_headers),
call('GET', '/task/uri', '', self.default_headers)]
self.assertEqual(expected_calls, mock_request.call_args_list)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_post_should_send_merged_headers_when_headers_provided(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.post('/path', self.request_body, custom_headers=self.accept_language_header)
expected_calls = [call('POST', ANY, ANY, self.merged_headers), ANY]
self.assertEqual(expected_calls, mock_request.call_args_list)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_post_should_return_body_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
result = self.connection.post('/path', self.response_body, custom_headers=self.accept_language_header)
expected_result = (None, self.expected_response_body)
self.assertEqual(expected_result, result)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_post_should_return_tuple_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
result = self.connection.post('/path', self.response_body, custom_headers=self.accept_language_header)
expected_result = (self.expected_response_body, self.expected_response_body)
self.assertEqual(result, expected_result)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_post_should_raise_exception_when_status_internal_error(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=400)
try:
self.connection.post('/path', self.request_body)
except HPEOneViewException as e:
self.assertEqual(e.oneview_response, self.expected_response_body)
else:
self.fail()
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_post_should_raise_exception_when_status_not_found(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=404)
try:
self.connection.post('/path', self.request_body)
except HPEOneViewException as e:
self.assertEqual(e.oneview_response, self.expected_response_body)
else:
self.fail()
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_put_should_do_rest_call_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
self.connection.put('/path', self.request_body)
mock_request.assert_called_once_with('PUT', '/path', self.dumped_request_body, self.default_headers)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_put_should_do_rest_calls_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.put('/path', self.request_body)
expected_calls = [call('PUT', '/path', self.dumped_request_body, self.default_headers),
call('GET', '/task/uri', '', self.default_headers)]
self.assertEqual(expected_calls, mock_request.call_args_list)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_put_should_send_merged_headers_when_headers_provided(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.put('/path', self.request_body, custom_headers=self.accept_language_header)
expected_calls = [call('PUT', ANY, ANY, self.merged_headers), ANY]
self.assertEqual(expected_calls, mock_request.call_args_list)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_put_should_return_body_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
result = self.connection.put('/path', self.response_body, custom_headers=self.accept_language_header)
expected_result = (None, self.expected_response_body)
self.assertEqual(result, expected_result)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_put_should_return_tuple_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
result = self.connection.put('/path', self.response_body, custom_headers=self.accept_language_header)
expected_result = (self.expected_response_body, self.expected_response_body)
self.assertEqual(result, expected_result)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_put_should_raise_exception_when_status_internal_error(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=400)
try:
self.connection.put('/path', self.request_body)
except HPEOneViewException as e:
self.assertEqual(e.oneview_response, self.expected_response_body)
else:
self.fail()
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_put_should_raise_exception_when_status_not_found(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=404)
try:
self.connection.put('/path', self.request_body)
except HPEOneViewException as e:
self.assertEqual(e.oneview_response, self.expected_response_body)
else:
self.fail()
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_patch_should_do_rest_call_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
self.connection.patch('/path', self.request_body)
mock_request.assert_called_once_with('PATCH', '/path', self.dumped_request_body, self.default_headers)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_patch_should_do_rest_calls_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.patch('/path', self.request_body)
expected_calls = [call('PATCH', '/path', self.dumped_request_body, self.default_headers),
call('GET', '/task/uri', '', self.default_headers)]
self.assertEqual(expected_calls, mock_request.call_args_list)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_patch_should_send_merged_headers_when_headers_provided(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.patch('/path', self.request_body, custom_headers=self.accept_language_header)
expected_calls = [call('PATCH', ANY, ANY, self.merged_headers), ANY]
self.assertEqual(expected_calls, mock_request.call_args_list)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_patch_should_return_body_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
result = self.connection.patch('/path', self.response_body, custom_headers=self.accept_language_header)
expected_result = (None, self.expected_response_body)
self.assertEqual(result, expected_result)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_patch_should_return_tuple_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
result = self.connection.patch('/path', self.response_body, custom_headers=self.accept_language_header)
expected_result = (self.expected_response_body, self.expected_response_body)
self.assertEqual(result, expected_result)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_patch_should_raise_exception_when_status_internal_error(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=400)
try:
self.connection.patch('/path', self.request_body)
except HPEOneViewException as e:
self.assertEqual(e.oneview_response, self.expected_response_body)
else:
self.fail()
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_patch_should_raise_exception_when_status_not_found(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=404)
try:
self.connection.patch('/path', self.request_body)
except HPEOneViewException as e:
self.assertEqual(e.oneview_response, self.expected_response_body)
else:
self.fail()
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_delete_should_do_rest_calls_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
self.connection.delete('/path')
mock_request.assert_called_once_with('DELETE', '/path', json.dumps({}), self.default_headers)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_delete_should_do_rest_calls_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.delete('/path')
expected_calls = [call('DELETE', '/path', json.dumps({}), self.default_headers),
call('GET', '/task/uri', '', self.default_headers)]
self.assertEqual(expected_calls, mock_request.call_args_list)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_delete_should_send_merged_headers_when_headers_provided(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.delete('/path', custom_headers=self.accept_language_header)
expected_calls = [call('DELETE', ANY, ANY, self.merged_headers), ANY]
self.assertEqual(expected_calls, mock_request.call_args_list)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_delete_should_return_body_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
result = self.connection.delete('/path', custom_headers=self.accept_language_header)
expected_result = (None, self.expected_response_body)
self.assertEqual(result, expected_result)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_delete_should_return_tuple_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
result = self.connection.delete('/path', custom_headers=self.accept_language_header)
expected_result = (self.expected_response_body, self.expected_response_body)
self.assertEqual(result, expected_result)
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_delete_should_raise_exception_when_status_internal_error(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=400)
try:
self.connection.delete('/path')
except HPEOneViewException as e:
self.assertEqual(e.oneview_response, self.expected_response_body)
else:
self.fail()
@patch.object(HTTPSConnection, 'request')
@patch.object(HTTPSConnection, 'getresponse')
def test_delete_should_raise_exception_when_status_not_found(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=404)
try:
self.connection.delete('/path', self.request_body)
except HPEOneViewException as e:
self.assertEqual(e.oneview_response, self.expected_response_body)
else:
self.fail()
@patch.object(connection, 'do_http')
def test_task_in_response_body_without_202_status(self, mock_do_http):
# create the return values
mockedResponse = type('mockResponse', (), {'status': 200})()
mockedTaskBody = {'category': 'tasks'}
# set-up the mock
mock_do_http.return_value = (mockedResponse, mockedTaskBody)
# call the method we are testing
(testTask, testBody) = self.connection._connection__do_rest_call('PUT', '/rest/test', '{ "body": "test" }',
None)
# verify the result
self.assertEqual(mockedTaskBody, testTask)
self.assertEqual(mockedTaskBody, testBody)
@patch.object(connection, 'do_http')
def test_do_rest_call_with_304_status(self, mock_do_http):
mockedResponse = type('mockResponse', (), {'status': 304})()
mock_do_http.return_value = (mockedResponse, '{ "body": "test" }')
(testTask, testBody) = self.connection._connection__do_rest_call('PUT',
'/rest/test',
'{ "body": "test" }',
None)
self.assertIsNone(testTask)
self.assertEqual(testBody, {"body": "test"})
@patch.object(connection, 'do_http')
def test_do_rest_call_with_304_status_and_invalid_json(self, mock_do_http):
mockedResponse = type('mockResponse', (), {'status': 304})()
mock_do_http.return_value = (mockedResponse, 111)
(testTask, testBody) = self.connection._connection__do_rest_call('PUT',
'/rest/test',
111,
None)
self.assertIsNone(testTask)
self.assertEqual(testBody, 111)
@patch('time.sleep')
@patch.object(connection, 'get_connection')
def test_download_to_stream_when_status_ok(self, mock_get_conn, mock_sleep):
mock_conn = Mock()
# First attempt: Error, second attempt: successful connection
mock_get_conn.side_effect = [BadStatusLine(0), mock_conn]
mock_response = mock_conn.getresponse.return_value
# Stops at the fourth read call
mock_response.read.side_effect = ['111', '222', '333', None]
mock_response.status = 200
mock_stream = Mock()
result = self.connection.download_to_stream(mock_stream, '/rest/download.zip',
custom_headers={'custom': 'custom'})
self.assertTrue(result)
mock_stream.write.assert_has_calls([call('111'), call('222'), call('333')])
@patch.object(connection, 'get_connection')
def test_download_to_stream_handling_of_status_302(self, mock_get_conn):
# Mocking two responses as the first response would be redirect status 302 with header
# | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Stolen and slightly modified from https://github.com/Dinnerbone/mcstatus
"TCP and UDP Connections, both asynchronous and not."
# This version of mcstatus's connection module
# has support for varlongs and has general text reformatting
# and spaceing changes. Slight changes to Asyncronous TCP and UDP
# closing as well.
__all__ = ['BaseConnection', 'Connection', 'AsyncReadConnection',
'TCPSocketConnection', 'UDPSocketConnection',
'TCPAsyncSocketConnection', 'UDPAsyncSocketConnection']
from abc import abstractmethod, ABC
import socket
import struct
import asyncio
from ctypes import c_uint32 as unsigned_int32
from ctypes import c_int32 as signed_int32
from ctypes import c_uint64 as unsigned_int64
from ctypes import c_int64 as signed_int64
import asyncio_dgram
from status.address_tools import ip_type
class BaseConnection(ABC):
"Base connection class."
    @abstractmethod
    def read(self, length: int) -> bytearray:
        """Read exactly `length` bytes from the connection and return them as a bytearray.

        Implemented by the concrete TCP/UDP, sync/async subclasses."""
        ...
    @abstractmethod
    def write(self, data: bytes) -> None:
        """Write `data` to the connection.

        Implemented by the concrete TCP/UDP, sync/async subclasses."""
        ...
def __repr__(self) -> str:
"Return representation of self."
return f'{self.__class__.__name__} Object'
def flush(self) -> None:
"Raise TypeError, unsupported."
raise TypeError(f'{self.__class__.__name__} does not support flush()')
def receive(self, data: None) -> None:
"Raise TypeError, unsupported."
raise TypeError(f'{self.__class__.__name__} does not support receive()')
def remaining(self) -> None:
"Raise TypeError, unsupported."
raise TypeError(f'{self.__class__.__name__} does not support remaining()')
@staticmethod
def _unpack(format_, data: bytes) -> str:
"Unpack data as bytes with format in big-enidian."
return struct.unpack('>' + format_, bytes(data))[0]
@staticmethod
def _pack(format_, data: str) -> bytes:
"Pack data in with format in big-endian mode."
return struct.pack('>' + format_, data)
def read_varint(self) -> int:
"""Read varint from self and return it.
Max: 2 ** 31 - 1, Min: -(2 ** 31)
Raises IOError when varint recieved is too big."""
result = 0
for i in range(5):
part = self.read(1)[0]
result |= (part & 0x7F) << (7 * i)
if not part & 0x80:
return signed_int32(result).value
raise IOError('Recieved varint is too big!')
def write_varint(self, value: int) -> None:
"""Write varint with value value to self.
Max: 2 ** 31 - 1, Min: -(2 ** 31).
Raises ValueError if varint is too big."""
remaining = unsigned_int32(value).value
for _ in range(5):
if not remaining & -0x80:#remaining & ~0x7F == 0:
self.write(struct.pack('!B', remaining))
if value > 2 ** 31 - 1 or value < -(2 ** 31):
break
return
self.write(struct.pack('!B', remaining & 0x7F | 0x80))
remaining >>= 7
raise ValueError(f'The value "{value}" is too big to send in a varint')
def read_varlong(self) -> int:
"""Read varlong from self and return it.
Max: 2 ** 63 - 1, Min: -(2 ** 63).
Raises IOError when varint recieved is too big."""
result = 0
for i in range(10):
part = self.read(1)[0]
result |= (part & 0x7F) << (7 * i)
if not part & 0x80:
return signed_int64(result).value
raise IOError('Recieved varlong is too big!')
def write_varlong(self, value: int) -> None:
    """Write *value* to self as a protocol varlong (at most 10 bytes).

    Range: -(2 ** 63) .. 2 ** 63 - 1.
    Raises ValueError if *value* is outside the signed 64-bit range."""
    # Reinterpret as unsigned so negatives use their two's-complement
    # bit pattern before being split into 7-bit groups.
    remaining = unsigned_int64(value).value
    for _ in range(10):
        if not remaining & -0x80:  # true when remaining & ~0x7F == 0
            self.write(struct.pack('!B', remaining))
            # BUG FIX: the lower bound previously checked -(2 ** 31)
            # (copied from write_varint), wrongly raising ValueError for
            # valid 64-bit values in [-(2 ** 63), -(2 ** 31)).
            if value > 2 ** 63 - 1 or value < -(2 ** 63):
                break  # out of range: fall through to the ValueError
            return
        # Emit the low 7 bits with the continuation bit (0x80) set.
        self.write(struct.pack('!B', remaining & 0x7F | 0x80))
        remaining >>= 7
    raise ValueError(f'The value "{value}" is too big to send in a varlong')
def read_utf(self) -> str:
    "Read a varint byte-length prefix, then that many bytes decoded as UTF-8."
    size = self.read_varint()
    payload = self.read(size)
    return payload.decode('utf8')
def write_utf(self, value: str) -> None:
    "Write a varint byte-length prefix, then *value* encoded as UTF-8."
    data = bytearray(value, 'utf8')
    # BUG FIX: the prefix must be the *encoded byte* count, not the
    # character count (len(value)); otherwise read_utf() under-reads
    # for any non-ASCII text and the stream desynchronizes.
    self.write_varint(len(data))
    self.write(data)
def read_ascii(self) -> str:
    "Read bytes up to and including a NUL terminator; decode as ISO-8859-1."
    buf = bytearray()
    while not buf or buf[-1] != 0:
        buf.extend(self.read(1))
    # Drop the trailing NUL before decoding.
    return buf[:-1].decode('ISO-8859-1')
def write_ascii(self, value: str) -> None:
    "Write *value* encoded as ISO-8859-1, followed by a NUL terminator."
    self.write(bytearray(value, 'ISO-8859-1'))
    self.write(bytearray(b'\x00'))
def read_short(self) -> int:
    "Read 2 bytes as a big-endian signed 16-bit integer (-32768..32767)."
    raw = self.read(2)
    return self._unpack('h', raw)
def write_short(self, value: int) -> None:
    "Write *value* as a big-endian signed 16-bit integer (-32768..32767)."
    packed = self._pack('h', value)
    self.write(packed)
def read_ushort(self) -> int:
    "Read 2 bytes as a big-endian unsigned 16-bit integer (0..65535)."
    raw = self.read(2)
    return self._unpack('H', raw)
def write_ushort(self, value: int) -> None:
    "Write *value* as a big-endian unsigned 16-bit integer (0..65535)."
    packed = self._pack('H', value)
    self.write(packed)
def read_int(self) -> int:
    "Read 4 bytes as a big-endian signed 32-bit integer (-2147483648..2147483647)."
    raw = self.read(4)
    return self._unpack('i', raw)
def write_int(self, value: int) -> None:
    "Write *value* as a big-endian signed 32-bit integer (-2147483648..2147483647)."
    packed = self._pack('i', value)
    self.write(packed)
def read_uint(self) -> int:
    "Read 4 bytes as a big-endian unsigned 32-bit integer (0..4294967295)."
    raw = self.read(4)
    return self._unpack('I', raw)
def write_uint(self, value: int) -> None:
    "Write *value* as a big-endian unsigned 32-bit integer (0..4294967295)."
    packed = self._pack('I', value)
    self.write(packed)
def read_long(self) -> int:
    "Read 8 bytes as a big-endian signed 64-bit integer."
    raw = self.read(8)
    return self._unpack('q', raw)
def write_long(self, value: int) -> None:
    "Write *value* as a big-endian signed 64-bit integer."
    packed = self._pack('q', value)
    self.write(packed)
def read_ulong(self) -> int:
    "Read 8 bytes as a big-endian unsigned 64-bit integer (0..18446744073709551615)."
    raw = self.read(8)
    return self._unpack('Q', raw)
def write_ulong(self, value: int) -> None:
    "Write *value* as a big-endian unsigned 64-bit integer (0..18446744073709551615)."
    packed = self._pack('Q', value)
    self.write(packed)
def read_buffer(self):
    """Read a varint length prefix, then wrap that many bytes in a
    fresh Connection and return it."""
    size = self.read_varint()
    conn = Connection()
    conn.receive(self.read(size))
    return conn
def write_buffer(self, buffer) -> None:
    """Drain *buffer* and write its contents to self, preceded by a
    varint length prefix."""
    payload = buffer.flush()
    self.write_varint(len(payload))
    self.write(payload)
class Connection(BaseConnection):
    "In-memory connection backed by separate send and receive buffers."
    def __init__(self):
        "Start with empty sent/received buffers."
        self.sent = bytearray()
        self.received = bytearray()
    def read(self, length: int) -> bytes:
        "Pop and return up to *length* bytes from the receive buffer."
        chunk, self.received = self.received[:length], self.received[length:]
        return chunk
    def write(self, data: bytearray) -> None:
        "Append *data* (Connection, str, or bytes-like) to the send buffer."
        if isinstance(data, Connection):
            data = data.flush()
        if isinstance(data, str):
            data = bytearray(data, 'utf-8')
        self.sent.extend(data)
    def receive(self, data: bytearray) -> None:
        "Append *data* to the receive buffer."
        if not isinstance(data, bytearray):
            data = bytearray(data)
        self.received.extend(data)
    def remaining(self) -> int:
        "Number of unread bytes left in the receive buffer."
        return len(self.received)
    def flush(self) -> bytearray:
        "Return the send buffer's contents and reset it to empty."
        out, self.sent = self.sent, bytearray()
        return out
class AsyncReadConnection(BaseConnection):
    """Base class adding awaitable versions of the read helpers.

    Mirrors the synchronous readers, but every primitive read awaits
    ``self.read``.  (Fixed: ``read_buffer`` was annotated ``-> int``
    although it returns a Connection.)
    """
    async def read_varint(self) -> int:
        "Read a protocol varint; raises IOError past 5 bytes."
        value = 0
        shift = 0
        for _ in range(5):
            byte = (await self.read(1))[0]
            value |= (byte & 0x7F) << shift
            if not byte & 0x80:
                return signed_int32(value).value
            shift += 7
        raise IOError('Recieved a varint that was too big!')
    async def read_varlong(self) -> int:
        "Read a protocol varlong; raises IOError past 10 bytes."
        value = 0
        shift = 0
        for _ in range(10):
            byte = (await self.read(1))[0]
            value |= (byte & 0x7F) << shift
            if not byte & 0x80:
                return signed_int64(value).value
            shift += 7
        raise IOError('Recieved varlong is too big!')
    async def read_utf(self) -> str:
        "Read a varint byte-length prefix, then that many bytes as UTF-8."
        size = await self.read_varint()
        return (await self.read(size)).decode('utf8')
    async def read_ascii(self) -> str:
        "Read until a NUL byte; decode everything before it as ISO-8859-1."
        buf = bytearray()
        while not buf or buf[-1] != 0:
            buf.extend(await self.read(1))
        return buf[:-1].decode('ISO-8859-1')
    async def read_short(self) -> int:
        "Big-endian signed 16-bit read."
        return self._unpack('h', await self.read(2))
    async def read_ushort(self) -> int:
        "Big-endian unsigned 16-bit read."
        return self._unpack('H', await self.read(2))
    async def read_int(self) -> int:
        "Big-endian signed 32-bit read."
        return self._unpack('i', await self.read(4))
    async def read_uint(self) -> int:
        "Big-endian unsigned 32-bit read."
        return self._unpack('I', await self.read(4))
    async def read_long(self) -> int:
        "Big-endian signed 64-bit read."
        return self._unpack('q', await self.read(8))
    async def read_ulong(self) -> int:
        "Big-endian unsigned 64-bit read."
        return self._unpack('Q', await self.read(8))
    async def read_buffer(self):
        "Read a varint length prefix and wrap the payload in a Connection."
        size = await self.read_varint()
        conn = Connection()
        conn.receive(await self.read(size))
        return conn
class SocketConnection(BaseConnection):
    "Connection backed by a real OS socket."
    __slots__ = ('socket',)
    def __init__(self):
        "Create a fresh, unconnected TCP socket."
        self.socket = socket.socket()
    def close(self) -> None:
        "Shut down both directions of self.socket, then close it."
        self.socket.shutdown(socket.SHUT_RDWR)
        self.socket.close()
    def __del__(self) -> None:
        "Best-effort close when the object is garbage collected."
        # BUG FIX: shutdown() raises OSError on a socket that was never
        # connected (or already closed); an exception must not escape a
        # finalizer, so swallow it here.
        try:
            self.close()
        except OSError:
            pass
class TCPSocketConnection(SocketConnection):
    "TCP connection to addr; timeout defaults to 3 seconds."
    def __init__(self, addr, timeout=3):
        # NOTE(review): the old docstring claimed TCP_NODELAY is enabled,
        # but no setsockopt call is made here — confirm against callers.
        "Connect self.socket to addr with the given timeout."
        self.socket = socket.create_connection(addr, timeout=timeout)
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_pod`")
resource_path = '/api/v1/namespaces/{namespace}/pods/{name}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
if 'name' in params:
path_params['name'] = params['name']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'export' in params:
query_params['export'] = params['export']
if 'exact' in params:
query_params['exact'] = params['exact']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='V1Pod',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def replace_namespaced_pod(self, body, namespace, name, **kwargs):
    """
    Replace the specified Pod.

    Synchronous by default; pass a `callback` keyword argument to make
    the request asynchronous — the callback is invoked with the response
    and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param V1Pod body: (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str name: name of the Pod (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1Pod
             If the method is called asynchronously, returns the request thread.
    """
    allowed = ('body', 'namespace', 'name', 'pretty', 'callback')
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_pod" % key
            )
    params = {'body': body, 'namespace': namespace, 'name': name}
    params.update(kwargs)
    # The three positional parameters are required and may not be None.
    for required in ('body', 'namespace', 'name'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`replace_namespaced_pod`" % required
            )
    path_params = {'namespace': params['namespace'], 'name': params['name']}
    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    header_params = {}
    # Only send Accept when the client resolves one of the offered types.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = \
        self.api_client.select_header_content_type(['*/*'])
    resource_path = '/api/v1/namespaces/{namespace}/pods/{name}'.replace('{format}', 'json')
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=params['body'],
                                    post_params={},
                                    files={},
                                    response_type='V1Pod',
                                    auth_settings=[],
                                    callback=params.get('callback'))
def delete_namespaced_pod(self, body, namespace, name, **kwargs):
    """
    Delete a Pod.

    Synchronous by default; pass a `callback` keyword argument to make
    the request asynchronous — the callback is invoked with the response
    and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param V1DeleteOptions body: (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str name: name of the Pod (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: UnversionedStatus
             If the method is called asynchronously, returns the request thread.
    """
    allowed = ('body', 'namespace', 'name', 'pretty', 'callback')
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_pod" % key
            )
    params = {'body': body, 'namespace': namespace, 'name': name}
    params.update(kwargs)
    # The three positional parameters are required and may not be None.
    for required in ('body', 'namespace', 'name'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`delete_namespaced_pod`" % required
            )
    path_params = {'namespace': params['namespace'], 'name': params['name']}
    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    header_params = {}
    # Only send Accept when the client resolves one of the offered types.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = \
        self.api_client.select_header_content_type(['*/*'])
    resource_path = '/api/v1/namespaces/{namespace}/pods/{name}'.replace('{format}', 'json')
    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=params['body'],
                                    post_params={},
                                    files={},
                                    response_type='UnversionedStatus',
                                    auth_settings=[],
                                    callback=params.get('callback'))
def patch_namespaced_pod(self, body, namespace, name, **kwargs):
    """
    Partially update the specified Pod.

    Synchronous by default; pass a `callback` keyword argument to make
    the request asynchronous — the callback is invoked with the response
    and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param UnversionedPatch body: (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str name: name of the Pod (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1Pod
             If the method is called asynchronously, returns the request thread.
    """
    allowed = ('body', 'namespace', 'name', 'pretty', 'callback')
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_pod" % key
            )
    params = {'body': body, 'namespace': namespace, 'name': name}
    params.update(kwargs)
    # The three positional parameters are required and may not be None.
    for required in ('body', 'namespace', 'name'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`patch_namespaced_pod`" % required
            )
    path_params = {'namespace': params['namespace'], 'name': params['name']}
    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    header_params = {}
    # Only send Accept when the client resolves one of the offered types.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])
    if accept:
        header_params['Accept'] = accept
    # PATCH bodies must declare one of the supported patch content types.
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json-patch+json', 'application/merge-patch+json',
         'application/strategic-merge-patch+json'])
    resource_path = '/api/v1/namespaces/{namespace}/pods/{name}'.replace('{format}', 'json')
    return self.api_client.call_api(resource_path, 'PATCH',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=params['body'],
                                    post_params={},
                                    files={},
                                    response_type='V1Pod',
                                    auth_settings=[],
                                    callback=params.get('callback'))
def connect_get_namespaced_pod_attach(self, namespace, name, **kwargs):
"""
connect GET requests to attach of Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.connect_get_namespaced_pod_attach(namespace, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str name: name of the Pod (required)
:param bool stdin: Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.
:param bool stdout: Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.
:param bool stderr: Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.
:param bool tty: TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty | |
Inverse
* ARMIDO <NAME> and <NAME>, JR.
* ACM Transactions on Mathematical Software, Vol. 12, No. 4,
* December 1986, Pages 377-393.
*
* See equation 32.
*/
double s, t;
double a[4] = {0.213623493715853, 4.28342155967104,
11.6616720288968, 3.31125922108741};
double b[5] = {0.3611708101884203e-1, 1.27364489782223,
6.40691597760039, 6.61053765625462, 1};
if (p < 0.5) {
t = sqrt(-2 * log(p));
}
else {
t = sqrt(-2 * log(q));
}
s = t - polevl<3>(t, a) / polevl<4>(t, b);
if(p < 0.5)
s = -s;
return s;
}
__device__ double didonato_SN(double a, double x, unsigned N, double tolerance)
{
    /*
     * Series S_N from:
     * Computation of the Incomplete Gamma Function Ratios and their Inverse,
     * DiDonato and Morris, ACM Transactions on Mathematical Software,
     * Vol. 12, No. 4, December 1986, pages 377-393, equation 34:
     *
     *   S_N = 1 + sum_{i=1..N} prod_{j=1..i} x / (a + j)
     *
     * terminating early once a term falls below `tolerance`.
     */
    double sum = 1.0;
    if (N >= 1) {
        double term = x / (a + 1);
        sum += term;
        for (unsigned i = 2; i <= N; ++i) {
            term *= x / (a + i);
            sum += term;
            if (term < tolerance) {
                break;
            }
        }
    }
    return sum;
}
__device__ double Gamma(double in0)
{
    /*
     * Gamma function wrapper with explicit special cases:
     *   Gamma(-inf)            -> -inf
     *   Gamma(negative integer)-> +inf  (poles)
     * Everything else is deferred to tgamma().
     */
    if (isinf(in0) && in0 < 0) {
        return -1.0 / 0.0;
    }
    if (in0 < 0. && in0 == floor(in0)) {
        return 1.0 / 0.0;
    }
    return tgamma(in0);
}
__device__ double find_inverse_gamma(double a, double p, double q)
{
/*
* In order to understand what's going on here, you will
* need to refer to:
*
* Computation of the Incomplete Gamma Function Ratios and their Inverse
* ARMID<NAME> and <NAME>, JR.
* ACM Transactions on Mathematical Software, Vol. 12, No. 4,
* December 1986, Pages 377-393.
*/
double result;
if (a == 1) {
if (q > 0.9) {
result = -log1p(-p);
}
else {
result = -log(q);
}
}
else if (a < 1) {
double g = Gamma(a);
double b = q * g;
if ((b > 0.6) || ((b >= 0.45) && (a >= 0.3))) {
/* DiDonato & Morris Eq 21:
*
* There is a slight variation from DiDonato and Morris here:
* the first form given here is unstable when p is close to 1,
* making it impossible to compute the inverse of Q(a,x) for small
* q. Fortunately the second form works perfectly well in this case.
*/
double u;
if((b * q > 1e-8) && (q > 1e-5)) {
u = pow(p * g * a, 1 / a);
}
else {
u = exp((-q / a) - NPY_EULER);
}
result = u / (1 - (u / (a + 1)));
}
else if ((a < 0.3) && (b >= 0.35)) {
/* DiDonato & Morris Eq 22: */
double t = exp(-NPY_EULER - b);
double u = t * exp(t);
result = t * exp(u);
}
else if ((b > 0.15) || (a >= 0.3)) {
/* DiDonato & Morris Eq 23: */
double y = -log(b);
double u = y - (1 - a) * log(y);
result = y - (1 - a) * log(u) - log(1 + (1 - a) / (1 + u));
}
else if (b > 0.1) {
/* DiDonato & Morris Eq 24: */
double y = -log(b);
double u = y - (1 - a) * log(y);
result = y - (1 - a) * log(u)
- log((u * u + 2 * (3 - a) * u + (2 - a) * (3 - a))
/ (u * u + (5 - a) * u + 2));
}
else {
/* DiDonato & Morris Eq 25: */
double y = -log(b);
double c1 = (a - 1) * log(y);
double c1_2 = c1 * c1;
double c1_3 = c1_2 * c1;
double c1_4 = c1_2 * c1_2;
double a_2 = a * a;
double a_3 = a_2 * a;
double c2 = (a - 1) * (1 + c1);
double c3 = (a - 1) * (-(c1_2 / 2)
+ (a - 2) * c1
+ (3 * a - 5) / 2);
double c4 = (a - 1) * ((c1_3 / 3) - (3 * a - 5) * c1_2 / 2
+ (a_2 - 6 * a + 7) * c1
+ (11 * a_2 - 46 * a + 47) / 6);
double c5 = (a - 1)
* (-(c1_4 / 4)
+ (11 * a - 17) * c1_3 / 6
+ (-3 * a_2 + 13 * a -13) * c1_2
+ (2 * a_3 - 25 * a_2 + 72 * a - 61) * c1 / 2
+ (25 * a_3 - 195 * a_2 + 477 * a - 379) / 12);
double y_2 = y * y;
double y_3 = y_2 * y;
double y_4 = y_2 * y_2;
result = y + c1 + (c2 / y) + (c3 / y_2) + (c4 / y_3) + (c5 / y_4);
}
}
else {
/* DiDonato and Morris Eq 31: */
double s = find_inverse_s(p, q);
double s_2 = s * s;
double s_3 = s_2 * s;
double s_4 = s_2 * s_2;
double s_5 = s_4 * s;
double ra = sqrt(a);
double w = a + s * ra + (s_2 - 1) / 3;
w += (s_3 - 7 * s) / (36 * ra);
w -= (3 * s_4 + 7 * s_2 - 16) / (810 * a);
w += (9 * s_5 + 256 * s_3 - 433 * s) / (38880 * a * ra);
if ((a >= 500) && (fabs(1 - w / a) < 1e-6)) {
result = w;
}
else if (p > 0.5) {
if (w < 3 * a) {
result = w;
}
else {
double D = fmax(2, a * (a - 1));
double lg = lgam(a);
double lb = log(q) + lg;
if (lb < -D * 2.3) {
/* DiDonato and Morris Eq 25: */
double y = -lb;
double c1 = (a - 1) * log(y);
double c1_2 = c1 * c1;
double c1_3 = c1_2 * c1;
double c1_4 = c1_2 * c1_2;
double a_2 = a * a;
double a_3 = a_2 * a;
double c2 = (a - 1) * (1 + c1);
double c3 = (a - 1) * (-(c1_2 / 2)
+ (a - 2) * c1
+ (3 * a - 5) / 2);
double c4 = (a - 1) * ((c1_3 / 3)
- (3 * a - 5) * c1_2 / 2
+ (a_2 - 6 * a + 7) * c1
+ (11 * a_2 - 46 * a + 47) / 6);
double c5 = (a - 1) * (-(c1_4 / 4)
+ (11 * a - 17) * c1_3 / 6
+ (-3 * a_2 + 13 * a -13) * c1_2
+ (2 * a_3 - 25 * a_2 + 72 * a - 61) * c1 / 2
+ (25 * a_3 - 195 * a_2 + 477 * a - 379) / 12);
double y_2 = y * y;
double y_3 = y_2 * y;
double y_4 = y_2 * y_2;
result = y + c1 + (c2 / y) + (c3 / y_2) + (c4 / y_3)
+ (c5 / y_4);
}
else {
/* DiDonato and Morris Eq 33: */
double u = -lb + (a - 1) * log(w)
- log(1 + (1 - a) / (1 + w));
result = -lb + (a - 1) * log(u)
- log(1 + (1 - a) / (1 + u));
}
}
}
else {
double z = w;
double ap1 = a + 1;
double ap2 = a | |
from unittest import mock, skipUnless
from django.conf.global_settings import PASSWORD_HASHERS
from django.contrib.auth.hashers import (
UNUSABLE_PASSWORD_PREFIX, UNUSABLE_PASSWORD_SUFFIX_LENGTH,
BasePasswordHasher, BCryptPasswordHasher, BCryptSHA256PasswordHasher,
MD5PasswordHasher, PBKDF2PasswordHasher, PBKDF2SHA1PasswordHasher,
ScryptPasswordHasher, SHA1PasswordHasher, check_password, get_hasher,
identify_hasher, is_password_usable, make_password,
)
from django.test import SimpleTestCase
from django.test.utils import override_settings
# Optional dependency: the crypt hasher tests are skipped when the module
# is missing or unusable on this platform.
try:
    import crypt
except ImportError:
    crypt = None
else:
    # On some platforms (e.g. OpenBSD), crypt.crypt() always returns None,
    # which makes the hasher unusable — treat that the same as "missing".
    if crypt.crypt('') is None:
        crypt = None
# Optional dependencies: bcrypt/argon2 tests are skipped when not installed.
try:
    import bcrypt
except ImportError:
    bcrypt = None
try:
    import argon2
except ImportError:
    argon2 = None
class PBKDF2SingleIterationHasher(PBKDF2PasswordHasher):
    """PBKDF2 hasher with a single iteration, to keep test hashing fast."""
    iterations = 1
@override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
class TestUtilsHashPass(SimpleTestCase):
def test_simple(self):
    "The default hasher round-trips a password and rejects a wrong one."
    hashed = make_password('<PASSWORD>')
    self.assertTrue(hashed.startswith('pbkdf2_sha256$'))
    self.assertTrue(is_password_usable(hashed))
    self.assertTrue(check_password('<PASSWORD>', hashed))
    self.assertFalse(check_password('<PASSWORD>', hashed))
    # The empty password is hashed and verified like any other.
    blank_hashed = make_password('')
    self.assertTrue(blank_hashed.startswith('pbkdf2_sha256$'))
    self.assertTrue(is_password_usable(blank_hashed))
    self.assertTrue(check_password('', blank_hashed))
    self.assertFalse(check_password(' ', blank_hashed))
def test_bytes(self):
    "Bytes passwords are accepted by make_password()/check_password()."
    hashed = make_password(b'<PASSWORD>')
    self.assertTrue(hashed.startswith('pbkdf2_sha256$'))
    self.assertIs(is_password_usable(hashed), True)
    self.assertIs(check_password(b'bytes_password', hashed), True)
def test_invalid_password(self):
    "Passwords that are neither str nor bytes raise TypeError."
    expected = 'Password must be a string or bytes, got int.'
    with self.assertRaisesMessage(TypeError, expected):
        make_password(1)
def test_pbkdf2(self):
    """pbkdf2_sha256: encode, verify, identify, and salt-entropy update."""
    # NOTE(review): the '<PASSWORD>' literals look like redaction
    # placeholders — confirm the real fixture values upstream.
    encoded = make_password('<PASSWORD>', '<PASSWORD>', '<PASSWORD>')
    self.assertEqual(encoded, 'pbkdf2_sha256$320000$seasalt$Toj2II2rBvFiGQcPmUml1Nlni2UtvyRWwz/jz4q6q/4=')
    self.assertTrue(is_password_usable(encoded))
    self.assertTrue(check_password('<PASSWORD>', encoded))
    self.assertFalse(check_password('<PASSWORD>', encoded))
    self.assertEqual(identify_hasher(encoded).algorithm, "pbkdf2_sha256")
    # Blank passwords hash and verify like any other.
    blank_encoded = make_password('', '<PASSWORD>', '<PASSWORD>')
    self.assertTrue(blank_encoded.startswith('pbkdf2_sha256$'))
    self.assertTrue(is_password_usable(blank_encoded))
    self.assertTrue(check_password('', blank_encoded))
    self.assertFalse(check_password(' ', blank_encoded))
    # Salt entropy check: a weak salt must trigger must_update().
    hasher = get_hasher('pbkdf2_sha256')
    encoded_weak_salt = make_password('<PASSWORD>', '<PASSWORD>', '<PASSWORD>')
    encoded_strong_salt = make_password('<PASSWORD>', hasher.salt(), 'pbkdf2_sha256')
    self.assertIs(hasher.must_update(encoded_weak_salt), True)
    self.assertIs(hasher.must_update(encoded_strong_salt), False)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
def test_sha1(self):
    """Salted SHA-1: encode, verify, identify, and salt-entropy update."""
    # NOTE(review): '<PASSWORD>' literals look like redaction placeholders.
    encoded = make_password('<PASSWORD>', 'seasalt', 'sha1')
    self.assertEqual(encoded, 'sha1$seasalt$cff36ea83f5706ce9aa7454e63e431fc726b2dc8')
    self.assertTrue(is_password_usable(encoded))
    self.assertTrue(check_password('<PASSWORD>', encoded))
    self.assertFalse(check_password('<PASSWORD>', encoded))
    self.assertEqual(identify_hasher(encoded).algorithm, "sha1")
    # Blank passwords hash and verify like any other.
    blank_encoded = make_password('', 'seasalt', 'sha1')
    self.assertTrue(blank_encoded.startswith('sha1$'))
    self.assertTrue(is_password_usable(blank_encoded))
    self.assertTrue(check_password('', blank_encoded))
    self.assertFalse(check_password(' ', blank_encoded))
    # Salt entropy check: a weak salt must trigger must_update().
    hasher = get_hasher('sha1')
    encoded_weak_salt = make_password('<PASSWORD>', 'iod<PASSWORD>', 'sha1')
    encoded_strong_salt = make_password('<PASSWORD>', hasher.salt(), 'sha1')
    self.assertIs(hasher.must_update(encoded_weak_salt), True)
    self.assertIs(hasher.must_update(encoded_strong_salt), False)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.MD5PasswordHasher'])
def test_md5(self):
    """Salted MD5: encode, verify, identify, and salt-entropy update."""
    # NOTE(review): '<PASSWORD>' literals look like redaction placeholders.
    encoded = make_password('<PASSWORD>', 'seasalt', 'md5')
    self.assertEqual(encoded, 'md5$seasalt$3f86d0d3d465b7b458c231bf3555c0e3')
    self.assertTrue(is_password_usable(encoded))
    self.assertTrue(check_password('<PASSWORD>', encoded))
    self.assertFalse(check_password('<PASSWORD>', encoded))
    self.assertEqual(identify_hasher(encoded).algorithm, "md5")
    # Blank passwords hash and verify like any other.
    blank_encoded = make_password('', '<PASSWORD>alt', 'md5')
    self.assertTrue(blank_encoded.startswith('md5$'))
    self.assertTrue(is_password_usable(blank_encoded))
    self.assertTrue(check_password('', blank_encoded))
    self.assertFalse(check_password(' ', blank_encoded))
    # Salt entropy check: a weak salt must trigger must_update().
    hasher = get_hasher('md5')
    encoded_weak_salt = make_password('<PASSWORD>', 'iod<PASSWORD>', 'md5')
    encoded_strong_salt = make_password('<PASSWORD>', hasher.salt(), 'md5')
    self.assertIs(hasher.must_update(encoded_weak_salt), True)
    self.assertIs(hasher.must_update(encoded_strong_salt), False)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.UnsaltedMD5PasswordHasher'])
def test_unsalted_md5(self):
    """Unsalted MD5: encode, verify, identify, and the legacy 'md5$$' form."""
    # NOTE(review): '<PASSWORD>' literals look like redaction placeholders.
    encoded = make_password('<PASSWORD>', '', 'unsalted_md5')
    self.assertEqual(encoded, '88a434c88cca<PASSWORD>4cd<PASSWORD>3')
    self.assertTrue(is_password_usable(encoded))
    self.assertTrue(check_password('<PASSWORD>', encoded))
    self.assertFalse(check_password('<PASSWORD>', encoded))
    self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_md5")
    # Alternate unsalted syntax: 'md5$$<digest>' is accepted as well.
    alt_encoded = "md5$$%s" % encoded
    self.assertTrue(is_password_usable(alt_encoded))
    self.assertTrue(check_password('<PASSWORD>', alt_encoded))
    self.assertFalse(check_password('<PASSWORD>', alt_encoded))
    # Blank passwords hash and verify like any other.
    blank_encoded = make_password('', '', '<PASSWORD>')
    self.assertTrue(is_password_usable(blank_encoded))
    self.assertTrue(check_password('', blank_encoded))
    self.assertFalse(check_password(' ', blank_encoded))
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher'])
def test_unsalted_sha1(self):
    """Unsalted SHA-1: encode, verify, identify; raw digests are rejected."""
    # NOTE(review): '<PASSWORD>' literals look like redaction placeholders.
    encoded = make_password('<PASSWORD>', '', 'unsalted_sha1')
    self.assertEqual(encoded, 'sha1$$6d138ca3ae545631b3abd71a4f076ce759c5700b')
    self.assertTrue(is_password_usable(encoded))
    self.assertTrue(check_password('<PASSWORD>', encoded))
    self.assertFalse(check_password('<PASSWORD>', encoded))
    self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_sha1")
    # A raw SHA-1 digest without the 'sha1$$' prefix isn't acceptable.
    alt_encoded = encoded[6:]
    self.assertFalse(check_password('<PASSWORD>', alt_encoded))
    # Blank passwords hash and verify like any other.
    blank_encoded = make_password('', '', 'unsalted_sha1')
    self.assertTrue(blank_encoded.startswith('sha1$'))
    self.assertTrue(is_password_usable(blank_encoded))
    self.assertTrue(check_password('', blank_encoded))
    self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(crypt, "no crypt module to generate password.")
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.CryptPasswordHasher'])
def test_crypt(self):
    """crypt(3) hasher: encode, verify, and identify."""
    # NOTE(review): '<PASSWORD>' literals look like redaction placeholders.
    encoded = make_password('<PASSWORD>', 'ab', 'crypt')
    self.assertEqual(encoded, 'crypt$$ab1Hv2Lg7ltQo')
    self.assertTrue(is_password_usable(encoded))
    self.assertTrue(check_password('<PASSWORD>', encoded))
    self.assertFalse(check_password('<PASSWORD>', encoded))
    self.assertEqual(identify_hasher(encoded).algorithm, "crypt")
    # Blank passwords hash and verify like any other.
    blank_encoded = make_password('', 'ab', 'crypt')
    self.assertTrue(blank_encoded.startswith('crypt$'))
    self.assertTrue(is_password_usable(blank_encoded))
    self.assertTrue(check_password('', blank_encoded))
    self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(bcrypt, "bcrypt not installed")
def test_bcrypt_sha256(self):
    """bcrypt_sha256: encode, verify, identify; no 72-byte truncation."""
    # NOTE(review): '<PASSWORD>' literals look like redaction placeholders
    # (including some hasher name arguments) — confirm upstream values.
    encoded = make_password('<PASSWORD>', hasher='bcrypt_<PASSWORD>')
    self.assertTrue(is_password_usable(encoded))
    self.assertTrue(encoded.startswith('bcrypt_sha256$'))
    self.assertTrue(check_password('<PASSWORD>', encoded))
    self.assertFalse(check_password('<PASSWORD>', encoded))
    self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt_sha256")
    # Pre-hashing with SHA-256 means bcrypt's 72-byte truncation no
    # longer applies: a 72-byte prefix must NOT verify.
    password = (
        '<PASSWORD>'
        '<PASSWORD>'
    )
    encoded = make_password(password, hasher='<PASSWORD>')
    self.assertTrue(check_password(password, encoded))
    self.assertFalse(check_password(password[:72], encoded))
    # Blank passwords hash and verify like any other.
    blank_encoded = make_password('', hasher='<PASSWORD>')
    self.assertTrue(blank_encoded.startswith('bcrypt_sha256$'))
    self.assertTrue(is_password_usable(blank_encoded))
    self.assertTrue(check_password('', blank_encoded))
    self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(bcrypt, "bcrypt not installed")
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.BCryptPasswordHasher'])
def test_bcrypt(self):
    """Plain bcrypt: encode, verify, and identify."""
    # NOTE(review): '<PASSWORD>' literals look like redaction placeholders.
    encoded = make_password('<PASSWORD>', hasher='bcrypt')
    self.assertTrue(is_password_usable(encoded))
    self.assertTrue(encoded.startswith('bcrypt$'))
    self.assertTrue(check_password('<PASSWORD>', encoded))
    self.assertFalse(check_password('<PASSWORD>', encoded))
    self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt")
    # Blank passwords hash and verify like any other.
    blank_encoded = make_password('', hasher='bcrypt')
    self.assertTrue(blank_encoded.startswith('bcrypt$'))
    self.assertTrue(is_password_usable(blank_encoded))
    self.assertTrue(check_password('', blank_encoded))
    self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(bcrypt, "bcrypt not installed")
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.BCryptPasswordHasher'])
def test_bcrypt_upgrade(self):
    """check_password() triggers a rehash when the work factor increases."""
    hasher = get_hasher('bcrypt')
    self.assertEqual('bcrypt', hasher.algorithm)
    self.assertNotEqual(hasher.rounds, 4)
    old_rounds = hasher.rounds
    try:
        # Generate a password with 4 rounds.
        hasher.rounds = 4
        encoded = make_password('<PASSWORD>', hasher='bcrypt')
        rounds = hasher.safe_summary(encoded)['work factor']
        self.assertEqual(rounds, 4)
        state = {'upgraded': False}
        # setter() is the callback check_password() invokes on upgrade.
        def setter(password):
            state['upgraded'] = True
        # Rounds unchanged: no upgrade is triggered.
        self.assertTrue(check_password('<PASSWORD>', encoded, setter, 'bcrypt'))
        self.assertFalse(state['upgraded'])
        # Revert to the old (higher) rounds count and ...
        hasher.rounds = old_rounds
        # ... check that the password would get updated to the new count.
        self.assertTrue(check_password('<PASSWORD>', encoded, setter, 'bcrypt'))
        self.assertTrue(state['upgraded'])
    finally:
        # Restore the shared hasher's configuration for other tests.
        hasher.rounds = old_rounds
    @skipUnless(bcrypt, "bcrypt not installed")
    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.BCryptPasswordHasher'])
    def test_bcrypt_harden_runtime(self):
        """harden_runtime() pads a failed check so its duration matches the
        current (higher) work factor, re-running encode() as needed."""
        hasher = get_hasher('bcrypt')
        self.assertEqual('bcrypt', hasher.algorithm)
        with mock.patch.object(hasher, 'rounds', 4):
            encoded = make_password('<PASSWORD>', hasher='bcrypt')
        with mock.patch.object(hasher, 'rounds', 6), \
                mock.patch.object(hasher, 'encode', side_effect=hasher.encode):
            hasher.harden_runtime('wrong_password', encoded)
            # Increasing rounds from 4 to 6 means an increase of 4 in workload,
            # therefore hardening should run 3 times to make the timing the
            # same (the original encode() call already ran once).
            self.assertEqual(hasher.encode.call_count, 3)
            # Get the original salt (includes the original workload factor)
            algorithm, data = encoded.split('$', 1)
            # bcrypt stores the salt in the first 29 chars of the payload.
            expected_call = (('wrong_password', data[:29].encode()),)
            self.assertEqual(hasher.encode.call_args_list, [expected_call] * 3)
def test_unusable(self):
encoded = make_password(None)
self.assertEqual(len(encoded), len(UNUSABLE_PASSWORD_PREFIX) + UNUSABLE_PASSWORD_SUFFIX_LENGTH)
self.assertFalse(is_password_usable(encoded))
self.assertFalse(check_password(None, encoded))
self.assertFalse(check_password(encoded, encoded))
self.assertFalse(check_password(UNUSABLE_PASSWORD_PREFIX, encoded))
self.assertFalse(check_password('', encoded))
self.assertFalse(check_password('<PASSWORD>', encoded))
self.assertFalse(check_password('<PASSWORD>', encoded))
with self.assertRaisesMessage(ValueError, 'Unknown password hashing algorith'):
identify_hasher(encoded)
# Assert that the unusable passwords actually contain a random part.
# This might fail one day due to a hash collision.
self.assertNotEqual(encoded, make_password(None), "Random password collision?")
def test_unspecified_password(self):
"""
Makes sure specifying no plain password with a valid encoded password
returns `False`.
"""
self.assertFalse(check_password(None, make_password('<PASSWORD>')))
def test_bad_algorithm(self):
msg = (
"Unknown password hashing algorithm '%s'. Did you specify it in "
"the PASSWORD_HASHERS setting?"
)
with self.assertRaisesMessage(ValueError, msg % 'lolcat'):
make_password('<PASSWORD>', hasher='lolcat')
with self.assertRaisesMessage(ValueError, msg % 'lolcat'):
identify_hasher('lolcat$salt$hash')
def test_is_password_usable(self):
passwords = ('<PASSWORD>', '', None)
for password in passwords:
with self.subTest(password=password):
self.assertIs(is_password_usable(password), True)
def test_low_level_pbkdf2(self):
hasher = PBKDF2PasswordHasher()
encoded = hasher.encode('lètmein', 'seasalt2')
self.assertEqual(encoded, 'pbkdf2_sha256$320000$seasalt2$BRr4pYNIQDsLFP+u4dzjs7pFuWJEin4lFMMoO9wBYvo=')
self.assertTrue(hasher.verify('lètmein', encoded))
def test_low_level_pbkdf2_sha1(self):
hasher = PBKDF2SHA1PasswordHasher()
encoded = hasher.encode('lètmein', 'seasalt2')
self.assertEqual(encoded, 'pbkdf2_sha1$320000$seasalt2$sDOkTvzV93jPWTRVxFGh50Jefo0=')
self.assertTrue(hasher.verify('lètmein', encoded))
@skipUnless(bcrypt, 'bcrypt not installed')
def test_bcrypt_salt_check(self):
hasher = BCryptPasswordHasher()
encoded = hasher.encode('lètmein', hasher.salt())
self.assertIs(hasher.must_update(encoded), False)
@skipUnless(bcrypt, 'bcrypt not installed')
def test_bcryptsha256_salt_check(self):
hasher = BCryptSHA256PasswordHasher()
encoded = hasher.encode('lètmein', hasher.salt())
self.assertIs(hasher.must_update(encoded), False)
    @override_settings(
        PASSWORD_HASHERS=[
            'django.contrib.auth.hashers.PBKDF2PasswordHasher',
            'django.contrib.auth.hashers.SHA1PasswordHasher',
            'django.contrib.auth.hashers.MD5PasswordHasher',
        ],
    )
    def test_upgrade(self):
        """Passwords hashed with a weaker, non-default algorithm are re-hashed
        (setter invoked) on a successful check."""
        self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
        for algo in ('sha1', 'md5'):
            with self.subTest(algo=algo):
                encoded = make_password('<PASSWORD>', hasher=algo)
                state = {'upgraded': False}
                # check_password() invokes setter(password) when an upgrade is needed.
                def setter(password):
                    state['upgraded'] = True
                self.assertTrue(check_password('<PASSWORD>', encoded, setter))
                self.assertTrue(state['upgraded'])
def test_no_upgrade(self):
encoded = make_password('<PASSWORD>')
state = {'upgraded': False}
def setter():
state['upgraded'] = True
self.assertFalse(check_password('WRONG', encoded, setter))
self.assertFalse(state['upgraded'])
    @override_settings(
        PASSWORD_HASHERS=[
            'django.contrib.auth.hashers.PBKDF2PasswordHasher',
            'django.contrib.auth.hashers.SHA1PasswordHasher',
            'django.contrib.auth.hashers.MD5PasswordHasher',
        ],
    )
    def test_no_upgrade_on_incorrect_pass(self):
        """A wrong password never triggers a hash upgrade, even when the stored
        hash uses a weaker, non-default algorithm."""
        self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
        for algo in ('sha1', 'md5'):
            with self.subTest(algo=algo):
                encoded = make_password('<PASSWORD>', hasher=algo)
                state = {'upgraded': False}
                # setter must never be called on a failed check.
                def setter():
                    state['upgraded'] = True
                self.assertFalse(check_password('WRONG', encoded, setter))
                self.assertFalse(state['upgraded'])
    def test_pbkdf2_upgrade(self):
        """A PBKDF2 hash stored with a lower iteration count is re-hashed
        (setter invoked) once the hasher's iteration count rises."""
        hasher = get_hasher('default')
        self.assertEqual('pbkdf2_sha256', hasher.algorithm)
        self.assertNotEqual(hasher.iterations, 1)
        old_iterations = hasher.iterations
        try:
            # Generate a password with 1 iteration.
            hasher.iterations = 1
            encoded = make_password('<PASSWORD>')
            algo, iterations, salt, hash = encoded.split('$', 3)
            self.assertEqual(iterations, '1')
            state = {'upgraded': False}
            # check_password() invokes setter(password) when an upgrade is needed.
            def setter(password):
                state['upgraded'] = True
            # No upgrade is triggered
            self.assertTrue(check_password('<PASSWORD>', encoded, setter))
            self.assertFalse(state['upgraded'])
            # Revert to the old iteration count and ...
            hasher.iterations = old_iterations
            # ... check if the password would get updated to the new iteration count.
            self.assertTrue(check_password('<PASSWORD>', encoded, setter))
            self.assertTrue(state['upgraded'])
        finally:
            # Restore shared hasher state even if an assertion fails.
            hasher.iterations = old_iterations
    def test_pbkdf2_harden_runtime(self):
        """harden_runtime() runs encode() once with the *remaining* iterations
        (6 - 1 = 5) to equalize timing on a failed check."""
        hasher = get_hasher('default')
        self.assertEqual('pbkdf2_sha256', hasher.algorithm)
        with mock.patch.object(hasher, 'iterations', 1):
            encoded = make_password('<PASSWORD>')
        with mock.patch.object(hasher, 'iterations', 6), \
                mock.patch.object(hasher, 'encode', side_effect=hasher.encode):
            hasher.harden_runtime('wrong_password', encoded)
            # Encode should get called once ...
            self.assertEqual(hasher.encode.call_count, 1)
            # ... with the original salt and 5 iterations.
            algorithm, iterations, salt, hash = encoded.split('$', 3)
            expected_call = (('wrong_password', salt, 5),)
            self.assertEqual(hasher.encode.call_args, expected_call)
    def test_pbkdf2_upgrade_new_hasher(self):
        """Swapping in a hasher with more iterations upgrades existing hashes
        created by a custom single-iteration hasher."""
        hasher = get_hasher('default')
        self.assertEqual('pbkdf2_sha256', hasher.algorithm)
        self.assertNotEqual(hasher.iterations, 1)
        state = {'upgraded': False}
        # check_password() invokes setter(password) when an upgrade is needed.
        def setter(password):
            state['upgraded'] = True
        with self.settings(PASSWORD_HASHERS=[
                'auth_tests.test_hashers.PBKDF2SingleIterationHasher']):
            encoded = make_password('<PASSWORD>')
            algo, iterations, salt, hash = encoded.split('$', 3)
            self.assertEqual(iterations, '1')
            # No upgrade is triggered
            self.assertTrue(check_password('<PASSWORD>', encoded, setter))
            self.assertFalse(state['upgraded'])
        # Revert to the old iteration count and check if the password would get
        # updated to the new iteration count.
        with self.settings(PASSWORD_HASHERS=[
                'django.contrib.auth.hashers.PBKDF2PasswordHasher',
                'auth_tests.test_hashers.PBKDF2SingleIterationHasher']):
            self.assertTrue(check_password('<PASSWORD>', encoded, setter))
            self.assertTrue(state['upgraded'])
    def test_check_password_calls_harden_runtime(self):
        """check_password() calls harden_runtime() only on a failed check."""
        hasher = get_hasher('default')
        encoded = make_password('<PASSWORD>')
        with mock.patch.object(hasher, 'harden_runtime'), \
                mock.patch.object(hasher, 'must_update', return_value=True):
            # Correct password supplied, no hardening needed
            check_password('<PASSWORD>', encoded)
            self.assertEqual(hasher.harden_runtime.call_count, 0)
            # Wrong password supplied, hardening needed
            check_password('wrong_password', encoded)
            self.assertEqual(hasher.harden_runtime.call_count, 1)
def test_encode_invalid_salt(self):
hasher_classes = [
MD5PasswordHasher,
PBKDF2PasswordHasher,
PBKDF2SHA1PasswordHasher,
ScryptPasswordHasher,
SHA1PasswordHasher,
]
msg = 'salt must be provided and cannot contain $.'
for hasher_class in hasher_classes:
hasher = hasher_class()
for salt in [None, '', 'sea$salt']:
with self.subTest(hasher_class.__name__, salt=salt):
with self.assertRaisesMessage(ValueError, msg):
hasher.encode('password', salt)
def test_encode_password_required(self):
hasher_classes = [
MD5PasswordHasher,
PBKDF2PasswordHasher,
PBKDF2SHA1PasswordHasher,
ScryptPasswordHasher,
SHA1PasswordHasher,
]
msg = 'password must be provided.'
for hasher_class in hasher_classes:
hasher = hasher_class()
with self.subTest(hasher_class.__name__):
with self.assertRaisesMessage(TypeError, msg):
hasher.encode(None, 'seasalt')
class BasePasswordHasherTests(SimpleTestCase):
not_implemented_msg = 'subclasses of BasePasswordHasher must provide %s() method'
def setUp(self):
self.hasher = BasePasswordHasher()
def | |
:func:`cr_uid`,
:func:`cr_uid_context`, :func:`cr_uid_id`, :func:`cr_uid_id_context`,
:func:`cr_uid_ids`, :func:`cr_uid_ids_context` is applied on the method.
Method calls are considered traditional style when their first parameter
is a database cursor.
"""
if hasattr(method, '_api'):
return method
# introspection on argument names to determine api style
args, vname, kwname, defaults = getargspec(method)
names = tuple(args) + (None,) * 4
if names[0] == 'self':
if names[1] in ('cr', 'cursor'):
if names[2] in ('uid', 'user'):
if names[3] == 'ids':
if 'context' in names or kwname:
return cr_uid_ids_context(method)
else:
return cr_uid_ids(method)
elif names[3] == 'id' or names[3] == 'res_id':
if 'context' in names or kwname:
return cr_uid_id_context(method)
else:
return cr_uid_id(method)
elif 'context' in names or kwname:
return cr_uid_context(method)
else:
return cr_uid(method)
elif 'context' in names:
return cr_context(method)
else:
return cr(method)
# no wrapping by default
return noguess(method)
def expected(decorator, func):
    """ Decorate ``func`` with ``decorator`` unless it already carries an
    ``_api`` marker (i.e. it has been wrapped before). """
    return func if hasattr(func, '_api') else decorator(func)
def _call_kw_model(method, self, args, kwargs):
    # RPC dispatch for @api.model methods: no record ids are expected in
    # ``args``.  Extract the context, rebind the recordset, then adapt the
    # result for transport.
    context, args, kwargs = split_context(method, args, kwargs)
    recs = self.with_context(context or {})
    _logger.debug("call %s.%s(%s)", recs, method.__name__, Params(args, kwargs))
    result = method(recs, *args, **kwargs)
    return downgrade(method, result, recs, args, kwargs)
def _call_kw_model_create(method, self, args, kwargs):
    # special case for method 'create'
    context, args, kwargs = split_context(method, args, kwargs)
    recs = self.with_context(context or {})
    _logger.debug("call %s.%s(%s)", recs, method.__name__, Params(args, kwargs))
    result = method(recs, *args, **kwargs)
    # a single vals dict yields one id; a list of dicts yields a list of ids
    return result.id if isinstance(args[0], Mapping) else result.ids
def _call_kw_multi(method, self, args, kwargs):
    # RPC dispatch for multi-record methods: the first positional argument is
    # the list of record ids to operate on.
    ids, args = args[0], args[1:]
    context, args, kwargs = split_context(method, args, kwargs)
    recs = self.with_context(context or {}).browse(ids)
    _logger.debug("call %s.%s(%s)", recs, method.__name__, Params(args, kwargs))
    result = method(recs, *args, **kwargs)
    return downgrade(method, result, recs, args, kwargs)
def call_kw(model, name, args, kwargs):
    """ Invoke the given method ``name`` on the recordset ``model``,
    dispatching on the method's ``_api`` marker. """
    method = getattr(type(model), name)
    dispatcher = {
        'model': _call_kw_model,
        'model_create': _call_kw_model_create,
    }.get(getattr(method, '_api', None), _call_kw_multi)
    return dispatcher(method, model, args, kwargs)
class Environment(Mapping):
    """ An environment wraps data for ORM records:
    - :attr:`cr`, the current database cursor;
    - :attr:`uid`, the current user id;
    - :attr:`context`, the current context dictionary.
    It provides access to the registry by implementing a mapping from model
    names to new api models. It also holds a cache for records, and a data
    structure to manage recomputations.
    """
    # request/thread-local storage holding the set of active environments
    _local = Local()
    @classproperty
    def envs(cls):
        # the Environments collection for the current request/thread
        return cls._local.environments
    @classmethod
    @contextmanager
    def manage(cls):
        """ Context manager for a set of environments. """
        if hasattr(cls._local, 'environments'):
            # already managed by an outer call: reuse the existing set
            yield
        else:
            try:
                cls._local.environments = Environments()
                yield
            finally:
                release_local(cls._local)
    @classmethod
    def reset(cls):
        """ Clear the set of environments.
        This may be useful when recreating a registry inside a transaction.
        """
        cls._local.environments = Environments()
    def __new__(cls, cr, uid, context):
        # Environments are interned: one instance per (cr, uid, context).
        assert context is not None
        args = (cr, uid, context)
        # if env already exists, return it
        env, envs = None, cls.envs
        for env in envs:
            if env.args == args:
                return env
        # otherwise create environment, and add it in the set
        self = object.__new__(cls)
        self.cr, self.uid, self.context = self.args = (cr, uid, frozendict(context))
        self.registry = Registry(cr.dbname)
        self.cache = envs.cache
        self._cache_key = (cr, uid)
        self._protected = StackMap()  # {field: ids, ...}
        self.dirty = defaultdict(set)  # {record: set(field_name), ...}
        self.all = envs
        envs.add(self)
        return self
    #
    # Mapping methods
    #
    def __contains__(self, model_name):
        """ Test whether the given model exists. """
        return model_name in self.registry
    def __getitem__(self, model_name):
        """ Return an empty recordset from the given model. """
        return self.registry[model_name]._browse((), self)
    def __iter__(self):
        """ Return an iterator on model names. """
        return iter(self.registry)
    def __len__(self):
        """ Return the size of the model registry. """
        return len(self.registry)
    # Environments compare by identity: __new__ deduplicates equal args.
    def __eq__(self, other):
        return self is other
    def __ne__(self, other):
        return self is not other
    def __hash__(self):
        return object.__hash__(self)
    def __call__(self, cr=None, user=None, context=None):
        """ Return an environment based on ``self`` with modified parameters.
        :param cr: optional database cursor to change the current cursor
        :param user: optional user/user id to change the current user
        :param context: optional context dictionary to change the current context
        """
        cr = self.cr if cr is None else cr
        uid = self.uid if user is None else int(user)
        context = self.context if context is None else context
        return Environment(cr, uid, context)
    def ref(self, xml_id, raise_if_not_found=True):
        """ return the record corresponding to the given ``xml_id`` """
        return self['ir.model.data'].xmlid_to_object(xml_id, raise_if_not_found=raise_if_not_found)
    @property
    def user(self):
        """ return the current user (as an instance) """
        return self(user=SUPERUSER_ID)['res.users'].browse(self.uid)
    @property
    def lang(self):
        """ return the current language code """
        return self.context.get('lang')
    @contextmanager
    def _do_in_mode(self, mode):
        """ Switch ``self.all.mode`` to ``mode`` for the duration of the
        context, unless a mode is already active (then it is a no-op). """
        if self.all.mode:
            yield
        else:
            try:
                self.all.mode = mode
                yield
            finally:
                self.all.mode = False
                # leaving draft mode discards all pending dirty markers
                self.dirty.clear()
    def do_in_draft(self):
        """ Context-switch to draft mode, where all field updates are done in
        cache only.
        """
        return self._do_in_mode(True)
    @property
    def in_draft(self):
        """ Return whether we are in draft mode. """
        return bool(self.all.mode)
    def do_in_onchange(self):
        """ Context-switch to 'onchange' draft mode, which is a specialized
        draft mode used during execution of onchange methods.
        """
        return self._do_in_mode('onchange')
    @property
    def in_onchange(self):
        """ Return whether we are in 'onchange' draft mode. """
        return self.all.mode == 'onchange'
    def clear(self):
        """ Clear all record caches, and discard all fields to recompute.
        This may be useful when recovering from a failed ORM operation.
        """
        self.cache.invalidate()
        self.all.todo.clear()
    @contextmanager
    def clear_upon_failure(self):
        """ Context manager that clears the environments (caches and fields to
        recompute) upon exception.
        """
        try:
            yield
        except Exception:
            self.clear()
            raise
    def protected(self, field):
        """ Return the recordset for which ``field`` should not be invalidated or recomputed. """
        return self[field.model_name].browse(self._protected.get(field, ()))
    @contextmanager
    def protecting(self, what, records=None):
        """ Prevent the invalidation or recomputation of fields on records.
        The parameters are either:
        - ``what`` a collection of fields and ``records`` a recordset, or
        - ``what`` a collection of pairs ``(fields, records)``.
        """
        protected = self._protected
        try:
            # push a new scope so the protection is undone on exit
            protected.pushmap()
            what = what if records is None else [(what, records)]
            for fields, records in what:
                for field in fields:
                    ids = protected.get(field, frozenset())
                    protected[field] = ids.union(records._ids)
            yield
        finally:
            protected.popmap()
    def field_todo(self, field):
        """ Return a recordset with all records to recompute for ``field``. """
        ids = {rid for recs in self.all.todo.get(field, ()) for rid in recs.ids}
        return self[field.model_name].browse(ids)
    def check_todo(self, field, record):
        """ Check whether ``field`` must be recomputed on ``record``, and if so,
        return the corresponding recordset to recompute.
        """
        for recs in self.all.todo.get(field, []):
            if recs & record:
                return recs
    def add_todo(self, field, records):
        """ Mark ``field`` to be recomputed on ``records``. """
        recs_list = self.all.todo.setdefault(field, [])
        for i, recs in enumerate(recs_list):
            if recs.env == records.env:
                # only add records if not already in the recordset, much much
                # cheaper in case recs is big and records is a singleton
                # already present
                if not records <= recs:
                    recs_list[i] |= records
                break
        else:
            recs_list.append(records)
    def remove_todo(self, field, records):
        """ Mark ``field`` as recomputed on ``records``. """
        recs_list = [recs - records for recs in self.all.todo.pop(field, [])]
        recs_list = [r for r in recs_list if r]
        if recs_list:
            self.all.todo[field] = recs_list
    def has_todo(self):
        """ Return whether some fields must be recomputed. """
        return bool(self.all.todo)
    def get_todo(self):
        """ Return a pair ``(field, records)`` to recompute.
        The field is such that none of its dependencies must be recomputed.
        """
        field = min(self.all.todo, key=self.registry.field_sequence)
        return field, self.all.todo[field][0]
    @property
    def recompute(self):
        """ Return whether recomputations are currently enabled. """
        return self.all.recompute
    @contextmanager
    def norecompute(self):
        """ Context manager that temporarily disables recomputations. """
        tmp = self.all.recompute
        self.all.recompute = False
        try:
            yield
        finally:
            self.all.recompute = tmp
    def cache_key(self, field):
        """ Return the key to store the value of ``field`` in cache, the full
        cache key being ``(key, field, record.id)``.
        """
        return self if field.context_dependent else self._cache_key
class Environments(object):
    """ A common object shared by all environments in a request. """
    def __init__(self):
        # weakly-referenced set of environments (they die with the request)
        self.envs = WeakSet()
        # record cache shared by all environments
        self.cache = Cache()
        # pending recomputations: {field: [records]}
        self.todo = {}
        # draft/onchange mode flag
        self.mode = False
        # whether recomputations are enabled
        self.recompute = True
    def add(self, env):
        """ Register the environment ``env`` in this set. """
        self.envs.add(env)
    def __iter__(self):
        """ Iterate over the registered environments. """
        return iter(self.envs)
class Cache(object):
""" Implementation of the cache of records. """
| |
import json
from math import atan2, pi, hypot
from typing import Union, List
import os
from os.path import exists as _exists
import subprocess
import numpy as np
import utm
from osgeo import gdal, osr, ogr
from wepppy.all_your_base import isfloat
from wepppy.all_your_base.geo import get_utm_zone, utm_srid
from .wepp_top_translator import WeppTopTranslator
gdal.UseExceptions()
def is_channel(topaz_id: Union[int, str]) -> bool:
    """A TopazID denotes a channel iff its last digit is 4."""
    return str(topaz_id)[-1:] == '4'
def garbrecht_length(distances: List[List[float]]) -> float:
    """
    Length of a subcatchment computed from its flowpaths, each flowpath
    weighted by its cell count (a proxy for area).

    ``distances`` is a list of per-flowpath arrays of cell-to-cell
    distances (not cumulative).

    eq. 3.4 in Thomas Cochrane's Dissertation
    """
    lengths = np.array([np.sum(d) for d in distances])
    weights = np.array([len(d) for d in distances], dtype=np.float64)
    return float((lengths * weights).sum() / weights.sum())
def cummnorm_distance(distance: List[float]) -> np.array:
    """
    Build a cumulative, 0-to-1 normalized distance array from an array of
    cell-to-cell distances. A single-cell input maps to [0, 1].
    """
    assert len(distance) > 0
    if len(distance) == 1:
        assert distance[0] > 0.0
        return np.array([0, 1])
    cum = np.cumsum(np.array(distance, np.float64))
    # shift to start at 0, then scale so the profile ends at 1
    cum = (cum - cum[0]) / (cum[-1] - cum[0])
    return cum
def representative_normalized_elevations(x: List[float], dy: List[float]) -> List[float]:
    """
    Integrate slopes ``dy`` over the normalized distance array ``x``
    (0..1) to produce normalized elevations, anchored at 0 at the top of
    the hillslope (elevations decrease downslope).
    """
    assert len(x) == len(dy), (x, dy)
    assert x[0] == 0.0
    assert x[-1] == 1.0
    elevations = [0.0]
    # each step drops by (segment length) * (slope at segment start)
    for (x0, x1), slope in zip(zip(x, x[1:]), dy):
        elevations.append(elevations[-1] - (x1 - x0) * slope)
    return elevations
def read_geojson(fname):
    """Read a GeoJSON file and return {TopazID: np.array of first-ring coords}.

    Fix: the previous version leaked the file handle (``open(...).read()``
    with no close); use a context manager instead.
    """
    with open(fname) as fp:
        data = json.load(fp)
    d = {}
    for feature in data['features']:
        top = feature['properties']['TopazID']
        # first ring of the (polygon) geometry
        d[top] = np.array(feature['geometry']['coordinates'][0])
    return d
def interpolate_slp(distances, slopes, max_points):
    """
    Resample a (distances, slopes) profile down to ``max_points`` samples
    via linear interpolation; returns the inputs unchanged when the
    profile is already small enough. ``distances`` must be strictly
    increasing, spanning 0.0 to 1.0.
    """
    _s = np.array(slopes)
    _d = np.array(distances)
    if _s.shape == (1,) and _d.shape == (2,):  # slope is a single cell
        _s = np.array([slopes[0], slopes[0]])
    assert _s.shape == _d.shape, str([_s.shape, _d.shape])
    for left, right in zip(_d[:-1], _d[1:]):
        assert left < right, distances
    assert _d[0] == 0.0
    assert _d[-1] == 1.0
    # interpolate if there are too many slope points
    if len(_d) > max_points:
        grid = np.linspace(0.0, 1.0, max_points)
        return grid, np.interp(grid, _d, _s)
    return _d, _s
def write_slp(aspect, width, cellsize, length, slope, distance_p, fp, version=97.3, max_points=19):
    """
    writes a slope file in the 97.3 format for WEPP

    :param aspect: hillslope aspect (degrees)
    :param width: profile width (used by the 97.3 format)
    :param cellsize: raster cell size (used by the non-97.3 format)
    :param length: profile length
    :param slope: slope values along the profile
    :param distance_p: normalized distances along the profile (0..1)
    :param fp: writable file-like object the slope section is written to
    :param version: 97.3 writes the full header; anything else writes the
        headerless cellsize variant
    :param max_points: profiles longer than this are thinned by interpolation
    """
    assert isfloat(aspect)
    assert isfloat(width)
    assert isfloat(length)
    assert isfloat(cellsize)
    _d, _s = distance_p, slope
    nofes = 1  # number of overland flow elements in this file
    npts = len(_d)
    # thin the profile when it carries more points than WEPP accepts
    if npts > max_points:
        _d, _s = interpolate_slp(distance_p, slope, max_points)
        npts = len(_d)
    if version == 97.3:
        _slp = '97.3\n{nofes}\n{aspect} {width}\n{npts} {length}\n{defs} '
        defs = ' '.join(['%0.4f, %0.5f' % ds for ds in zip(_d, _s)])
        fp.write(_slp.format(nofes=nofes, aspect=aspect, width=width,
                             npts=npts, length=length, defs=defs))
    else:
        # headerless variant: cellsize replaces width, no version banner
        _slp = '{aspect} {cellsize}\n{npts} {length}\n{defs} \n'
        defs = ' '.join(['%0.4f %0.5f' % ds for ds in zip(_d, _s)])
        fp.write(_slp.format(nofes=nofes, aspect=aspect, cellsize=float(cellsize),
                             npts=npts, length=length, defs=defs))
def identify_subflows(flowpaths: List[np.array]) -> List[List[int]]:
    """
    given an ndarray of flowpaths flowpath (n, 2) for a subcatchment
    identify which flowpaths travel over the same path down the
    hillslope. These are grouped into subflows of flowpath indices

    Fix: pixel-coordinate collections were converted to lists, so the
    ``fp0 <= fp1`` test performed a lexicographic *list* comparison (over
    arbitrary set iteration order) instead of the intended subset test.
    Keep them as sets so ``<=`` means "is a subset of".
    """
    # if there is only 1 flowpath then there are no subflows
    if len(flowpaths) == 1:
        return [[0]]
    # map flowpath index -> set of (x, y) pixel coordinates; keys point back
    # to the index of the flowpaths list (aligned with slopes and distance)
    fps_d = {}
    for k, fp in enumerate(flowpaths):
        fps_d[k] = {(fp[j, 0], fp[j, 1]) for j in range(fp.shape[0])}
    # (flow index, number of distinct pixels) pairs; we iterate in
    # descending flow length so supersets are seen before their subsets
    lns = [(k, len(v)) for k, v in fps_d.items()]
    subflows = []
    for i, n in sorted(lns, key=lambda y: y[1], reverse=True):
        # the set of pixel coords for this flowpath
        fp0 = fps_d[i]
        issub = False
        for j, fp1_indx_arr in enumerate(subflows):
            # the first index of each subflow list is its longest flowpath
            fp1 = fps_d[fp1_indx_arr[0]]
            # set containment: fp0 is a subset of fp1
            if fp0 <= fp1:
                subflows[j].append(i)
                issub = True
                break
        # fp0 is not a subset of any existing subflow: start a new one
        if not issub:
            subflows.append([i])
    return subflows
def weighted_slope_average(areas, slopes, lengths, max_points=19):
    """
    calculates weighted slopes based on the flowpaths contained on the hillslope
    eq. 3.3 in Thomas Cochrane's Dissertation

    :param areas: per-flowpath areas (weights)
    :param slopes: per-flowpath mean slopes
    :param lengths: per-flowpath lengths
    :param max_points: cap on the number of points defining the profile
    :return: (weighted slopes, normalized distances) as plain lists
    """
    # determine longest flowpath
    i = int(np.argmax(lengths))
    longest = float(lengths[i])
    # determine number of points to define slope
    num_points = len(lengths)
    if num_points > max_points:
        num_points = max_points
    # degenerate case: a single flowpath yields a uniform slope over [0, 1]
    if num_points == 1:
        slope = float(slopes[i])
        return [slope, slope], [0.0, 1.0]
    # kp weights: length * area per flowpath
    kps = np.array([L * a for L, a in zip(lengths, areas)])
    kpsum = np.sum(kps)
    eps = []
    for slp, length, kp in zip(slopes, lengths, kps):
        eps.append((slp * kp) / kpsum)
    # build an array with equally spaced points to interpolate on
    distance_p = np.linspace(0, longest, num_points)
    # NOTE(review): np.interp requires its xp argument (``lengths``) to be
    # monotonically increasing; flowpath lengths are not obviously sorted
    # here — confirm upstream ordering, otherwise results are undefined.
    w_slopes = np.interp(distance_p, lengths, eps)
    # normalize distance_p array
    distance_p /= longest
    return w_slopes.flatten().tolist(), distance_p.tolist()
def compute_direction(head: List, tail: List) -> float:
    """Direction in degrees, normalized to [0, 360), from head to tail."""
    degrees = atan2(tail[1] - head[1],
                    head[0] - tail[0]) * (180.0 / pi) - 180.0
    return degrees + 360.0 if degrees < 0 else degrees
# Template for a headerless WEPP slope-profile section.
slope_template = """\
{aspect} {profile_width}
{num_points} {length}
{profile}"""
# Hex colors for slope/aspect visualization, keyed by compass octant; each
# octant lists three shades, one per slope-steepness class. Key 0 is the
# gray used for slopes below the first break.
colordict = {
    0: "#787878",
    "n": ["#9afb0c", "#9ddd5e", "#9fc085"],
    "ne": ["#00ad43", "#3dab71", "#72a890"],
    "e": ["#0068c0", "#5078b6", "#7c8ead"],
    "se": ["#6c00a3", "#77479d", "#8c75a0"],
    "s": ["#ca009c", "#c04d9c", "#b47ba1"],
    "sw": ["#ff5568", "#e76f7a", "#cb8b8f"],
    "w": ["#ffab47", "#e2a66c", "#c5a58a"],
    "nw": ["#f4fa00", "#d6db5e", "#bdbf89"]
}
# Slope-steepness class breaks used by slp_asp_color().
c_slope_breaks = [0.05, 0.15, 0.30]
def slp_asp_color(slope: float, aspect: float) -> str:
    """Map a slope/aspect pair to a hex color from ``colordict``.

    Fix: the original elif-chain classified aspects <= 22.5 (and >= 337.5,
    after the modulo) as "e" because the first condition's lower bound was
    never re-checked in the later branches; due-north aspects now fall
    through to "n" as intended.
    """
    aspect %= 360.0
    # steepness class: number of slope breaks exceeded
    i = 0
    for j, brk in enumerate(c_slope_breaks):
        if slope > brk:
            i = j + 1
    if i == 0:
        # flatter than the first break: neutral gray
        return colordict[0]
    cat_asp = "n"
    if 22.5 < aspect < 67.5:
        cat_asp = "ne"
    elif 67.5 <= aspect < 112.5:
        cat_asp = "e"
    elif 112.5 <= aspect < 157.5:
        cat_asp = "se"
    elif 157.5 <= aspect < 202.5:
        cat_asp = "s"
    elif 202.5 <= aspect < 247.5:
        cat_asp = "sw"
    elif 247.5 <= aspect < 292.5:
        cat_asp = "w"
    elif 292.5 <= aspect < 337.5:
        cat_asp = "nw"
    return colordict[cat_asp][i - 1]
def rect_to_polar(d):
    """
    Angle in [0, 2*pi) of d['point'] around d['origin'], measured against
    the reference vector d['refvec']; returns -pi for a zero-length vector.
    """
    point, origin, refvec = d['point'], d['origin'], d['refvec']
    # Vector between point and the origin: v = p - o
    vx = point[0] - origin[0]
    vy = point[1] - origin[1]
    vlen = hypot(vx, vy)
    # If length is zero there is no angle
    if vlen == 0:
        return -pi
    nx, ny = vx / vlen, vy / vlen
    dotprod = nx * refvec[0] + ny * refvec[1]
    diffprod = refvec[1] * nx - refvec[0] * ny
    return atan2(diffprod, dotprod) % (2 * pi)
def json_to_wgs(src_fn):
assert _exists(src_fn)
dst_wgs_fn = src_fn.split('.')
dst_wgs_fn.insert(-1, 'WGS')
dst_wgs_fn = '.'.join(dst_wgs_fn)
if _exists(dst_wgs_fn):
os.remove(dst_wgs_fn)
cmd = ['ogr2ogr', '-f', 'GeoJSON', '-t_srs', 'EPSG:4326', dst_wgs_fn, src_fn]
# run command, check_output returns standard output
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = p.stdout \
.read() \
.decode('utf-8')
if not _exists(dst_wgs_fn):
print(' '.join(cmd))
raise Exception(output)
def polygonize_netful(src_fn, dst_fn):
    """Polygonize a channel-network raster into a GeoJSON layer.

    :param src_fn: path to the source raster (band 1 holds TopazID values)
    :param dst_fn: path of the GeoJSON file to create; rewritten afterwards
        to keep only TopazID == 1 features and to embed the CRS
    """
    assert _exists(src_fn)
    src_ds = gdal.Open(src_fn)
    srcband = src_ds.GetRasterBand(1)
    drv = ogr.GetDriverByName("GeoJSON")
    dst_ds = drv.CreateDataSource(dst_fn)
    srs = osr.SpatialReference()
    srs.ImportFromWkt(src_ds.GetProjectionRef())
    # derive the EPSG code of the raster's UTM zone for the output CRS tag
    datum, utm_zone, hemisphere = get_utm_zone(srs)
    epsg = utm_srid(utm_zone, hemisphere == 'N')
    dst_layer = dst_ds.CreateLayer("NETFUL", srs=srs)
    dst_fieldname = 'TopazID'
    fd = ogr.FieldDefn(dst_fieldname, ogr.OFTInteger)
    dst_layer.CreateField(fd)
    dst_field = 0
    prog_func = None
    gdal.Polygonize(srcband, None, dst_layer, dst_field, [],
                    callback=prog_func)
    # deleting the datasets flushes and closes the GDAL/OGR handles
    del src_ds
    del dst_ds
    # remove the TopazID = 0 feature defining a bounding box
    # and the channels
    with open(dst_fn) as fp:
        js = json.load(fp)
    if "crs" not in js:
        js["crs"] = {"type": "name", "properties": {"name": "urn:ogc:def:crs:EPSG::%s" % epsg}}
    _features = []
    for f in js['features']:
        topaz_id = str(f['properties']['TopazID'])
        if topaz_id == "1":
            _features.append(f)
    js['features'] = _features
    with open(dst_fn, 'w') as fp:
        json.dump(js, fp, allow_nan=False)
| |
the median of ifg minus model
# Only needed if reference phase correction has already been applied?
if offset:
offset_removal = nanmedian(np.ravel(fullres_phase - fullorb))
else:
offset_removal = 0
orbital_correction = fullorb - offset_removal
return orbital_correction
def __orb_inversion(design_matrix, data):
    """Least-squares model fit via pseudoinverse, ignoring NaN observations."""
    # mask out NaN elements before inverting to get the model
    valid = ~isnan(data)
    return pinv(design_matrix[valid]) @ data[valid]
def network_orbital_correction(ifg_paths, params, m_ifgs: Optional[List] = None):
    """
    This algorithm implements a network inversion to determine orbital
    corrections for a set of interferograms forming a connected network.
    Warning: This will write orbital error corrected phase_data to the ifgs.
    :param list ifg_paths: List of Ifg class objects reduced to a minimum spanning tree network
    :param dict params: dictionary of configuration parameters
    :param list m_ifgs: list of multilooked Ifg class objects (sequence must be multilooked versions of 'ifgs' arg)
    :return: None - interferogram phase data is updated and saved to disk
    """
    # pylint: disable=too-many-locals, too-many-arguments
    offset = params[C.ORBFIT_OFFSET]
    degree = params[C.ORBITAL_FIT_DEGREE]
    preread_ifgs = params[C.PREREAD_IFGS]
    intercept = params[C.ORBFIT_INTERCEPT]
    scale = params[C.ORBFIT_SCALE]
    # all orbit corrections available?
    if isinstance(ifg_paths[0], str):
        if __check_and_apply_orberrors_found_on_disc(ifg_paths, params):
            log.warning("Reusing orbfit errors from previous run!!!")
            return
        # all corrections are available in numpy files already saved - return
        ifgs = [shared.Ifg(i) for i in ifg_paths]
    else:  # alternate test paths # TODO: improve
        ifgs = ifg_paths
    # invert on the multilooked ifgs when provided, full-res otherwise
    src_ifgs = ifgs if m_ifgs is None else m_ifgs
    src_ifgs = mst.mst_from_ifgs(src_ifgs)[3]  # use networkx mst
    if preread_ifgs:
        temp_ifgs = OrderedDict(sorted(preread_ifgs.items())).values()
        ids = first_second_ids(get_all_epochs(temp_ifgs))
    else:
        ids = first_second_ids(get_all_epochs(ifgs))
    nepochs = len(set(ids))
    # call the actual inversion routine
    coefs = calc_network_orb_correction(src_ifgs, degree, scale, nepochs, intercept=intercept)
    # create full res DM to expand determined coefficients into full res
    # orbital correction (eg. expand coarser model to full size)
    if preread_ifgs:
        temp_ifg = Ifg(ifg_paths[0])  # ifgs here are paths
        temp_ifg.open()
        dm = get_design_matrix(temp_ifg, degree, intercept=intercept, scale=scale)
        temp_ifg.close()
    else:
        ifg = ifgs[0]
        dm = get_design_matrix(ifg, degree, intercept=intercept, scale=scale)
    for i in ifg_paths:
        # open if not Ifg instance
        if isinstance(i, str):  # pragma: no cover
            # are paths
            i = Ifg(i)
            i.open(readonly=False)
            shared.nan_and_mm_convert(i, params)
        _remove_network_orb_error(coefs, dm, i, ids, offset, params)
def calc_network_orb_correction(src_ifgs, degree, scale, nepochs, intercept=False):
    """
    Calculate and return coefficients for the network orbital correction model
    for a set of interferograms.

    :param list src_ifgs: iterable of Ifg objects
    :param str degree: degree of the orbital fit (planar, quadratic or part-cubic)
    :param int scale: scale factor for the design matrix to improve inversion robustness
    :param int nepochs: number of epochs in the network
    :param intercept: whether to include a constant offset to fit to each ifg. This
        intercept is discarded and not returned.
    :return coefs: a list of coefficient lists, indexed by epoch. The coefficient lists are in the following order:
        PLANAR - x, y
        QUADRATIC - x^2, y^2, x*y, x, y
        PART_CUBIC - x*y^2, x^2, y^2, x*y, x, y
    """
    # stack the phase observations of all source ifgs into one column vector
    obs = squeeze(vstack([ifg.phase_data.reshape((ifg.num_cells, 1)) for ifg in src_ifgs]))
    design = get_network_design_matrix(src_ifgs, degree, scale, intercept=intercept)
    # solve for all epoch parameters at once
    full_params = __orb_inversion(design, obs)
    # parameters per epoch; the intercept column (if fitted) is kept in the slice
    nparams = _get_num_params(degree) + intercept
    # cut the flat solution vector into per-epoch coefficient blocks
    return [full_params[start:start + nparams]
            for start in range(0, nepochs * nparams, nparams)]
def __check_and_apply_orberrors_found_on_disc(ifg_paths, params):
    """
    Re-apply orbital error corrections previously saved to disc.

    Subtracts the saved corrections from the ifg phase data only when a saved
    correction file exists for *every* ifg. Otherwise no ifg is modified and
    False is returned, so the caller can recompute corrections from scratch.

    :param list ifg_paths: list of ifg paths (or open Ifg objects)
    :param dict params: dictionary of configuration parameters
    :return: bool: True if saved corrections were found for all ifgs and applied
    """
    saved_orb_err_paths = [MultiplePaths.orb_error_path(ifg_path, params) for ifg_path in ifg_paths]
    # Bail out before touching any phase data if the set of saved corrections is
    # incomplete: applying a partial set here and returning False would make the
    # caller recompute and subtract corrections a second time (double correction).
    if not all(p.exists() for p in saved_orb_err_paths):
        return False
    for p, i in zip(saved_orb_err_paths, ifg_paths):
        orb = np.load(p)
        if isinstance(i, str):
            # are paths
            ifg = Ifg(i)
            ifg.open(readonly=False)
            shared.nan_and_mm_convert(ifg, params)
        else:
            ifg = i
        ifg.phase_data -= orb
        # set orbfit meta tag and save phase to file
        _save_orbital_error_corrected_phase(ifg, params)
    return True
def _remove_network_orb_error(coefs, dm, ifg, ids, offset, params):
    """
    Subtract the network orbital error model from one interferogram, saving
    both the correction surface and the corrected phase to disc.
    """
    err_path = MultiplePaths.orb_error_path(ifg.data_path, params)
    # orbital model for this ifg is the difference between the coefficient
    # sets of its two epochs, expanded through the design matrix
    model = dm.dot(coefs[ids[ifg.second]] - coefs[ids[ifg.first]]).reshape(ifg.shape)
    # Estimate the offset of the interferogram as the median of ifg minus model
    # Only needed if reference phase correction has already been applied?
    if offset:
        # brings all ifgs to same reference level
        model -= nanmedian(np.ravel(ifg.phase_data - model))
    # subtract orbital error from the ifg
    ifg.phase_data -= model
    # save orb error on disc
    np.save(file=err_path, arr=model)
    # set orbfit meta tag and save phase to file
    _save_orbital_error_corrected_phase(ifg, params)
def _save_orbital_error_corrected_phase(ifg, params):
    """
    Update interferogram metadata and save the latest phase to disc after
    orbital fit correction.
    """
    # orbfit tags recorded after orbital error correction
    tags = {
        ifc.PYRATE_ORB_METHOD: __methods_as_string(params[C.ORBITAL_FIT_METHOD]),
        ifc.PYRATE_ORB_DEG: __degrees_as_string(params[C.ORBITAL_FIT_DEGREE]),
        ifc.PYRATE_ORB_XLOOKS: str(params[C.ORBITAL_FIT_LOOKS_X]),
        ifc.PYRATE_ORB_YLOOKS: str(params[C.ORBITAL_FIT_LOOKS_Y]),
        ifc.PYRATE_ORBITAL_ERROR: ifc.ORB_REMOVED,
    }
    for key, value in tags.items():
        ifg.dataset.SetMetadataItem(key, value)
    ifg.write_modified_phase()
    ifg.close()
def __methods_as_string(method):
    """Map an orbital method number (1 or 2) to its string name."""
    lookup = {
        1: ifc.PYRATE_ORB_INDEPENDENT,
        2: ifc.PYRATE_ORB_NETWORK,
    }
    return str(lookup[method])
def __degrees_as_string(degree):
    """Map an orbital degree number (1, 2 or 3) to its string name."""
    lookup = {
        1: ifc.PYRATE_ORB_PLANAR,
        2: ifc.PYRATE_ORB_QUADRATIC,
        3: ifc.PYRATE_ORB_PART_CUBIC,
    }
    return str(lookup[degree])
# TODO: subtract reference pixel coordinate from x and y
def get_design_matrix(ifg, degree, intercept: bool = True, scale: int = 1):
    """
    Returns orbital error design matrix with columns for model parameters.

    :param Ifg class instance ifg: interferogram to get design matrix for
    :param str degree: model to fit (PLANAR / QUADRATIC / PART_CUBIC)
    :param bool intercept: whether to include column for the intercept term.
    :param int scale: Scale factor for design matrix to improve inversion robustness

    :return: dm: design matrix
    :rtype: ndarray
    """
    # NOTE: annotations were previously Optional[bool]/Optional[int], but None
    # is not a valid value here (scale < 1 raises; `not None` misbehaves), so
    # plain bool/int reflect the actual contract.
    if not ifg.is_open:
        ifg.open()
    if degree not in [PLANAR, QUADRATIC, PART_CUBIC]:
        raise OrbitalError("Invalid degree argument")
    if scale < 1:
        raise OrbitalError("Scale argument must be greater or equal to 1")
    # scaling required with higher degree models to help with param estimation;
    # scale is validated >= 1 above, so divide unconditionally (the previous
    # `if scale else` fallback branch was unreachable)
    xsize = ifg.x_size / scale
    ysize = ifg.y_size / scale
    # mesh needs to start at 1, otherwise first cell resolves to 0 and ignored
    xg, yg = [g+1 for g in meshgrid(range(ifg.ncols), range(ifg.nrows))]
    x = xg.reshape(ifg.num_cells) * xsize
    y = yg.reshape(ifg.num_cells) * ysize
    # TODO: performance test this vs np.concatenate (n by 1 cols)??
    dm = empty((ifg.num_cells, _get_num_params(degree, intercept)), dtype=float32)
    # apply positional parameter values, multiply pixel coordinate by cell size
    # to get distance (a coord by itself doesn't tell us distance from origin)
    if degree == PLANAR:
        dm[:, 0] = x
        dm[:, 1] = y
    elif degree == QUADRATIC:
        dm[:, 0] = x**2
        dm[:, 1] = y**2
        dm[:, 2] = x * y
        dm[:, 3] = x
        dm[:, 4] = y
    elif degree == PART_CUBIC:
        dm[:, 0] = x * (y**2)
        dm[:, 1] = x**2
        dm[:, 2] = y**2
        dm[:, 3] = x * y
        dm[:, 4] = x
        dm[:, 5] = y
    if intercept:
        dm[:, -1] = np.ones(ifg.num_cells) # estimate the intercept term
    # report condition number of the design matrix - L2-norm computed using SVD
    log.debug(f'The condition number of the design matrix is {cond(dm)}')
    return dm
def get_network_design_matrix(ifgs, degree, scale, intercept=True):
# pylint: disable=too-many-locals
"""
Returns larger-format design matrix for network error correction. The
network design matrix includes rows which relate to those of NaN cells.
:param list ifgs: List of Ifg class objects
:param str degree: model to fit (PLANAR / QUADRATIC / PART_CUBIC)
:param int scale: Scale factor for design matrix to improve inversion robustness
:param bool intercept: whether to include columns for intercept estimation.
:return: netdm: network design matrix
:rtype: ndarray
"""
if degree not in [PLANAR, QUADRATIC, PART_CUBIC]:
raise OrbitalError("Invalid degree argument")
if scale < 1:
raise OrbitalError("Scale argument must be greater or equal to 1")
nifgs = len(ifgs)
if nifgs < 1:
# can feasibly do correction on a single Ifg/2 epochs
raise OrbitalError("Invalid number of Ifgs: %s" % nifgs)
# init sparse network design matrix
nepochs = len(set(get_all_epochs(ifgs)))
# no intercepts here; they are included separately below
ncoef = _get_num_params(degree)
shape = [ifgs[0].num_cells * nifgs, ncoef * nepochs]
if intercept:
shape[1] += nepochs # add extra space for intercepts
netdm = zeros(shape, dtype=float32)
# calc location for individual design matrices
dates = [ifg.first for ifg in ifgs] + [ifg.second for ifg in ifgs]
ids = first_second_ids(dates)
tmpdm = get_design_matrix(ifgs[0], degree, intercept=intercept, scale=scale)
# iteratively build up sparse matrix
for i, ifg in enumerate(ifgs):
rs = i * ifg.num_cells # | |
class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Counselling psychologist</h2>\n",
"<h3 class=\"subtitle is-6 company\">Smith PLC</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" Port Devonville, AE\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/counselling-psychologist-63.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Insurance underwriter</h2>\n",
"<h3 class=\"subtitle is-6 company\">Patterson-Singh</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" East Thomas, AE\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/insurance-underwriter-64.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Engineer, automotive</h2>\n",
"<h3 class=\"subtitle is-6 company\">Martinez-Berry</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" New Jeffrey, AP\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/engineer-automotive-65.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Producer, radio</h2>\n",
"<h3 class=\"subtitle is-6 company\">May, <NAME> Fisher</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" Davidside, AA\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/producer-radio-66.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Dispensing optician</h2>\n",
"<h3 class=\"subtitle is-6 company\">Bailey, Owen and Thompson</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" Jamesville, AA\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/dispensing-optician-67.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Designer, fashion/clothing</h2>\n",
"<h3 class=\"subtitle is-6 company\">Vasquez Ltd</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" New Kelly, AP\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/designer-fashion-clothing-68.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Chartered loss adjuster</h2>\n",
"<h3 class=\"subtitle is-6 company\">Leblanc LLC</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" Lake Antonio, AA\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/chartered-loss-adjuster-69.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Back-End Web Developer (Python, Django)</h2>\n",
"<h3 class=\"subtitle is-6 company\"><NAME> and Mckee</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" New Elizabethside, AA\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/back-end-web-developer-python-django-70.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Forest/woodland manager</h2>\n",
"<h3 class=\"subtitle is-6 company\">Blankenship, Knight and Powell</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" Millsbury, AE\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/forest-woodland-manager-71.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Clinical cytogeneticist</h2>\n",
"<h3 class=\"subtitle is-6 company\">Patton, Haynes and Jones</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" Lloydton, AP\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/clinical-cytogeneticist-72.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Print production planner</h2>\n",
"<h3 class=\"subtitle is-6 company\">Wood Inc</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" Port Jeremy, AA\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/print-production-planner-73.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Systems developer</h2>\n",
"<h3 class=\"subtitle is-6 company\">Collins Group</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" New Elizabethtown, AA\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/systems-developer-74.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Graphic designer</h2>\n",
"<h3 class=\"subtitle is-6 company\">Flores-Nelson</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" Charlesstad, AE\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/graphic-designer-75.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Writer</h2>\n",
"<h3 class=\"subtitle is-6 company\"><NAME> and Olson</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" Josephbury, AE\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/writer-76.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Field seismologist</h2>\n",
"<h3 class=\"subtitle is-6 company\">Howard Group</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" Seanfurt, AA\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/field-seismologist-77.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Chief Strategy Officer</h2>\n",
"<h3 class=\"subtitle is-6 company\">Kramer-Edwards</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" Williambury, AA\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/chief-strategy-officer-78.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Air cabin crew</h2>\n",
"<h3 class=\"subtitle is-6 company\">Berry-Houston</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" South Jorgeside, AP\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/air-cabin-crew-79.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Python Programmer (Entry-Level)</h2>\n",
"<h3 class=\"subtitle is-6 company\">Mathews Inc</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" Robertborough, AP\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/python-programmer-entry-level-80.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Warden/ranger</h2>\n",
"<h3 class=\"subtitle is-6 company\">Riley-Johnson</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" South Saratown, AP\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/warden-ranger-81.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Sports therapist</h2>\n",
"<h3 class=\"subtitle is-6 company\">Spencer and Sons</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" Hullview, AA\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/sports-therapist-82.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Arts development officer</h2>\n",
"<h3 class=\"subtitle is-6 company\">Camacho-Sanchez</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" Philipland, AP\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer class=\"card-footer\">\n",
"<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
"<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/arts-development-officer-83.html\" target=\"_blank\">Apply</a>\n",
"</footer>\n",
"</div>\n",
"\n",
"<div class=\"card-content\">\n",
"<div class=\"media\">\n",
"<div class=\"media-left\">\n",
"<figure class=\"image is-48x48\">\n",
"<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
"</figure>\n",
"</div>\n",
"<div class=\"media-content\">\n",
"<h2 class=\"title is-5\">Printmaker</h2>\n",
"<h3 class=\"subtitle is-6 company\">Oliver and Sons</h3>\n",
"</div>\n",
"</div>\n",
"<div class=\"content\">\n",
"<p class=\"location\">\n",
" North Patty, AE\n",
" </p>\n",
"<p class=\"is-small has-text-grey\">\n",
"<time datetime=\"2021-04-08\">2021-04-08</time>\n",
"</p>\n",
"</div>\n",
"<footer | |
import keras
from keras.models import load_model
from keras import backend as K
import math
import sys
import argparse
import numpy as np
import scipy.io as sio
import os
import glob
import h5py
import cv2
import gc
''' This code is based on <NAME>., <NAME>., & Arganda-Carreras,
I. (2017). "Vision-Based Fall Detection with Convolutional Neural Networks"
Wireless Communications and Mobile Computing, 2017.
Also, new features were added by <NAME> working in
Semantix.
'''
''' Documentation: class Fextractor
This class has a few methods:
extract
The only method that should be called outside of this class is:
    extract: receives a CNN already trained up to the last two fully connected
    layers and extracts features from optical flows computed from a video.
    A feature is the result of a feed-forward pass over a stack of optical flows;
    these features are later used to train those last two layers.
'''
class Fextractor:
def __init__(self, classes, id):
self.num_features = 4096
self.folders = []
self.classes = classes
self.classes_dirs = []
self.classes_videos = []
self.class_value = []
self.data_images = []
self.data_images_1 = []
self.x_size = 224
self.y_size = 224
self.id = id
def extract(self, stream, model, data_folder):
print("### Model loading", flush=True)
extractor_model = load_model(model)
features_file = stream + '_features_' + self.id + '.h5'
labels_file = stream + '_labels_' + self.id + '.h5'
samples_file = stream + '_samples_' + self.id + '.h5'
num_file = stream + '_num_' + self.id + '.h5'
features_key = 'features'
labels_key = 'labels'
samples_key = 'samples'
num_key = 'num'
sliding_height = 10
'''
Function to load the optical flow stacks, do a feed-forward through
the feature extractor (VGG16) and store the output feature vectors in
the file 'features_file' and the labels in 'labels_file'.
Input:
* extractor_model: CNN model until the last two layers.
* features_file: path to the hdf5 file where the extracted features are
going to be stored
* labels_file: path to the hdf5 file where the labels of the features
are going to be stored
* samples_file: path to the hdf5 file where the number of stacks in
each video is going to be stored
* num_file: path to the hdf5 file where the number of fall and not fall
videos are going to be stored
* features_key: name of the key for the hdf5 file to store the features
* labels_key: name of the key for the hdf5 file to store the labels
* samples_key: name of the key for the hdf5 file to store the samples
* num_key: name of the key for the hdf5 file to store the num
* data_folder: folder with class0 and class1 folders
* sliding_height: height of stack to process
'''
try:
flow_mean = sio.loadmat('flow_mean.mat')['image_mean']
except:
print("***********************************************************",
file=sys.stderr)
print("A flow_mean.mat file with mean values for your trained CNN",
file=sys.stderr)
print("should be in the same directory as fextractor.py. This",
file=sys.stderr)
print("file also needs a image_mean key", file=sys.stderr)
print("***********************************************************",
file=sys.stderr)
exit(1)
dirs = []
num_class = []
# File to store the extracted features and datasets to store them
# IMPORTANT NOTE: 'w' mode totally erases previous data
print("### Creating h5 files", flush=True)
h5features = h5py.File(features_file,'w')
h5labels = h5py.File(labels_file,'w')
h5samples = h5py.File(samples_file, 'w')
h5num_classes = h5py.File(num_file, 'w')
cams = ['cam1', 'cam2', 'cam3', 'cam4', 'cam5', 'cam6', 'cam7', 'cam8']
datas_in_cam = dict()
videos_in_cam = dict()
cam_video_count = dict()
for c in self.classes:
datas_in_cam[c] = dict()
videos_in_cam[c] = dict()
cam_video_count[c] = dict()
h5features.create_group(c)
h5labels.create_group(c)
h5samples.create_group(c)
for cam in cams:
datas_in_cam[c][cam] = 0
videos_in_cam[c][cam] = 0
cam_video_count[c][cam] = 0
h5features[c].create_group(cam)
h5labels[c].create_group(cam)
h5samples[c].create_group(cam)
if stream == 'temporal':
file_name = '/flow_x*.jpg'
file_name_1 = '/flow_y*.jpg'
elif stream == 'pose':
file_name = '/pose_*.jpg'
elif stream == 'spatial':
file_name = '/frame_*.jpg'
else:
print("INVALID STREAM ERROR")
exit(1)
for c in range(len(self.classes)):
num_class.append(0)
if self.classes[c] != 'Falls' and self.classes[c] != 'NotFalls':
print("Sorry. Classes possibles are Falls and NotFalls, its \
hardcoded and will be expanded really soon. Its being \
used inside Extracting Features for, setting label value")
exit(1)
for dir in self.classes_dirs[c]:
check_size = glob.glob(data_folder + self.classes[c] + '/' +
dir + '/flow_x*.jpg')
self.data = glob.glob(data_folder + self.classes[c] + '/' +
dir + file_name)
if int(len(check_size)) >= sliding_height:
# search with cam is being used in this dir
# dir is something like: chute01cam2 or chute01cam2_00
num_class[-1] += 1
for cam in cams:
if cam in dir:
videos_in_cam[self.classes[c]][cam] += 1
if stream == 'temporal':
datas_in_cam[self.classes[c]][cam] = datas_in_cam[self.classes[c]][cam] + len(self.data) - sliding_height + 1
else:
datas_in_cam[self.classes[c]][cam] = datas_in_cam[self.classes[c]][cam] + len(self.data) - sliding_height
self.folders.append(data_folder + self.classes[c] + '/' + dir)
dirs.append(dir)
self.class_value.append(self.classes[c])
datasets_f = dict()
datasets_l = dict()
datasets_s = dict()
for c in self.classes:
datasets_f[c] = dict()
datasets_l[c] = dict()
datasets_s[c] = dict()
for cam in cams:
datasets_f[c][cam] = h5features[c][cam].create_dataset(cam, shape=(datas_in_cam[c][cam], self.num_features), dtype='float64')
datasets_l[c][cam] = h5labels[c][cam].create_dataset(cam, shape=(datas_in_cam[c][cam], 1), dtype='float64')
datasets_s[c][cam] = h5samples[c][cam].create_dataset(cam, shape=(videos_in_cam[c][cam], 1), dtype='int32')
dataset_num = h5num_classes.create_dataset(num_key, shape=(len(self.classes), 1),
dtype='int32')
for c in range(len(self.classes)):
dataset_num[c] = num_class[c]
number = 0
cam_cont_sum = 0
cont = dict()
for c in self.classes:
cont[c] = dict()
for cam in cams:
cam_cont_sum += datas_in_cam[c][cam]
cont[c][cam] = 0
progress_cams = 0.0
print("### Extracting Features", flush=True)
for folder, dir, classe in zip(self.folders, dirs, self.class_value):
self.update_progress(progress_cams/cam_cont_sum)
self.data_images = glob.glob(folder + file_name)
self.data_images.sort()
if stream == 'temporal':
self.data_images_1 = glob.glob(folder + file_name_1)
self.data_images_1.sort()
elif stream == 'spatial' or stream == 'pose':
self.data_images = self.data_images[:-sliding_height]
else:
print("INVALID STREAM ERROR")
exit(1)
label = -1
if classe == 'Falls':
label = 0
else:
label = 1
#label = glob.glob(data_folder + classe + '/' + dir + '/' + '*.npy')
#label_values = np.load(label[0])
if stream == 'temporal':
nb_datas = len(self.data_images) - sliding_height + 1
elif stream == 'spatial' or 'pose':
nb_datas = len(self.data_images)
else:
print("INVALID STREAM ERROR")
exit(1)
amount_datas = 100
fraction_datas = nb_datas // amount_datas
iterr = iter(self.data_images)
image_c = 0
for fraction in range(fraction_datas):
if stream == 'temporal':
flow = np.zeros(shape=(self.x_size, self.y_size, 2*sliding_height,
amount_datas), dtype=np.float64)
for i in range(amount_datas + sliding_height -1):
flow_x_file = self.data_images[image_c]
flow_y_file = self.data_images_1[image_c]
image_c += 1
img_x = cv2.imread(flow_x_file, cv2.IMREAD_GRAYSCALE)
img_y = cv2.imread(flow_y_file, cv2.IMREAD_GRAYSCALE)
# Assign an image i to the jth stack in the kth position,
# but also in the j+1th stack in the k+1th position and so
# on (for sliding window)
for s in list(reversed(range(min(sliding_height,i+1)))):
if i-s < amount_datas:
flow[:,:,2*s, i-s] = img_x
flow[:,:,2*s+1,i-s] = img_y
del img_x,img_y
gc.collect()
# Restore last images from previous fraction to start next
# fraction
image_c = image_c - sliding_height + 1
# Subtract mean
flow = flow - np.tile(flow_mean[...,np.newaxis],
(1, 1, 1, flow.shape[3]))
# Transpose for channel ordering (Tensorflow in this case)
flow = np.transpose(flow, (3, 0, 1, 2))
predictions = np.zeros((amount_datas, self.num_features),
dtype=np.float64)
truth = np.zeros((amount_datas, 1), dtype='int8')
# Process each stack: do the feed-forward pass and store in the
# hdf5 file the output
for i in range(amount_datas):
prediction = extractor_model.predict(
np.expand_dims(flow[i, ...], 0))
predictions[i, ...] = prediction
#truth[i] = self.get_media_optflow(label_values, i+(fraction*amount_datas), sliding_height)
truth[i] = label
else:
predictions = np.zeros((amount_datas, self.num_features),
dtype=np.float64)
truth = np.zeros((amount_datas, 1), dtype='int8')
# Process each stack: do the feed-forward pass and store in the
# hdf5 file the output
for i in range(amount_datas):
frame = next(iterr)
frame = cv2.imread(frame)
predictions[i, ...] = extractor_model.predict(np.expand_dims(frame, 0))
truth[i] = label
for cam in cams:
if cam in dir:
datasets_f[classe][cam][cont[classe][cam]:cont[classe][cam]+amount_datas,:] = predictions
datasets_l[classe][cam][cont[classe][cam]:cont[classe][cam]+amount_datas,:] = truth
cont[classe][cam] += amount_datas
progress_cams += amount_datas
break
amount_datas = nb_datas % amount_datas
predictions = np.zeros((amount_datas, self.num_features),
dtype=np.float64)
truth = np.zeros((amount_datas, 1), dtype='int8')
if stream == 'temporal':
flow = np.zeros(shape=(self.x_size, self.y_size, 2*sliding_height,
amount_datas), dtype=np.float64)
for i in range(amount_datas + sliding_height - 1):
flow_x_file = self.data_images[image_c]
flow_y_file = self.data_images_1[image_c]
image_c += 1
img_x = cv2.imread(flow_x_file, cv2.IMREAD_GRAYSCALE)
img_y = cv2.imread(flow_y_file, cv2.IMREAD_GRAYSCALE)
# Assign an image i to the jth stack in the kth position,
# but also in the j+1th stack in the k+1th | |
"""
File Name: genome_domain_dataset.py
Project: bioseq-learning
File Description:
This file contains functions and classes for the conserved domain dataset
for genomes. Each eligible genome will be transformed into a sentence
of words of conserved domains, along with some special words.
TODO: what would be the target of each "sentence"?
The workflow of this dataset class initialization is shown below:
1. get all the contig conserved domain csv files
2. perform train/test split of csv files
- randomly
- stratified on organism (with hold-out organism)
3. get the vocabulary of domains from training set
4. construct training, validation, and test sets
(1) tokenize the genome contigs with special characters
(2) summarize the number of sequences and prepare the indexing
"""
import os
import pickle
import logging
import resource
from glob import glob
from enum import Enum
from bisect import bisect
from collections import Counter
from dataclasses import dataclass
from multiprocessing import Pool, cpu_count
from typing import Dict, List, Optional, Set, Tuple
import torch
import pandas as pd
from tqdm import tqdm
from torch.utils.data import Dataset
from src import (
RAW_DATA_DIR_PATH,
INTERIM_DATA_DIR_PATH,
PROCESSED_DATA_DIR_PATH,
)
# paths to the PATRIC genome summary tables in the raw-data directory
BACTERIA_GENOME_SUMMARY_CSV_FILE_PATH = os.path.join(
    RAW_DATA_DIR_PATH, 'genomes', 'bacteria.csv')
REF_N_REP_BACTERIA_GENOME_SUMMARY_CSV_FILE_PATH = os.path.join(
    RAW_DATA_DIR_PATH, 'genomes', 'reference_or_representative_bacteria.csv')
# parent directory holding one sub-directory per reference/representative genome
REF_OR_REP_GNOME_PARENT_DIR_PATH = os.path.join(
    INTERIM_DATA_DIR_PATH,
    'genomes/reference_or_representative_bacteria',
)
# pickle cache paths; the {annotation} placeholder is filled with
# Annotation.value (PATRIC or RefSeq) before use
REF_OR_REP_BACTERIA_CONTIGS_WITH_CDS_FILE_PATH = os.path.join(
    PROCESSED_DATA_DIR_PATH,
    'genomes/reference_or_representative_bacteria_contigs_'
    'with_conserved_domains.{annotation}.pickle'
)
REF_OR_REP_BACTERIA_CONTIG_CDS_SEQS_FILE_PATH = os.path.join(
    PROCESSED_DATA_DIR_PATH,
    'genomes/reference_or_representative_bacteria_contig_'
    'sequences.{annotation}.pickle'
)
_LOGGER = logging.getLogger(__name__)
# raise the soft limit on open file descriptors to 4096 — presumably needed
# because many contig CSV files are processed (also via a worker Pool below);
# TODO confirm the limit is actually hit without this
resource.setrlimit(
    resource.RLIMIT_NOFILE,
    (4096, resource.getrlimit(resource.RLIMIT_NOFILE)[1]),
)
# max allowed overlap for conserved domains (fraction of a hit's span)
MAX_CD_OVERLAP: float = 0.5
HALF_MAX_CD_OVERLAP: float = MAX_CD_OVERLAP / 2
# special markers for domain sequences (BERT-style sentence tokens)
CONTIG_BEGIN_MARKER: str = '<bos>'
CONTIG_END_MARKER: str = '<eos>'
GENE_BEGIN_MARKER: str = '<cds>'
UNKNOWN_MARKER: str = '<unk>'
PADDING_MARKER: str = '<pad>'
SPECIAL_MARKERS: Set[str] = {
    GENE_BEGIN_MARKER,
    CONTIG_BEGIN_MARKER,
    CONTIG_END_MARKER,
    UNKNOWN_MARKER,
    PADDING_MARKER,
}
# fixed integer IDs for the special markers; padding is deliberately 0
SPECIAL_MARKER_TOKENIZER: Dict[str, int] = {
    GENE_BEGIN_MARKER: 1,
    CONTIG_BEGIN_MARKER: 2,
    CONTIG_END_MARKER: 3,
    UNKNOWN_MARKER: 4,
    PADDING_MARKER: 0,
}
class Annotation(Enum):
    """Enum class for genome annotation sources in PATRIC database (
    reference: https://docs.patricbrc.org/user_guides/organisms_taxon/
    genome_annotations.html)
    """
    # member values match the annotation tag embedded in the conserved-domain
    # CSV file names (e.g. *.PATRIC.csv) and in the cache-path templates
    PATRIC = 'PATRIC'
    RefSeq = 'RefSeq'
class Organism(Enum):
    """Enum class for bacterial organisms in PATRIC database (reference:
    https://en.wikipedia.org/wiki/PATRIC).

    Member values are lower-case substrings matched against lower-cased
    genome names (see __get_organism_from_genome_name); OTHERS is the
    fallback for genomes matching none of the listed organisms.
    """
    BACILLUS = 'bacillus'
    BARTONELLA = 'bartonella'
    BORRELIA = 'borrelia'
    BRUCELLA = 'brucella'
    BURKHOLDERIA = 'burkholderia'
    CAMPYLOBACTER = 'campylobacter'
    CHLAMYDOPHILA = 'chlamydophila'
    CLOSTRIDIUM = 'clostridium'
    COXIELLA = 'coxiella'
    EHRLICHIA = 'ehrlichia'
    ESCHERICHIA = 'escherichia'
    FRANCISELLA = 'francisella'
    HELICOBACTER = 'helicobacter'
    LISTERIA = 'listeria'
    MYCOBACTERIUM = 'mycobacterium'
    RICKETTSIA = 'rickettsia'
    SALMONELLA = 'salmonella'
    SHIGELLA = 'shigella'
    STAPHYLOCOCCUS = 'staphylococcus'
    VIBRIO = 'vibrio'
    YERSINIA = 'yersinia'
    # fallback member; must stay last so substring search tries it last
    OTHERS = 'others'
@dataclass
class ContigWithConservedDomains:
    """Data class for a genome contig, annotated with features (PATRIC or
    RefSeq) and the corresponding conserved domains.
    """
    # genome identifier taken from the directory layout of the contig files
    genome_id: str
    # genome name and parsed organism; None when the genome summary table
    # is unavailable at construction time
    genome_name: Optional[str]
    organism: Optional[Organism]
    ncbi_taxon_id: str
    # annotation source the feature/conserved-domain tables were made with
    annotation: Annotation
    contig_accession: str
    # per-contig feature table (TSV) and conserved-domain hit table (CSV)
    contig_feature_df: pd.DataFrame
    contig_conserved_domain_df: pd.DataFrame
    # original file paths the two dataframes above were parsed from
    contig_feature_csv_file_path: str
    contig_conserved_domain_csv_file_path: str
def __get_organism_from_genome_name(genome_name: str) -> Organism:
    """Map a genome name onto an :class:`Organism` by naive substring search.

    The first enum member (in declaration order) whose value occurs in the
    lower-cased genome name wins; a name matching nothing maps to
    ``Organism.OTHERS``.

    :param genome_name: free-text genome name
    :type genome_name: str
    :return: matching organism, or ``Organism.OTHERS``
    :rtype: Organism
    """
    _lowered = genome_name.lower()
    return next(
        (_org for _org in Organism if _org.value in _lowered),
        Organism.OTHERS,
    )
def _convert_contigs_to_contigs_with_conserved_domains(
        annotation: Annotation,
        genome_parent_dir_path: str,
        genome_summary_csv_file_path: Optional[str] = None,
) -> List[ContigWithConservedDomains]:
    """Get all the contigs inside a parent directory path into a list of
    ContigWithConservedDomains, which is essentially a data class
    with all the information on features and conserved domain annotations.

    :param annotation: annotation source (PATRIC or RefSeq) whose CSV
        files are to be collected
    :type annotation: Annotation
    :param genome_parent_dir_path: parent directory with one
        sub-directory per genome
    :type genome_parent_dir_path: str
    :param genome_summary_csv_file_path: optional genome summary CSV file
        path, which could be downloaded from PATRIC server. If given, this
        function will only process the genomes included in the CSV file by
        checking the "genome_id" column.
    :type genome_summary_csv_file_path: str
    :return: one ContigWithConservedDomains per eligible contig
    :rtype: List[ContigWithConservedDomains]
    """
    # load the genome summary dataframe; fall back to None (process every
    # genome, without genome-name/organism/taxon metadata) on failure
    try:
        _genome_summary_df = pd.read_csv(
            genome_summary_csv_file_path,
            index_col=None,
            usecols=[
                'Genome ID',
                'Genome Name',
                'Organism Name',
                'NCBI Taxon ID',
            ],
            dtype={
                'Genome ID': str,
                'Genome Name': str,
                'Organism Name': str,
                'NCBI Taxon ID': int,
            }
        )
        _genome_summary_df.columns = [
            'genome_id',
            'genome_name',
            'organism_name',
            'ncbi_taxon_id',
        ]
        _genome_summary_df = _genome_summary_df.set_index('genome_id')
        _genome_ids: Set[str] = set(_genome_summary_df.index.values)
    except (ValueError, FileNotFoundError):
        _warning_msg = \
            f'Failed to load the summary dataframe for all the ' \
            f'genomes in directory {genome_parent_dir_path}.'
        _LOGGER.warning(_warning_msg)
        _genome_summary_df = None
    # get all the paths to the *.{annotation}.csv files in parent dir
    genome_parent_dir_path = os.path.abspath(genome_parent_dir_path)
    _contig_conserved_domain_csv_file_path_pattern = os.path.join(
        genome_parent_dir_path, '**', f'*.{annotation.value}.csv')
    _contig_conserved_domain_csv_file_paths: List[str] = glob(
        _contig_conserved_domain_csv_file_path_pattern, recursive=True)
    # construct conserved domain data class for every contig
    _contig_conserved_domains = []
    for __contig_conserved_domain_csv_file_path in \
            tqdm(_contig_conserved_domain_csv_file_paths):
        # path layout: .../<genome_id>/conserved_domains/<contig>.<annot>.csv
        __split_path = \
            __contig_conserved_domain_csv_file_path.split(os.sep)
        __genome_id = __split_path[-3]
        __contig_feature_csv_file_path = \
            __contig_conserved_domain_csv_file_path.replace(
                '/conserved_domains/', '/features/').replace('.csv', '.tsv')
        # skip the contig if the feature table does not exist (should not
        # happen); bug fix: previously this only warned without `continue`
        # and then crashed on the pd.read_csv of the missing file below
        if not os.path.exists(__contig_feature_csv_file_path):
            _warning_msg = \
                f'The feature table file ({__contig_feature_csv_file_path}) ' \
                f'for current contig is missing. Skipping ...'
            _LOGGER.warning(_warning_msg)
            continue
        # skip the contig if the genome ID is not in the summary
        __genome_name, __organism = None, None
        if (_genome_summary_df is not None) and \
                (__genome_id not in _genome_ids):
            _warning_msg = \
                f'Genome {__genome_id} is not listed in the genome table ' \
                f'located in {genome_summary_csv_file_path}. Skipping ...'
            _LOGGER.warning(_warning_msg)
            continue
        __contig_accession = __split_path[-1].split('.', 1)[0]
        __contig_feature_df = pd.read_csv(
            __contig_feature_csv_file_path,
            sep='\t',
            header=0,
            index_col=None,
            dtype={'genome_id': str},
        )
        __contig_conserved_domain_df = pd.read_csv(
            __contig_conserved_domain_csv_file_path,
            header=0,
            index_col=None,
            dtype={
                'genome_id': str,
                'pssm_id': str,
                'superfamily_pssm_id': str,
            }
        )
        # get the genome organism from genome name in the feature dataframe
        if __genome_name is None:
            __genome_names = __contig_feature_df.genome_name.unique().tolist()
            if len(__genome_names) > 1:
                # ambiguous names: keep the longest one for organism parsing
                __genome_name = max(__genome_names, key=len)
                _warning_msg = \
                    f'More than one genome names ({__genome_names}) in ' \
                    f'a single contig feature dataframe for contig ' \
                    f'{__contig_accession} in genome with ID {__genome_id}. ' \
                    f'Using the longest genome name {__genome_name} ...'
                _LOGGER.warning(_warning_msg)
            else:
                __genome_name = __genome_names[0]
            __organism = __get_organism_from_genome_name(__genome_name)
        # clean up the feature dataframe: keep this contig's rows only
        __contig_feature_df = __contig_feature_df[
            __contig_feature_df.accession == __contig_accession]
        if len(__contig_feature_df) == 0:
            _warning_msg = \
                f'There are no features for accession {__contig_accession} ' \
                f'in the feature table of genome with ID {__genome_id}.'
            _LOGGER.warning(_warning_msg)
            continue
        __contig_feature_df.drop('genome_id', axis=1, inplace=True)
        __contig_feature_df.drop('genome_name', axis=1, inplace=True)
        __contig_feature_df.drop('accession', axis=1, inplace=True)
        __contig_feature_df.drop('annotation', axis=1, inplace=True)
        # clean up the conserved domain dataframe
        __contig_conserved_domain_df.drop('genome_id', axis=1, inplace=True)
        __contig_conserved_domain_df.drop('genome_name', axis=1, inplace=True)
        # bug fix: the taxon ID lookup used to crash with AttributeError
        # when the summary dataframe failed to load (None)
        __ncbi_taxon_id = \
            _genome_summary_df.loc[__genome_id, 'ncbi_taxon_id'] \
            if _genome_summary_df is not None else None
        __contig_with_conserved_domain = ContigWithConservedDomains(
            __genome_id,
            __genome_name,
            __organism,
            __ncbi_taxon_id,
            annotation,
            __contig_accession,
            __contig_feature_df,
            __contig_conserved_domain_df,
            __contig_feature_csv_file_path,
            __contig_conserved_domain_csv_file_path
        )
        _contig_conserved_domains.append(__contig_with_conserved_domain)
    return _contig_conserved_domains
def _get_contigs_with_conserved_domains(
        annotation: Annotation,
        genome_parent_dir_path: str,
        genome_summary_csv_file_path: Optional[str] = None,
) -> List[ContigWithConservedDomains]:
    """Return the annotated contig list, using a per-annotation pickle cache.

    Thin caching wrapper around
    :func:`_convert_contigs_to_contigs_with_conserved_domains`: on a cache
    hit the pickled list is returned directly; on a miss the contigs are
    built, pickled for next time, and returned.
    """
    _cache_path: str = \
        REF_OR_REP_BACTERIA_CONTIGS_WITH_CDS_FILE_PATH.format(
            annotation=annotation.value)
    # cache hit: deserialize and return immediately
    if os.path.exists(_cache_path):
        with open(_cache_path, 'rb') as __fh:
            return pickle.load(__fh)
    # cache miss: build the contig list, then persist it
    _contig_list = _convert_contigs_to_contigs_with_conserved_domains(
        annotation=annotation,
        genome_parent_dir_path=genome_parent_dir_path,
        genome_summary_csv_file_path=genome_summary_csv_file_path,
    )
    with open(_cache_path, 'wb') as __fh:
        pickle.dump(_contig_list, __fh)
    return _contig_list
def __convert_single_contig_to_domain_sequence(
        contig_with_cds: ContigWithConservedDomains,
) -> Tuple[str, List[str]]:
    """Flatten one annotated contig into a "sentence" of conserved domains.

    Each CDS feature contributes a gene-begin word (tagged with product,
    PATRIC local family, and global family IDs) followed by its conserved
    domain accessions in genomic-start order; the whole contig is wrapped
    in CONTIG_BEGIN_MARKER / CONTIG_END_MARKER.

    :param contig_with_cds: contig bundled with its feature table and
        conserved-domain hit table
    :type contig_with_cds: ContigWithConservedDomains
    :return: tuple of ('genome_id/contig_accession', list of words)
    :rtype: Tuple[str, List[str]]
    """
    _id: str = \
        f'{contig_with_cds.genome_id}/' \
        f'{contig_with_cds.contig_accession}'
    _annotation: Annotation = contig_with_cds.annotation
    # restrict the feature table to coding sequences only
    _feature_df: pd.DataFrame = \
        contig_with_cds.contig_feature_df
    _feature_df: pd.DataFrame = _feature_df[
        _feature_df['feature_type'] == 'CDS'
    ]
    # the sequence-ID column name differs between annotation sources
    if _annotation == Annotation.PATRIC:
        _feature_df: pd.DataFrame = _feature_df[
            ['patric_id', 'product', 'plfam_id', 'pgfam_id']]
    else:
        _feature_df: pd.DataFrame = _feature_df[
            ['refseq_locus_tag', 'product', 'plfam_id', 'pgfam_id']]
    _feature_df: pd.DataFrame = _feature_df.reset_index(drop=True)
    _feature_df.columns = ['seq_id', 'product', 'plfam_id', 'pgfam_id']
    _conserved_domain_df: pd.DataFrame = \
        contig_with_cds.contig_conserved_domain_df
    # keep only the informative hit types
    _hit_types = {'Specific', 'Non-specific', 'Superfamily'}
    _conserved_domain_df: pd.DataFrame = _conserved_domain_df[
        _conserved_domain_df['hit_type'].isin(_hit_types)
    ]
    _conserved_domain_df: pd.DataFrame = \
        _conserved_domain_df[[
            'seq_id', 'accession', 'hit_type', 'pssm_id',
            'start', 'end', 'e_value', 'bitscore',
        ]]
    _conserved_domain_df: pd.DataFrame = \
        _conserved_domain_df.reset_index(drop=True)

    def __get_seq_id(__seq_id: str):
        # normalize a FASTA record name into a plain sequence ID
        if __seq_id.count('|') == 1:
            return __seq_id
        elif __seq_id.count('|') >= 2 and \
                _annotation == Annotation.PATRIC:
            return __seq_id.rstrip('|').rsplit('|', 1)[0]
        elif __seq_id.count('|') >= 2 and \
                _annotation == Annotation.RefSeq:
            return __seq_id.rstrip('|').rsplit('|', 2)[1]
        else:
            _warning_msg = \
                f'cannot parse the PATRIC ID from FASTA ' \
                f'sequence record with name {__seq_id}.'
            # use the module logger instead of print, consistent with the
            # rest of this file
            _LOGGER.warning(_warning_msg)
            return ''

    _conserved_domain_df['seq_id'] = \
        _conserved_domain_df['seq_id'].apply(__get_seq_id)
    _feature_df = _feature_df.set_index('seq_id')
    _cds_seq_ids = _feature_df.index.values
    _ret_seq: List[str] = [CONTIG_BEGIN_MARKER]
    for __cds_seq_id in _cds_seq_ids:
        __cds_conserved_domain_df = _conserved_domain_df[
            _conserved_domain_df['seq_id'] == __cds_seq_id
        ].copy()
        # best hits first so the greedy selection keeps strongest domains
        __cds_conserved_domain_df.sort_values(
            by=['e_value', 'bitscore'],
            ascending=[True, False],
            inplace=True,
        )
        # greedily keep non-overlapping hits; collect the kept rows in a
        # list and build the dataframe once (DataFrame.append was removed
        # in pandas 2.0 and grows quadratically when used in a loop)
        __kept_rows = []
        while len(__cds_conserved_domain_df) > 0:
            __curr_conserved_domain = \
                __cds_conserved_domain_df.iloc[0]
            __curr_start = __curr_conserved_domain.start
            __curr_end = __curr_conserved_domain.end
            __kept_rows.append(__curr_conserved_domain)
            # drop the hits with more than MAX_CD_OVERLAP
            __overlap = int((__curr_end - __curr_start) * HALF_MAX_CD_OVERLAP)
            __curr_start_w_overlap = __curr_start + __overlap
            __curr_end_w_overlap = __curr_end - __overlap
            __cds_conserved_domain_df.drop(
                __cds_conserved_domain_df[(
                    (__cds_conserved_domain_df.start < __curr_end_w_overlap) &
                    (__cds_conserved_domain_df.end > __curr_start_w_overlap)
                )].index,
                inplace=True,
            )
            __cds_conserved_domain_df.reset_index(drop=True, inplace=True)
        __cds_proc_conserved_domain_df = pd.DataFrame(
            __kept_rows, columns=__cds_conserved_domain_df.columns)
        # emit the kept domains in genomic order
        __cds_proc_conserved_domain_df.sort_values(
            by=['start', 'bitscore'],
            inplace=True,
        )
        __cds_product = _feature_df.loc[__cds_seq_id, 'product']
        __cds_plfam_id = _feature_df.loc[__cds_seq_id, 'plfam_id']
        __cds_pgfam_id = _feature_df.loc[__cds_seq_id, 'pgfam_id']
        _ret_seq.append(f'{GENE_BEGIN_MARKER}/{__cds_product}/{__cds_plfam_id}/{__cds_pgfam_id}')
        _ret_seq.extend(__cds_proc_conserved_domain_df['accession'].to_list())
    _ret_seq.append(CONTIG_END_MARKER)
    return _id, _ret_seq
def _convert_contigs_to_domain_sequences(
contigs_with_cds: List[ContigWithConservedDomains],
) -> Dict[str, List[str]]:
__arg_list_for_single_contig: List[Tuple[ContigWithConservedDomains]] = []
print('Preparing the arguments for contig conversion ...')
for __contig_with_cds in tqdm(contigs_with_cds):
__arg_list_for_single_contig.append((__contig_with_cds, ))
print('Converting contigs into sequences of conserved domains ...')
with Pool(cpu_count()) as _pool:
_contig_cds_seq: List[Tuple[str, List[str]]] = | |
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
"""
Jobclass to execute python scripts and jupyter notebooks
"""
import os
import shutil
from pyiron_base.job.generic import GenericJob
from pyiron_base.generic.parameters import GenericParameters
from pyiron_base.generic.datacontainer import DataContainer
__author__ = "<NAME>"
__copyright__ = (
"Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "production"
__date__ = "Sep 1, 2017"
class ScriptJob(GenericJob):
"""
The ScriptJob class allows to submit Python scripts and Jupyter notebooks to the pyiron job management system.
Args:
project (ProjectHDFio): ProjectHDFio instance which points to the HDF5 file the job is stored in
job_name (str): name of the job, which has to be unique within the project
Simple example:
Step 1. Create the notebook to be submitted, for ex. 'example.ipynb', and save it -- Can contain any code like:
```
import json
with open('script_output.json','w') as f:
json.dump({'x': [0,1]}, f) # dump some data into a JSON file
```
Step 2. Create the submitter notebook, for ex. 'submit_example_job.ipynb', which submits 'example.ipynb' to the
pyiron job management system, which can have the following code:
```
from pyiron_base import Project
pr = Project('scriptjob_example') # save the ScriptJob in the 'scriptjob_example' project
scriptjob = pr.create.job.ScriptJob('scriptjob') # create a ScriptJob named 'scriptjob'
scriptjob.script_path = 'example.ipynb' # specify the PATH to the notebook you want to submit.
```
Step 3. Check the job table to get details about 'scriptjob' by using:
```
pr.job_table()
```
Step 4. If the status of 'scriptjob' is 'finished', load the data from the JSON file into the
'submit_example_job.ipynb' notebook by using:
```
import json
with open(scriptjob.working_directory + '/script_output.json') as f:
data = json.load(f) # load the data from the JSON file
```
More sophisticated example:
The script in ScriptJob can also be more complex, e.g. running its own pyiron calculations.
Here we show how it is leveraged to run a multi-core atomistic calculation.
Step 1. 'example.ipynb' can contain pyiron_atomistics code like:
```
from pyiron_atomistics import Project
pr = Project('example')
job = pr.create.job.Lammps('job') # we name the job 'job'
job.structure = pr.create.structure.ase_bulk('Fe') # specify structure
# Optional: get an input value from 'submit_example_job.ipynb', the notebook which submits
# 'example.ipynb'
number_of_cores = pr.data.number_of_cores
job.server.cores = number_of_cores
job.run() # run the job
# save a custom output, that can be used by the notebook 'submit_example_job.ipynb'
job['user/my_custom_output'] = 16
```
Step 2. 'submit_example_job.ipynb', can then have the following code:
```
from pyiron_base import Project
pr = Project('scriptjob_example') # save the ScriptJob in the 'scriptjob_example' project
scriptjob = pr.create.job.ScriptJob('scriptjob') # create a ScriptJob named 'scriptjob'
scriptjob.script_path = 'example.ipynb' # specify the PATH to the notebook you want to submit.
# In this example case, 'example.ipynb' is in the same
# directory as 'submit_example_job.ipynb'
# Optional: to submit the notebook to a queueing system
number_of_cores = 1 # number of cores to be used
scriptjob.server.cores = number_of_cores
scriptjob.server.queue = 'cmfe' # specify the queue to which the ScriptJob job is to be submitted
scriptjob.server.run_time = 120 # specify the runtime limit for the ScriptJob job in seconds
# Optional: save an input, such that it can be accessed by 'example.ipynb'
pr.data.number_of_cores = number_of_cores
pr.data.write()
# run the ScriptJob job
scriptjob.run()
```
Step 3. Check the job table by using:
```
pr.job_table()
```
in addition to containing details on 'scriptjob', the job_table also contains the details of the child
'job/s' (if any) that were submitted within the 'example.ipynb' notebook.
Step 4. Reload and analyse the child 'job/s': If the status of a child 'job' is 'finished', it can be loaded
into the 'submit_example_job.ipynb' notebook using:
```
job = pr.load('job') # remember in Step 1., we wanted to run a job named 'job', which has now
# 'finished'
```
this loads 'job' into the 'submit_example_job.ipynb' notebook, which can be then used for analysis,
```
job.output.energy_pot[-1] # via the auto-complete
job['user/my_custom_output'] # the custom output, directly from the hdf5 file
```
Attributes:
attribute: job_name
name of the job, which has to be unique within the project
.. attribute:: status
execution status of the job, can be one of the following [initialized, appended, created, submitted, running,
aborted, collect, suspended, refresh, busy, finished]
.. attribute:: job_id
unique id to identify the job in the pyiron database
.. attribute:: parent_id
job id of the predecessor job - the job which was executed before the current one in the current job series
.. attribute:: master_id
job id of the master job - a meta job which groups a series of jobs, which are executed either in parallel or in
serial.
.. attribute:: child_ids
list of child job ids - only meta jobs have child jobs - jobs which list the meta job as their master
.. attribute:: project
Project instance the jobs is located in
.. attribute:: project_hdf5
ProjectHDFio instance which points to the HDF5 file the job is stored in
.. attribute:: job_info_str
short string to describe the job by it is job_name and job ID - mainly used for logging
.. attribute:: working_directory
working directory of the job is executed in - outside the HDF5 file
.. attribute:: path
path to the job as a combination of absolute file system path and path within the HDF5 file.
.. attribute:: version
Version of the hamiltonian, which is also the version of the executable unless a custom executable is used.
.. attribute:: executable
Executable used to run the job - usually the path to an external executable.
.. attribute:: library_activated
For job types which offer a Python library pyiron can use the python library instead of an external executable.
.. attribute:: server
Server object to handle the execution environment for the job.
.. attribute:: queue_id
the ID returned from the queuing system - it is most likely not the same as the job ID.
.. attribute:: logger
logger object to monitor the external execution and internal pyiron warnings.
.. attribute:: restart_file_list
list of files which are used to restart the calculation from these files.
.. attribute:: job_type
Job type object with all the available job types: ['ExampleJob', 'SerialMaster', 'ParallelMaster', 'ScriptJob',
'ListMaster']
.. attribute:: script_path
the absolute path to the python script
"""
def __init__(self, project, job_name):
super(ScriptJob, self).__init__(project, job_name)
self.__version__ = "0.1"
self.__hdf_version__ = "0.2.0"
self.__name__ = "Script"
self._script_path = None
self.input = DataContainer(table_name="custom_dict")
    @property
    def script_path(self):
        """
        Python script path

        Returns:
            str: absolute path to the python script, or None if it has not
            been set yet
        """
        return self._script_path
@script_path.setter
def script_path(self, path):
"""
Python script path
Args:
path (str): relative or absolute path to the python script or a corresponding notebook
"""
if isinstance(path, str):
self._script_path = self._get_abs_path(path)
self.executable = self._executable_command(
working_directory=self.working_directory, script_path=self._script_path
)
else:
raise TypeError(
"path should be a string, but ", path, " is a ", type(path), " instead."
)
def validate_ready_to_run(self):
if self.script_path is None:
raise TypeError(
"ScriptJob.script_path expects a path but got None. Please provide a path before "
+ "running."
)
    def set_input_to_read_only(self):
        """
        This function enforces read-only mode for the input classes, but it
        has to be implemented in the individual classes.
        """
        self.input.read_only = True
def to_hdf(self, hdf=None, group_name=None):
"""
Store the ScriptJob in an HDF5 file
Args:
hdf (ProjectHDFio): HDF5 group object - optional
group_name (str): HDF5 subgroup name - optional
"""
super(ScriptJob, self).to_hdf(hdf=hdf, group_name=group_name)
with self.project_hdf5.open("input") as hdf5_input:
hdf5_input["path"] = self._script_path
self.input.to_hdf(hdf5_input)
def from_hdf(self, hdf=None, group_name=None):
"""
Restore the ScriptJob from an HDF5 file
Args:
hdf (ProjectHDFio): HDF5 group object - optional
group_name (str): HDF5 subgroup name - optional
"""
super(ScriptJob, self).from_hdf(hdf=hdf, group_name=group_name)
if "HDF_VERSION" in self.project_hdf5.list_nodes():
version = self.project_hdf5["HDF_VERSION"]
else:
version = "0.1.0"
if version == "0.1.0":
with self.project_hdf5.open("input") as hdf5_input:
try:
self.script_path = hdf5_input["path"]
gp = GenericParameters(table_name="custom_dict")
gp.from_hdf(hdf5_input)
for k in gp.keys():
self.input[k] = gp[k]
except TypeError:
pass
elif version == "0.2.0":
with self.project_hdf5.open("input") as hdf5_input:
try:
self.script_path = hdf5_input["path"]
except TypeError:
| |
work_data_list:
validation_source_data = copy.deepcopy(work_data)
validation_source_data = del_none(validation_source_data)
# Adding schema valdation for Work
validator = Core(
source_data=validation_source_data, schema_files=["work_schema.yaml"])
validator.validate(raise_exception=True)
try:
if org is None:
org = current_user.organisation if current_user else None
task = Task.create(org=org, filename=filename, task_type=TaskType.WORK)
for work_data in work_data_list:
title = get_val(work_data, "title", "title", "value")
sub_title = get_val(work_data, "title", "subtitle", "value")
translated_title = get_val(work_data, "title", "translated-title", "value")
translated_title_language_code = get_val(work_data, "title", "translated-title", "language-code")
journal_title = get_val(work_data, "journal-title", "value")
short_description = get_val(work_data, "short-description")
citation_type = get_val(work_data, "citation", "citation-type")
citation_value = get_val(work_data, "citation", "citation-value")
type = get_val(work_data, "type")
publication_media_type = get_val(work_data, "publication-date", "media-type")
url = get_val(work_data, "url", "value")
language_code = get_val(work_data, "language-code")
country = get_val(work_data, "country", "value")
# Removing key 'media-type' from the publication_date dict. and only considering year, day & month
publication_date = PartialDate.create(
{date_key: work_data.get("publication-date")[date_key] for date_key in
('day', 'month', 'year')}) if work_data.get("publication-date") else None
work_record = WorkRecord.create(
task=task,
title=title,
sub_title=sub_title,
translated_title=translated_title,
translated_title_language_code=translated_title_language_code,
journal_title=journal_title,
short_description=short_description,
citation_type=citation_type,
citation_value=citation_value,
type=type,
publication_date=publication_date,
publication_media_type=publication_media_type,
url=url,
language_code=language_code,
country=country)
invitees_list = work_data.get("invitees") if work_data.get("invitees") else None
if invitees_list:
for invitee in invitees_list:
identifier = invitee.get("identifier")
email = invitee.get("email")
first_name = invitee.get("first-name")
last_name = invitee.get("last-name")
orcid_id = invitee.get("ORCID-iD")
put_code = invitee.get("put-code")
visibility = get_val(invitee, "visibility")
WorkInvitees.create(
work_record=work_record,
identifier=identifier,
email=email.lower(),
first_name=first_name,
last_name=last_name,
orcid=orcid_id,
visibility=visibility,
put_code=put_code)
else:
raise SchemaError(u"Schema validation failed:\n - "
u"Expecting Invitees for which the work record will be written")
contributors_list = work_data.get("contributors").get("contributor") if \
work_data.get("contributors") else None
if contributors_list:
for contributor in contributors_list:
orcid_id = get_val(contributor, "contributor-orcid", "path")
name = get_val(contributor, "credit-name", "value")
email = get_val(contributor, "contributor-email", "value")
role = get_val(contributor, "contributor-attributes", "contributor-role")
contributor_sequence = get_val(contributor, "contributor-attributes",
"contributor-sequence")
WorkContributor.create(
work_record=work_record,
orcid=orcid_id,
name=name,
email=email,
role=role,
contributor_sequence=contributor_sequence)
external_ids_list = work_data.get("external-ids").get("external-id") if \
work_data.get("external-ids") else None
if external_ids_list:
for external_id in external_ids_list:
type = external_id.get("external-id-type")
value = external_id.get("external-id-value")
url = get_val(external_id, "external-id-url", "value")
relationship = external_id.get("external-id-relationship")
WorkExternalId.create(
work_record=work_record,
type=type,
value=value,
url=url,
relationship=relationship)
else:
raise SchemaError(u"Schema validation failed:\n - An external identifier is required")
return task
except Exception:
db.rollback()
app.logger.exception("Failed to load work record file.")
raise
    class Meta:  # noqa: D101,D106
        # Peewee table mapping for the WorkRecord model.
        db_table = "work_record"
        table_alias = "wr"
class ContributorModel(BaseModel):
    """Common model bits of the contributor records."""

    # ORCID iD of the contributor, when known.
    orcid = OrcidIdField(null=True)
    name = CharField(max_length=120, null=True)
    role = CharField(max_length=120, null=True)
    email = CharField(max_length=120, null=True)
class WorkContributor(ContributorModel):
    """Researcher or contributor - related to work."""

    # Parent batch record; contributor rows are removed together with it.
    work_record = ForeignKeyField(
        WorkRecord, related_name="work_contributors", on_delete="CASCADE")
    contributor_sequence = CharField(max_length=120, null=True)

    class Meta:  # noqa: D101,D106
        db_table = "work_contributor"
        table_alias = "wc"
class FundingContributor(ContributorModel):
    """Researcher or contributor - receiver of the funding."""

    # Parent batch record; contributor rows are removed together with it.
    funding_record = ForeignKeyField(
        FundingRecord, related_name="contributors", on_delete="CASCADE")

    class Meta:  # noqa: D101,D106
        db_table = "funding_contributor"
        table_alias = "fc"
class InviteesModel(BaseModel):
    """Common model bits of the invitees records."""

    identifier = CharField(max_length=120, null=True)
    email = CharField(max_length=120, null=True)
    first_name = CharField(max_length=120, null=True)
    last_name = CharField(max_length=120, null=True)
    orcid = OrcidIdField(null=True)
    # put-code of an already existing record in the ORCID registry, if any.
    put_code = IntegerField(null=True)
    visibility = CharField(null=True, max_length=100)
    status = TextField(null=True, help_text="Record processing status.")
    processed_at = DateTimeField(null=True)

    def add_status_line(self, line):
        """Add a text line to the status for logging processing progress."""
        # Each entry is timestamped (UTC, second precision) on its own line.
        ts = datetime.utcnow().isoformat(timespec="seconds")
        self.status = (self.status + "\n" if self.status else '') + ts + ": " + line
class PeerReviewInvitee(InviteesModel):
    """Researcher or Invitee - related to peer review."""

    # Parent batch record; invitee rows are removed together with it.
    peer_review_record = ForeignKeyField(
        PeerReviewRecord, related_name="peer_review_invitee", on_delete="CASCADE")

    class Meta:  # noqa: D101,D106
        db_table = "peer_review_invitee"
        table_alias = "pi"
class WorkInvitees(InviteesModel):
    """Researcher or Invitees - related to work."""

    # Parent batch record; invitee rows are removed together with it.
    work_record = ForeignKeyField(
        WorkRecord, related_name="work_invitees", on_delete="CASCADE")

    class Meta:  # noqa: D101,D106
        db_table = "work_invitees"
        table_alias = "wi"
class FundingInvitees(InviteesModel):
    """Researcher or Invitees - related to funding."""

    # Parent batch record; invitee rows are removed together with it.
    funding_record = ForeignKeyField(
        FundingRecord, related_name="funding_invitees", on_delete="CASCADE")

    class Meta:  # noqa: D101,D106
        db_table = "funding_invitees"
        table_alias = "fi"
class ExternalIdModel(BaseModel):
    """Common model bits of the ExternalId records."""

    # External identifier as uploaded: type (e.g. "doi"), value, optional URL
    # and its relationship to the parent record.
    type = CharField(max_length=255)
    value = CharField(max_length=255)
    url = CharField(max_length=200, null=True)
    relationship = CharField(max_length=255, null=True)
class WorkExternalId(ExternalIdModel):
    """Work ExternalId loaded for batch processing."""

    # Parent batch record; external-id rows are removed together with it.
    work_record = ForeignKeyField(
        WorkRecord, related_name="external_ids", on_delete="CASCADE")

    class Meta:  # noqa: D101,D106
        db_table = "work_external_id"
        table_alias = "wei"
class PeerReviewExternalId(ExternalIdModel):
    """Peer Review ExternalId loaded for batch processing."""

    # Parent batch record; external-id rows are removed together with it.
    peer_review_record = ForeignKeyField(
        PeerReviewRecord, related_name="external_ids", on_delete="CASCADE")

    class Meta:  # noqa: D101,D106
        db_table = "peer_review_external_id"
        table_alias = "pei"
class ExternalId(ExternalIdModel):
    """Funding ExternalId loaded for batch processing."""

    # Parent batch record; external-id rows are removed together with it.
    funding_record = ForeignKeyField(
        FundingRecord, related_name="external_ids", on_delete="CASCADE")

    class Meta:  # noqa: D101,D106
        db_table = "external_id"
        table_alias = "ei"
class Url(BaseModel, AuditMixin):
    """Shortened URLs."""

    short_id = CharField(unique=True, max_length=5)
    url = TextField()

    @classmethod
    def shorten(cls, url):
        """Create a shortened URL entry or retrieve an existing one.

        Random 5-character alphanumeric ids are generated until an unused one
        is found (a collision simply retries).
        """
        try:
            u = cls.get(url=url)
        except cls.DoesNotExist:
            while True:
                short_id = ''.join(
                    random.choice(string.ascii_letters + string.digits) for _ in range(5))
                try:
                    # Probe for a collision; DoesNotExist means the id is free.
                    cls.get(short_id=short_id)
                except cls.DoesNotExist:
                    u = cls.create(short_id=short_id, url=url)
                    return u
        return u
class Funding(BaseModel):
    """Uploaded research Funding record."""

    # NOTE(review): these fields mirror Url (short_id/url) rather than any
    # funding data -- looks like a copy-and-paste placeholder; confirm intent.
    short_id = CharField(unique=True, max_length=5)
    url = TextField()
class Client(BaseModel, AuditMixin):
    """API Client Application/Consumer.

    A client is the app which wants to use the resource of a user.
    It is suggested that the client is registered by a user on your site,
    but it is not required.
    """

    name = CharField(null=True, max_length=40, help_text="human readable name, not required")
    homepage_url = CharField(null=True, max_length=100)
    description = CharField(
        null=True, max_length=400, help_text="human readable description, not required")
    user = ForeignKeyField(
        User, null=True, on_delete="SET NULL", help_text="creator of the client, not required")
    org = ForeignKeyField(Organisation, on_delete="CASCADE", related_name="client_applications")
    client_id = CharField(max_length=100, unique=True)
    client_secret = CharField(max_length=55, unique=True)
    is_confidential = BooleanField(null=True, help_text="public or confidential")
    grant_type = CharField(max_length=18, default="client_credentials", null=True)
    response_type = CharField(max_length=4, default="code", null=True)
    # Space-separated lists backing the redirect_uris/default_scopes properties.
    _redirect_uris = TextField(null=True)
    _default_scopes = TextField(null=True)

    def save(self, *args, **kwargs):  # noqa: D102
        # Default the creator to the currently logged-in user on first save.
        if self.is_dirty() and self.user_id is None and current_user:
            self.user_id = current_user.id
        return super().save(*args, **kwargs)

    @property
    def client_type(self):  # noqa: D102
        if self.is_confidential:
            return 'confidential'
        return 'public'

    @property
    def redirect_uris(self):  # noqa: D102
        if self._redirect_uris:
            return self._redirect_uris.split()
        return []

    @redirect_uris.setter
    def redirect_uris(self, value):
        # Only non-empty plain strings are stored; other values are silently ignored.
        if value and isinstance(value, str):
            self._redirect_uris = value

    @property
    def callback_urls(self):  # noqa: D102
        return self._redirect_uris

    @callback_urls.setter
    def callback_urls(self, value):
        self._redirect_uris = value

    @property
    def default_redirect_uri(self):  # noqa: D102
        # First whitespace-separated URI, or None when none are configured.
        ru = self.redirect_uris
        if not ru:
            return None
        return self.redirect_uris[0]

    @property
    def default_scopes(self):  # noqa: D102
        if self._default_scopes:
            return self._default_scopes.split()
        return []

    def validate_scopes(self, scopes):
        """Validate client requested scopes."""
        # An empty scope request or one containing "/webhook" is accepted.
        return "/webhook" in scopes or not scopes

    def __repr__(self):  # noqa: D102
        return self.name or self.homepage_url or self.description
class Grant(BaseModel):
    """Grant Token / Authorization Code.

    A grant token is created in the authorization flow, and will be destroyed when
    the authorization is finished. In this case, it would be better to store the data
    in a cache, which leads to better performance.
    """

    user = ForeignKeyField(User, on_delete="CASCADE")
    # client_id = db.Column(
    #     db.String(40), db.ForeignKey('client.client_id'),
    #     nullable=False,
    # )
    client = ForeignKeyField(Client, index=True)
    code = CharField(max_length=255, index=True)
    redirect_uri = CharField(max_length=255, null=True)
    expires = DateTimeField(null=True)
    # Space-separated scope list backing the ``scopes`` property.
    _scopes = TextField(null=True)

    # def delete(self):
    #     super().delete().execute()
    #     return self

    @property
    def scopes(self):  # noqa: D102
        if self._scopes:
            return self._scopes.split()
        return []

    @scopes.setter
    def scopes(self, value):  # noqa: D102
        # Accept either a pre-joined string or an iterable of scope names.
        if isinstance(value, str):
            self._scopes = value
        else:
            self._scopes = ' '.join(value)
class Token(BaseModel):
    """Bearer Token.

    A bearer token is the final token that could be used by the client.
    There are other token types, but bearer token is widely used.
    Flask-OAuthlib only comes with a bearer token.
    """

    client = ForeignKeyField(Client)
    user = ForeignKeyField(User, null=True, on_delete="SET NULL")
    token_type = CharField(max_length=40)
    access_token = CharField(max_length=100, unique=True)
    refresh_token = CharField(max_length=100, unique=True, null=True)
    expires = DateTimeField(null=True)
    # Space-separated scope list backing the ``scopes`` property.
    _scopes = TextField(null=True)

    @property
    def scopes(self):  # noqa: D102
        if self._scopes:
            return self._scopes.split()
        return []

    @property
    def expires_at(self):  # noqa: D102
        return self.expires
def readup_file(input_file):
    """Read the whole file object and return its content decoded to text.

    UTF-8 (with and without BOM) and UTF-16 are tried first; latin-1 is the
    final fallback, which never fails, so some text is always returned.
    """
    data = input_file.read()
    for codec in ("utf-8-sig", "utf-8", "utf-16"):
        try:
            return data.decode(codec)
        except UnicodeDecodeError:
            pass
    return data.decode("latin-1")
def create_tables():
    """Create all DB tables."""
    try:
        db.connect()
    except OperationalError:
        # Already connected (or the backend auto-connects); carry on.
        pass
    # NOTE: the order matters -- models appear before the models whose
    # foreign keys reference them, so tables are created dependency-first.
    for model in [
            File,
            Organisation,
            User,
            UserOrg,
            OrcidToken,
            UserOrgAffiliation,
            OrgInfo,
            OrcidApiCall,
            OrcidAuthorizeCall,
            Task,
            AffiliationRecord,
            GroupIdRecord,
            OrgInvitation,
            Url,
            UserInvitation,
            FundingRecord,
            WorkRecord,
            WorkContributor,
            WorkExternalId,
            WorkInvitees,
            FundingContributor,
            FundingInvitees,
            ExternalId,
            PeerReviewRecord,
            PeerReviewInvitee,
            PeerReviewExternalId,
            Client,
            Grant,
            Token,
    ]:
        try:
            model.create_table()
        except (ProgrammingError, OperationalError) as ex:
            # Re-running against an existing schema is fine; anything else is not.
            if "already exists" in str(ex):
                app.logger.info(f"Table '{model._meta.name}' already exists")
            else:
                raise ex
def create_audit_tables():
    """Create all DB audit tables for PostgreSQL DB."""
    try:
        db.connect()
    except OperationalError:
        # Already connected; carry on.
        pass
    # The auditing DDL is PostgreSQL-specific; other backends are skipped.
    if isinstance(db, PostgresqlDatabase):
        # Open as binary and decode leniently via readup_file().
        with open(os.path.join(os.path.dirname(__file__), "sql", "auditing.sql"), 'br') as input_file:
            sql = readup_file(input_file)
            db.commit()
            with db.get_cursor() as cr:
                cr.execute(sql)
| |
a pinfo(2)/psearch call from a target name and the escape
(i.e. ? or ??)"""
method = 'pinfo2' if esc == '??' \
else 'psearch' if '*' in target \
else 'pinfo'
arg = " ".join([method, target])
#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
t_magic_name, _, t_magic_arg_s = arg.partition(' ')
t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
if next_input is None:
return 'get_ipython().run_line_magic(%r, %r)' % (t_magic_name, t_magic_arg_s)
else:
return 'get_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
(next_input, t_magic_name, t_magic_arg_s)
def _tr_help(content):
"""Translate lines escaped with: ?
A naked help line should fire the intro help screen (shell.show_usage())
"""
if not content:
return 'get_ipython().show_usage()'
return _make_help_call(content, '?')
def _tr_help2(content):
"""Translate lines escaped with: ??
A naked help line should fire the intro help screen (shell.show_usage())
"""
if not content:
return 'get_ipython().show_usage()'
return _make_help_call(content, '??')
def _tr_magic(content):
"Translate lines escaped with a percent sign: %"
name, _, args = content.partition(' ')
return 'get_ipython().run_line_magic(%r, %r)' % (name, args)
def _tr_quote(content):
"Translate lines escaped with a comma: ,"
name, _, args = content.partition(' ')
return '%s("%s")' % (name, '", "'.join(args.split()) )
def _tr_quote2(content):
"Translate lines escaped with a semicolon: ;"
name, _, args = content.partition(' ')
return '%s("%s")' % (name, args)
def _tr_paren(content):
"Translate lines escaped with a slash: /"
name, _, args = content.partition(' ')
return '%s(%s)' % (name, ", ".join(args.split()))
# Dispatch table mapping a one/two-character escape to the callable that
# rewrites the remainder of the line into real Python source.
tr = { ESC_SHELL : 'get_ipython().system({!r})'.format,
       ESC_SH_CAP : 'get_ipython().getoutput({!r})'.format,
       ESC_HELP : _tr_help,
       ESC_HELP2 : _tr_help2,
       ESC_MAGIC : _tr_magic,
       ESC_QUOTE : _tr_quote,
       ESC_QUOTE2 : _tr_quote2,
       ESC_PAREN : _tr_paren }
class EscapedCommand(TokenTransformBase):
    """Transformer for escaped commands like %foo, !foo, or /foo"""
    @classmethod
    def find(cls, tokens_by_line):
        """Find the first escaped command (%foo, !foo, etc.) in the cell.
        """
        for line in tokens_by_line:
            if not line:
                continue
            # Skip leading INDENT/DEDENT tokens so indented escapes are found.
            ix = 0
            ll = len(line)
            while ll > ix and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                ix += 1
            if ix >= ll:
                continue
            if line[ix].string in ESCAPE_SINGLES:
                return cls(line[ix].start)
    def transform(self, lines):
        """Transform an escaped line found by the ``find()`` classmethod.
        """
        start_line, start_col = self.start_line, self.start_col
        # Preserve the original indentation in the replacement line.
        indent = lines[start_line][:start_col]
        end_line = find_end_of_continued_line(lines, start_line)
        line = assemble_continued_line(lines, (start_line, start_col), end_line)
        # Two-character escapes take precedence over single-character ones.
        if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
            escape, content = line[:2], line[2:]
        else:
            escape, content = line[:1], line[1:]
        if escape in tr:
            call = tr[escape](content)
        else:
            # Unknown escape: the line is replaced with an empty statement.
            call = ''
        lines_before = lines[:start_line]
        new_line = indent + call + '\n'
        lines_after = lines[end_line + 1:]
        return lines_before + [new_line] + lines_after
_help_end_re = re.compile(r"""(%{0,2}
[a-zA-Z_*][\w*]* # Variable name
(\.[a-zA-Z_*][\w*]*)* # .etc.etc
)
(\?\??)$ # ? or ??
""",
re.VERBOSE)
class HelpEnd(TokenTransformBase):
    """Transformer for help syntax: obj? and obj??"""
    # This needs to be higher priority (lower number) than EscapedCommand so
    # that inspecting magics (%foo?) works.
    priority = 5
    def __init__(self, start, q_locn):
        """``start`` locates the name token, ``q_locn`` the question mark."""
        super().__init__(start)
        self.q_line = q_locn[0] - 1 # Shift from 1-indexed to 0-indexed
        self.q_col = q_locn[1]
    @classmethod
    def find(cls, tokens_by_line):
        """Find the first help command (foo?) in the cell.
        """
        for line in tokens_by_line:
            # Last token is NEWLINE; look at last but one
            if len(line) > 2 and line[-2].string == '?':
                # Find the first token that's not INDENT/DEDENT
                ix = 0
                while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                    ix += 1
                return cls(line[ix].start, line[-2].start)
    def transform(self, lines):
        """Transform a help command found by the ``find()`` classmethod.
        """
        piece = ''.join(lines[self.start_line:self.q_line+1])
        indent, content = piece[:self.start_col], piece[self.start_col:]
        lines_before = lines[:self.start_line]
        lines_after = lines[self.q_line + 1:]
        m = _help_end_re.search(content)
        # Cleanup: a redundant ``assert m is not None`` used to follow this
        # guard; it could never fire because the raise happens first.
        if not m:
            raise SyntaxError(content)
        target = m.group(1)
        esc = m.group(3)
        # If we're mid-command, put it back on the next prompt for the user.
        next_input = None
        if (not lines_before) and (not lines_after) \
                and content.strip() != m.group(0):
            next_input = content.rstrip('?\n')
        call = _make_help_call(target, esc, next_input=next_input)
        new_line = indent + call + '\n'
        return lines_before + [new_line] + lines_after
def make_tokens_by_line(lines:List[str]):
    """Tokenize a series of lines and group the tokens by logical line.

    Tokens belonging to one multiline Python statement or expression are
    grouped together.  All lines except the last should keep their line
    endings ('\\n', '\\r\\n') for this to work properly -- e.g. use
    ``.splitlines(keepends=True)`` when passing a block of text.
    """
    if len(lines) > 1 and not lines[0].endswith(('\n', '\r', '\r\n', '\x0b', '\x0c')):
        warnings.warn("`make_tokens_by_line` received a list of lines which do not have lineending markers ('\\n', '\\r', '\\r\\n', '\\x0b', '\\x0c'), behavior will be unspecified")
    # NL tokens occur both inside multiline expressions and after blank lines
    # or comments (see https://bugs.python.org/issue17061).  Only the former
    # should be grouped, so track bracket depth the way tokenize does.
    grouped = [[]]
    depth = 0
    try:
        for tok in tokenize.generate_tokens(iter(lines).__next__):
            grouped[-1].append(tok)
            if tok.type == tokenize.NEWLINE or (tok.type == tokenize.NL and depth <= 0):
                grouped.append([])
            elif tok.string in {'(', '[', '{'}:
                depth += 1
            elif tok.string in {')', ']', '}'}:
                if depth > 0:
                    depth -= 1
    except tokenize.TokenError:
        # Input ended in a multiline string or expression. That's OK for us.
        pass
    if not grouped[-1]:
        grouped.pop()
    return grouped
def show_linewise_tokens(s: str):
    """Print the grouped tokens of *s*, one block per logical line (debug aid)."""
    if not s.endswith('\n'):
        s = s + '\n'
    for tok_line in make_tokens_by_line(s.splitlines(keepends=True)):
        print("Line -------")
        for tok in tok_line:
            print(" ", tok)
# Arbitrary limit to prevent getting stuck in infinite loops.
# do_token_transforms() raises RuntimeError when it is exceeded.
TRANSFORM_LOOP_LIMIT = 500
class TransformerManager:
"""Applies various transformations to a cell or code block.
The key methods for external use are ``transform_cell()``
and ``check_complete()``.
"""
    def __init__(self):
        # Applied first, on the raw lines: strip prompts and leading indent
        # so the input can be tokenised.
        self.cleanup_transforms = [
            leading_indent,
            classic_prompt,
            ipython_prompt,
        ]
        # Line-based transforms, applied after cleanup (e.g. %%cell magics).
        self.line_transforms = [
            cell_magic,
        ]
        # Token-based transformer classes, applied repeatedly by
        # do_one_token_transform() until nothing matches any more.
        self.token_transformers = [
            MagicAssign,
            SystemAssign,
            EscapedCommand,
            HelpEnd,
        ]
def do_one_token_transform(self, lines):
"""Find and run the transform earliest in the code.
Returns (changed, lines).
This method is called repeatedly until changed is False, indicating
that all available transformations are complete.
The tokens following IPython special syntax might not be valid, so
the transformed code is retokenised every time to identify the next
piece of special syntax. Hopefully long code cells are mostly valid
Python, not using lots of IPython special syntax, so this shouldn't be
a performance issue.
"""
tokens_by_line = make_tokens_by_line(lines)
candidates = []
for transformer_cls in self.token_transformers:
transformer = transformer_cls.find(tokens_by_line)
if transformer:
candidates.append(transformer)
if not candidates:
# Nothing to transform
return False, lines
ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
for transformer in ordered_transformers:
try:
return True, transformer.transform(lines)
except SyntaxError:
pass
return False, lines
def do_token_transforms(self, lines):
for _ in range(TRANSFORM_LOOP_LIMIT):
changed, lines = self.do_one_token_transform(lines)
if not changed:
return lines
raise RuntimeError("Input transformation still changing after "
"%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)
def transform_cell(self, cell: str) -> str:
"""Transforms a cell of input code"""
if not cell.endswith('\n'):
cell += '\n' # Ensure the cell has a trailing newline
lines = cell.splitlines(keepends=True)
for transform in self.cleanup_transforms + self.line_transforms:
lines = transform(lines)
lines = self.do_token_transforms(lines)
return ''.join(lines)
def check_complete(self, cell: str):
"""Return whether a block of code is ready to execute, or should be continued
Parameters
----------
source : string
Python input code, which can be multiline.
Returns
-------
status : str
One of 'complete', 'incomplete', or 'invalid' if source is not a
prefix of valid code.
indent_spaces : int or None
The number of spaces by which to indent the next line of code. If
status is not 'incomplete', this is None.
"""
# Remember if the lines ends in a new line.
ends_with_newline = False
for character in reversed(cell):
if character == '\n':
ends_with_newline = True
break
elif character.strip():
break
else:
continue
if not ends_with_newline:
# Append an newline for consistent tokenization
# See https://bugs.python.org/issue33899
cell += '\n'
lines = cell.splitlines(keepends=True)
if not lines:
return 'complete', None
if lines[-1].endswith('\\'):
# Explicit backslash continuation
return 'incomplete', find_last_indent(lines)
try:
for transform in self.cleanup_transforms:
lines = transform(lines)
except SyntaxError:
return 'invalid', None
if lines[0].startswith('%%'):
# Special case for cell magics - completion marked by blank line
if lines[-1].strip():
return 'incomplete', find_last_indent(lines)
else:
return 'complete', None
try:
for transform in self.line_transforms:
lines = transform(lines)
lines = self.do_token_transforms(lines)
except SyntaxError:
return 'invalid', None
tokens_by_line = make_tokens_by_line(lines)
if not | |
from MIDIInput import *
from time import sleep
from random import *
class Urlinie_Old:
    def __init__(self):
        # Weights used by getLastNote() to pick a target scale degree.
        self.weight_1 = 0
        self.weight_3 = 0
        self.weight_5 = 0
        self.weight_other = 0
        # Number of notes generated per Urlinie (see getLength()).
        self.weight_length = 3
        # Pitch change per slot and random spread applied to generated notes.
        self.slope = 10.0
        self.variance = 0.0
        self.loose_target_note = 64 # MIDI number
        # Beat placement controls (reported via getBeatType()).
        self.timeSpacing = 4
        self.timeVariability = 0
        self.weight_beat1 = 95
        self.weight_otherDownbeat = 0
        self.weight_offBeat = 0
        self.MI = None # placeholder for MIDI Input reference
        self.LS = None # placeholder for Lead Sheet reference
        self.NS = None # placeholder for NoteSender reference
        # Notes pending playback; consumed one per bar by newBar().
        self.current_urlinie = []
    def setMIDIInputReference(self, reference):
        # MIDI input object; newBar() queries it for the current tempo.
        self.MI = reference
    def setLeadSheetReference(self, reference):
        # Lead sheet object; used for scales, tick counts and upcoming I chords.
        self.LS = reference
    def setNoteSenderReference(self, reference):
        # Note sender object; newBar() emits note/tempo/bar events through it.
        self.NS = reference
    def newBar(self, barNo):
        """Emit the next pending Urlinie note at the start of bar *barNo*.

        Refills the Urlinie via newUrlinieWithDegreeTarget() when it runs out.
        """
        self.NS.updateBar(barNo)
        if len(self.current_urlinie) > 0: # there is at least one note left in the current Urlinie
            self.NS.sendTempo(self.MI.getTempo())
            #self.NS.sendNoteEvent(self.current_urlinie.pop(0)) # remove the first list element and send it as a MIDI number
            self.NS.noteOn(self.current_urlinie.pop(0), self.LS.getTickCount()) # remove the first list element and send it as a MIDI number
        if len(self.current_urlinie) == 0:
            self.current_urlinie = self.newUrlinieWithDegreeTarget(0)
def lastNoteFromScale(self, scale): # scale must be an ordered list of MIDI notes; first, third and fifth notes will be assumed important
targetType = self.getLastNote()
pitch_target = 0
if len(scale) >= 5:
if targetType in [1, 3, 5]:
pitch_target = scale[targetType-1] # return scale index 0, 2 or 4 respectively
else:
del scale[4] # remove the fifth
del scale[2] # remove the third
del scale[0] # remove the root
pitch_target = choice(scale) # now choose from the remaining notes
return pitch_target
def newUrlinieWithDegreeTargetAndTiming(self, pitch_target, beats_to_target, beats_per_bar, bar_beat_of_target, root_of_target, type_of_target): # pitch target should be a MIDI number (can be in any octave)
note_list = []
bars_to_next_I = 0
beat_of_next_I = 0
root_of_next_I = -1
type_of_next_I = ""
pitch_target = 0
reference_scale = []
if self.LS is not None:
bars_to_next_I, beat_of_next_I = self.LS.BeatsToNextI()
root_of_next_I, type_of_next_I = self.LS.RootAndTypeOfNextI()
if root_of_next_I != -1: # we DO have an upcoming I in the lead sheet
reference_scale = self.LS.getScale(self.LS.selectScale(root_of_next_I, type_of_next_I, root_of_next_I, type_of_next_I)) # this returns pitch interval series of I scale
target_type = self.getLastNote() # returns 1, 3, 5 or 0 (0 means "anything else")
print "target type = "+str(target_type)
print "scale = "+str(reference_scale)+", root = "+str(root_of_next_I)
if target_type == 1:
pitch_target = reference_scale[0]+root_of_next_I # first scale degree, shifted to align with the root
elif target_type == 3:
pitch_target = reference_scale[2]+root_of_next_I
elif target_type == 5:
pitch_target = reference_scale[4]+root_of_next_I
elif target_type == 0:
stripped_reference_scale = [] # create a new reference scale, stripped of the 1st, 3rd and 5th elements
for n, note in enumerate(reference_scale):
if n not in [0, 2, 4]:
stripped_reference_scale.append(note)
pitch_target = choice(stripped_reference_scale)+root_of_next_I
else:
pitch_target = choice(reference_scale)+root_of_next_I
print "pitch target = "+str(pitch_target)
notepool = self.allOctaves(pitch_target) # sets out the same pitch class across all octaves
target = self.pickClosest(notepool, self.getLooseTargetNote())
length = self.getLength()
if bars_to_next_I != 0:
length = bars_to_next_I+1 # temporary arrangement for testing arrival on the I - this overrides the length selection
else:
length = 0
if length > 0:
last_note = target
variance = self.getVariance()
if length > 1:
slope_per_slot = self.getSlope()/(length-1)
for slot in range(length):
this_note = last_note+(length-1-slot)*slope_per_slot
variation = (random()-0.5)*variance
note_list.append(this_note+variation)
elif length == 1:
variation = (random()-0.5)*variance
note_list.append(last_note+variation)
print "beat affinity would be: "+str(self.getBeatType())
print "UL Generated Note List "+str(note_list)
return note_list
    def newUrlinieWithDegreeTarget(self, pitch_target): # pitch target should be a MIDI number (can be in any octave)
        """Generate a list of MIDI pitches sloping toward the next I chord.

        NOTE(review): the ``pitch_target`` argument is ignored -- it is reset
        to 0 below and recomputed from the lead sheet; confirm whether the
        parameter can be dropped.
        """
        note_list = []
        bars_to_next_I = 0
        beat_of_next_I = 0
        root_of_next_I = -1
        type_of_next_I = ""
        pitch_target = 0
        reference_scale = []
        if self.LS is not None:
            bars_to_next_I, beat_of_next_I = self.LS.BeatsToNextI()
            root_of_next_I, type_of_next_I = self.LS.RootAndTypeOfNextI()
        if root_of_next_I != -1: # we DO have an upcoming I in the lead sheet
            reference_scale = self.LS.getScale(self.LS.selectScale(root_of_next_I, type_of_next_I, root_of_next_I, type_of_next_I)) # this returns pitch interval series of I scale
            target_type = self.getLastNote() # returns 1, 3, 5 or 0 (0 means "anything else")
            print "target type = "+str(target_type)
            print "scale = "+str(reference_scale)+", root = "+str(root_of_next_I)
            if target_type == 1:
                pitch_target = reference_scale[0]+root_of_next_I # first scale degree, shifted to align with the root
            elif target_type == 3:
                pitch_target = reference_scale[2]+root_of_next_I
            elif target_type == 5:
                pitch_target = reference_scale[4]+root_of_next_I
            elif target_type == 0:
                stripped_reference_scale = [] # create a new reference scale, stripped of the 1st, 3rd and 5th elements
                for n, note in enumerate(reference_scale):
                    if n not in [0, 2, 4]:
                        stripped_reference_scale.append(note)
                pitch_target = choice(stripped_reference_scale)+root_of_next_I
            else:
                pitch_target = choice(reference_scale)+root_of_next_I
        print "pitch target = "+str(pitch_target)
        notepool = self.allOctaves(pitch_target) # sets out the same pitch class across all octaves
        target = self.pickClosest(notepool, self.getLooseTargetNote())
        length = self.getLength()
        if bars_to_next_I != 0:
            length = bars_to_next_I+1 # temporary arrangement for testing arrival on the I - this overrides the length selection
        else:
            # No upcoming I: produce an empty Urlinie.
            length = 0
        if length > 0:
            last_note = target
            variance = self.getVariance()
            if length > 1:
                # Walk down (or up) from the target by slope_per_slot each slot,
                # adding a random variation of +/- variance/2 to every note.
                slope_per_slot = self.getSlope()/(length-1)
                for slot in range(length):
                    this_note = last_note+(length-1-slot)*slope_per_slot
                    variation = (random()-0.5)*variance
                    note_list.append(this_note+variation)
            elif length == 1:
                variation = (random()-0.5)*variance
                note_list.append(last_note+variation)
        print "beat affinity would be: "+str(self.getBeatType())
        print "UL Generated Note List "+str(note_list)
        return note_list
def newUrlinie(self):
length = self.getLength()
note_list = []
if length > 0:
last_note = self.getLooseTargetNote()
variance = self.getVariance()
if length > 1:
slope_per_slot = self.getSlope()/(length-1)
for slot in range(length):
this_note = last_note+(length-1-slot)*slope_per_slot
variation = (random()-0.5)*variance
note_list.append(this_note+variation)
elif length == 1:
variation = (random()-0.5)*variance
note_list.append(last_note+variation)
return note_list
    def adjustLastNoteWeights(self, slider_value):
        """Map a 0-127 slider value onto the 1/3/5/other last-note weights."""
        # assume slider_value is an integer 0-127
        # NOTE(review): the divisions below rely on Python 2 integer division;
        # under Python 3 they would yield floats -- confirm target interpreter.
        # set weight 1:
        if slider_value <= 95:
            self.weight_1 = 95 - slider_value
        else:
            self.weight_1 = 0
        # set weights 3 and 5:
        if slider_value > 31:
            self.weight_3 = 64 - (slider_value/2)
        else:
            self.weight_3 = 3*slider_value/2
        self.weight_5 = self.weight_3
        # set weight other:
        if slider_value >= 32:
            self.weight_other = slider_value - 32
        else:
            self.weight_other = 0
    def getLastNote(self):
        """Return the weighted-random target degree: 1, 3, 5 or 0 ("other")."""
        choice_index = self.weighted_choice([self.weight_1, self.weight_3, self.weight_5, self.weight_other])
        if choice_index is None:
            # All weights zero: fall back to degree 1.
            choice_index = 0
        #print str(choice_index)
        return [1, 3, 5, 0][choice_index]
    def setLooseTargetNote(self, value):
        # MIDI number the Urlinie should aim for (octave chosen later).
        self.loose_target_note = value
    def getLooseTargetNote(self):
        # Current loose target note (MIDI number).
        return self.loose_target_note
    def adjustUrlinieLengthWeight(self, value):
        """Map a 0-127 controller value onto an Urlinie length of 0-12 slots."""
        # assume value is an integer 0-127
        # we want length to be in the range 0 to 12 in proportion
        self.weight_length = int(value/10)
    def getLength(self):
        """Return the current Urlinie length in slots.

        NOTE(review): the length is stored in the oddly named attribute
        'weight_length' (set by adjustUrlinieLengthWeight).
        """
        return self.weight_length
    def adjustUrlinieSlope(self, value):
        """Set the total pitch slope of the Urlinie from a 0-127 controller value."""
        # 0-127 corresponds to the range -31.5 to +32.0
        self.slope = float(value*0.5-31.5)
    def getSlope(self):
        """Return the configured total pitch slope of the Urlinie."""
        return self.slope
    def adjustUrlinieVariance(self, value):
        """Set the random jitter range of generated notes from a 0-127 value."""
        # 0-127 corresponds to the range 0.0 to +63.5
        self.variance = value*0.5
    def getVariance(self):
        """Return the configured random jitter range for generated notes."""
        return self.variance
def adjustBeatAffinity(self, value): # use a weighting system similar to last note
# set weight of falling on beat 1:
if value <= 95:
self.weight_beat1 = 95 - value
else:
self.weight_beat1 = 0
# set weight of falling on another downbeat:
if value > 31:
self.weight_otherDownbeat = 95 - (4*value)/3
else:
self.weight_otherDownbeat = int(2.3*float(value))
# set weight of falling an an offbeat:
if value >= 32:
self.weight_offBeat = value - 32
else:
self.weight_offBeat = 0
def getBeatType(self):
choice_index = self.weighted_choice([self.weight_beat1, self.weight_otherDownbeat, self.weight_offBeat])
if choice_index is None:
choice_index = 0
#print str(choice_index)
return [1, 3, 0][choice_index] # 1=first beat, 3=other downbeat, 0=offbeat
    def adjustTimeSpacing(self, value):
        """Set the gap between Urlinie notes from a 0-127 controller value."""
        # 0-127 input range giving spacings from 1 to 32 beats
        self.timeSpacing = int(value/4)+1
    def adjustTimeVariability(self, value):
        """Set the timing variability of Urlinie notes from a 0-127 value."""
        # 0-127 input range giving variabilities from 0 to 7
        self.timeVariability = int(value/16)
def weighted_choice(self, weights):
# thanks to <NAME>'s Website
totals = []
running_total = 0
for w in weights:
running_total += w
totals.append(running_total)
rnd = random() * running_total
for i, total in enumerate(totals):
if rnd < total:
return i
def allOctaves(self, MidiNo): # Takes a single MIDI number input and returns a list of that note in all octaves
octaves = [element*12 for element in range(11)] # produce list [0, 12, 24 ... 120]
pc = int(MidiNo)%12 # pitch class
result = []
for octave in octaves:
result.append(octave+pc) # this is the pitch class number plus the octave number
return result
def pickClosest(self, notepool, target): # returns the nearest note in the notepool to the target
# check how far the potential notes are from the target
differences = | |
# <gh_stars>1-10  (dataset artifact; kept as a comment so the module stays valid Python)
from Nanovor import Nanovor
from Attack import Attack
from Player import Player
#Nanovor: (self, name, health, armor, speed, strength, sv, family_class, attacks:list)
#Attacks: (self, name, cost:int, description, damage=[False], hack=[False], override=[False], combo=[False], consumes=False, armorpiercing=False)
#ATTACKS
# Catalog of Attack instances, one module-level constant per move.
# Per the documented Attack signature above:
#   Attack(name, cost, description, damage=..., hack=..., override=..., combo=..., consumes=..., armorpiercing=...)
electro_shock = Attack("Electroshock", 1, "A ranged electric damage attack", damage=[True, [30]])
armor_up = Attack("Armor Up", 3, "This Nanovor places a +5 Armor Override", override = [True, {"ARM":5}])
red_spike = Attack("Red Spike", 1, "This Nanovor places an Override that allows your swarm to make a Red Spiked attack.", override = [True, {"SPIKE":"Red"}])
powerball = Attack("Powerball", 2, "A damage attack; with a Red Spike override, the attack deals double damage.", damage = [True, [35]], combo = [True, {"DMGDOUBLE":2}], consumes = True)
obliterate = Attack("Obliterate", 2, "A ranged energy attack that erases the enemy's override.", hack=[True, {"OBLIT":0}])
get_tough = Attack("Get Tough", 5, "This Nanovor places a +10 Armor Override.", override = [True, {"ARM":10}])
energy_blast = Attack("Energyblast", 2, "A ranged energy damage attack; with a Red Spike, more damage that ignores Armor.", damage = [True, [40]], combo = [True, {"DMGSET":50, "PIERCE": "ALL"}], consumes = True)
pod_power = Attack("Pod Power", 4, "This Nanovor places a +15 Strength Override", override = [True, {"STR":15}])
spin_slash = Attack("Spin Slash", 3, "A damage attack", damage = [True, [50]])
gore = Attack("Gore", 1, "A damage attack", damage = [True, [30]])
dig_in = Attack("Dig In", 3,"This Nanovor places a +10 Strength Override", override = [True, {"STR": 10}])
bull_zap = Attack("Bull Zap", 3, "A ranged electric damage attack", damage = [True, [50]])
arcing_gore = Attack("Arcing Gore", 2, "An electric damage attack.", damage = [True, [40]])
bulk_up = Attack("Bulk Up", 4, "This Nanovor places a +15 Strength Override", override = [True, {"STR":15}])
bull_blast = Attack("Bull Blast", 3, "A ranged electric damage attack.", damage = [True, [50]])
crushing_wall = Attack("Crushing Wall", 1, "A damage attack; with a Red Spike, it swap-blocks the opponent for this and the next 2 rounds.", damage = [True, [30]], combo = [True, {"SWAP": 3}], consumes = True)
firewall = Attack("Firewall", 5, "This Nanovor places a +10 Armor Override", override = [True, {"ARM":10}])
hackslash = Attack("Hackslash", 3, "A damage attack", damage = [True, [60]])
tremor = Attack("Tremor", 1, "Ranged energy attack in which the target loses 25 Speed.", hack = [True, {"SPD": 25}])
# NOTE(review): description says 53 damage but the value is 50 — confirm which is intended.
tank_gore = Attack("Tank Gore", 3, "A headbutt charge that deals 53 Damage" , damage = [True, [50]])
headbutt = Attack("Headbutt", 1, "A damage attack.", damage = [True, [30]])
tank_smash = Attack("Tank Smash", 3, "An attack that causes the target to lose 40 Speed.", hack = [True, {"SPD": 40}])
ion_gore = Attack("Ion Gore", 4, "An electric damage attack.", damage = [True, [60]])
ion_gouge = Attack("Ion Gouge", 2, "An electric attack; with a Red Spike override, there is a second electric attack that ignores Armor.", damage = [True,[40]], combo = [True, {"PIERCE": {"PART":20}}], consumes = True)
atom_smasher = Attack("Atom Smasher", 5, "An electric damage attack that swap-blocks the target for this and the next round.", damage = [True, [60]], hack = [True, {"SWAP": 2}])
slam = Attack("Slam", 2, "A damage attack; with a Red Spike override, the damage ignores Armor.", damage = [True, [40]], combo = [True, {"PIERCE":"ALL"}], consumes = True)
defense = Attack("Defense", 5, "This Nanovor places a +10 Armor Override", override = [True, {"ARM": 10}])
agony = Attack("Agony", 4, "A ranged psychic damage attack.", damage = [True, [60]])
maim = Attack("Maim", 3, "A damage attack.", damage = [True, [50]])
shield = Attack("Shield", 6, "This Nanovor places a +15 Armor Override.", override = [True, {"ARM": 15}])
scorch = Attack("Scorch", 7, "A ranged energy damage attack", damage = [True, [90]])
hit_and_run = Attack("Hit and Run", 2, "A damage attack.", damage = [True, [40]])
zip_zap = Attack("Zip Zap", 3, "A ranged energy attack that also causes Gamma Stalker 1.0 to lose 10 Strength.", damage = [True, [50]], hack = [True, {"DECSELFSTR":10}])
jump_jab = Attack("Jump Jab", 2, "A damage attack.", damage = [True, [40]])
speed_boost = Attack("Speed Boost", 2, "This Nanovor places a +25 Speed Override", override = [True, {"SPD": 25}])
phase_fang = Attack("Phase Fang", 3, "A damage attack; with the Red Spike override, it swap-blocks the target Nanovor for this and the next 2 rounds.", damage = [True, [50]], combo = [True, {"SWAP": 3}], consumes = True)
gamma_zap = Attack("Gamma Zap", 3, "A damage attack that also causes the target to lose 30 Speed", damage = [True, [30]], hack = [True, {"SPD":30}])
spitfire = Attack("Spitfire", 2, "A damage attack; with a Red Spike override, gain a +25 Speed Override.", damage = [True, [35]], combo = [True, {"SETNEW": {"SPD":25}}])
spin_up = Attack("Spin Up", 3, "A damage attack and your Nanovor gains 10 Speed", damage = [True, [45]], hack = [True, {"INCSELFSPD":10}])
battering_ram = Attack("Battering Ram", 2, "A damage attack.", damage = [True, [40]])
thunder_flash = Attack("Thunder Flash", 3, "A ranged energy damage attack that also causes your current Nanovor to lose 10 Speed.", damage = [True, [60]], hack = [True, {"DECSELFSPD":10}])
power_amp = Attack("Power Amp", 4, "This Nanovor places a +15 STR Override", override = [True, {"STR":15}])
two_fist_hit = Attack("Two-Fist Hit", 2, "A damage attack; with a Red Spike override, the target takes double damage.", damage = [True, [40]], combo = [True, {"DMGDOUBLE": 2}], consumes = True)
mentallica = Attack("Mentallica", 4, "A ranged psychic attack in which the target Nanovor loses 50 Speed.", hack = [True, {"SPD": 50}])
gamma_power = Attack("Gamma Power", 5, "This Nanovor places a +20 Strength Override", override = [True, {"STR":20}])
killer_loogie = Attack("Killer Loogie", 2, "Ranged acid attack", damage = [True, [40]])
poison_pinch = Attack("Poison Pinch", 2, "Poison attack that swap-blocks the target for this and the next 2 rounds.", hack = [True, {"SWAP":3}])
mega_blast = Attack("Mega Blast", 3, " ranged energy damage attack; with the Red Spike override, it swap-blocks the target Nanovor for this and the next 3 rounds.", damage = [True, [50]], combo = [True, {"SWAP":4}], consumes = True)
dazzle = Attack("Dazzle", 2, "A ranged antimatter attack that ignores Armor.", damage = [True, [30]], armorpiercing = True)
acid_sting = Attack("Acid Sting", 4, "A ranged acid damage attack.", damage = [True, [60]])
atomic_spit = Attack("Atomic Spit", 3, "A ranged acid damage attack that also causes the target to lose 5 Armor.", damage = [True, [30]], hack = [True, {"ARM":5}])
psychic_sight = Attack("Psychic Sight", 3, "This Nanovor places a Dodge Override", override = [True, {"DODGE":20}])
cosmic_crush = Attack("Cosmic Crush", 4, "A ranged energy damage attack; your Nanovor gains 25 Speed and loses 10 Strength.", damage = [True, [70]], hack = [True, {"INCSELFSPD":25, "DECSELFSTR":10}])
jumpshot = Attack("Jumpshot", 3, "A damage attack that ignores Armor; your Nanovor gains 10 Strength and 20 Speed.", damage = [True, [30]], hack = [True, {"INCSELFSTR":10, "INCSELFSPD":20}], armorpiercing=True)
big_power_up = Attack("Big Power-Up", 4, "This Nanovor places an Override for Magnamods that allow them +1 Energy", override = [True, {"EN-MAG":1}])
flamethrower = Attack("Flamethrower", 5, "A ranged fire damage attack.", damage = [True, [70]])
# Fixed: damage was passed positionally here, unlike every other entry.
head_whip = Attack("Head Whip", 1, "A damage attack.", damage = [True, [30]])
electro_lite = Attack("Electro-lite", 2, "A ranged electric attack in which the target loses 10 Strength.", hack=[True, {"STR":10}])
yellow_spike = Attack("Yellow Spike", 1, "This Nanovor places an Override that allows your swarm to make a Yellow Spiked attack.", override = [True, {"SPIKE":"Yellow"}])
zeus_zap = Attack("Zeus Zap", 3, "A ranged electric damage attack that also swap-blocks the target swarm for this and the next round.", damage = [True, [50]], hack = [True, {"SWAP":2}])
plasma_slam = Attack("Plasma Slam", 2, "A damage attack; with a Yellow Spike override, the damage ignores Armor.", damage = [True, [40]], combo = [True, {"PIERCE":"ALL"}], consumes = True)
# NOTE(review): "special_condition" is not in the documented Attack signature above — confirm Attack accepts it.
solid_strike = Attack("Solid Strike", 4, "A ranged electric attack that either inflicts 100 damage that ignores Armor, or no damage", special_condition = [True, {"CHANCE-DMG-50": {"PIERCE":100}}])
plasma_blast = Attack("Plasma Blast", 3, "A ranged electric attack that takes away 15 Strength; with a Yellow Spike override, the target Nanovor takes damage.", hack = [True, {"STR":15}], combo = [True, {"DMGSET":50}], consumes = True)
arc_blast = Attack("Arc Blast", 4, "A ranged electric damage attack.", damage = [True, [60]])
blaster = Attack("Blaster", 3, "A ranged electric damage attack.", damage = [True, [50]])
locust_whip = Attack("Locust Whip", 2, "A damage attack that also causes the attacker to lose 15 Strength.", damage = [True, [60]], hack = [True, {"DECSELFSTR":15}])
plasma_zap = Attack("Plasma Zap", 3, "A damage attack; with | |
# filename: spark_auto_mapper_fhir/resources/document_reference.py
from __future__ import annotations
from typing import Optional, TYPE_CHECKING, Union
# noinspection PyPackageRequirements
from pyspark.sql.types import StructType, DataType
from spark_auto_mapper_fhir.fhir_types.list import FhirList
from spark_auto_mapper_fhir.fhir_types.string import FhirString
from spark_auto_mapper_fhir.complex_types.meta import Meta
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.fhir_types.id import FhirId
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.base_types.fhir_resource_base import FhirResourceBase
from spark_fhir_schemas.r4.resources.documentreference import DocumentReferenceSchema
if TYPE_CHECKING:
pass
# id_ (id)
# meta (Meta)
# implicitRules (uri)
# language (CommonLanguages)
from spark_auto_mapper_fhir.value_sets.common_languages import CommonLanguagesCode
# text (Narrative)
from spark_auto_mapper_fhir.complex_types.narrative import Narrative
# contained (ResourceContainer)
from spark_auto_mapper_fhir.complex_types.resource_container import (
ResourceContainer,
)
# extension (Extension)
# modifierExtension (Extension)
# masterIdentifier (Identifier)
from spark_auto_mapper_fhir.complex_types.identifier import Identifier
# identifier (Identifier)
# status (DocumentReferenceStatus)
from spark_auto_mapper_fhir.value_sets.document_reference_status import (
DocumentReferenceStatusCode,
)
# docStatus (CompositionStatus)
from spark_auto_mapper_fhir.value_sets.composition_status import (
CompositionStatusCode,
)
# type_ (CodeableConcept)
from spark_auto_mapper_fhir.complex_types.codeable_concept import CodeableConcept
# Import for CodeableConcept for type_
from spark_auto_mapper_fhir.value_sets.document_type_value_set import (
DocumentTypeValueSetCode,
)
# End Import for CodeableConcept for type_
# category (CodeableConcept)
# Import for CodeableConcept for category
from spark_auto_mapper_fhir.value_sets.document_class_value_set import (
DocumentClassValueSetCode,
)
# End Import for CodeableConcept for category
# subject (Reference)
from spark_auto_mapper_fhir.complex_types.reference import Reference
# Imports for References for subject
from spark_auto_mapper_fhir.resources.patient import Patient
from spark_auto_mapper_fhir.resources.practitioner import Practitioner
from spark_auto_mapper_fhir.resources.group import Group
from spark_auto_mapper_fhir.resources.device import Device
# date (instant)
from spark_auto_mapper_fhir.fhir_types.instant import FhirInstant
# author (Reference)
# Imports for References for author
from spark_auto_mapper_fhir.resources.practitioner_role import PractitionerRole
from spark_auto_mapper_fhir.resources.organization import Organization
from spark_auto_mapper_fhir.resources.related_person import RelatedPerson
# authenticator (Reference)
# Imports for References for authenticator
# custodian (Reference)
# Imports for References for custodian
# relatesTo (DocumentReference.RelatesTo)
from spark_auto_mapper_fhir.backbone_elements.document_reference_relates_to import (
DocumentReferenceRelatesTo,
)
# description (string)
# securityLabel (CodeableConcept)
# Import for CodeableConcept for securityLabel
from spark_auto_mapper_fhir.value_sets.all_security_labels import (
AllSecurityLabelsCode,
)
# End Import for CodeableConcept for securityLabel
# content (DocumentReference.Content)
from spark_auto_mapper_fhir.backbone_elements.document_reference_content import (
DocumentReferenceContent,
)
# context (DocumentReference.Context)
from spark_auto_mapper_fhir.backbone_elements.document_reference_context import (
DocumentReferenceContext,
)
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class DocumentReference(FhirResourceBase):
"""
DocumentReference
documentreference.xsd
A reference to a document of any kind for any purpose. Provides metadata about
the document so that the document can be discovered and managed. The scope of
a document is any seralized object with a mime-type, so includes formal
patient centric documents (CDA), cliical notes, scanned paper, and non-patient
specific documents like policy text.
If the element is present, it must have either a @value, an @id, or extensions
"""
# noinspection PyPep8Naming
def __init__(
self,
*,
id_: Optional[FhirId] = None,
meta: Optional[Meta] = None,
implicitRules: Optional[FhirUri] = None,
language: Optional[CommonLanguagesCode] = None,
text: Optional[Narrative] = None,
contained: Optional[FhirList[ResourceContainer]] = None,
extension: Optional[FhirList[ExtensionBase]] = None,
modifierExtension: Optional[FhirList[ExtensionBase]] = None,
masterIdentifier: Optional[Identifier] = None,
identifier: Optional[FhirList[Identifier]] = None,
status: DocumentReferenceStatusCode,
docStatus: Optional[CompositionStatusCode] = None,
type_: Optional[CodeableConcept[DocumentTypeValueSetCode]] = None,
category: Optional[FhirList[CodeableConcept[DocumentClassValueSetCode]]] = None,
subject: Optional[
Reference[Union[Patient, Practitioner, Group, Device]]
] = None,
date: Optional[FhirInstant] = None,
author: Optional[
FhirList[
Reference[
Union[
Practitioner,
PractitionerRole,
Organization,
Device,
Patient,
RelatedPerson,
]
]
]
] = None,
authenticator: Optional[
Reference[Union[Practitioner, PractitionerRole, Organization]]
] = None,
custodian: Optional[Reference[Organization]] = None,
relatesTo: Optional[FhirList[DocumentReferenceRelatesTo]] = None,
description: Optional[FhirString] = None,
securityLabel: Optional[
FhirList[CodeableConcept[AllSecurityLabelsCode]]
] = None,
content: FhirList[DocumentReferenceContent],
context: Optional[DocumentReferenceContext] = None,
) -> None:
"""
A reference to a document of any kind for any purpose. Provides metadata about
the document so that the document can be discovered and managed. The scope of
a document is any seralized object with a mime-type, so includes formal
patient centric documents (CDA), cliical notes, scanned paper, and non-patient
specific documents like policy text.
If the element is present, it must have either a @value, an @id, or extensions
:param id_: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
:param meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content might not always be associated with
version changes to the resource.
:param implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content. Often,
this is a reference to an implementation guide that defines the special rules
along with other profiles etc.
:param language: The base language in which the resource is written.
:param text: A human-readable narrative that contains a summary of the resource and can be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
:param contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
:param extension: May be used to represent additional information that is not part of the basic
definition of the resource. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
:param modifierExtension: May be used to represent additional information that is not part of the basic
definition of the resource and that modifies the understanding of the element
that contains it and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer is allowed to define an extension, there is a set of requirements
that SHALL be met as part of the definition of the extension. Applications
processing a resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
:param masterIdentifier: Document identifier as assigned by the source of the document. This identifier
is specific to this version of the document. This unique identifier may be
used elsewhere to identify this version of the document.
:param identifier: Other identifiers associated with the document, including version independent
identifiers.
:param status: The status of this document reference.
:param docStatus: The status of the underlying document.
:param type_: Specifies the particular kind of document referenced (e.g. History and
Physical, Discharge Summary, Progress Note). This usually equates to the
purpose of making the document referenced.
:param category: A categorization for the type of document referenced - helps for indexing and
searching. This may be implied by or derived from the code specified in the
DocumentReference.type.
:param subject: Who or what the document is about. The document can be about a person,
(patient or healthcare practitioner), a device (e.g. a machine) or even a
group of subjects (such as a document about a herd of farm animals, or a set
of patients that share a common exposure).
:param date: When the document reference was created.
:param author: Identifies who is responsible for adding the information to the document.
:param authenticator: Which person or organization authenticates that this document is valid.
:param custodian: Identifies the organization or group who is responsible for ongoing
maintenance of and access to the document.
:param relatesTo: Relationships that this document has with other document references that
already exist.
:param description: Human-readable description of the source document.
:param securityLabel: A set of Security-Tag codes specifying the level of privacy/security of the
Document. Note that DocumentReference.meta.security contains the security
labels of the "reference" to the document, while
DocumentReference.securityLabel contains a snapshot of the security labels on
the document the reference refers to.
:param content: The document and format referenced. There may be | |
import base64
import re
from collections import Counter
from functools import reduce
import pandas as pd
from neanno.utils.dict import merge_dict_sum_numbers
from neanno.utils.list import ensure_items_within_set, get_set_of_list_and_keep_sequence
ANNOTATION_TYPES = [
"standalone_key_term",
"parented_key_term",
"standalone_named_entity",
"parented_named_entity",
]
TINY_TO_LONG_ANNOTATION_TYPE_MAPPING = {
"SK": "standalone_key_term",
"PK": "parented_key_term",
"SN": "standalone_named_entity",
"PN": "parented_named_entity",
}
ANNOTATION_REGEX = re.compile(
r"""(?xs)
`
(?P<term>[^`]*?)
``
(?P<type_tiny>(
(?P<standalone_key_term>SK)
| (?P<parented_key_term>PK)
| (?P<standalone_named_entity>SN)
| (?P<parented_named_entity>PN))
)
(?(parented_key_term)``(?P<parent_terms_pk>.*?))
(?(standalone_named_entity)``(?P<entity_code_sn>.*?))
(?(parented_named_entity)``(?P<entity_code_pn>.*?)``(?P<parent_terms_pn>.*?))
`´
"""
)
def extract_annotations_as_generator(
    annotated_text, types_to_extract=None, entity_codes_to_extract=None
):
    """Yield the annotations contained in an annotated text, in document order.

    Each annotation is a dict with "term", "type" and position keys (gross =
    offsets within the annotated text, net = offsets within the text with all
    annotation markup removed).  Named entities additionally carry
    "entity_code"; parented annotations additionally carry "parent_terms_raw"
    and the normalized "parent_terms".

    :param types_to_extract: optional whitelist of annotation types (validated
        against ANNOTATION_TYPES); None extracts every type.
    :param entity_codes_to_extract: optional whitelist of entity codes; None
        extracts every code.
    """

    def extract_normalized_parent_terms(parent_terms):
        # Trim and deduplicate the comma-separated parent terms, keeping order.
        result = []
        for parent_term in ((parent_terms or "").strip()).split(","):
            parent_term = parent_term.strip()
            if parent_term and parent_term not in result:
                result.append(parent_term)
        return ", ".join(result)

    # ensure that types_to_extract has valid entries
    ensure_items_within_set(types_to_extract, ANNOTATION_TYPES, True)
    for match in re.finditer(ANNOTATION_REGEX, annotated_text):
        # assemble the annotation and apply the filters in the same pass
        annotation = {}
        annotation["term"] = match.group("term")
        annotation["type"] = TINY_TO_LONG_ANNOTATION_TYPE_MAPPING.get(
            match.group("type_tiny")
        )
        if types_to_extract is not None and annotation["type"] not in types_to_extract:
            continue
        if annotation["type"] == "standalone_named_entity":
            annotation["entity_code"] = match.group("entity_code_sn")
            if (
                entity_codes_to_extract is not None
                and annotation["entity_code"] not in entity_codes_to_extract
            ):
                continue
        if annotation["type"] == "parented_named_entity":
            annotation["entity_code"] = match.group("entity_code_pn")
            if (
                entity_codes_to_extract is not None
                and annotation["entity_code"] not in entity_codes_to_extract
            ):
                continue
            annotation["parent_terms_raw"] = match.group("parent_terms_pn")
            annotation["parent_terms"] = extract_normalized_parent_terms(
                annotation["parent_terms_raw"]
            )
        if annotation["type"] == "parented_key_term":
            annotation["parent_terms_raw"] = match.group("parent_terms_pk")
            annotation["parent_terms"] = extract_normalized_parent_terms(
                annotation["parent_terms_raw"]
            )
        # net positions: offsets after stripping all annotation markup
        annotation["start_net"] = len(
            remove_all_annotations_from_text(annotated_text[: match.start()])
        )
        annotation["end_net"] = annotation["start_net"] + len(
            remove_all_annotations_from_text(
                annotated_text[match.start() : match.end()]
            )
        )
        # gross positions: offsets within the annotated text itself
        annotation["start_gross"] = match.start()
        annotation["end_gross"] = match.end()
        yield annotation
def extract_annotations_as_list(
    annotated_text, types_to_extract=None, entity_codes_to_extract=None
):
    """Return the annotations contained in the specified text as a list."""
    return list(
        extract_annotations_as_generator(
            annotated_text,
            types_to_extract=types_to_extract,
            entity_codes_to_extract=entity_codes_to_extract,
        )
    )
def extract_annotations_as_text(
    annotated_text,
    external_annotations_to_add=None,
    entity_codes_to_extract=None,
    include_entity_codes=True,
):
    """Extract all annotations from the text into one comma-separated string.

    Key terms contribute their term (parented ones contribute their parent
    terms, sorted per annotation); named entities contribute "code:term" or
    "code:parent" unless include_entity_codes is False.  Entries are
    deduplicated case-insensitively against both the result so far and
    external_annotations_to_add; the external annotations themselves are
    appended verbatim at the end.

    :param external_annotations_to_add: extra entries to append (not mutated;
        was previously a mutable default argument).
    """
    if external_annotations_to_add is None:
        external_annotations_to_add = []
    result_list = []

    def is_new(candidate):
        # Case-insensitive dedup against current results and external entries.
        lowered = candidate.lower()
        return lowered not in (
            existing.lower() for existing in result_list
        ) and lowered not in (
            external.lower() for external in external_annotations_to_add
        )

    for annotation in extract_annotations_as_generator(
        annotated_text, entity_codes_to_extract=entity_codes_to_extract
    ):
        # standalone key term
        if annotation["type"] == "standalone_key_term":
            if is_new(annotation["term"]):
                result_list.append(annotation["term"])
        # parented key term: parent terms are added as a sorted batch
        if annotation["type"] == "parented_key_term":
            parent_terms = []
            for parent_term in get_set_of_list_and_keep_sequence(
                annotation["parent_terms"].split(", ")
            ):
                if is_new(parent_term):
                    parent_terms.append(parent_term)
            result_list.extend(sorted(parent_terms))
        # standalone named entity
        if annotation["type"] == "standalone_named_entity":
            candidate = (
                "{}:{}".format(annotation["entity_code"].lower(), annotation["term"])
                if include_entity_codes
                else annotation["term"]
            )
            if is_new(candidate):
                result_list.append(candidate)
        # parented named entity
        if annotation["type"] == "parented_named_entity":
            for parent_term in get_set_of_list_and_keep_sequence(
                annotation["parent_terms"].split(", ")
            ):
                candidate = (
                    "{}:{}".format(annotation["entity_code"].lower(), parent_term)
                    if include_entity_codes
                    else parent_term
                )
                if is_new(candidate):
                    result_list.append(candidate)
    # external annotations
    result_list.extend(external_annotations_to_add)
    return ", ".join(result_list)
def extract_annotations_by_type(
    annotated_text,
    types_to_extract=None,
    entity_codes_to_extract=None,
    list_aliases=None,
):
    """Return (plain_text, annotations) grouping the text's annotations into
    one list per requested annotation type.

    :param types_to_extract: annotation types to include; None means all of
        ANNOTATION_TYPES.  (Previously the None default crashed on the
        membership tests below.)
    :param entity_codes_to_extract: optional entity-code whitelist applied to
        the named-entity lists.  (Previously accepted but ignored.)
    :param list_aliases: optional mapping to rename the result keys; defaults
        to the canonical plural names (built here instead of as a mutable
        default argument).
    """
    if types_to_extract is None:
        types_to_extract = ANNOTATION_TYPES
    if list_aliases is None:
        list_aliases = {
            "standalone_key_terms": "standalone_key_terms",
            "parented_key_terms": "parented_key_terms",
            "standalone_named_entities": "standalone_named_entities",
            "parented_named_entities": "parented_named_entities",
        }
    # get plain text without annotations
    plain_text = remove_all_annotations_from_text(annotated_text)
    # get the annotations dictionary
    annotations = {}
    # standalone key terms
    if "standalone_key_term" in types_to_extract:
        annotations[list_aliases["standalone_key_terms"]] = extract_annotations_as_list(
            annotated_text, types_to_extract=["standalone_key_term"]
        )
    # parented key terms
    if "parented_key_term" in types_to_extract:
        annotations[list_aliases["parented_key_terms"]] = extract_annotations_as_list(
            annotated_text, types_to_extract=["parented_key_term"]
        )
    # standalone named entities
    if "standalone_named_entity" in types_to_extract:
        annotations[
            list_aliases["standalone_named_entities"]
        ] = extract_annotations_as_list(
            annotated_text,
            types_to_extract=["standalone_named_entity"],
            entity_codes_to_extract=entity_codes_to_extract,
        )
    # parented named entities
    if "parented_named_entity" in types_to_extract:
        annotations[
            list_aliases["parented_named_entities"]
        ] = extract_annotations_as_list(
            annotated_text,
            types_to_extract=["parented_named_entity"],
            entity_codes_to_extract=entity_codes_to_extract,
        )
    # return result
    return (plain_text, annotations)
def extract_entity_codes_from_annotated_texts_column(annotated_texts_column):
    """Return the sorted list of all entity codes appearing in the texts of
    the specified pandas Series of annotated texts.

    Uses Series.items() — Series.iteritems() was removed in pandas 2.0.
    """
    codes = set()
    for _, annotated_text in annotated_texts_column.items():
        for annotation in extract_annotations_as_generator(
            annotated_text,
            types_to_extract=["standalone_named_entity", "parented_named_entity"],
        ):
            codes.add(annotation["entity_code"])
    return sorted(codes)
def extract_categories_from_categories_column(categories_column):
    """Return the sorted list of all categories appearing in the specified
    pandas Series of '|'-separated category strings.

    Uses Series.items() — Series.iteritems() was removed in pandas 2.0.
    """
    categories = set()
    for _, categories_column_text in categories_column.items():
        categories.update(categories_column_text.split("|"))
    return sorted(categories)
def extract_annotations_for_spacy_ner(annotated_text, entity_codes_to_extract=None):
    """Return a (plain_text, {"entities": [...]}) tuple suitable for training
    a spaCy named entity recognition (NER) model on the specified text."""
    plain_text = remove_all_annotations_from_text(annotated_text)
    # Each entity is a (start, end, label) triple over the markup-free text.
    entities = [
        (annotation["start_net"], annotation["end_net"], annotation["entity_code"])
        for annotation in extract_annotations_as_generator(
            annotated_text,
            types_to_extract=["standalone_named_entity", "parented_named_entity"],
            entity_codes_to_extract=entity_codes_to_extract,
        )
    ]
    return (plain_text, {"entities": entities})
def get_annotation_at_position(annotated_text, position):
    """Return the annotation whose gross span strictly contains position, or
    None if that position is not inside an annotation.

    Note the boundaries are exclusive: a position exactly at start_gross or
    end_gross does not count as inside the annotation.
    """
    for annotation in extract_annotations_as_generator(annotated_text):
        # Flattened from the original inverted not/continue construct; the
        # unused `result` local was removed.
        if annotation["start_gross"] < position < annotation["end_gross"]:
            return annotation
    return None
def has_annotation_within_range(annotated_text, start_position, end_position):
    """ Checks if the specified range overlaps with an annotation. """
    # NOTE(review): the second clause tests start_position twice (never
    # end_position).  Since end_gross > start_gross this still amounts to
    # "the range begins after the annotation ends", but confirm the intended
    # boundary semantics before changing it.
    for annotation in extract_annotations_as_generator(annotated_text):
        if not (
            (
                # range lies entirely before the annotation ...
                start_position < annotation["start_gross"]
                and end_position < annotation["start_gross"]
            )
            or (
                # ... or entirely after it
                start_position > annotation["start_gross"]
                and start_position > annotation["end_gross"]
            )
        ):
            # there is an overlap => return True
            return True
    # no overlap found => return False
    return False
def remove_all_annotations_from_text(annotated_text):
    """Removes all annotations from the specified text."""
    # replace every annotation match with the bare term it wraps
    def strip_to_term(match):
        return match.group("term")

    return re.sub(ANNOTATION_REGEX, strip_to_term, annotated_text)
def mask_annotations(text):
    """Masks all annotations, eg. to avoid that terms which are already annotated are annotated again."""
    def _mask(match):
        # base64-encode the whole annotation so it survives later text processing
        encoded = base64.b64encode(match.group().encode("utf-8")).decode()
        return "@neanno_masked_annotation:{}@".format(encoded)

    return re.sub(ANNOTATION_REGEX, _mask, text)
def unmask_annotations(text_with_masked_annotations):
    """Reverts a previous masking of all annotations."""
    def _unmask(match):
        # decode the base64 payload back into the original annotation text
        return base64.b64decode(match.group("base64string")).decode()

    masked_pattern = r"@neanno_masked_annotation:(?P<base64string>.*?)@"
    return re.sub(masked_pattern, _unmask, text_with_masked_annotations)
def compute_named_entities_distribution_from_text(annotated_text):
    """ Computes the types and frequencies of named entities in the specified text."""
    distribution = {}
    named_entity_types = ["standalone_named_entity", "parented_named_entity"]
    for entity_annotation in extract_annotations_as_generator(
        annotated_text, types_to_extract=named_entity_types
    ):
        # count one occurrence for this annotation's entity code
        code = entity_annotation["entity_code"]
        distribution[code] = distribution.get(code, 0) + 1
    return distribution
def compute_named_entities_distribution_from_column(pandas_series):
    """ Computes the distribution over all named entities in the specified text column."""
    # per-row distributions, then merged by summing the counts of equal keys
    per_text_distributions = pandas_series.map(compute_named_entities_distribution_from_text)
    candidate = per_text_distributions.agg(
        lambda series: reduce(merge_dict_sum_numbers, series)
    )
    # agg may hand back a Series instead of a single merged dict; treat that as "no data"
    if isinstance(candidate, pd.Series):
        return {}
    return candidate
def compute_categories_distribution_from_column(pandas_series):
    """ Computes the distribution over all categories in the specified categories column."""
    # count the "|"-separated categories of each row, then merge the row counters
    per_row_counts = pandas_series.map(lambda categories_text: Counter(categories_text.split("|")))
    candidate = per_row_counts.agg(
        lambda series: reduce(merge_dict_sum_numbers, series)
    )
    # agg may hand back a Series instead of a single merged counter; treat that as "no data"
    if isinstance(candidate, pd.Series):
        return {}
    return dict(candidate)
def compute_term_distribution_from_text(
    annotated_text, blacklist_terms=None, include_entity_codes=True
):
    """ Computes all terms and their frequencies from the specified text.

    Args:
        annotated_text: text containing neanno-style annotations.
        blacklist_terms: optional collection of terms to exclude from the result.
            The default is now None instead of a shared mutable [] (same behavior,
            avoids the mutable-default-argument pitfall).
        include_entity_codes: if True, named-entity terms are returned as
            "<entity code>:<term>" instead of the bare term.

    Returns:
        Dict mapping each term to its frequency in the text.
    """
    # a set gives O(1) membership tests in the counting loop below
    blacklist = set(blacklist_terms) if blacklist_terms is not None else set()

    def relevant_terms_from_match(match):
        # chr(127) temporarily replaces the spaces inside a term so the final
        # split(" ") keeps multi-word terms in one piece
        # NOTE(review): falls through (returns None) for annotation types other
        # than SK/SN/PK/PN — presumably these four cover ANNOTATION_REGEX; confirm
        if match.group("type_tiny") == "SK":
            return re.sub(" ", chr(127), match.group("term"))
        if match.group("type_tiny") == "SN":
            return re.sub(
                " ",
                chr(127),
                "{}:{}".format(match.group("entity_code_sn"), match.group("term"))
                if include_entity_codes
                else "{}".format(match.group("term")),
            )
        if match.group("type_tiny") == "PK":
            return " ".join(
                [
                    re.sub(" ", chr(127), parent_term.strip())
                    for parent_term in match.group("parent_terms_pk").split(",")
                ]
            )
        if match.group("type_tiny") == "PN":
            return " ".join(
                [
                    re.sub(
                        " ",
                        chr(127),
                        "{}:{}".format(
                            match.group("entity_code_pn"), parent_term.strip()
                        )
                        if include_entity_codes
                        else "{}".format(parent_term.strip()),
                    )
                    for parent_term in match.group("parent_terms_pn").split(",")
                ]
            )

    # normalize whitespace, then strip punctuation outside of annotations only
    # (annotations are masked so their markup is not damaged by the punctuation removal)
    cleaned_text = annotated_text.strip()
    cleaned_text = re.sub(r"(?m)\s+", " ", cleaned_text)
    cleaned_text = mask_annotations(cleaned_text)
    cleaned_text = re.sub(r"([.,?!<>\[\]\|\"\(\)\+\-])", "", cleaned_text)
    cleaned_text = unmask_annotations(cleaned_text)
    # replace each annotation by the term(s) it contributes
    cleaned_text = re.sub(
        ANNOTATION_REGEX, lambda match: relevant_terms_from_match(match), cleaned_text
    )
    result = {}
    for term in cleaned_text.split(" "):
        term = re.sub(chr(127), " ", term)  # restore the spaces inside multi-word terms
        if re.match(r"^\d+$", term):
            continue  # skip purely numeric tokens
        if term in blacklist:
            continue
        result[term] = result.get(term, 0) + 1
    return result
def compute_term_distribution_from_column(
    pandas_series, blacklist_terms=None, include_entity_codes=True
):
    """ Computes the distribution over all terms in the specified text column.

    Args:
        pandas_series: pandas Series of annotated texts.
        blacklist_terms: optional collection of terms to exclude. The default is
            now None instead of a shared mutable [] (same behavior, avoids the
            mutable-default-argument pitfall).
        include_entity_codes: forwarded to compute_term_distribution_from_text.

    Returns:
        Dict mapping each term to its total frequency over the column,
        or {} when the aggregation does not collapse to a single dict.
    """
    if blacklist_terms is None:
        blacklist_terms = []
    distribution_candidate = pandas_series.map(
        lambda text: compute_term_distribution_from_text(
            text, blacklist_terms, include_entity_codes
        )
    ).agg(
        lambda series: reduce(
            lambda dist1, dist2: merge_dict_sum_numbers(dist1, dist2), series
        )
    )
    # agg may hand back a Series instead of a single merged dict; treat that as "no data"
    return (
        distribution_candidate
        if not isinstance(distribution_candidate, pd.Series)
        else {}
    )
def replace_from_to(text, start_position, end_position, new_text):
    """ Replaces the substring within the given range against another text."""
    # splice: keep everything before the range, insert the new text, keep the rest
    prefix = text[:start_position]
    suffix = text[end_position:]
    return prefix + new_text + suffix
def add_standalone_key_term(text, start_position, end_position):
    """ Annotates the given range as a standalone key term (as long as there is no other annotation yet)."""
    if has_annotation_within_range(text, start_position, end_position):
        # an existing annotation overlaps the range => leave the text untouched
        return text
    # wrap the term in standalone-key-term ("SK") annotation markup
    term = text[start_position:end_position]
    return replace_from_to(
        text, start_position, end_position, "`{}``SK`´".format(term)
    )
def add_parented_key_term(text, start_position, end_position, parent_terms):
""" Annotates the given range as a parented key term (as long as there | |
<filename>scripts/run_experiment.py
# ~~~
# This file is part of the paper:
#
# " An Online Efficient Two-Scale Reduced Basis Approach
# for the Localized Orthogonal Decomposition "
#
# https://github.com/TiKeil/Two-scale-RBLOD.git
#
# Copyright 2019-2021 all developers. All rights reserved.
# License: Licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
# Authors:
# <NAME>
# <NAME>
# ~~~
import matplotlib.pyplot as plt
import numpy as np
import os, psutil, sys
import scipy.sparse as sparse
from types import SimpleNamespace
### pytimings
from pytimings.timer import global_timings as timings, scoped_timing
import time
### gridlod
from gridlod import pglod, util, fem, femsolver
from gridlod.world import World, Patch
### pymor
from pymor.algorithms.greedy import rb_greedy
from pymor.basic import set_log_levels, set_defaults
from pymor.models.basic import StationaryModel
from pymor.operators.numpy import NumpyMatrixOperator
from pymor.operators.constructions import IdentityOperator
from pymor.parallel.mpi import MPIPool
from pymor.parallel.dummy import DummyPool
from pymor.parameters.base import ParameterSpace, Parameters
from pymor.tools import mpi
from pymor.vectorarrays.numpy import NumpyVectorSpace
### perturbations_for_2d_data
from perturbations_for_2d_data import visualize
### RBLOD
from rblod.parameterized_stage_1 import build_two_scale_patch_models, correctors_from_TS_RBLOD_approach
from rblod.separated_stage_1 import build_separated_patch_models, henning_RBLOD_approach
from rblod.optimized_rom import OptimizedTwoScaleNumpyModel
from rblod.two_scale_model import Two_Scale_Problem
from rblod.two_scale_reductor import CoerciveRBReductorForTwoScale, TrueDiagonalBlockOperator
### from scripts
from scripts.tools import storeData, estimator_printout
from scripts.problems import model_problem_1, layer_problem_1, _construct_aFine_from_mu
from scripts.tools import verbose_stage_printout, times_printout, rom_size_printout
from scripts.tools import compute_constrast, compute_coarse_errors, compute_errors, extended_time_printout
"""
########################################################################################################
CONFIG
########################################################################################################
"""
use_mpi = False
verbose = False
save_correctors = True
use_fine_mesh = True
certified_estimator_study = False
pickle_data = False
henning = True
two_scale = True
store = False
"""
call this skript via:
> python run_experiment.py 2 4 0.001 0.001 2 0 [--mpi] [--v] [--sc] [--oc] [--ces] [--p] [--sh] [--sts] [--sld]
where:
arg_1 : n_h
arg_2 : n_H
arg_3 : epsilon_1
arg_4 : epsilon_2
arg_5 : size of verification set
arg_6 : problem class : --> 0: model problem 1
--> 1: model problem 2
--mpi : use MPI pool (use with "mpirun -n 4" in front)
--v : generate verbose output
--sc : do not save fine correctors at all
--oc : only use computations that work on the coarse scale. No FEM, no full visualization, no fine errors.
--ces : prepare for the estimator study of the certified error estimator in the TSRBLOD
--p : pickle all data to reuse them
--sh : do not construct and compare Henning RBLOD method
--sts : do not construct and compare TSRBLOD method
--sld : store local stage 1 data in the file system
"""
print()
# validate the six positional arguments documented above
# (the int()/float() casts raise ValueError for malformed input)
assert isinstance(int(sys.argv[1]), int) # --> n_h
assert isinstance(int(sys.argv[2]), int) # --> n_H
assert isinstance(float(sys.argv[3]), float) # --> epsilon_1
assert isinstance(float(sys.argv[4]), float) # --> epsilon_2
assert isinstance(int(sys.argv[5]), int) # --> size of verification set
assert int(sys.argv[6]) == 0 or int(sys.argv[6]) == 1 # --> 0: model problem 1 --> 1: model problem 2
# optional switches override the default CONFIG flags above
if "--mpi" in sys.argv:
    print("Using MPI parallel run, make sure to use mpirun.")
    use_mpi = True
    pool = MPIPool()
else:
    pool = DummyPool()
if "--sld" in sys.argv:
    print("store local element data (and do not communicate it)")
    store = True
if "--v" in sys.argv or "--verbose" in sys.argv:
    print("using verbose printout")
    verbose = True
if "--sc" in sys.argv or "--skip-correctors" in sys.argv:
    print("always remove correctors from storage")
    save_correctors = False
if "--oc" in sys.argv or "--only-coarse" in sys.argv:
    print("not using any full fine data")
    use_fine_mesh = False
if "--ces" in sys.argv or "--certified-estimator-study" in sys.argv:
    print("also show the behavior of the estimator")
    certified_estimator_study = True
if "--p" in sys.argv or certified_estimator_study:
    print("stage 1 data will be pickled")
    pickle_data = True
if "--sh" in sys.argv or "--skip-henning" in sys.argv:
    print("skip henning method entirely")
    henning = False
if "--sts" in sys.argv or "--skip-two-scale" in sys.argv:
    print("skip two scale method entirely")
    two_scale = False
# sanity checks: some flag combinations contradict each other
if use_fine_mesh is False:
    assert save_correctors is False, "You can not store correctors in this case"
if store:
    assert save_correctors is False, "fine scale data is not stored as files"
    assert use_fine_mesh is False, "fine scale data is not stored as files"
def prepare():
    """Configure pymor logging, defaults and warning filters (run on every pool worker)."""
    # the np.warnings alias was an accidental re-export that NumPy removed in 1.25;
    # use the stdlib warnings module directly (identical behavior)
    import warnings

    if verbose is True:
        set_log_levels({"pymor": "INFO"})
    else:
        set_log_levels({"pymor": "WARN"})
    # loosen gram_schmidt tolerance and disable its re-check for speed
    set_defaults({"pymor.algorithms.gram_schmidt.gram_schmidt.rtol": 1e-8})
    set_defaults({"pymor.algorithms.gram_schmidt.gram_schmidt.check": False})
    warnings.filterwarnings("ignore")  # silence numpys warnings
pool.apply(prepare)  # run the preparation (logging/defaults) on every worker of the pool
path = "" #"/scratch/tmp/t_keil02/RBLOD/"
"""
########################################################################################################
PROBLEM SETUP AND LOD VARIABLES
########################################################################################################
"""
if int(sys.argv[6]) == 0:
    experiment = "henning"
elif int(sys.argv[6]) == 1:
    experiment = "layer"
# parameters for the grid size
N = int(sys.argv[1])
n = int(sys.argv[2])
k = int(np.ceil(np.abs(np.log(np.sqrt(2) * 1./N)))) # Localization parameter of patches
atol_patch = float(sys.argv[3]) # epsilon_1
atol_two_scale = float(sys.argv[4]) # epsilon_2
verification_size = int(sys.argv[5]) # size of verification set
NFine = np.array([n, n]) # n x n fine grid elements
NpFine = np.prod(NFine + 1) # Number of fine DoFs
NWorldCoarse = np.array([N, N]) # N x N coarse grid elements
boundaryConditions = np.array([[0, 0], [0, 0]]) # zero Dirichlet boundary conditions
NCoarseElement = NFine // NWorldCoarse
world = World(NWorldCoarse, NCoarseElement, boundaryConditions) # gridlod specific class
print("\nSTARTING SCRIPT ...\n")
print(f"Coarse FEM mesh: {N} x {N}")
print(f"Fine FEM mesh: {n} x {n}")
print(f"k: {k}")
print(f"|log H|: {np.abs(np.log(np.sqrt(2) * 1./N)):.2f}")
print(f"number of fine dofs {NpFine}")
# a patch around a coarse element in the middle of the domain is not truncated
# by the boundary, so its fine dof count is maximal
middle_coarse_index = np.prod(NWorldCoarse)//2 + NWorldCoarse[0]//2
print(f"max fine dofs per patch: {Patch(world, k, middle_coarse_index).len_fine}")
print(f"number of parallel kernels: {mpi.size} ")
print(f"\nGREEDY TOLERANCES: \nStage 1: {atol_patch}\nTwo-scale: {atol_two_scale}\n")
# problem-dependent parameter range, coefficient data and training resolution
if experiment == "henning":
    param_min, param_max = 0, 5
    aFines, aFineCoefficients, f, f_fine, model_parameters, aFine_Constructor = \
        model_problem_1(NFine, world, plot=False, return_fine=use_fine_mesh)
    training_number = 50
elif experiment == "layer":
    param_min, param_max = 1, 5
    aFines, aFineCoefficients, f, f_fine, model_parameters, aFine_Constructor = \
        layer_problem_1(NFine, world, coefficients=3, plot=False, return_fine=use_fine_mesh)
    training_number = 4
# standard parameter space
parameter_space = ParameterSpace(model_parameters, [param_min, param_max])
verification_set = parameter_space.sample_randomly(verification_size, seed=3)
"""
Plot data for the figures
"""
if use_fine_mesh:
for mu_ver in verification_set:
plt.figure("coefficient for mu")
aFine = _construct_aFine_from_mu(aFines, aFineCoefficients, mu_ver)
visualize.drawCoefficient_origin(NFine, aFine, logNorm=True, colorbar_font_size=16,
lim=[0.8,14])
# plt.savefig(f'full_diff_mp_1_{mu_ver.to_numpy()[0]*10000:.0f}.png', bbox_inches='tight')
# plt.show()
if experiment == "layer":
fullpatch = Patch(world, np.inf, 0)
aFines = aFine_Constructor(fullpatch)
for i, mu_plot in enumerate([[1,0,0], [0,1,0], [0,0,1], [1,2,3]]):
aFine_ = _construct_aFine_from_mu(aFines, aFineCoefficients, model_parameters.parse(mu_plot))
if i == 0 or i == 3:
visualize.drawCoefficient_origin(NFine, aFine_, logNorm=True, colorbar_font_size=18)
else:
visualize.drawCoefficient_origin(NFine, aFine_, logNorm=True)
plt.tight_layout()
# plt.savefig(f'patch_patch_diff_oc_mp_{i}.png', bbox_inches='tight')
# plt.show()
"""
########################################################################################################
OFFLINE PHASE
########################################################################################################
"""
"""
Construct training sets
"""
print(f"Training set size per dimension: {training_number}")
training_set = parameter_space.sample_uniformly(training_number)
# parameter space for parametric rhs in stage 1
model_parameters_and_rhs = dict(model_parameters)
model_parameters_and_rhs["DoFs"] = 4
model_parameters_and_rhs = Parameters(model_parameters_and_rhs)
ranges = {k: (param_min, param_max) for k in model_parameters}
ranges["DoFs"] = (0, 1) # dofs are only 0 or 1
parameter_space_for_two_scale_stage_1 = ParameterSpace(model_parameters_and_rhs, ranges)
counts = {k: training_number for k in model_parameters}
counts["DoFs"] = 2
unfiltered_training_set = list(parameter_space_for_two_scale_stage_1.sample_uniformly(counts=counts))
training_set_for_two_scale_stage_1 = []
for p in unfiltered_training_set:
if np.sum(p["DoFs"]) == 1: # treat all dofs individually
training_set_for_two_scale_stage_1.append(p)
print(f"Training set size old stage 1: {len(training_set)}")
print(f"Training set size two-scale stage 1: {len(training_set_for_two_scale_stage_1)}\n")
def construct_patches(TInd, k, world):
    """Build the localized patch of size k around coarse element TInd."""
    return Patch(world, k, TInd)
def compute_contrast_for_all_patches(patch, aFineCoefficients, training_set, aFine_Constructor):
    """Estimate (contrast, min_alpha) of the localized coefficient on one patch."""
    # localize the coefficient on this patch, then evaluate its contrast over the training set
    localized_coefficients = aFine_Constructor(patch)
    return compute_constrast(localized_coefficients, aFineCoefficients, training_set)
#localize coefficient beforehand
print('prepare patches ... \n')
coarse_indices = range(world.NtCoarse)
patchT = pool.map(construct_patches, list(coarse_indices), k=k, world=world)
print('approximating contrast ... ')
# estimate contrast/coercivity patch-wise and aggregate by max/min
contrasts, min_alphas = zip(*pool.map(compute_contrast_for_all_patches, patchT,
                                      aFineCoefficients=aFineCoefficients, training_set=training_set,
                                      aFine_Constructor=aFine_Constructor))
contrast, min_alpha = np.max(contrasts), np.min(min_alphas)
print(f"contrast: {contrast}, min_alpha: {min_alpha}")
coercivity_estimator = lambda mu: min_alpha
# minimize gather data size for | N_H | > CPUS
# (split the element list into chunks so each worker handles several elements per round)
if use_mpi and mpi.size < len(coarse_indices) and store:
    split_into = len(coarse_indices) // mpi.size
    print(f'\n ... splitting processes into {split_into} (parallel) subprocesses')
    size_of_split = len(coarse_indices) // split_into
    coarse_index_list, coarse_patch_list = [], []
    for i in range(split_into):
        a, b = i * size_of_split, (i + 1) * size_of_split
        coarse_patch_list.append(patchT[a:b])
        coarse_index_list.append(coarse_indices[a:b])
else:
    coarse_patch_list, coarse_index_list = [patchT], [coarse_indices]
"""
########################################################################################################
STAGE 1: reducing corrector problems
########################################################################################################
"""
print("\n............ BUILDING RB Models for Corrector Problems...........\n ")
# amateur progress bar !
print("|", end="", flush=True)
for T in coarse_indices:
print(" ", end="", flush=True)
print("|\n ", end="", flush=True)
"""
Offline phase for separated Henning stage 1
"""
if henning:
with scoped_timing("offline henning stage 1"):
rss = psutil.Process().memory_info().rss
data = []
for indices, patches in zip(coarse_index_list, coarse_patch_list):
data_ = pool.map(build_separated_patch_models, patches,
aFineCoefficients=aFineCoefficients, coercivity_estimator=coercivity_estimator,
training_set=training_set, atol_patch=atol_patch, save_correctors=save_correctors,
aFine_Constructor=aFine_Constructor, store=store, path=path)
if store:
# for the case that all data is stored in the file system
for T in indices:
loaded = np.load(f'{path}mpi_storage/he_{T}.npz', allow_pickle=True)
data.append([loaded['rom'][()], loaded['time'][()]])
os.remove(f'{path}mpi_storage/he_{T}.npz')
else:
data.extend(data_)
if store:
optimized_romT_, timeT_ = zip(*data)
# construct information on stage 1 with the help of the roms
rom_sizeT_ = [[rom.operator_array.shape[1] for rom in roms] for roms in optimized_romT_]
max_errorsT_, max_error_musT_, basesT_, productT_, extension_failedT_henning = \
None, None, None, None, None
else:
optimized_romT_, timeT_, rom_sizeT_, max_errorsT_, max_error_musT_, extension_failedT_henning, bases_ \
= zip(*data)
print(f" --> THIS TOOK {timings.walltime('offline henning stage 1'):.5f} seconds "
f"and requires ~{(psutil.Process().memory_info().rss-rss)*1e-6:.2f} MB\n\n ", end="", flush=True)
"""
Offline phase for two scale parameterized stage 1
"""
if two_scale:
with scoped_timing("offline stage 1"):
rss = psutil.Process().memory_info().rss
data = []
for indices, patches in zip(coarse_index_list, coarse_patch_list):
data_ = pool.map(build_two_scale_patch_models, patches,
aFineCoefficients=aFineCoefficients, coercivity_estimator=coercivity_estimator,
training_set_for_two_scale_stage_1=training_set_for_two_scale_stage_1,
atol_patch=atol_patch, save_correctors=save_correctors,
certified_estimator_study=certified_estimator_study, aFine_Constructor=aFine_Constructor,
store=store, path=path)
if store:
# for the case that all data is stored in the file system
for T in indices:
loaded = np.load(f'{path}mpi_storage/ts_{T}.npz', allow_pickle=True)
data.append([loaded['rom'][()], | |
"""LREANNtf_algorithmLREANN_expRUANN.py
# Author:
<NAME> - Copyright (c) 2020-2022 Baxter AI (<EMAIL>)
# License:
MIT License
# Installation:
see LREANNtf_main.py
# Usage:
see LREANNtf_main.py
# Description:
LREANNtf algorithm LREANN expRUANN - define learning rule experiment artificial neural network with relaxation update
"""
import tensorflow as tf
import numpy as np
from ANNtf2_operations import *
import ANNtf2_operations
import ANNtf2_globalDefs
import math
from numpy import random
#
# RUANN biological implementation requirements:
#
# backpropagation approximation notes:
# error_L = (y_L - A_L) [sign reversal]
# error_l = (W_l+1 * error_l+1) . activationFunctionPrime(z_l) {~A_l}
# dC/dB = error_l
# dC/dW = A_l-1 * error_l
# Bnew = B+dC/dB [sign reversal]
# Wnew = W+dC/dW [sign reversal]
#
# backpropagation error is stored in temporary firing rate modification [increase/decrease] of neurons (ie Atrace->Aideal)
# Aerror_l update is applied based on signal pass through (W), higher level temporary firing rate adjustment, and current firing rate. error_l = (W_l+1 * error_l+1) * A_l
# W_l update is applied based on firing rate of lower layer and higher level temporary firing rate adjustment. dC/dW = A_l-1 * error_l
#
# RUANN approximates backpropagation for constrained/biological assumptions
# Error calculations are achieved by repropagating signal through neuron and measuring either a) temporary modulation in output (Aideal) relative to original (Atrace), or b) output of a specific error storage neurotransmitter receptor
# can rely on sustained burst/spike of ideal A values to perform weight updates
#
# Outstanding Biological Requirement: Need to identify a method to pass (single layer) error signal back through neuron from tip of axon to base of dendrite (internal/external signal?)
# the original RUANN (learningAlgorithm == "backpropApproximation3/backpropApproximation4") attempts to achieve this by sending a trial +/- signal from the lower layer l neuron k and slowly ramping it up/down (increasing/decreasing its effective error) until the above layer l+1 neurons reach their ideal values/errors
#
# debug switches
debugOnlyTrainFinalLayer = False #debug weight update method only (not Aideal calculation method) #requires recalculateAtraceUnoptimisedBio==False
debugVerboseOutput = False
debugVerboseOutputTrain = False
averageAerrorAcrossBatch = False #RUANN was originally implemented to calculate independent idealA for each batch index (rather than averaged across batch)
errorImplementationAlgorithm = "storeErrorAsModulationOfSignalPropagationNeurotransmitterReceptor" #original #a) modulates primary propagation neurotransmitter receptor (+/-) to store l error, and for the calculation of l-1 error
#errorImplementationAlgorithm = "storeErrorAsModulationOfUniqueNeurotransmitterReceptor" #b) designates a specific neurotransmitter receptor to store l error, and for the calculation of l-1 error
#learning algorithm variants in order of emulation similarity to formal backpropagation:
#learningAlgorithm = "backpropApproximation1" #strict backpropagation (optional: use A ideal instead of A error, use activationFunctionTypeFinalLayer sigmoid rather than softmax)
#learningAlgorithm = "backpropApproximation2" #incomplete #modifies the A ideal (trials +ve and -ve adjustments; adjusting firing strength), and performs weight updates based on this modified A value
#learningAlgorithm = "backpropApproximation3" #incomplete #modifies the A ideal (trials +ve and -ve adjustments; adjusting firing strength), and performs weight updates based on this modified A value
#learningAlgorithm = "backpropApproximation4" #incomplete #modifies the A ideal (trials +ve and -ve adjustments; adjusting firing strength), and performs weight updates based on this modified A value #original proposal #emulates backpropagation using a variety of shortcuts (with optional thresholding), but does not emulate backPropagation completely - error_l (Aideal_l) calculations are missing *error_l+1 (multiply by the strength of the higher layer error)
#learningAlgorithm = "backpropApproximation5" #incomplete #modifies the A ideal (trials +ve and -ve adjustments; adjusting firing strength), and performs weight updates based on this modified A value #simplifies RUANN algorithm to only consider +/- performance (not numerical/weighted performance) #probably only feasible with useBinaryWeights #note if useBinaryWeights then could more easily biologically predict the effect of adjusting Aideal of lower layer neuron k on performance of upper layer (perhaps without even trialling the adjustment)
#learningAlgorithm = "backpropApproximation6" #incomplete #calculates current layer neuron k A error based on final layer error of propagating signal
learningAlgorithm = "backpropApproximation7" #calculates current layer A error/ideal based on above level WdeltaStore
#errorStorageAlgorithm = "useAerror" #l+1 error is stored as a linear modulation of post synaptic receptor
#errorStorageAlgorithm = "useAideal" #original #l+1 error is stored as a hypothetical difference between Atrace and Aideal [ratio]
# derive the error storage algorithm (and variant-specific flags) from the selected learning algorithm
if(learningAlgorithm == "backpropApproximation1"):
	#strict backpropagation is used for testing only #no known biological implementation
	#requires recalculateAtraceUnoptimisedBio==False
	errorStorageAlgorithm = "useAideal"	#"useAerror"	#optional
elif(learningAlgorithm == "backpropApproximation2"):
	#requires recalculateAtraceUnoptimisedBio==False
	errorStorageAlgorithm = "useAideal"	#"useAerror"	#optional
elif(learningAlgorithm == "backpropApproximation3"):
	errorStorageAlgorithm = "useAideal"
elif(learningAlgorithm == "backpropApproximation4"):
	errorStorageAlgorithm = "useAideal"
	useWeightUpdateDirectionHeuristicBasedOnExcitatoryInhibitorySynapseType = True	#enables rapid weight updates, else use stochastic (test both +/-) weight upates
	useMultiplicationRatherThanAdditionOfDeltaValues = True	#CHECKTHIS	#this ensures that Aideal/weight updates are normalised across their local layer (to minimise the probability an alternate class data propagation will be interferred with by the update)
elif(learningAlgorithm == "backpropApproximation5"):
	errorStorageAlgorithm = "useAideal"
	useWeightUpdateDirectionHeuristicBasedOnExcitatoryInhibitorySynapseType = True
	useMultiplicationRatherThanAdditionOfDeltaValues = False
elif(learningAlgorithm == "backpropApproximation6"):
	errorStorageAlgorithm = "useAerror"
elif(learningAlgorithm == "backpropApproximation7"):
	errorStorageAlgorithm = "useAideal"	#"useAerror"	#optional
	#averageAerrorAcrossBatch = True	#require inverse matrix multiplication operations (their batchSize dimension must equal 1)
activationFunctionType = "sigmoid" #default
#activationFunctionType = "softmax" #trial only
#activationFunctionType = "relu" #not currently supported; a) cannot converge with relu function at final layer, b) requires loss function
applyFinalLayerLossFunction = False #if False: normalise the error calculation across all layers, taking y_target as Aideal of top layer
if(learningAlgorithm == "backpropApproximation1"):
activationFunctionTypeFinalLayer = "sigmoid" #"softmax" #optional
applyFinalLayerLossFunction = False
elif(learningAlgorithm == "backpropApproximation2"):
activationFunctionTypeFinalLayer = "sigmoid" #"softmax" #optional
applyFinalLayerLossFunction = False
else:
activationFunctionTypeFinalLayer = "sigmoid" #default #doesn't currently converge with final layer loss function calculated based on sigmoid
applyFinalLayerLossFunction = False
errorFunctionTypeDelta = True
errorFunctionTypeDeltaFinalLayer = True #sigmoid/softmax has already been calculated [Aideal has been created for final layer] so can simply extrace delta error here #OLD: use sigmoid/softmax loss for final layer - consider using more simply delta loss here
updateOrder = "updateWeightsAfterAidealCalculations" #method 1
#updateOrder = "updateWeightsDuringAidealCalculations" #method 2
#updateOrder = "updateWeightsBeforeAidealCalculations" #method 3
if(learningAlgorithm == "backpropApproximation6"):
updateOrder = "updateWeightsDuringAidealCalculations"
if(learningAlgorithm == "backpropApproximation7"):
updateOrder = "updateWeightsBeforeAidealCalculations"
#takeAprevLayerFromTraceDuringWeightUpdates = True #mandatory for computational purposes (normalise across batches)
#this parameter value should not be critical to RUANN algorithm (it is currently set based on availability of Aideal of lower layer - ie if it has been precalculated)
#difference between Aideal and Atrace of lower layer should be so small takeAprevLayerFromTraceDuringWeightUpdates shouldn't matter
recalculateAtraceUnoptimisedBio = False
if(not applyFinalLayerLossFunction):
topLayerIdealAstrict = True #top level learning target (idealA) == y, else learning target (idealA) == A + deltaA
topLayerIdealAproximity = 0.01 #maximum learning rate (effective learning rate will be less than this)
if(learningAlgorithm == "backpropApproximation4"):
useMultiplicationRatherThanAdditionOfDeltaValuesAideal = False
useMultiplicationRatherThanAdditionOfDeltaValuesW = False
if(useMultiplicationRatherThanAdditionOfDeltaValues):
useMultiplicationRatherThanAdditionOfDeltaValuesAideal = True
useMultiplicationRatherThanAdditionOfDeltaValuesW = True
else:
useMultiplicationRatherThanAdditionOfDeltaValuesAideal = False
useMultiplicationRatherThanAdditionOfDeltaValuesW = False
learningRateMinFraction = 0.1 #minimum learning rate can be set to always be above 0 (learningRateMinFraction = fraction of learning rate)
applyMinimiumAdeltaContributionThreshold = False #only adjust Aideal_k of l based on Aideal of l+1 if it significantly improves Aideal of l+1, where k is neuron index of l
if(applyMinimiumAdeltaContributionThreshold):
minimiumAdeltaContributionThreshold = 0.1 #fraction relative to original performance difference
#minimiumAdeltaContributionThreshold = 1.0 #this contribution threshold is normalised wrt number of neurons (k) on l+1. default=1.0: if a Aideal_k adjustment on l contributes less than what on average an Aideal_k adjustment must necessarily contribute to achieve Aideal on l+1, then do not adjust Aideal_k (leave same as A_k)
applySubLayerIdealAmultiplierRequirement = True
if(applySubLayerIdealAmultiplierRequirement):
subLayerIdealAmultiplierRequirement = 1.5 #idealA of each neuron k on l will only be adjusted if its modification achieves at least xM performance increase for Aideal on l+1
applySubLayerIdealAmultiplierCorrection = True #optional: adjust learning neuron learning based on performance multiplier
else:
applySubLayerIdealAmultiplierCorrection = False
if(learningAlgorithm == "backpropApproximation5"):
subLayerIdealAlearningRateBase = 0.001 #small number used to ensure (reduce probablity) that update does not affect nonlinearity of signal upwards
else:
subLayerIdealAlearningRateBase = 0.01 #each neuron k on l will be adjusted only by this amount (modified by its multiplication effect on Aideal of l+1)
if(learningAlgorithm == "backpropApproximation6"):
useMultiplicationRatherThanAdditionOfDeltaValuesAideal = False
debugWexplosion = False
debugFastTrain = False
if(debugFastTrain):
learningRate = 0.01
else:
learningRate = 0.001
useBatch = True
if(learningAlgorithm == "backpropApproximation7"):
useBatch = False #require inverse matrix multiplication operations (their batchSize dimension must equal 1) #or use averageAerrorAcrossBatch instead
if(useBatch):
if(debugFastTrain):
batchSize = 1000
else:
batchSize = 10 #100
else:
batchSize = 1
biologicalConstraints = False #batchSize=1, _?
sparsityLevel = 1.0 #probability of initial strong neural connection per neuron in layer
noisySampleGeneration = False
noisySampleGenerationNumSamples = 0
noiseStandardDeviation = 0
if(biologicalConstraints):
useBinaryWeights = True #increases stochastically updated training speed, but reduces final accuracy
if(useBinaryWeights):
averageTotalInput = -1
useBinaryWeightsReduceMemoryWithBool = False #can use bool instead of float32 to limit memory required, but requires casting to float32 for matrix multiplications
if(not useBatch):
noisySampleGeneration = False #possible biological replacement for input data batchSize > 1 (provides better performance than standard input data batchSize == 1, but less performance than input data batchSize > 10+)
if(noisySampleGeneration):
noisySampleGenerationNumSamples = 10
noiseStandardDeviation = 0.03
else:
useBinaryWeights = False
W = {}
B = {}
Wbackup = {}
Bbackup = {}
NETWORK_PARAM_INDEX_TYPE = 0
NETWORK_PARAM_INDEX_LAYER = 1
NETWORK_PARAM_INDEX_H_CURRENT_LAYER = 2
NETWORK_PARAM_INDEX_H_PREVIOUS_LAYER = 3
NETWORK_PARAM_INDEX_VARIATION_DIRECTION = 4
if(not recalculateAtraceUnoptimisedBio):
Atrace = {}
Ztrace = {}
if(errorStorageAlgorithm == "useAideal"):
Aideal = {}
elif(errorStorageAlgorithm == "useAerror"):
Aerror = {}
if(learningAlgorithm == "backpropApproximation7"):
WdeltaStore = {}
#Network parameters
n_h = []
numberOfLayers = 0
numberOfNetworks = 0
datasetNumClasses = 0
#randomNormal = tf.initializers.RandomNormal()
# Stochastic gradient descent optimizer.
optimizer = tf.optimizers.SGD(learningRate)
def getNoisySampleGenerationNumSamples():
	"""Return the module-level noisy-sample-generation settings as a tuple
	(noisySampleGeneration, noisySampleGenerationNumSamples, noiseStandardDeviation)."""
	return noisySampleGeneration, noisySampleGenerationNumSamples, noiseStandardDeviation
def defineTrainingParameters(dataset):
	"""Return the training hyperparameters as a tuple
	(learningRate, trainingSteps, batchSize, displayStep, numEpochs).

	learningRate, batchSize, debugFastTrain and useBatch are module-level
	configuration globals; `dataset` is currently unused.
	"""
	trainingSteps = 1000 if debugFastTrain else 10000
	if useBatch:
		numEpochs = 100	#10
	else:
		numEpochs = 100
	displayStep = 100
	return learningRate, trainingSteps, batchSize, displayStep, numEpochs
def defineNetworkParameters(num_input_neurons, num_output_neurons, datasetNumFeatures, | |
optimally
#nz=find(hh~=0); # nz can be computed more optimally
# np.nonzero() always returns a tuple, even if it contains 1 element since hh has only 1 dimension
nz = np.nonzero(hh != 0)[0];
#if False:
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): " \
"nz = %s" % (str(nz)));
common.DebugPrint("multiscale_quad_retrieval(): " \
"nz.shape = %s" % (str(nz.shape)));
#if numel(nz) > 0:
if nz.size > 0:
#%%----text-retrieval-like
#votes(nz, tol_i) = votes(nz, tol_i) + log10(length(RD) / (length(nz)))^2 #Note: log10(a)^2 means (log10(a))^2 #PREVIOUSLY
#myVal = pow(math.log10(float(len(RD)) / len(nz)), 2);
myVal = pow(math.log10(float(len(r_harlocs)) / len(nz)), 2);
#if False:
if common.MY_DEBUG_STDOUT:
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"len(RD) = %d" % len(RD));
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"len(r_harlocs) = %d" % len(r_harlocs));
common.DebugPrint("multiscale_quad_retrieval(): " \
"len(nz) = %d" % len(nz));
common.DebugPrint("multiscale_quad_retrieval(): " \
"myVal = %.5f" % myVal);
# PREVIOUSLY
votes[nz, tol_i] = votes[nz, tol_i] + myVal;
# votes(nz)=votes(nz)+log10(length(RD)/(length(nz)));
# votes(nz)=votes(nz)+1;
#if False:
if common.MY_DEBUG_STDOUT:
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"Votes_space.shape = %s" % (str(Votes_space.shape)));
common.DebugPrint("multiscale_quad_retrieval(): " \
"votes.shape = %s" % (str(votes.shape)));
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"votes.shape = %s" % (str(votes.shape)));
common.DebugPrint("multiscale_quad_retrieval(): " \
"votes = %s" % (str(votes)));
return (queryFrame, np.ravel(votes));
# NOT performing these in each worker - the central dispatcher will do these
if False:
#Votes_space(:,q)=votes;
# Gives: "ValueError: output operand requires a reduction, but reduction is not enabled"
#Votes_space[:, queryFrame - 1] = votes;
Votes_space[:, queryFrame] = np.ravel(votes);
if cropflag == 0:
HH[:, queryFrame] = 1;
else:
"""
HH[:, queryFrame] = spatial_consistency.spatial_consistency(space_xy, \
qcen, len(RD), st_threshold, cropflag);
"""
HH[:, queryFrame] = spatial_consistency.spatial_consistency(space_xy, \
qcen, len(r_harlocs), st_threshold, cropflag);
"""
From http://www.mathworks.com/help/matlab/matlab_prog/symbol-reference.html:
Dot-Dot-Dot (Ellipsis) - ...
A series of three consecutive periods (...) is the line continuation operator in MATLAB.
Line Continuation
Continue any MATLAB command or expression by placing an ellipsis at the end of the line to be continued:
"""
# NOTE(review): NUM_MAX_ELEMS appears to cap the number of elements processed
# somewhere else in this module -- its usage is not visible in this chunk.
NUM_MAX_ELEMS = 100000;
# Presumably FLANN-style search parameters (verify): a huge `checks` value to
# make the approximate NN search near-exhaustive.
search_params = dict(checks=1000000000); # Gives fewer results than scipy's tree.query_ball_point when we have 65K features
# returns Votes_space, HH
# Alex: r_harlocs and q_harlocs are the corresponding lists of harlocs computed
"""
md_threshold = max-distance threshold used to build quads out of Harris features
st_threshold = threshold value for spatio-temporal consistency (coherence)
all_ori, all_id, all_max, all_cen = orientation, reference frame ids, max distances,
respectively centroids coordinates of each
reference quad for scale scale_index
"""
def multiscale_quad_retrieval(r_quadsTree, r_harlocs, q_harlocs, md_threshold, st_threshold, \
all_ori, all_id, all_max, all_cen, nos, scale_index, cropflag, \
sequence):
common.DebugPrint("Entered multiscale_quad_retrieval(): " \
"md_threshold = %s, st_threshold = %s." % \
(str(md_threshold), \
str(st_threshold)));
assert len(r_harlocs) != 0;
assert len(q_harlocs) != 0;
try:
Votes_space = np.load("Votes_space%d.npz" % scale_index)['arr_0'];
HH = np.load("HH%d.npz" % scale_index)['arr_0'];
return Votes_space, HH;
except:
common.DebugPrintErrorTrace();
if common.MY_DEBUG_STDOUT and DBGPRINT:
common.DebugPrint("multiscale_quad_retrieval(): r_quadsTree = %s" % \
str(r_quadsTree));
common.DebugPrint("multiscale_quad_retrieval(): len(r_harlocs) = %d" % len(r_harlocs));
common.DebugPrint("multiscale_quad_retrieval(): r_harlocs = %s" % str(r_harlocs));
common.DebugPrint("multiscale_quad_retrieval(): q_harlocs = %s" % str(q_harlocs));
common.DebugPrint("multiscale_quad_retrieval(): md_threshold = %s" % str(md_threshold));
print("multiscale_quad_retrieval(): st_threshold = %s" % str(st_threshold));
#common.DebugPrint("multiscale_quad_retrieval(): all_ori, all_id, all_max, all_cen, nos, scale_index, cropflag = %s" % str(all_ori, all_id, all_max, all_cen, nos, scale_index, cropflag));
common.DebugPrint("multiscale_quad_retrieval(): all_id = %s" % str(all_id));
common.DebugPrint("multiscale_quad_retrieval(): all_id.shape = %s" % (str(all_id.shape)));
#common.DebugPrint("multiscale_quad_retrieval(): all_max, all_cen, nos, scale_index, cropflag = %s" % str(all_max, all_cen, nos, scale_index, cropflag));
#common.DebugPrint("multiscale_quad_retrieval(): all_max = %s" % str(all_max));
#common.DebugPrint("multiscale_quad_retrieval(): all_cen, nos, scale_index, cropflag = %s" % str(all_cen, nos, scale_index, cropflag));
common.DebugPrint("multiscale_quad_retrieval(): sequence = %s" % str(sequence));
print("multiscale_quad_retrieval(): cropflag = %s" % str(cropflag));
t1 = float(cv2.getTickCount());
if scale_index > nos:
assert scale_index <= nos;
#error('Wrong scale index or number-of-scales');
#QD = dir([q_path "multiharlocs*.mat"])
#QD = [q_path + "multiharlocs*.mat"]
#QD = q_harlocs;
#RD = dir([r_path "multiharlocs*.mat"])
#RD = [r_path + "multiharlocs*.mat"]
#RD = r_harlocs;
#TODO: take out RD_start
#RD_start = str2num(RD(1).name(end - 9 : end - 4))
#RD_start = int(RD[0][-9 : -4])
RD_start = 0;
#RD_end = str2num(RD(end).name(end - 9 : end - 4))
#RD_end = int(RD[-1][-9 : -4])
#RD_end = len(RD) - 1;
RD_end = len(r_harlocs) - 1;
if False: # n_d not used anywhere
#n_d = hist(all_id, RD_start : RD_end)
#n_d = hist[all_id, RD_start : RD_end]
n_d = Matlab.hist(x=all_id, \
binCenters=np.array(range(RD_start, RD_end + 1)) );
#cross_indices = np.zeros( (len(QD), 2) );
cross_indices = np.zeros( (len(q_harlocs), 2) );
j = 1;
#tic
#ORI = np.array([]); # ORI NOT used anywhere
"""
Inspired from
https://stackoverflow.com/questions/17559140/matlab-twice-as-fast-as-numpy
BUT doesn't help in this case:
Votes_space = np.asfortranarray(np.zeros( (len(RD), len(QD)) ));
"""
#Votes_space = np.zeros( (len(RD), len(QD)) );
Votes_space = np.zeros( (len(r_harlocs), len(q_harlocs)) );
# Make a distinct copy of HH from Votes_space...
#HH = Votes_space.copy().astype(np.int16); #Votes_space + 0;
#HH = np.zeros((len(RD), len(QD)), dtype=np.int8);
HH = np.zeros((len(r_harlocs), len(q_harlocs)), dtype=np.int8); #!!!!TODO use MAYBE even np.bool - OR take it out
#common.DebugPrint("multiscale_quad_retrieval(): Votes_space = %s,\n HH = %s" % (str(Votes_space), str(HH)))
tolers = 0.1 - float(scale_index) / 100.0; # it helps to make more strict the threshold as the scale goes up
# tolers = 0.15 - float(scale_index) / 100.0;
MAXDIS = 3 + scale_index;
MAXORI = 0.25;
"""
!!!!TODO TODO: I am using multiprocessing.Poll and return votes;
the dispatcher assembles the results,
but the results are NOT the same with the serial case - although they
look pretty decent, but they seem to be suboptimal - dp_Alex returns
suboptimal cost path for USE_MULTITHREADING == True instead of
False.
(Note: running under the same preconditions
multiscale_quad_retrieval I got the same results in dp_Alex().
"""
if False: #config.USE_MULTITHREADING == True:
global g;
g.r_quadsTree = r_quadsTree;
g.r_harlocs = r_harlocs;
g.q_harlocs = q_harlocs;
g.md_threshold = md_threshold;
g.st_threshold = st_threshold;
g.all_ori = all_ori;
g.all_id = all_id;
g.all_max = all_max;
g.all_cen = all_cen;
g.nos = nos;
g.scale_index = scale_index;
g.cropflag = cropflag;
g.sequence = sequence;
g.RD_start = RD_start;
g.RD_end = RD_end;
g.MAXDIS = MAXDIS;
g.MAXORI = MAXORI;
g.tolers = tolers;
"""
Start worker processes to use on multi-core processor (able to run
in parallel - no GIL issue if each core has it's own VM)
"""
pool = multiprocessing.Pool(processes=config.numProcesses);
print("multiscale_quad_retrieval(): Spawned a pool of %d workers" % \
config.numProcesses);
listParams = range(0, len(q_harlocs)); #!!!!TODO: use counterStep, config.initFrame[indexVideo]
#res = pool.map(IterationStandaloneMQR, listParams);
# See https://docs.python.org/2/library/multiprocessing.html#module-multiprocessing.pool
res = pool.map(func=IterationStandaloneMQR, iterable=listParams, \
chunksize=1);
print("Pool.map returns %s" % str(res)); #x0.size + 1
"""
From https://medium.com/building-things-on-the-internet/40e9b2b36148
close the pool and wait for the work to finish
"""
pool.close();
pool.join();
# Doing the "reduce" phase after the workers have finished :)
assert len(res) == len(q_harlocs);
for queryFrame, resE in enumerate(res):
resEIndex = resE[0];
resE = resE[1];
assert resEIndex == queryFrame;
# Gives: "ValueError: output operand requires a reduction, but reduction is not enabled"
#Votes_space[:, queryFrame - 1] = votes;
Votes_space[:, queryFrame] = resE;
for queryFrame in range(len(q_harlocs)):
if cropflag == 0:
HH[:, queryFrame] = 1;
else:
"""
HH[:, queryFrame] = spatial_consistency.spatial_consistency(space_xy, \
qcen, len(RD), st_threshold, cropflag);
"""
HH[:, queryFrame] = spatial_consistency.spatial_consistency(space_xy, \
qcen, len(r_harlocs), st_threshold, cropflag);
try:
np.savez_compressed("Votes_space%d" % scale_index, Votes_space);
np.savez_compressed("HH%d" % scale_index, HH);
except:
common.DebugPrintErrorTrace();
return Votes_space, HH;
"""
We substitute q - 1 with q, since we want
to number arrays from 0 (not from 1 like in Matlab).
"""
#for q=1:length(QD)
#for q in range(1, len(QD) + 1):
#for queryFrame in range(len(QD)):
for queryFrame in range(len(q_harlocs)):
common.DebugPrint("multiscale_quad_retrieval(): Starting iteration queryFrame = %d" % queryFrame);
# tic
"""
str1=['load ' q_path QD(q).name]
eval(str1)
"""
"""
We make pp reference the desired multiharloc list for the query video
frame queryFrame
"""
pp = q_harlocs[queryFrame];
#pp = np.array(pp);
#common.DebugPrint("multiscale_quad_retrieval(): pp = %s" % str(pp));
#[qout,qcen,qmaxdis,qori]=findquads(pp(pp(:,3)==scale_index,1:2),md_threshold,0);
points = pp[pp[:, 2] == scale_index, 0:2];
qout, qcen, qmaxdis, qori = findquads.findquads(points, md_threshold, 0);
if common.MY_DEBUG_STDOUT and DBGPRINT:
print("multiscale_quad_retrieval(): queryFrame = %d, " \
"qout.shape (number of quads for query frame queryFrame) = %s" % \
(queryFrame, str(qout.shape)));
# disp([num2str(q) ' of ' num2str(length(QD)) ' -> ' num2str(size(qout,1)) ' quads'])
#space_xy=zeros(size(qcen,1),2*length(RD))+nan;
#space_xy = np.zeros( (qcen.shape[0], 2 * len(RD)) ) + np.nan;
space_xy = np.zeros( (qcen.shape[0], 2 * len(r_harlocs)) ) + np.nan;
# votes=zeros(length(RD),1)
#votes=zeros(length(RD),length(tolers));
#votes = np.zeros( (len(RD), 1) );
votes = np.zeros( (len(r_harlocs), 1) );
#nep = np.array([]);
#m_points = np.array([]);
assert isinstance(tolers, float);
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): quads of query frame %d are: " % queryFrame);
common.DebugPrint(" qout | |
methods in your AWS account.
:param bool tracing_enabled: Specifies whether active tracing with X-ray is enabled for this stage.
:param Any variables: A map that defines the stage variables. Variable names must consist of alphanumeric characters, and the values must match the following regular expression: [A-Za-z0-9-._~:/?#&=,]+.
"""
if access_log_setting is not None:
pulumi.set(__self__, "access_log_setting", access_log_setting)
if cache_cluster_enabled is not None:
pulumi.set(__self__, "cache_cluster_enabled", cache_cluster_enabled)
if cache_cluster_size is not None:
pulumi.set(__self__, "cache_cluster_size", cache_cluster_size)
if cache_data_encrypted is not None:
pulumi.set(__self__, "cache_data_encrypted", cache_data_encrypted)
if cache_ttl_in_seconds is not None:
pulumi.set(__self__, "cache_ttl_in_seconds", cache_ttl_in_seconds)
if caching_enabled is not None:
pulumi.set(__self__, "caching_enabled", caching_enabled)
if canary_setting is not None:
pulumi.set(__self__, "canary_setting", canary_setting)
if client_certificate_id is not None:
pulumi.set(__self__, "client_certificate_id", client_certificate_id)
if data_trace_enabled is not None:
pulumi.set(__self__, "data_trace_enabled", data_trace_enabled)
if description is not None:
pulumi.set(__self__, "description", description)
if documentation_version is not None:
pulumi.set(__self__, "documentation_version", documentation_version)
if logging_level is not None:
pulumi.set(__self__, "logging_level", logging_level)
if method_settings is not None:
pulumi.set(__self__, "method_settings", method_settings)
if metrics_enabled is not None:
pulumi.set(__self__, "metrics_enabled", metrics_enabled)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if throttling_burst_limit is not None:
pulumi.set(__self__, "throttling_burst_limit", throttling_burst_limit)
if throttling_rate_limit is not None:
pulumi.set(__self__, "throttling_rate_limit", throttling_rate_limit)
if tracing_enabled is not None:
pulumi.set(__self__, "tracing_enabled", tracing_enabled)
if variables is not None:
pulumi.set(__self__, "variables", variables)
    @property
    @pulumi.getter(name="accessLogSetting")
    def access_log_setting(self) -> Optional['outputs.DeploymentAccessLogSetting']:
        """
        Specifies settings for logging access in this stage.
        """
        # Raw value is stored under "access_log_setting"; wire name is "accessLogSetting".
        return pulumi.get(self, "access_log_setting")
    @property
    @pulumi.getter(name="cacheClusterEnabled")
    def cache_cluster_enabled(self) -> Optional[bool]:
        """
        Indicates whether cache clustering is enabled for the stage.
        """
        # Raw value is stored under "cache_cluster_enabled"; wire name is "cacheClusterEnabled".
        return pulumi.get(self, "cache_cluster_enabled")
    @property
    @pulumi.getter(name="cacheClusterSize")
    def cache_cluster_size(self) -> Optional[str]:
        """
        The size of the stage's cache cluster.
        """
        # Raw value is stored under "cache_cluster_size"; wire name is "cacheClusterSize".
        return pulumi.get(self, "cache_cluster_size")
    @property
    @pulumi.getter(name="cacheDataEncrypted")
    def cache_data_encrypted(self) -> Optional[bool]:
        """
        Indicates whether the cached responses are encrypted.
        """
        # NOTE(review): the previous docstring here was a copy-paste of the TTL
        # description from cache_ttl_in_seconds; corrected per the CloudFormation
        # MethodSetting.CacheDataEncrypted property.
        return pulumi.get(self, "cache_data_encrypted")
    @property
    @pulumi.getter(name="cacheTtlInSeconds")
    def cache_ttl_in_seconds(self) -> Optional[int]:
        """
        The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
        """
        # Raw value is stored under "cache_ttl_in_seconds"; wire name is "cacheTtlInSeconds".
        return pulumi.get(self, "cache_ttl_in_seconds")
    @property
    @pulumi.getter(name="cachingEnabled")
    def caching_enabled(self) -> Optional[bool]:
        """
        Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
        """
        # Raw value is stored under "caching_enabled"; wire name is "cachingEnabled".
        return pulumi.get(self, "caching_enabled")
    @property
    @pulumi.getter(name="canarySetting")
    def canary_setting(self) -> Optional['outputs.DeploymentCanarySetting']:
        """
        Specifies settings for the canary deployment in this stage.
        """
        # Raw value is stored under "canary_setting"; wire name is "canarySetting".
        return pulumi.get(self, "canary_setting")
    @property
    @pulumi.getter(name="clientCertificateId")
    def client_certificate_id(self) -> Optional[str]:
        """
        The identifier of the client certificate that API Gateway uses to call your integration endpoints in the stage.
        """
        # Raw value is stored under "client_certificate_id"; wire name is "clientCertificateId".
        return pulumi.get(self, "client_certificate_id")
    @property
    @pulumi.getter(name="dataTraceEnabled")
    def data_trace_enabled(self) -> Optional[bool]:
        """
        Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
        """
        # Raw value is stored under "data_trace_enabled"; wire name is "dataTraceEnabled".
        return pulumi.get(self, "data_trace_enabled")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        A description of the purpose of the stage.
        """
        # Raw value is stored under the "description" key of this output object.
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="documentationVersion")
    def documentation_version(self) -> Optional[str]:
        """
        The version identifier of the API documentation snapshot.
        """
        # Raw value is stored under "documentation_version"; wire name is "documentationVersion".
        return pulumi.get(self, "documentation_version")
    @property
    @pulumi.getter(name="loggingLevel")
    def logging_level(self) -> Optional[str]:
        """
        The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference.
        """
        # Raw value is stored under "logging_level"; wire name is "loggingLevel".
        return pulumi.get(self, "logging_level")
    @property
    @pulumi.getter(name="methodSettings")
    def method_settings(self) -> Optional[Sequence['outputs.DeploymentMethodSetting']]:
        """
        Configures settings for all of the stage's methods.
        """
        # Raw value is stored under "method_settings"; wire name is "methodSettings".
        return pulumi.get(self, "method_settings")
    @property
    @pulumi.getter(name="metricsEnabled")
    def metrics_enabled(self) -> Optional[bool]:
        """
        Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
        """
        # Raw value is stored under "metrics_enabled"; wire name is "metricsEnabled".
        return pulumi.get(self, "metrics_enabled")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Sequence['outputs.DeploymentTag']]:
        """
        An array of arbitrary tags (key-value pairs) to associate with the stage.
        """
        # Raw value is stored under the "tags" key of this output object.
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="throttlingBurstLimit")
    def throttling_burst_limit(self) -> Optional[int]:
        """
        The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
        """
        # Raw value is stored under "throttling_burst_limit"; wire name is "throttlingBurstLimit".
        return pulumi.get(self, "throttling_burst_limit")
    @property
    @pulumi.getter(name="throttlingRateLimit")
    def throttling_rate_limit(self) -> Optional[float]:
        """
        The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
        """
        # Raw value is stored under "throttling_rate_limit"; wire name is "throttlingRateLimit".
        return pulumi.get(self, "throttling_rate_limit")
    @property
    @pulumi.getter(name="tracingEnabled")
    def tracing_enabled(self) -> Optional[bool]:
        """
        Specifies whether active tracing with X-ray is enabled for this stage.
        """
        # Raw value is stored under "tracing_enabled"; wire name is "tracingEnabled".
        return pulumi.get(self, "tracing_enabled")
    @property
    @pulumi.getter
    def variables(self) -> Optional[Any]:
        """
        A map that defines the stage variables. Variable names must consist of alphanumeric characters, and the values must match the following regular expression: [A-Za-z0-9-._~:/?#&=,]+.
        """
        # Raw value is stored under the "variables" key of this output object.
        return pulumi.get(self, "variables")
@pulumi.output_type
class DeploymentTag(dict):
    """A single key-value tag attached to a Deployment stage (auto-generated output type)."""
    def __init__(__self__, *,
                 key: str,
                 value: str):
        """
        :param str key: The key name of the tag
        :param str value: The value for the tag
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The key name of the tag
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> str:
        """
        The value for the tag
        """
        return pulumi.get(self, "value")
@pulumi.output_type
class DocumentationPartLocation(dict):
    """Location of an API entity (method/name/path/status code/type) that a
    documentation part applies to (auto-generated output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used for dict access instead of the
        # snake_case property getter.
        suggest = None
        if key == "statusCode":
            suggest = "status_code"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DocumentationPartLocation. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        DocumentationPartLocation.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        DocumentationPartLocation.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 method: Optional[str] = None,
                 name: Optional[str] = None,
                 path: Optional[str] = None,
                 status_code: Optional[str] = None,
                 type: Optional[str] = None):
        # Only set keys for arguments that were actually provided.
        if method is not None:
            pulumi.set(__self__, "method", method)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if path is not None:
            pulumi.set(__self__, "path", path)
        if status_code is not None:
            pulumi.set(__self__, "status_code", status_code)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def method(self) -> Optional[str]:
        return pulumi.get(self, "method")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def path(self) -> Optional[str]:
        return pulumi.get(self, "path")

    @property
    @pulumi.getter(name="statusCode")
    def status_code(self) -> Optional[str]:
        return pulumi.get(self, "status_code")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        return pulumi.get(self, "type")
@pulumi.output_type
class DomainNameEndpointConfiguration(dict):
    """Endpoint configuration of a custom domain name: an optional list of
    endpoint type strings (auto-generated output type)."""
    def __init__(__self__, *,
                 types: Optional[Sequence[str]] = None):
        if types is not None:
            pulumi.set(__self__, "types", types)

    @property
    @pulumi.getter
    def types(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "types")
@pulumi.output_type
class DomainNameMutualTlsAuthentication(dict):
    """Mutual-TLS authentication settings for a custom domain name: a truststore
    URI and an optional truststore version (auto-generated output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used for dict access instead of the
        # snake_case property getter.
        suggest = None
        if key == "truststoreUri":
            suggest = "truststore_uri"
        elif key == "truststoreVersion":
            suggest = "truststore_version"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DomainNameMutualTlsAuthentication. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        DomainNameMutualTlsAuthentication.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        DomainNameMutualTlsAuthentication.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 truststore_uri: Optional[str] = None,
                 truststore_version: Optional[str] = None):
        if truststore_uri is not None:
            pulumi.set(__self__, "truststore_uri", truststore_uri)
        if truststore_version is not None:
            pulumi.set(__self__, "truststore_version", truststore_version)

    @property
    @pulumi.getter(name="truststoreUri")
    def truststore_uri(self) -> Optional[str]:
        return pulumi.get(self, "truststore_uri")

    @property
    @pulumi.getter(name="truststoreVersion")
    def truststore_version(self) -> Optional[str]:
        return pulumi.get(self, "truststore_version")
@pulumi.output_type
class DomainNameTag(dict):
    """A single (optional) key-value tag attached to a DomainName
    (auto-generated output type)."""
    def __init__(__self__, *,
                 key: Optional[str] = None,
                 value: Optional[str] = None):
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        return pulumi.get(self, "value")
@pulumi.output_type
class MethodIntegration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cacheKeyParameters":
suggest = "cache_key_parameters"
elif key == "cacheNamespace":
suggest = "cache_namespace"
elif key == "connectionId":
suggest = "connection_id"
elif key == "connectionType":
suggest = "connection_type"
elif key == "contentHandling":
suggest = "content_handling"
elif key == "integrationHttpMethod":
suggest = "integration_http_method"
elif key == "integrationResponses":
suggest = "integration_responses"
elif key == "passthroughBehavior":
suggest = "passthrough_behavior"
elif key == "requestParameters":
suggest = "request_parameters"
elif key == "requestTemplates":
suggest = "request_templates"
elif key == "timeoutInMillis":
suggest = "timeout_in_millis"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in MethodIntegration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
MethodIntegration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
MethodIntegration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
type: 'MethodIntegrationType',
cache_key_parameters: Optional[Sequence[str]] = None,
cache_namespace: Optional[str] = None,
connection_id: Optional[str] = None,
connection_type: Optional['MethodIntegrationConnectionType'] = None,
content_handling: Optional['MethodIntegrationContentHandling'] = None,
credentials: Optional[str] = None,
integration_http_method: Optional[str] = None,
integration_responses: Optional[Sequence['outputs.MethodIntegrationResponse']] = None,
passthrough_behavior: Optional['MethodIntegrationPassthroughBehavior'] = None,
request_parameters: Optional[Any] = None,
request_templates: Optional[Any] = None,
timeout_in_millis: Optional[int] = None,
uri: Optional[str] = None):
"""
:param 'MethodIntegrationType' type: The type of backend that | |
<filename>gui/core/surface_browser.py
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
#from matplotlib.backends.backend_qt4 import FigureCanvasQT as FigureCanvas
#from matplotlib.backends.backend_qt4 import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from PyQt4 import QtGui, Qt, QtCore
#import os
import gdal_interface
from matplotlib.colors import LightSource
from matplotlib import cm
import config
import time
import numpy as np
from matplotlib.widgets import RectangleSelector
class SurfaceBrowser(QtGui.QWidget):
    def __init__(self, source):
        """Build the surface-browser widget.

        :param source: data-source object; this code calls source.db(),
            source.current_row() and source.select_rows() elsewhere in the class.
        """
        QtGui.QWidget.__init__(self)
        self.source = source
        splitter = QtGui.QSplitter(Qt.Qt.Vertical, self)
        # --- matplotlib figure/canvas and its event wiring ---
        self.figure = plt.figure(frameon = False) #(3)?
        self.figure.add_axes([0,0,1,1])
        #self.figure.gca().invert_yaxis()
        self.canvas = FigureCanvas(self.figure)
        self.canvas.mpl_connect("motion_notify_event", self.mouse_move)
        self.canvas.mpl_connect("pick_event", self.on_pick)
        self.canvas.mpl_connect("button_release_event", self.mouse_release)
        self.canvas.mpl_connect("button_press_event", self.mouse_press)
        self.canvas.mpl_connect("resize_event", lambda x: self.replot(center_at_cr = False, autozoom=True))
        # Context menu on the canvas.
        # NOTE(review): insert_nondetection and exec_canvas_menu are not visible
        # in this chunk -- presumably defined later in the class.
        self.canvasMenu = QtGui.QMenu(self)
        ca = QtGui.QAction('Insert a non-detection here', self)
        ca.triggered.connect(self.insert_nondetection)
        self.canvasMenu.addAction(ca)
        self.canvas.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.canvas.customContextMenuRequested.connect(self.exec_canvas_menu)
        # Rectangle selection (left mouse button only); see rectangle_selected.
        self.RS = RectangleSelector(self.figure.gca(), self.rectangle_selected,
                                       drawtype='box', useblit=True,
                                       button=[1], #[1,3], # don't use middle button
                                       minspanx=5, minspany=5,
                                       spancoords='pixels')
        # Hillshading light source; azimuth/altitude are driven by the dials below.
        self.light_source = LightSource(azdeg = 135, altdeg = 30)
        self.image = None
        self.raster = None
        self.last_x = None
        self.points = None
        self.selected = None
        self.current = None
        self.last_hidden = []
        self.last_selected = []
        self.last_current = []
        self.last_plotted_rows = []
        self.last_pick_time = time.time()
        self.last_x = None
        self.last_y = None
        self.last_top_left = None
        self.last_bottom_right = None
        # --- control widgets: window size spinner, light altitude/azimuth dials ---
        self.size_slider = QtGui.QSpinBox () # (Qt.Qt.Horizontal)
        self.size_slider.setMinimum(5)
        self.size_slider.setMaximum(1000)
        self.size_slider.setSingleStep(10)
        self.size_slider.setSuffix("px")
        self.altitude_slider = QtGui.QDial()
        self.altitude_slider.valueChanged.connect(self.altitude_slider_change)
        self.altitude_slider.setMinimum(0)
        self.altitude_slider.setMaximum(360)
        self.altitude_slider.setWrapping(True)
        # Restore persisted light altitude; fall back to 30 degrees.
        # Dial positions are stored negated/offset (see altitude_slider_change).
        try:
            sv = int(config.get("raster_light_altitude"))
            self.altitude_slider.setValue( (-sv - 90) % 360 )
        except:
            self.altitude_slider.setValue( (-30 - 90) % 360 )
        self.angle_slider = QtGui.QDial() #QSlider(Qt.Qt.Vertical)
        self.angle_slider.setWrapping(True)
        self.angle_slider.setMinimum(0)
        self.angle_slider.setMaximum(360)
        self.angle_label = QtGui.QLabel("Azimuth")
        #self.angle_label.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
        #self.angle_label.setStyleSheet("QLabel { background-color : red; color : blue; }");
        self.angle_slider.setMaximumSize(40, 40)
        self.altitude_slider.setMaximumSize(40, 40)
        #self.altitude_slider.setMinimum(0)
        #self.altitude_slider.setMaximum(90)
        # Restore persisted raster window size; fall back to 100 px.
        try:
            sv = int(config.get("raster_window_size"))
            self.size_slider.setValue(sv)
        except:
            self.size_slider.setValue(100)
        self.angle_slider.valueChanged.connect(self.angle_slider_change)
        # Restore persisted light azimuth; fall back to 135 degrees (negated dial value).
        try:
            sv = int(config.get("raster_light_angle"))
            self.angle_slider.setValue( (- sv) % 360)
        except:
            self.angle_slider.setValue( (- 135) % 360)
        self.size_slider.valueChanged.connect(self.size_slider_change)
        #layout = QtGui.QHBoxLayout()
        #layout.addWidget()
        #hsplit.setOrientation(Qt.Qt.Vertical)
        # --- toolbar row: matplotlib navigation toolbar (trimmed) + controls ---
        clayout = QtGui.QHBoxLayout()
        self.nav = NavigationToolbar(self.canvas, self, coordinates = False)
        a = self.nav.addAction(QtGui.QIcon('etc/arrow.png'), 'Point', self.nav_pointer)
        a.setToolTip('Turn off pan/zoom')
        #self.nav.configure_subplots.setVisible(False)
        #self.nav.save_figure.setVisible(False)
        # Hide the standard toolbar actions that are not wanted here.
        for i,x in enumerate(self.nav.findChildren(QtGui.QAction)):
            #print i,x
            if x.text() in ['Subplots', 'Save', 'Customize', 'Back', 'Forward']:
                x.setVisible(False)
        #self.nav.DeleteToolByPos(6)
        #self.nav.setMaximumWidth(200)
        clayout.addWidget(self.nav, 0, QtCore.Qt.AlignLeft)
        clayout.setAlignment(QtCore.Qt.AlignLeft)
        clayout.addItem(QtGui.QSpacerItem(20,20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding))
        clayout2 = QtGui.QHBoxLayout()
        #clayout2.setAlignment(QtCore.Qt.AlignLeft)
        clayout2.addWidget(QtGui.QLabel("Area"))
        clayout2.addWidget(self.size_slider)
        #clayout.addWidget(QtGui.QLabel("Light source control"))
        s = str((- self.angle_slider.value()) % 360)
        #clayout_ang = QtGui.QHBoxLayout()
        clayout2.addWidget(QtGui.QLabel("Altitude"))
        clayout2.addWidget(self.altitude_slider)
        clayout2.addWidget(self.angle_label)
        clayout2.addWidget(self.angle_slider)
        #clayout.addLayout(clayout_ang)
        clayout2.setContentsMargins(0,0,0,0)
        clayout.addLayout(clayout2)
        # --- final assembly: controls row above the canvas, inside a splitter ---
        w = QtGui.QWidget() #QGroupBox("Raster parameters")
        w.setLayout(clayout)
        splitter.addWidget(w)
        splitter.addWidget(self.canvas)
        layout = QtGui.QVBoxLayout()
        layout.setAlignment(QtCore.Qt.AlignTop)
        layout.addWidget(splitter)
        clayout.setContentsMargins(0,0,0,0)
        self.setLayout(layout)
def mouse_press(self,event):
self.RS.set_active(self.raster is not None and not self.nav._active in ['PAN', 'ZOOM'])
    def rectangle_selected(self, eclick, erelease):
        """RectangleSelector callback: select every data row whose coordinates
        fall inside the dragged box (in data coordinates)."""
        x1, y1 = eclick.xdata, eclick.ydata
        x2, y2 = erelease.xdata, erelease.ydata
        print ("rectangle", x1, y1, x2,y2)
        # min/max normalise the corners so the drag direction does not matter.
        rows = self.source.db().filter_rectangle(min(x1,x2), max(x1,x2), min(y1,y2), max(y1,y2))
        self.source.select_rows(rows)
    def nav_pointer(self):
        """Switch back to the plain pointer by toggling off whichever toolbar
        tool (pan or zoom) is currently active."""
        # NOTE: relies on the private NavigationToolbar2 attribute `_active`;
        # calling pan()/zoom() while active toggles the tool off.
        if self.nav._active == 'PAN': self.nav.pan()
        if self.nav._active == 'ZOOM': self.nav.zoom()
status_changed = QtCore.pyqtSignal(QtCore.QString)
point_selected = QtCore.pyqtSignal(int)
point_inserted = QtCore.pyqtSignal(float,float)
    def mouse_move(self, event):
        """Emit the cursor's data coordinates as status text while a raster is
        loaded; ignore moves outside the axes (xdata is None there)."""
        if self.raster is not None:
            #print (event.xdata, event.ydata)
            if event.xdata is None: return
            #cx,cy = self.raster.to_raster_coords(self.last_x, self.last_y)
            self.status_changed.emit(str(tuple(map(int, [event.xdata, event.ydata]))))
            #self.status_changed.emit(str(tuple(map(int, self.raster.to_geo_coords(self.last_tlr[0]+event.xdata, self.last_tlr[1] + event.ydata)))))
        #c = self.nav.findChildren(QtGui.QLabel)
        #if len(c) > 0:
        #    print c[-1].text()
        #    c[-1].hide()
    def mouse_release(self, event):
        """On mouse release, deactivate the rectangle selector and, if the
        visible view has drifted outside the last plotted raster window,
        re-centre on the current view centre and replot."""
        self.RS.set_active(False)
        if self.last_top_left is None: return
        ax = self.figure.gca()
        # Inner 10%..90% axes-fraction corners and the view centre, in data coords.
        corners = ax.transData.inverted().transform( ax.transAxes.transform( [(0.1, 0.1), (0.1,0.9), (0.9,0.1), (0.9,0.9)]))
        lon,lat = ax.transData.inverted().transform( ax.transAxes.transform( (0.5, 0.5)))
        #tlr = self.raster.to_raster_coords(*self.last_top_left)
        #lon,lat=self.raster.to_geo_coords(tlr[0] + xy[0], self.last_tlr[1]+xy[1])
        tl,br= self.last_top_left, self.last_bottom_right
        #print (lon,lat,tl,br)
        # Bounding box of the last plotted raster window (order-normalised).
        mx,Mx = min(tl[0], br[0]), max(tl[0], br[0])
        my,My = min(tl[1], br[1]), max(tl[1], br[1])
        inside = lambda x: ((mx <= x[0] <= Mx) and (my <= x[1] <= My))
        # min() over booleans == "all corners inside"; replot if any corner left the window.
        if not min(map(inside, corners)):
            #print ("replot from release")
            self.last_x=lon
            self.last_y=lat
            self.replot(center_at_cr = False, autozoom = True, keep_lims = True)
    def on_pick(self, event):
        """Pick handler: debounce rapid double picks (<500 ms), emit the picked
        point's id via point_selected, and refresh the point overlay."""
        t = time.time()
        # Debounce: ignore a second pick arriving within 500 ms of the previous one.
        if 1000 * (t - self.last_pick_time) < 500:
            #print "Ignore quick pick"
            self.last_pick_time = t
            return #bug?? picks the press for the new point
        self.last_pick_time = t
        artist = event.artist
        #xmouse, ymouse = event.mouseevent.xdata, event.mouseevent.ydata
        #x, y = artist.get_xdata(), artist.get_ydata()
        ind = event.ind
        #print 'Artist picked:', event.artist
        #print '{} vertices picked'.format(len(ind))
        #print 'Pick between vertices {} and {}'.format(min(ind), max(ind)+1)
        #print 'x, y of mouse: {:.2f},{:.2f}'.format(xmouse, ymouse)
        #print 'Data point:', x[ind[0]], y[ind[0]]
        #if self.current is not None:
        #    xc,yc = self.current.get_xdata(), self.current.get_ydata()
        #    if (len(xc) > 0):
        #        xc[0] = x[ind[0]]
        #        yc[0] = y[ind[0]]
        #        self.current.set_xdata(xc)
        #        self.current.set_ydata(yc)
        # NOTE(review): `artist.ids` is an attribute attached elsewhere when the
        # points are plotted -- not visible in this chunk.
        self.point_selected.emit(artist.ids[ind[0]])
        #print ("replot from pick")
        self.replot(only_points=True)
        #self.canvas.draw()
def angle_slider_change(self):
v = (- self.angle_slider.value()) % 360
config.set("raster_light_angle", str(v))
self.light_source.azdeg = v
s = str((- self.angle_slider.value()) % 360)
self.status_changed.emit("Light azimuth: "+s.rjust(6, " "))
self.replot()
    def altitude_slider_change(self):
        """Apply the altitude slider value to the hill-shading light source.

        The slider value is remapped onto [0, 90] degrees: values that
        land in (90, 180] are clamped to 90 and values above 180 wrap to
        0, writing the clamped value back to the slider.
        """
        v = (- self.altitude_slider.value()-90) % 360
        if v > 90 and v <= 180:
            v = 90
            # NOTE(review): setValue() likely re-fires valueChanged and
            # re-enters this slot — confirm there is no feedback loop.
            self.altitude_slider.setValue( (-90 - v) % 360)
        if v > 180:
            v = 0
            self.altitude_slider.setValue( (-90-v) % 360)
        config.set("raster_light_altitude", str(v))
        self.light_source.altdeg = v
        self.status_changed.emit("Light altitude: "+str(v).rjust(6, " "))
        self.replot()
def size_slider_change(self):
config.set("raster_window_size", str(self.size_slider.value()))
self.replot(autozoom = True)
def pan_to(self, coord):
if coord == 'cr':
cr = self.source.current_row()
if (center_at_cr) and (cr is not None):
x,y = self.source.db().get_item(cr, "LONGITUDE"), self.source.db().get_item(cr, "LATITUDE")
else:
x,y = coord
xl = self.figure.gca().get_xlim()
yl = self.figure.gca().get_ylim()
hx = (xl[1] - xl[0])/2.0
hy = (yl[1] - yl[0])/2.0
ax.set_xlim(x -hx , x+ hx)
ax.set_xlim(y -hy , y+ hy)
def replot(self, only_points = False, center_at_cr = False, autozoom = False, hidden =None, keep_lims = False):
if self.last_x is None:
return
x,y = self.last_x, self.last_y
cr = self.source.current_row()
if (center_at_cr) and (cr is not None):
x,y = self.source.db().get_item(cr, "LONGITUDE"), self.source.db().get_item(cr, "LATITUDE")
if hidden is None:
hidden = self.last_hidden
if (self.last_x is not None):
self.plot_point(x, y, hidden, self.source.selected_rows(), [self.source.current_row()], only_points, autozoom, keep_lims)
def set_raster(self, filename):
if filename == "":
self.raster = None
return
self.raster = gdal_interface.GdalMap(filename)
def set_projection(self, filename):
print ("Setting projection: "+filename)
if filename == "":
return
if self.raster is not None:
self.raster.dataset.SetProjection(gdal_interface.load_proj(filename).ExportToWkt()) #strange.
#always autozoom??? or not autozoom = keep_lims?
def plot_point(self, x, y, hidden = [], selected = [], current = [], only_points = False, autozoom=True, keep_lims = False):
if autozoom or self.last_top_left is None:
v = self.size_slider.value()
width,height = self.canvas.get_width_height()
#print width, height
if height < width:
dy = v
dx = (v * width) / height
else:
dx = v
dy = (v* height) / width
#print (dx, dy)
self.plot_area((x-dx, y + dy), (x+dx, y-dy), hidden, selected, current, only_points, autozoom, keep_lims)
else:
self.plot_area(self.last_top_left, self.last_bottom_right, hidden, selected, current, only_points, autozoom, keep_lims)
self.last_x = x
self.last_y = y
self.last_hidden =hidden
self.last_selected = selected
self.last_current = current
#coords in LKS
#so far hidden are direct ids
#and selected, current are "ID"s
def plot_area(self, top_left0, bottom_right0, hidden = [], selected = [], current = [], only_points=False, autozoom = True, keep_lims=False):
if self.raster == None:
return
if autozoom:
self.nav._views.clear()
self.nav._positions.clear()
#print ("plot_area", top_left0, bottom_right0)
ax = self.figure.gca()
top_left, bottom_right = self.raster.get_actual_bounds(top_left0, bottom_right0)
xl = ax.get_xlim()
yl=ax.get_ylim()
if not only_points:
#ax.set_aspect("equal", "datalim")
r = self.raster.get_rectangle(top_left, bottom_right)
if len(r) == 0:
print "No raster at this point"
return
#print (r.max(), r.min())
r= np.ma.array(r, mask= (r<-9999)) #mask
gist_tampered = cm.gist_earth
gist_tampered.set_bad('k', 1.0)
#print (r)
r = self.light_source.shade(r, gist_tampered) #create shadows
dim = (len(r), len(r[0]))
e = [top_left[0], bottom_right[0], bottom_right[1], top_left[1]]
#e = [top_left[0], bottom_right[0], top_left[1], bottom_right[1]]
if (self.image is None) or (dim != self.last_dimension):
#print ("new imshow")
ax.autoscale(True)
#ax.get_xaxis().set_visible(False)
#ax.get_yaxis().set_visible(False)
ax.set_axis_off()
self.image = ax.imshow(r, aspect = 'equal', extent = e)
ax.set_aspect("equal", "datalim")
#self.image = ax.imshow(r, aspect = 'auto')
ax.autoscale(False)
else:
#print ("update imshow")
self.image.set_array(r)
self.image.set_extent(e)
self.last_dimension = dim
#print ('TOPLEFT', top_left)
#print ('BOTTOMRIGHT', bottom_right)
if keep_lims:
ax.set_xlim(xl)
ax.set_ylim(yl)
elif autozoom:
ax.set_xlim(top_left0[0], bottom_right0[0])
#ax.set_ylim(top_left0[1], bottom_right0[1]) #upside down!!
ax.set_ylim(bottom_right0[1], top_left0[1])
self.last_top_left = top_left0
self.last_bottom_right = bottom_right0
#print ("actual bounds: ", top_left, bottom_right)
##tlr = self.raster.to_raster_coords(top_left[0], top_left[1])
##self.last_tlr = tlr
#print ("tlr:",tlr)
#brr = self.raster.to_raster_coords(bottom_right)
db = self.source.db()
rows = db.filter_rectangle(top_left[0], bottom_right[0], bottom_right[1], top_left[1])
rows = [rr for rr in rows if not rr in hidden]
x = db.get_projection("LONGITUDE", rows)
y = db.get_projection("LATITUDE", rows)
#ids = db.get_projection("ID", rows)
##xy_tr = [self.raster.to_raster_coords(*z) for z in zip(x,y)]
##x_tr,y_tr = zip(*xy_tr)
# subtract top left coordinates, because the image is shifted to (0,0)
##x_tr = [xx - tlr[0] for xx in x_tr]
##y_tr = [yy - tlr[1] for yy in y_tr]
x_tr, y_tr = x,y
rrows = range(len(rows))
self.last_plotted_rows = rows
rselected = filter(lambda x: (rows[x] in selected) and (not rows[x] in | |
<gh_stars>1-10
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as tdist
import numpy as np
"""
import argparse
# coding: utf-8
# Take length 50 snippets and record the cumulative return for each one. Then determine ground truth labels based on this.
# In[1]:
import sys
import pickle
import gym
from gym import spaces
import time
import random
from torchvision.utils import save_image
from run_test import *
from baselines.common.trex_utils import preprocess
import os
def generate_novice_demos(env, env_name, agent, model_dir):
checkpoint_min = 50
checkpoint_max = 600
checkpoint_step = 50
checkpoints = []
if env_name == "enduro":
checkpoint_min = 3100
checkpoint_max = 3650
elif env_name == "seaquest":
checkpoint_min = 10
checkpoint_max = 65
checkpoint_step = 5
for i in range(checkpoint_min, checkpoint_max + checkpoint_step, checkpoint_step):
if i < 10:
checkpoints.append('0000' + str(i))
elif i < 100:
checkpoints.append('000' + str(i))
elif i < 1000:
checkpoints.append('00' + str(i))
elif i < 10000:
checkpoints.append('0' + str(i))
print(checkpoints)
demonstrations = []
learning_returns = []
learning_rewards = []
for checkpoint in checkpoints:
model_path = model_dir + "/models/" + env_name + "_25/" + checkpoint
if env_name == "seaquest":
model_path = model_dir + "/models/" + env_name + "_5/" + checkpoint
agent.load(model_path)
episode_count = 30
for i in range(episode_count):
done = False
traj = []
actions = []
gt_rewards = []
r = 0
ob = env.reset()
steps = 0
acc_reward = 0
#os.mkdir('images/' + str(checkpoint))
frameno = 0
while True:
action = agent.act(ob, r, done)
ob, r, done, info = env.step(action)
ob_processed = preprocess(ob, env_name)
ob_processed = ob_processed[0] #get rid of first dimension ob.shape = (1,84,84,4)
traj.append(ob_processed)
actions.append(action[0])
#save_image(torch.from_numpy(ob_processed).permute(2, 0, 1).reshape(4*84, 84), 'images/' + str(checkpoint) + '/' + str(frameno) + '_action_' + str(action[0]) + '.png')
frameno += 1
gt_rewards.append(r[0])
steps += 1
acc_reward += r[0]
if done:
print("checkpoint: {}, steps: {}, return: {}".format(checkpoint, steps,acc_reward))
break
print("traj length", len(traj))
print("demo length", len(demonstrations))
demonstrations.append([traj, actions])
learning_returns.append(acc_reward)
learning_rewards.append(gt_rewards)
return demonstrations, learning_returns, learning_rewards
def create_training_data(demonstrations, num_trajs, num_snippets, min_snippet_length, max_snippet_length):
#collect training data
max_traj_length = 0
training_obs = []
training_labels = []
times = []
actions = []
num_demos = len(demonstrations)
#add full trajs (for use on Enduro)
"aaa""
for n in range(num_trajs):
ti = 0
tj = 0
#only add trajectories that are different returns
while(ti == tj):
#pick two random demonstrations
ti = np.random.randint(num_demos)
tj = np.random.randint(num_demos)
#create random partial trajs by finding random start frame and random skip frame
si = np.random.randint(6)
sj = np.random.randint(6)
step = np.random.randint(3,7)
traj_i = demonstrations[ti][si::step] #slice(start,stop,step)
traj_j = demonstrations[tj][sj::step]
if ti > tj:
label = 0
else:
label = 1
training_obs.append((traj_i, traj_j))
training_labels.append(label)
max_traj_length = max(max_traj_length, len(traj_i), len(traj_j))
"aaa""
#fixed size snippets with progress prior
for n in range(num_snippets):
ti = 0
tj = 0
#only add trajectories that are different returns
while(ti == tj):
#pick two random demonstrations
ti = np.random.randint(num_demos)
tj = np.random.randint(num_demos)
#create random snippets
#find min length of both demos to ensure we can pick a demo no earlier than that chosen in worse preferred demo
min_length = min(len(demonstrations[ti][0]), len(demonstrations[tj][0]))
rand_length = np.random.randint(min_snippet_length, max_snippet_length)
if ti < tj: #pick tj snippet to be later than ti
ti_start = np.random.randint(min_length - rand_length + 1)
#print(ti_start, len(demonstrations[tj]))
tj_start = np.random.randint(ti_start, len(demonstrations[tj][0]) - rand_length + 1)
else: #ti is better so pick later snippet in ti
tj_start = np.random.randint(min_length - rand_length + 1)
#print(tj_start, len(demonstrations[ti]))
ti_start = np.random.randint(tj_start, len(demonstrations[ti][0]) - rand_length + 1)
traj_i = demonstrations[ti][0][ti_start:ti_start+rand_length:1] #skip everyother framestack to reduce size
traj_j = demonstrations[tj][0][tj_start:tj_start+rand_length:1]
traj_i_actions = demonstrations[ti][1][ti_start:ti_start+rand_length:1] #skip everyother framestack to reduce size
traj_j_actions = demonstrations[tj][1][tj_start:tj_start+rand_length:1]
max_traj_length = max(max_traj_length, len(traj_i), len(traj_j))
if ti > tj:
label = 0
else:
label = 1
len1 = len(traj_i)
len2 = len(list(range(ti_start, ti_start+rand_length, 1)))
if len1 != len2:
print("---------LENGTH MISMATCH!------")
training_obs.append((traj_i, traj_j))
training_labels.append(label)
times.append((list(range(ti_start, ti_start+rand_length, 1)), list(range(tj_start, tj_start+rand_length, 1))))
actions.append((traj_i_actions, traj_j_actions))
print("maximum traj length", max_traj_length)
return training_obs, training_labels, times, actions
"""
class Net(nn.Module):
def __init__(self, ENCODING_DIMS, ACTION_SPACE_SIZE):
super().__init__()
self.conv1 = nn.Conv2d(4, 16, 7, stride=3)
self.conv2 = nn.Conv2d(16, 32, 5, stride=2)
self.conv3 = nn.Conv2d(32, 32, 3, stride=1)
self.conv4 = nn.Conv2d(32, 16, 3, stride=1)
intermediate_dimension = 128 #min(784, max(64, ENCODING_DIMS*2))
self.fc1 = nn.Linear(784, intermediate_dimension)
self.fc_mu = nn.Linear(intermediate_dimension, ENCODING_DIMS)
self.fc_var = nn.Linear(intermediate_dimension, ENCODING_DIMS)
self.fc2 = nn.Linear(ENCODING_DIMS, 1)
self.reconstruct1 = nn.Linear(ENCODING_DIMS, intermediate_dimension)
self.reconstruct2 = nn.Linear(intermediate_dimension, 1568)
self.reconstruct_conv1 = nn.ConvTranspose2d(2, 4, 3, stride=1)
self.reconstruct_conv2 = nn.ConvTranspose2d(4, 16, 6, stride=1)
self.reconstruct_conv3 = nn.ConvTranspose2d(16, 16, 7, stride=2)
self.reconstruct_conv4 = nn.ConvTranspose2d(16, 4, 10, stride=1)
self.temporal_difference1 = nn.Linear(ENCODING_DIMS*2, 1, bias=False)#ENCODING_DIMS)
#self.temporal_difference2 = nn.Linear(ENCODING_DIMS, 1)
self.inverse_dynamics1 = nn.Linear(ENCODING_DIMS*2, ACTION_SPACE_SIZE, bias=False) #ENCODING_DIMS)
#self.inverse_dynamics2 = nn.Linear(ENCODING_DIMS, ACTION_SPACE_SIZE)
self.forward_dynamics1 = nn.Linear(ENCODING_DIMS + ACTION_SPACE_SIZE, ENCODING_DIMS, bias=False)# (ENCODING_DIMS + ACTION_SPACE_SIZE) * 2)
#self.forward_dynamics2 = nn.Linear((ENCODING_DIMS + ACTION_SPACE_SIZE) * 2, (ENCODING_DIMS + ACTION_SPACE_SIZE) * 2)
#self.forward_dynamics3 = nn.Linear((ENCODING_DIMS + ACTION_SPACE_SIZE) * 2, ENCODING_DIMS)
self.normal = tdist.Normal(0, 1)
self.softmax = nn.Softmax(dim=1)
self.sigmoid = nn.Sigmoid()
print("Intermediate dimension calculated to be: " + str(intermediate_dimension))
def reparameterize(self, mu, var): #var is actually the log variance
if self.training:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
std = var.mul(0.5).exp()
eps = self.normal.sample(mu.shape).to(device)
return eps.mul(std).add(mu)
else:
return mu
def cum_return(self, traj):
#print("input shape of trajectory:")
#print(traj.shape)
'''calculate cumulative return of trajectory'''
sum_rewards = 0
sum_abs_rewards = 0
x = traj.permute(0,3,1,2) #get into NCHW format
#compute forward pass of reward network (we parallelize across frames so batch size is length of partial trajectory)
print("pre any:", x.shape)
x = F.leaky_relu(self.conv1(x))
print("after conv1:", x.shape)
x = F.leaky_relu(self.conv2(x))
print("after conv2:", x.shape)
x = F.leaky_relu(self.conv3(x))
print("after conv3:", x.shape)
x = F.leaky_relu(self.conv4(x))
print("after conv4:", x.shape)
x = x.view(-1, 784)
x = F.leaky_relu(self.fc1(x))
mu = self.fc_mu(x)
var = self.fc_var(x)
z = self.reparameterize(mu, var)
#print("after fc_mu:", x.shape)
r = self.fc2(z)
sum_rewards += torch.sum(r)
sum_abs_rewards += torch.sum(torch.abs(r))
return sum_rewards, sum_abs_rewards, mu, var, z
def estimate_temporal_difference(self, z1, z2):
x = self.temporal_difference1(torch.cat((z1, z2), 1))
#x = self.temporal_difference2(x)
return x
def forward_dynamics(self, z1, actions):
x = torch.cat((z1, actions), dim=1)
x = self.forward_dynamics1(x)
#x = F.leaky_relu(self.forward_dynamics2(x))
#x = self.forward_dynamics3(x)
return x
def estimate_inverse_dynamics(self, z1, z2):
concatenation = torch.cat((z1, z2), 1)
x = self.inverse_dynamics1(concatenation)
#x = F.leaky_relu(self.inverse_dynamics2(x))
return x
def decode(self, encoding):
#print("before:", encoding.shape)
x = F.leaky_relu(self.reconstruct1(encoding))
#print("after reconstruct1:", x.shape)
x = F.leaky_relu(self.reconstruct2(x))
#print("after reconstruct2:", x.shape)
x = x.view(-1, 2, 28, 28)
#print("------decoding--------")
#print(x.shape)
x = F.leaky_relu(self.reconstruct_conv1(x))
#print("after reconstruct_conv1:", x.shape)
#print(x.shape)
x = F.leaky_relu(self.reconstruct_conv2(x))
#print("after reconstruct_conv2:", x.shape)
#print(x.shape)
#print(x.shape)
x = F.leaky_relu(self.reconstruct_conv3(x))
#print("after reconstruct_conv3:", x.shape)
#print(x.shape)
#print(x.shape)
x = self.sigmoid(self.reconstruct_conv4(x))
#print("after reconstruct_conv4:", x.shape)
#print(x.shape)
#print("------end decoding--------")
return x.permute(0, 2, 3, 1)
def forward(self, traj_i, traj_j):
'''compute cumulative return for each trajectory and return logits'''
cum_r_i, abs_r_i, mu1, var1, z1 = self.cum_return(traj_i)
cum_r_j, abs_r_j, mu2, var2, z2 = self.cum_return(traj_j)
return torch.cat((cum_r_i.unsqueeze(0), cum_r_j.unsqueeze(0)),0), abs_r_i + abs_r_j, z1, z2, mu1, mu2, var1, var2
"""
def reconstruction_loss(decoded, target, mu, logvar):
num_elements = decoded.numel()
target_num_elements = decoded.numel()
if num_elements != target_num_elements:
print("ELEMENT SIZE MISMATCH IN RECONSTRUCTION")
sys.exit()
bce = F.binary_cross_entropy(decoded, target)
kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
kld /= num_elements
#print("bce: " + str(bce) + " kld: " + str(kld))
return bce + kld
# Train the network
def learn_reward(reward_network, optimizer, training_inputs, training_outputs, training_times, training_actions, num_iter, l1_reg, checkpoint_dir):
#check if gpu available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Assume that we are on a CUDA machine, then this should print a CUDA device:
print(device)
loss_criterion = nn.CrossEntropyLoss()
temporal_difference_loss = nn.MSELoss()
inverse_dynamics_loss = nn.CrossEntropyLoss()
forward_dynamics_loss = nn.MSELoss()
cum_loss = 0.0
training_data = list(zip(training_inputs, training_outputs, training_times, training_actions))
for epoch in range(num_iter):
np.random.shuffle(training_data)
training_obs, training_labels, training_times_sub, training_actions_sub = zip(*training_data)
validation_split = 0.9
for i in range(len(training_labels)):
traj_i, traj_j = training_obs[i]
labels = np.array([training_labels[i]])
times_i, times_j = training_times_sub[i]
actions_i, actions_j = training_actions_sub[i]
traj_i = np.array(traj_i)
traj_j = np.array(traj_j)
traj_i = torch.from_numpy(traj_i).float().to(device)
traj_j = torch.from_numpy(traj_j).float().to(device)
labels = torch.from_numpy(labels).to(device)
num_frames = len(traj_i)
#zero out gradient
optimizer.zero_grad()
#forward + backward + optimize
outputs, abs_rewards, z1, z2, mu1, mu2, logvar1, logvar2 = reward_network.forward(traj_i, traj_j)
outputs = outputs.unsqueeze(0)
decoded1 = reward_network.decode(z1)
#print("DECODED SHAPE:")
#print(decoded1.shape)
#print(decoded1.type())
#print("TRAJ_I SHAPE:")
#print(traj_i.shape)
#print(traj_i.type())
decoded2 = reward_network.decode(z2)
reconstruction_loss_1 = 10*reconstruction_loss(decoded1, traj_i, mu1, logvar1)
reconstruction_loss_2 = 10*reconstruction_loss(decoded2, traj_j, mu2, logvar2)
t1_i = np.random.randint(0, len(times_i))
t2_i = np.random.randint(0, len(times_i))
t1_j = np.random.randint(0, len(times_j))
t2_j = np.random.randint(0, len(times_j))
est_dt_i | |
<filename>cw/bassplayer.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import struct
import ctypes
import threading
from ctypes import c_int, c_uint8, c_uint16, c_uint32, c_uint64, c_float, c_void_p, c_char_p
import cw
from cw.util import synclock
# Keep these aliases in sync with the typedefs in the BASS C headers.
c_BYTE = c_uint8
c_WORD = c_uint16
c_DWORD = c_uint32
c_QWORD = c_uint64
c_BOOL = c_int
# BASS handle types are all DWORD-sized opaque identifiers.
c_HMUSIC = c_DWORD
c_HSAMPLE = c_DWORD
c_HCHANNEL = c_DWORD
c_HSTREAM = c_DWORD
c_HSYNC = c_DWORD
c_HPLUGIN = c_DWORD
c_HSOUNDFONT = c_DWORD
class BASS_CHANNELINFO(ctypes.Structure):
    # ctypes mirror of the BASS_CHANNELINFO struct filled in by
    # BASS_ChannelGetInfo (sample rate, channel count, flags, type, ...).
    _fields_ = [("freq", c_DWORD),
                ("chans", c_DWORD),
                ("flags", c_DWORD),
                ("ctype", c_DWORD),
                ("origres", c_DWORD),
                ("plugin", c_HPLUGIN),
                ("sample", c_HSAMPLE),
                ("filename", c_char_p)]
# --- BASS API constants (values from the BASS C headers) ---------------
BASS_DEVICE_DEFAULT = 0
BASS_DEVICE_8BITS = 1
BASS_DEVICE_MONO = 2
BASS_DEVICE_3D = 4
BASS_DEFAULT = 0
BASS_SAMPLE_LOOP = 4
BASS_MUSIC_RAMP = 0x200
BASS_MUSIC_RAMPS = 0x400
BASS_MUSIC_POSRESET = 0x8000
BASS_MUSIC_PRESCAN = 0x20000
BASS_MUSIC_STOPBACK = 0x80000
BASS_FILEPOS_CURRENT = 0
BASS_FILEPOS_END = 2
MIDI_EVENT_CONTROL = 64
BASS_SYNC_POS = 0
BASS_SYNC_END = 2
BASS_SYNC_SLIDE = 5
BASS_SYNC_MUSICPOS = 10
BASS_SYNC_MIXTIME = 0x40000000
BASS_POS_BYTE = 0
MIDI_EVENT_END = 0
MIDI_EVENT_END_TRACK = 0x10003
BASS_TAG_OGG = 2
BASS_TAG_RIFF_INFO = 0x100
BASS_TAG_RIFF_BEXT = 0x101
BASS_TAG_RIFF_CART = 0x102
BASS_TAG_RIFF_DISP = 0x103
BASS_SAMPLE_8BITS = 1
BASS_SAMPLE_FLOAT = 256
BASS_ACTIVE_STOPPED = 0
BASS_ATTRIB_TEMPO = 0x10000
BASS_ATTRIB_TEMPO_PITCH = 0x10001
BASS_ATTRIB_TEMPO_FREQ = 0x10002
BASS_ATTRIB_FREQ = 1
BASS_ATTRIB_VOL = 2
BASS_ATTRIB_PAN = 3
BASS_ATTRIB_EAXMIX = 4
BASS_ATTRIB_MUSIC_AMPLIFY = 0x100
BASS_ATTRIB_MUSIC_PANSEP = 0x101
BASS_ATTRIB_MUSIC_PSCALER = 0x102
BASS_ATTRIB_MUSIC_BPM = 0x103
BASS_ATTRIB_MUSIC_SPEED = 0x104
BASS_ATTRIB_MUSIC_VOL_GLOBAL = 0x105
BASS_ATTRIB_MUSIC_VOL_CHAN = 0x200 # + channel No.
BASS_ATTRIB_MUSIC_VOL_INST = 0x300 # + instrument No.
BASS_STREAM_DECODE = 0x200000
BASS_FX_FREESOURCE = 0x10000
# --- Application-level stream layout ------------------------------------
MAX_BGM_CHANNELS = 2
MAX_SOUND_CHANNELS = 2
STREAM_BGM = 0 # 0~1
STREAM_SOUND1 = 2 # 2~3
STREAM_SOUND2 = 4 # 4
CC111 = 111  # MIDI controller used by RPG Maker as a loop-start marker
# --- Module state --------------------------------------------------------
_bass = None        # handle to the loaded BASS library (None until init_bass)
_bassmidi = None    # handle to BASSMIDI
_bassfx = None      # handle to BASS FX
_sfonts = []        # packed sound-font config (see init_bass)
_streams = [0, 0, 0, 0, 0]            # stream handle per channel slot
_fadeoutstreams = [None, None, None, None, None]  # (handle, loops, pos) while fading out
_loopstarts = [0, 0, 0, 0, 0]         # loop start position (bytes) per slot
_loopcounts = [0, 1, 1, 1, 1]         # remaining loop count per slot (0 = forever)
_lock = threading.Lock()
_fadeoutlock = threading.Lock()
# SYNCPROC: signature of BASS sync callbacks (stdcall on Windows, cdecl elsewhere).
if sys.platform == "win32":
    SYNCPROC = ctypes.WINFUNCTYPE(None, c_HSYNC, c_DWORD, c_DWORD, c_void_p)
else:
    SYNCPROC = ctypes.CFUNCTYPE(None, c_HSYNC, c_DWORD, c_DWORD, c_void_p)
def _cc111loop(handle, channel, data, streamindex):
    """Seek back to the CC#111 loop position and resume playback.

    BASS SYNCPROC callback; ``streamindex`` arrives as the user data
    pointer and may be NULL (None), which maps to slot 0.
    """
    if streamindex is None:
        streamindex = 0
    _loop(handle, channel, data, streamindex)
CC111LOOP = SYNCPROC(_cc111loop)
def _free_channel(handle, channel, data, streamindex):
    """Stop and free a channel, clearing its fade-out slot (thread-safe).

    BASS SYNCPROC callback; the fade-out bookkeeping update is done
    under _fadeoutlock via the synclock-decorated inner function.
    """
    global _bass, _fadeoutstreams
    _bass.BASS_ChannelStop(channel)
    _bass.BASS_StreamFree(channel)
    if streamindex is None:
        streamindex = 0
    @synclock(_fadeoutlock)
    def func(streamindex):
        _fadeoutstreams[streamindex] = None
    func(streamindex)
def _free_channel_lockfree(handle, channel, data, streamindex):
    """Like _free_channel, but without taking _fadeoutlock.

    For callers that already hold the lock (avoids self-deadlock).
    """
    global _bass, _fadeoutstreams
    _bass.BASS_ChannelStop(channel)
    _bass.BASS_StreamFree(channel)
    if streamindex is None:
        streamindex = 0
    _fadeoutstreams[streamindex] = None
FREE_CHANNEL = SYNCPROC(_free_channel)
@synclock(_lock)
def _loop(handle, channel, data, streamindex):
    """Rewind *channel* to its loop start while loops remain.

    Loop state comes from the fade-out bookkeeping when this channel is
    currently fading out, otherwise from the per-slot module state.
    A loop count of 0 means loop forever; 1 means no further looping.
    """
    global _bass, _loopcounts, _loopstarts, _fadeoutstreams
    fadeouting = _fadeoutstreams[streamindex] and _fadeoutstreams[streamindex][0] == channel
    if fadeouting:
        # This channel is currently fading out.
        loops = _fadeoutstreams[streamindex][1]
        pos = _fadeoutstreams[streamindex][2]
    else:
        loops = _loopcounts[streamindex]
        pos = _loopstarts[streamindex]
    if loops <> 1:
        if 0 < loops:
            # Finite loop count: decrement it before rewinding.
            if fadeouting:
                _fadeoutstreams[streamindex] = (channel, loops - 1, pos)
            else:
                _loopcounts[streamindex] = loops - 1
        _bass.BASS_ChannelSetPosition(channel, pos, BASS_POS_BYTE)
def is_alivable():
    """Return True when playback through BASS Audio is possible.

    Always returns False before init_bass() has been run.
    """
    # Bugfix: the original placed this docstring *after* a ``global``
    # statement, turning it into a no-op expression instead of a
    # docstring; it also used the non-idiomatic ``not _bass is None``.
    return _bass is not None
def is_alivablemidi():
    """Return a truthy value when MIDI playback is possible
    (BASSMIDI loaded and at least one sound font configured)."""
    return _bassmidi and _sfonts
def is_alivablewithpath(path):
    """Return truthy when the file at *path* can be played:
    MIDI files require BASSMIDI + sound fonts, anything else only BASS."""
    if cw.util.is_midi(path):
        return is_alivablemidi()
    return is_alivable()
def init_bass(soundfonts):
    """Load the BASS Audio DLLs and initialise them for playback.

    Returns True if initialisation succeeded, False otherwise.
    soundfonts: list of (sound font file path, volume) pairs.
    """
    global _bass, _bassmidi, _bassfx, _sfonts, _streams, _loopstarts, _loopcounts
    if _bass:
        # Already initialised.
        return True
    try:
        # Pick the library binaries per platform (and per word size on Linux).
        if sys.platform == "win32":
            _bass = ctypes.windll.LoadLibrary("bass.dll")
            _bassmidi = ctypes.windll.LoadLibrary("bassmidi.dll")
            _bassfx = ctypes.windll.LoadLibrary("bass_fx.dll")
        elif sys.platform == "darwin":
            if ('RESOURCEPATH' in os.environ and
                    os.path.exists(
                        os.path.join(os.environ['RESOURCEPATH'], 'libbass.dylib'))):
                _bass = ctypes.CDLL(
                    os.path.join(os.environ['RESOURCEPATH'], "libbass.dylib"),
                    mode=ctypes.RTLD_GLOBAL)
                _bassmidi = ctypes.CDLL(
                    os.path.join(os.environ['RESOURCEPATH'],
                                 "libbassmidi.dylib"))
                _bassfx = ctypes.CDLL(
                    os.path.join(os.environ['RESOURCEPATH'],
                                 "libbass_fx.dylib"))
            else:
                _bass = ctypes.CDLL(
                    "./lib/libbass.dylib", mode=ctypes.RTLD_GLOBAL)
                _bassmidi = ctypes.CDLL(
                    "./lib/libbassmidi.dylib")
                _bassfx = ctypes.CDLL(
                    "./lib/libbass_fx.dylib")
        else:
            if sys.maxsize == 0x7fffffff:
                _bass = ctypes.CDLL("./lib/libbass32.so", mode=ctypes.RTLD_GLOBAL)
                _bassmidi = ctypes.CDLL("./lib/libbassmidi32.so")
                _bassfx = ctypes.CDLL("./lib/libbass_fx32.so")
            elif sys.maxsize == 0x7fffffffffffffff:
                _bass = ctypes.CDLL("./lib/libbass64.so", mode=ctypes.RTLD_GLOBAL)
                _bassmidi = ctypes.CDLL("./lib/libbassmidi64.so")
                _bassfx = ctypes.CDLL("./lib/libbass_fx64.so")
    except Exception:
        cw.util.print_ex()
    if not _bass:
        return False
    # Declare argtypes and restype for every function we use.
    _bass.BASS_Init.argtypes = [ c_int, c_DWORD, c_DWORD, c_void_p, c_void_p ]
    _bass.BASS_Init.restype = c_BOOL
    _bass.BASS_Free.argtypes = []
    _bass.BASS_Free.restype = c_BOOL
    _bass.BASS_StreamCreateFile.argtypes = [ c_BOOL, c_char_p, c_QWORD, c_QWORD, c_DWORD ]
    _bass.BASS_StreamCreateFile.restype = c_HSTREAM
    _bass.BASS_StreamFree.argtypes = [ c_HSTREAM ]
    _bass.BASS_StreamFree.restype = c_BOOL
    _bass.BASS_ChannelIsActive.argtypes = [ c_DWORD ]
    _bass.BASS_ChannelIsActive.restype = c_DWORD
    _bass.BASS_ChannelGetInfo.argtypes = [ c_DWORD, ctypes.POINTER(BASS_CHANNELINFO) ]
    _bass.BASS_ChannelGetInfo.restype = c_BOOL
    _bass.BASS_ChannelGetTags.argtypes = [ c_DWORD, c_DWORD ]
    _bass.BASS_ChannelGetTags.restype = c_void_p
    _bass.BASS_ChannelPlay.argtypes = [ c_DWORD, c_BOOL ]
    _bass.BASS_ChannelPlay.restype = c_BOOL
    _bass.BASS_ChannelStop.argtypes = [ c_DWORD ]
    _bass.BASS_ChannelStop.restype = c_BOOL
    _bass.BASS_ChannelPause.argtypes = [ c_DWORD ]
    _bass.BASS_ChannelPause.restype = c_BOOL
    _bass.BASS_ChannelSetAttribute.argtypes = [ c_DWORD, c_DWORD, c_float ]
    _bass.BASS_ChannelSetAttribute.restype = c_BOOL
    _bass.BASS_ChannelGetAttribute.argtypes = [ c_DWORD, c_DWORD, ctypes.POINTER(c_float) ]
    _bass.BASS_ChannelGetAttribute.restype = c_BOOL
    _bass.BASS_ChannelSlideAttribute.argtypes = [ c_DWORD, c_DWORD, c_float, c_DWORD ]
    _bass.BASS_ChannelSlideAttribute.restype = c_BOOL
    _bass.BASS_ChannelSetPosition.argtypes = [ c_DWORD, c_QWORD, c_DWORD ]
    _bass.BASS_ChannelSetPosition.restype = c_BOOL
    _bass.BASS_ChannelSetSync.argtypes = [ c_DWORD, c_DWORD, c_QWORD, SYNCPROC, c_void_p ]
    _bass.BASS_ChannelSetSync.restype = c_HSYNC
    _bassmidi.BASS_MIDI_FontInit.argtypes = [ c_char_p, c_DWORD ]
    _bassmidi.BASS_MIDI_FontInit.restype = c_HSOUNDFONT
    _bassmidi.BASS_MIDI_FontSetVolume.argtypes = [ c_HSOUNDFONT, c_float ]
    _bassmidi.BASS_MIDI_FontSetVolume.restype = c_BOOL
    _bassmidi.BASS_MIDI_FontFree.argtypes = [ c_HSOUNDFONT ]
    _bassmidi.BASS_MIDI_FontFree.restype = c_BOOL
    _bassmidi.BASS_MIDI_StreamCreateFile.argtypes = [ c_BOOL, c_char_p, c_QWORD, c_QWORD, c_DWORD, c_DWORD ]
    _bassmidi.BASS_MIDI_StreamCreateFile.restype = c_HSTREAM
    _bassmidi.BASS_MIDI_StreamSetFonts.argtypes = [ c_HSTREAM, c_char_p, c_DWORD ]
    _bassmidi.BASS_MIDI_StreamSetFonts.restype = c_BOOL
    _bassmidi.BASS_MIDI_StreamGetEvents.argtypes = [ c_HSTREAM, c_int, c_DWORD, c_void_p ]
    _bassmidi.BASS_MIDI_StreamGetEvents.restype = c_DWORD
    _bassfx.BASS_FX_TempoCreate.argtypes = [ c_DWORD, c_DWORD ]
    _bassfx.BASS_FX_TempoCreate.restype = c_HSTREAM
    if not _bass.BASS_Init(-1, 44100, BASS_DEVICE_DEFAULT, None, None):
        dispose_bass()
        return False
    # Load the sound fonts; _sfonts collects packed BASS_MIDI_FONT records.
    _sfonts = ""
    encoding = sys.getfilesystemencoding()
    if _bassmidi:
        for soundfont, volume in soundfonts:
            sfont = _bassmidi.BASS_MIDI_FontInit(soundfont.encode(encoding), 0)
            if not sfont:
                print "BASS_MIDI_FontInit() failure: %s" % (soundfont)
                return False
            if not _bassmidi.BASS_MIDI_FontSetVolume(sfont, volume):
                print "BASS_MIDI_FontSetVolume() failure: %s, %s" % (soundfont, volume)
                return False
            # (handle, preset=-1, bank=0) packed like the C struct.
            _sfonts += struct.pack("@Iii", sfont, -1, 0)
    if not _sfonts:
        dispose_bass()
        return False
    return True
def change_soundfonts(soundfonts):
    """Replace the currently loaded sound fonts with a new set.

    soundfonts: list of (sound font file path, volume) pairs.
    Returns True on success, False on failure.
    """
    global _bass, _bassmidi, _bassfx, _sfonts, _streams, _loopstarts, _loopcounts
    if _bassmidi:
        # Free each previously loaded font; _sfonts stores packed
        # 3-int (@Iii) records, hence the 4*3-byte stride.
        for i in xrange(0, len(_sfonts), 4*3):
            sfont = struct.unpack("@Iii", _sfonts[i:i+4*3])
            _bassmidi.BASS_MIDI_FontFree(sfont[0])
        _sfonts = ""
        encoding = sys.getfilesystemencoding()
        for soundfont, volume in soundfonts:
            sfont = _bassmidi.BASS_MIDI_FontInit(soundfont.encode(encoding), 0)
            if not sfont:
                print "BASS_MIDI_FontInit() failure: %s" % (soundfont)
                return False
            if not _bassmidi.BASS_MIDI_FontSetVolume(sfont, volume):
                print "BASS_MIDI_FontSetVolume() failure: %s, %s" % (soundfont, volume)
                return False
            _sfonts += struct.pack("@Iii", sfont, -1, 0)
    if not _sfonts:
        return False
    return True
def _play(fpath, volume, loopcount, streamindex, fade, tempo=0, pitch=0):
    """Play *fpath* through BASS Audio and return the stream handle.

    fpath: file to play.
    volume: volume, 0.0-1.0.
    loopcount: number of loops; 0 loops forever.
    streamindex: playback channel slot.
    fade: fade-in time in milliseconds.
    tempo: tempo change, -95% .. +5000%; 0 for unchanged.
    pitch: pitch change, -60 .. +60; 0 for unchanged.
    """
    global _bass, _bassmidi, _bassfx, _sfonts
    encoding = sys.getfilesystemencoding()
    # BUG: with BASS 2.4.13.8, playing e.g. the system "page break" sound
    # while no BGM is playing produced noise and a delayed start.
    # Older BASS versions produced loop noise on some files when
    # BASS_STREAM_DECODE was used, so BASS FX used to be skipped when
    # tempo/pitch were unchanged; BASS 2.4.13.8 no longer has that
    # problem, and skipping FX triggers the issue above instead, so
    # BASS_STREAM_DECODE is now always used.
    # See Also: https://bitbucket.org/k4nagatsuki/cardwirthpy-reboot/issues/459
    FORCE_FX = True
    flag = BASS_MUSIC_STOPBACK|BASS_MUSIC_POSRESET|BASS_MUSIC_PRESCAN
    if tempo <> 0 or pitch <> 0 or FORCE_FX:
        flag |= BASS_STREAM_DECODE
    if cw.cwpy.setting.bassmidi_sample32bit:
        flag |= BASS_SAMPLE_FLOAT
    _BASS_CONFIG_MIDI_DEFFONT = 0x10403
    # Detect MIDI by the "MThd" magic at the start of the file.
    ismidi = False
    if os.path.isfile(fpath) and 4 <= os.path.getsize(fpath):
        with open(fpath, "rb") as f:
            head = f.read(4)
            f.close()
        ismidi = (head == "MThd")
    if ismidi:
        if not is_alivablemidi():
            return
        stream = _bassmidi.BASS_MIDI_StreamCreateFile(False, fpath.encode(encoding), 0, 0, flag, 44100)
        if stream:
            if _sfonts:
                _bassmidi.BASS_MIDI_StreamSetFonts(stream, _sfonts, len(_sfonts) / (4*3))
            else:
                raise ValueError("sound font not found: %s" % (fpath))
        else:
            raise ValueError("_play() failure: %s" % (fpath))
    else:
        stream = _bass.BASS_StreamCreateFile(False, fpath.encode(encoding), 0, 0, flag)
        if not stream:
            raise ValueError("_play() failure: %s" % (fpath))
    if loopcount <> 1:
        loopinfo = _get_loopinfo(fpath, stream)
    else:
        loopinfo = None
    if not loopinfo and ismidi:
        # Look for the RPG Maker loop-position marker (CC#111); when
        # present, loop playback from that position.
        count = _bassmidi.BASS_MIDI_StreamGetEvents(stream, -1, MIDI_EVENT_CONTROL, None)
        if count:
            events = "\0" * (count*4*5)
            count = _bassmidi.BASS_MIDI_StreamGetEvents(stream, -1, MIDI_EVENT_CONTROL, events)
            # Each event record is five 32-bit ints (@iiiii).
            for i in xrange(0, count, 4*5):
                bassMidiEvent = struct.unpack("@iiiii", events[i:i+4*5])
                _event = bassMidiEvent[0] # unused
                param = bassMidiEvent[1]
                _chan = bassMidiEvent[2] # unused
                _tick = bassMidiEvent[3] # unused
                pos = bassMidiEvent[4]
                if (param & 0x00ff) == CC111: # CC#111 found: loop here
                    loopinfo = (pos, -1)
                    break
    if tempo <> 0 or pitch <> 0 or FORCE_FX:
        # Wrap the decode stream in a BASS FX tempo stream.
        stream = _bassfx.BASS_FX_TempoCreate(stream, BASS_FX_FREESOURCE)
    _loopcounts[streamindex] = loopcount
    if loopinfo:
        loopstart, loopend = loopinfo
        _loopstarts[streamindex] = loopstart
        if 0 <= loopend:
            _bass.BASS_ChannelSetSync(stream, BASS_SYNC_POS|BASS_SYNC_MIXTIME, loopend, CC111LOOP, streamindex)
        else:
            _bass.BASS_ChannelSetSync(stream, BASS_SYNC_END|BASS_SYNC_MIXTIME, 0, CC111LOOP, streamindex)
    else:
        _loopstarts[streamindex] = 0
        # NOTE(review): this call wraps streamindex in c_void_p while the
        # two calls above pass it raw — confirm ctypes converts both the
        # same way for the SYNCPROC user argument.
        _bass.BASS_ChannelSetSync(stream, BASS_SYNC_END|BASS_SYNC_MIXTIME, 0, CC111LOOP, c_void_p(streamindex))
    if tempo <> 0:
        _bass.BASS_ChannelSetAttribute(stream, BASS_ATTRIB_TEMPO, tempo) # -95%...0...+5000%
    if pitch <> 0:
        _bass.BASS_ChannelSetAttribute(stream, BASS_ATTRIB_TEMPO_PITCH, pitch) # -60...0...+60
    if 0 < fade:
        # Start silent and slide the volume up over *fade* milliseconds.
        _bass.BASS_ChannelSetAttribute(stream, BASS_ATTRIB_VOL, 0)
        _bass.BASS_ChannelSlideAttribute(stream, BASS_ATTRIB_VOL, volume, fade)
    else:
        _bass.BASS_ChannelSetAttribute(stream, BASS_ATTRIB_VOL, volume)
    _bass.BASS_ChannelPlay(stream, False)
    return stream
def _get_attribute(stream, flag):
    """Return the current value of a BASS channel attribute as a float."""
    value = c_float()
    _bass.BASS_ChannelGetAttribute(stream, flag, ctypes.byref(value))
    return value.value
def _get_loopinfo(fpath, stream):
"""吉里吉里もしくはRPGツクール形式の
ループ情報が存在すれば取得して返す。
"""
global _bass
info = BASS_CHANNELINFO()
if not _bass.BASS_ChannelGetInfo(stream, ctypes.byref(info)):
return None
sampperbytes = 44100.0 / info.freq
samptobytes = info.chans
if info.flags & BASS_SAMPLE_FLOAT:
samptobytes *= 4
elif info.flags & BASS_SAMPLE_8BITS:
samptobytes *= 1
else:
samptobytes *= 2
# *.sliファイル
sli = fpath + ".sli"
if | |
<gh_stars>1-10
"""
Routines to estimate reconstruction efficiency:
- :class:`MeshFFTCorrelation`: correlation
- :class:`MeshFFTTransfer`: transfer
- :class:`MeshFFTPropagator`: propagator
This requires the following packages:
- pmesh
- pypower, see https://github.com/adematti/pypower
"""
import os
import numpy as np
from scipy.interpolate import UnivariateSpline, RectBivariateSpline
from pypower import MeshFFTPower, CatalogMesh, ParticleMesh, ArrayMesh, PowerSpectrumWedges
from .utils import BaseClass
from . import utils
class BasePowerRatio(BaseClass):
    """
    Base template class to compute power ratios.
    Specific statistic should extend this class.
    """
    # Names of the binned coordinates (wavenumber and line-of-sight cosine).
    _coords_names = ['k', 'mu']
    # Attribute names of the two power spectrum measurements forming the ratio.
    _result_names = ['num', 'denom']
    # Column label(s) used for the ratio when exporting to text.
    _power_names = ['ratio']
    # Extra plain attributes carried through (de)serialization; none by default.
    _attrs = []
    def get_ratio(self, complex=False, **kwargs):
        """
        Return power spectrum ratio, computed using various options.
        Parameters
        ----------
        complex : bool, default=False
            Whether (``True``) to return the ratio of complex power spectra,
            or (``False``) return the ratio of their real part only.
        kwargs : dict
            Optionally, arguments for :meth:`BasePowerSpectrumStatistics.get_power`.
        Returns
        -------
        ratio : array
        """
        # Empty bins give 0/0; silence numpy warnings and let nan propagate.
        with np.errstate(divide='ignore', invalid='ignore'):
            return self.num.get_power(complex=complex, **kwargs) / self.denom.get_power(complex=complex, **kwargs)
    @property
    def ratio(self):
        """Power spectrum ratio."""
        return self.get_ratio()
    def __call__(self, k=None, mu=None, return_k=False, return_mu=False, complex=False, **kwargs):
        r"""
        Return power spectrum ratio, optionally performing linear interpolation over :math:`k` and :math:`\mu`.
        Parameters
        ----------
        k : float, array, default=None
            :math:`k` where to interpolate the power spectrum.
            Values outside :attr:`kavg` are set to the first/last ratio value;
            outside :attr:`edges[0]` to nan.
            Defaults to :attr:`kavg`.
        mu : float, array, default=None
            :math:`\mu` where to interpolate the power spectrum.
            Values outside :attr:`muavg` are set to the first/last ratio value;
            outside :attr:`edges[1]` to nan.
            Defaults to :attr:`muavg`.
        return_k : bool, default=False
            Whether (``True``) to return :math:`k`-modes (see ``k``).
            If ``None``, return :math:`k`-modes if ``k`` is ``None``.
        return_mu : bool, default=False
            Whether (``True``) to return :math:`\mu`-modes (see ``mu``).
            If ``None``, return :math:`\mu`-modes if ``mu`` is ``None``.
        complex : bool, default=False
            Whether (``True``) to return the ratio of complex power spectra,
            or (``False``) return the ratio of their real part only.
        kwargs : dict
            Other arguments for :meth:`get_ratio`.
        Returns
        -------
        k : array
            Optionally, :math:`k`-modes.
        mu : array
            Optionally, :math:`\mu`-modes.
        ratio : array
            (Optionally interpolated) power spectrum ratio.
        """
        power = self.ratio
        kavg, muavg = self.kavg, self.muavg
        if return_k is None:
            return_k = k is None
        if return_mu is None:
            return_mu = mu is None
        # Fast path: no interpolation requested, return the binned ratio as-is.
        if k is None and mu is None:
            if return_k:
                if return_mu:
                    return kavg, muavg, power
                return kavg, power
            return power
        if k is None: k = kavg
        if mu is None: mu = muavg
        # Drop bins with nan average coordinates (empty bins) before interpolating.
        mask_finite_k, mask_finite_mu = ~np.isnan(kavg), ~np.isnan(muavg)
        kavg, muavg, power = kavg[mask_finite_k], muavg[mask_finite_mu], power[np.ix_(mask_finite_k, mask_finite_mu)]
        k, mu = np.asarray(k), np.asarray(mu)
        toret_shape = k.shape + mu.shape
        k, mu = k.ravel(), mu.ravel()
        # Requested points outside the measured edges stay nan.
        toret = np.nan * np.zeros((k.size, mu.size), dtype=power.dtype)
        mask_k = (k >= self.edges[0][0]) & (k <= self.edges[0][-1])
        mask_mu = (mu >= self.edges[1][0]) & (mu <= self.edges[1][-1])
        k, mu = k[mask_k], mu[mask_mu]
        if mask_k.any() and mask_mu.any():
            if muavg.size == 1:
                # Single mu-wedge: 1D linear interpolation along k only.
                def interp(array):
                    return UnivariateSpline(kavg, array, k=1, s=0, ext='const')(k)[:, None]
            else:
                # RectBivariateSpline needs sorted sample points; sort, then
                # undo the sort with the inverse permutations afterwards.
                i_k = np.argsort(k); ii_k = np.argsort(i_k)
                i_mu = np.argsort(mu); ii_mu = np.argsort(i_mu)
                def interp(array):
                    return RectBivariateSpline(kavg, muavg, array, kx=1, ky=1, s=0)(k[i_k], mu[i_mu], grid=True)[np.ix_(ii_k, ii_mu)]
            # Interpolate the real and imaginary parts separately.
            toret[np.ix_(mask_k, mask_mu)] = interp(power.real)
            if complex and np.iscomplexobj(power):
                toret[np.ix_(mask_k, mask_mu)] += 1j * interp(power.imag)
        toret.shape = toret_shape
        if return_k:
            if return_mu:
                return k, mu, toret
            return k, toret
        return toret
    def __copy__(self):
        # Copy self, making the stored measurements independent copies too.
        new = super(BasePowerRatio, self).__copy__()
        for name in self._result_names:
            setattr(new, name, getattr(self, name).__copy__())
        return new
    def __getstate__(self):
        """Return this class state dictionary."""
        state = {}
        for name in self._result_names:
            if hasattr(self, name):
                state[name] = getattr(self, name).__getstate__()
        for name in self._attrs:
            if hasattr(self, name):
                state[name] = getattr(self, name)
        return state
    def __setstate__(self, state):
        """Set this class state."""
        self.__dict__.update(state)
        # Rebuild the stored measurements as PowerSpectrumWedges instances.
        for name in self._result_names:
            if name in state:
                setattr(self, name, PowerSpectrumWedges.from_state(state[name]))
    @classmethod
    def from_state(cls, state):
        """Instantiate a new instance from a state dictionary (see :meth:`__getstate__`)."""
        new = cls.__new__(cls)
        new.__setstate__(state)
        return new
    def save(self, filename):
        """Save to ``filename``."""
        # Only the root rank writes to disk.
        if not self.with_mpi or self.mpicomm.rank == 0:
            self.log_info('Saving {}.'.format(filename))
            utils.mkdir(os.path.dirname(filename))
            np.save(filename, self.__getstate__(), allow_pickle=True)
        # if self.with_mpi:
        #     self.mpicomm.Barrier()
    @classmethod
    def load(cls, filename):
        """Load an instance previously written by :meth:`save` from ``filename``."""
        cls.log_info('Loading {}.'.format(filename))
        state = np.load(filename, allow_pickle=True)[()]
        new = cls.from_state(state)
        return new
    def save_txt(self, filename, fmt='%.12e', delimiter=' ', header=None, comments='# ', **kwargs):
        """
        Save power spectrum ratio as txt file.
        Warning
        -------
        Attributes are not all saved, hence there is no :meth:`load_txt` method.
        Parameters
        ----------
        filename : str
            File name.
        fmt : str, default='%.12e'
            Format for floating types.
        delimiter : str, default=' '
            String or character separating columns.
        header : str, list, default=None
            String that will be written at the beginning of the file.
            If multiple lines, provide a list of one-line strings.
        comments : str, default=' #'
            String that will be prepended to the header string.
        kwargs : dict
            Arguments for :meth:`get_power`.
        """
        # Only the root rank writes to disk.
        if not self.with_mpi or self.mpicomm.rank == 0:
            self.log_info('Saving {}.'.format(filename))
            utils.mkdir(os.path.dirname(filename))
            formatter = {'int_kind': lambda x: '%d' % x, 'float_kind': lambda x: fmt % x}
            # Complex values are written as "<real><+/-imag>j" with an explicit sign.
            def complex_kind(x):
                imag = fmt % x.imag
                if imag[0] not in ['+', '-']: imag = '+' + imag
                return '{}{}j'.format(fmt % x.real, imag)
            formatter['complex_kind'] = complex_kind
            if header is None: header = []
            elif isinstance(header, str): header = [header]
            else: header = list(header)
            # Record mesh/line-of-sight metadata in the header, when available.
            for name in ['los_type', 'los', 'nmesh', 'boxsize', 'boxcenter']:
                value = self.attrs.get(name, getattr(self, name, None))
                if value is None:
                    value = 'None'
                elif any(name.startswith(key) for key in ['los_type']):
                    value = str(value)
                else:
                    value = np.array2string(np.array(value), separator=delimiter, formatter=formatter).replace('\n', '')
                header.append('{} = {}'.format(name, value))
            labels = ['nmodes']
            assert len(self._coords_names) == self.ndim
            for name in self._coords_names:
                labels += ['{}mid'.format(name), '{}avg'.format(name)]
            labels += self._power_names
            power = self.get_ratio(**kwargs)
            # Columns: mode counts, bin mid-points and averaged modes per axis, ratio value(s).
            columns = [self.nmodes.flat]
            mids = np.meshgrid(*[(edges[:-1] + edges[1:]) / 2. for edges in self.edges], indexing='ij')
            for idim in range(self.ndim):
                columns += [mids[idim].flat, self.modes[idim].flat]
            for column in power.reshape((-1,) * (power.ndim == self.ndim) + power.shape):
                columns += [column.flat]
            columns = [[np.array2string(value, formatter=formatter) for value in column] for column in columns]
            # Column widths for aligned output; first column absorbs the comment prefix.
            widths = [max(max(map(len, column)) - len(comments) * (icol == 0), len(label)) for icol, (column, label) in enumerate(zip(columns, labels))]
            widths[-1] = 0 # no need to leave a space
            header.append((' ' * len(delimiter)).join(['{:<{width}}'.format(label, width=width) for label, width in zip(labels, widths)]))
            widths[0] += len(comments)
            with open(filename, 'w') as file:
                for line in header:
                    file.write(comments + line + '\n')
                for irow in range(len(columns[0])):
                    file.write(delimiter.join(['{:<{width}}'.format(column[irow], width=width) for column, width in zip(columns, widths)]) + '\n')
        if self.with_mpi:
            self.mpicomm.Barrier()
    def __getitem__(self, slices):
        """Call :meth:`slice`."""
        new = self.copy()
        if isinstance(slices, tuple):
            new.slice(*slices)
        else:
            new.slice(slices)
        return new
    def select(self, *xlims):
        """
        Restrict statistic to provided coordinate limits in place.
        For example:
        .. code-block:: python
            statistic.select((0, 0.3)) # restrict first axis to (0, 0.3)
            statistic.select(None, (0, 0.2)) # restrict second axis to (0, 0.2)
        """
        for name in self._result_names:
            getattr(self, name).select(*xlims)
    def slice(self, *slices):
        """
        Slice statistics in place. If slice step is not 1, use :meth:`rebin`.
        For example:
        .. code-block:: python
            statistic.slice(slice(0, 10, 2), slice(0, 6, 3)) # rebin by factor 2 (resp. 3) along axis 0 (resp. 1), up to index 10 (resp. 6)
            statistic[:10:2,:6:3] # same as above, but return new instance.
        """
        for name in self._result_names:
            getattr(self, name).slice(*slices)
    def rebin(self, factor=1):
        """
        Rebin statistic, by factor(s) ``factor``.
        A tuple must be provided in case :attr:`ndim` is greater than 1.
        Input factors must divide :attr:`shape`.
        """
        for name in self._result_names:
            getattr(self, name).rebin(factor=factor)
def _make_property(name):
@property
def func(self):
return getattr(self.num, name)
return func
# Expose common binning/metadata attributes of the numerator measurement
# (edges, shape, modes, MPI info, ...) directly on BasePowerRatio.
for name in ['edges', 'shape', 'ndim', 'nmodes', 'modes', 'k', 'mu', 'kavg', 'muavg', 'with_mpi', 'mpicomm', 'attrs']:
    setattr(BasePowerRatio, name, _make_property(name))
# Borrow the mode-averaging helper from PowerSpectrumWedges.
BasePowerRatio.modeavg = PowerSpectrumWedges.modeavg
class MeshFFTCorrelator(BasePowerRatio):
r"""
Estimate correlation between two meshes (reconstructed and initial fields), i.e.:
.. math::
r(k) = \frac{P_{\mathrm{rec},\mathrm{init}}}{\sqrt{P_{\mathrm{rec}}P_{\mathrm{init}}}}
"""
_result_names = ['num', 'auto_reconstructed', 'auto_initial']
_power_names = ['correlator']
def __init__(self, mesh_reconstructed, mesh_initial, edges=None, los=None, compensations=None):
r"""
        Initialize :class:`MeshFFTCorrelator`.
Parameters
----------
mesh_reconstructed : CatalogMesh, RealField
Mesh with reconstructed density field.
If ``RealField``, should be :math:`1 + \delta` or :math:`\bar{n} (1 + \delta)`.
mesh_initial : CatalogMesh, RealField
Mesh with initial density field (before structure formation).
If ``RealField``, should be :math:`1 + \delta` or :math:`\bar{n} (1 + \delta)`.
edges : tuple, array, default=None
:math:`k`-edges for :attr:`poles`.
One can also provide :math:`\mu-edges` (hence a tuple ``(kedges, muedges)``) for :attr:`wedges`.
``kedges`` may be a dictionary, with keys 'min' (minimum :math:`k`, defaults to 0), 'max' (maximum :math:`k`, defaults to ``np.pi/(boxsize/nmesh)``),
'step' (if | |
= Constraint(expr=-m.x2034*m.x1720 + m.x734 == 0)
# Machine-generated model fragment (appears to be a GAMS/AMPL-to-Pyomo
# conversion — TODO confirm). Each constraint pins an auxiliary variable to a
# bilinear product: m.x<C> == m.x<A> * m.x<B>.
m.c753 = Constraint(expr=-m.x2035*m.x1720 + m.x735 == 0)
m.c754 = Constraint(expr=-m.x2036*m.x1720 + m.x736 == 0)
m.c755 = Constraint(expr=-m.x2034*m.x1721 + m.x737 == 0)
m.c756 = Constraint(expr=-m.x2035*m.x1721 + m.x738 == 0)
m.c757 = Constraint(expr=-m.x2036*m.x1721 + m.x739 == 0)
m.c758 = Constraint(expr=-m.x2034*m.x1722 + m.x740 == 0)
m.c759 = Constraint(expr=-m.x2035*m.x1722 + m.x741 == 0)
m.c760 = Constraint(expr=-m.x2036*m.x1722 + m.x742 == 0)
m.c761 = Constraint(expr=-m.x2034*m.x1723 + m.x743 == 0)
m.c762 = Constraint(expr=-m.x2035*m.x1723 + m.x744 == 0)
m.c763 = Constraint(expr=-m.x2036*m.x1723 + m.x745 == 0)
m.c764 = Constraint(expr=-m.x2034*m.x1724 + m.x746 == 0)
m.c765 = Constraint(expr=-m.x2035*m.x1724 + m.x747 == 0)
m.c766 = Constraint(expr=-m.x2036*m.x1724 + m.x748 == 0)
m.c767 = Constraint(expr=-m.x2034*m.x1725 + m.x749 == 0)
m.c768 = Constraint(expr=-m.x2035*m.x1725 + m.x750 == 0)
m.c769 = Constraint(expr=-m.x2036*m.x1725 + m.x751 == 0)
m.c770 = Constraint(expr=-m.x2034*m.x1726 + m.x752 == 0)
m.c771 = Constraint(expr=-m.x2035*m.x1726 + m.x753 == 0)
m.c772 = Constraint(expr=-m.x2036*m.x1726 + m.x754 == 0)
m.c773 = Constraint(expr=-m.x2037*m.x1727 + m.x755 == 0)
m.c774 = Constraint(expr=-m.x2038*m.x1727 + m.x756 == 0)
m.c775 = Constraint(expr=-m.x2039*m.x1727 + m.x757 == 0)
m.c776 = Constraint(expr=-m.x2037*m.x1728 + m.x758 == 0)
m.c777 = Constraint(expr=-m.x2038*m.x1728 + m.x759 == 0)
m.c778 = Constraint(expr=-m.x2039*m.x1728 + m.x760 == 0)
m.c779 = Constraint(expr=-m.x2037*m.x1729 + m.x761 == 0)
m.c780 = Constraint(expr=-m.x2038*m.x1729 + m.x762 == 0)
m.c781 = Constraint(expr=-m.x2039*m.x1729 + m.x763 == 0)
m.c782 = Constraint(expr=-m.x2037*m.x1730 + m.x764 == 0)
m.c783 = Constraint(expr=-m.x2038*m.x1730 + m.x765 == 0)
m.c784 = Constraint(expr=-m.x2039*m.x1730 + m.x766 == 0)
m.c785 = Constraint(expr=-m.x2037*m.x1731 + m.x767 == 0)
m.c786 = Constraint(expr=-m.x2038*m.x1731 + m.x768 == 0)
m.c787 = Constraint(expr=-m.x2039*m.x1731 + m.x769 == 0)
m.c788 = Constraint(expr=-m.x2037*m.x1732 + m.x770 == 0)
m.c789 = Constraint(expr=-m.x2038*m.x1732 + m.x771 == 0)
m.c790 = Constraint(expr=-m.x2039*m.x1732 + m.x772 == 0)
m.c791 = Constraint(expr=-m.x2037*m.x1733 + m.x773 == 0)
m.c792 = Constraint(expr=-m.x2038*m.x1733 + m.x774 == 0)
m.c793 = Constraint(expr=-m.x2039*m.x1733 + m.x775 == 0)
m.c794 = Constraint(expr=-m.x2037*m.x1734 + m.x776 == 0)
m.c795 = Constraint(expr=-m.x2038*m.x1734 + m.x777 == 0)
m.c796 = Constraint(expr=-m.x2039*m.x1734 + m.x778 == 0)
m.c797 = Constraint(expr=-m.x2037*m.x1735 + m.x779 == 0)
m.c798 = Constraint(expr=-m.x2038*m.x1735 + m.x780 == 0)
m.c799 = Constraint(expr=-m.x2039*m.x1735 + m.x781 == 0)
m.c800 = Constraint(expr=-m.x2037*m.x1736 + m.x782 == 0)
m.c801 = Constraint(expr=-m.x2038*m.x1736 + m.x783 == 0)
m.c802 = Constraint(expr=-m.x2039*m.x1736 + m.x784 == 0)
m.c803 = Constraint(expr=-m.x2037*m.x1737 + m.x785 == 0)
m.c804 = Constraint(expr=-m.x2038*m.x1737 + m.x786 == 0)
m.c805 = Constraint(expr=-m.x2039*m.x1737 + m.x787 == 0)
m.c806 = Constraint(expr=-m.x2037*m.x1738 + m.x788 == 0)
m.c807 = Constraint(expr=-m.x2038*m.x1738 + m.x789 == 0)
m.c808 = Constraint(expr=-m.x2039*m.x1738 + m.x790 == 0)
m.c809 = Constraint(expr=-m.x2037*m.x1739 + m.x791 == 0)
m.c810 = Constraint(expr=-m.x2038*m.x1739 + m.x792 == 0)
m.c811 = Constraint(expr=-m.x2039*m.x1739 + m.x793 == 0)
m.c812 = Constraint(expr=-m.x2037*m.x1740 + m.x794 == 0)
m.c813 = Constraint(expr=-m.x2038*m.x1740 + m.x795 == 0)
m.c814 = Constraint(expr=-m.x2039*m.x1740 + m.x796 == 0)
m.c815 = Constraint(expr=-m.x2040*m.x1741 + m.x797 == 0)
m.c816 = Constraint(expr=-m.x2041*m.x1741 + m.x798 == 0)
m.c817 = Constraint(expr=-m.x2042*m.x1741 + m.x799 == 0)
m.c818 = Constraint(expr=-m.x2040*m.x1742 + m.x800 == 0)
m.c819 = Constraint(expr=-m.x2041*m.x1742 + m.x801 == 0)
m.c820 = Constraint(expr=-m.x2042*m.x1742 + m.x802 == 0)
m.c821 = Constraint(expr=-m.x2040*m.x1743 + m.x803 == 0)
m.c822 = Constraint(expr=-m.x2041*m.x1743 + m.x804 == 0)
m.c823 = Constraint(expr=-m.x2042*m.x1743 + m.x805 == 0)
m.c824 = Constraint(expr=-m.x2040*m.x1744 + m.x806 == 0)
m.c825 = Constraint(expr=-m.x2041*m.x1744 + m.x807 == 0)
m.c826 = Constraint(expr=-m.x2042*m.x1744 + m.x808 == 0)
m.c827 = Constraint(expr=-m.x2040*m.x1745 + m.x809 == 0)
m.c828 = Constraint(expr=-m.x2041*m.x1745 + m.x810 == 0)
m.c829 = Constraint(expr=-m.x2042*m.x1745 + m.x811 == 0)
m.c830 = Constraint(expr=-m.x2040*m.x1746 + m.x812 == 0)
m.c831 = Constraint(expr=-m.x2041*m.x1746 + m.x813 == 0)
m.c832 = Constraint(expr=-m.x2042*m.x1746 + m.x814 == 0)
m.c833 = Constraint(expr=-m.x2040*m.x1747 + m.x815 == 0)
m.c834 = Constraint(expr=-m.x2041*m.x1747 + m.x816 == 0)
m.c835 = Constraint(expr=-m.x2042*m.x1747 + m.x817 == 0)
m.c836 = Constraint(expr=-m.x2040*m.x1748 + m.x818 == 0)
m.c837 = Constraint(expr=-m.x2041*m.x1748 + m.x819 == 0)
m.c838 = Constraint(expr=-m.x2042*m.x1748 + m.x820 == 0)
m.c839 = Constraint(expr=-m.x2040*m.x1749 + m.x821 == 0)
m.c840 = Constraint(expr=-m.x2041*m.x1749 + m.x822 == 0)
m.c841 = Constraint(expr=-m.x2042*m.x1749 + m.x823 == 0)
m.c842 = Constraint(expr=-m.x2040*m.x1750 + m.x824 == 0)
m.c843 = Constraint(expr=-m.x2041*m.x1750 + m.x825 == 0)
m.c844 = Constraint(expr=-m.x2042*m.x1750 + m.x826 == 0)
m.c845 = Constraint(expr=-m.x2040*m.x1751 + m.x827 == 0)
m.c846 = Constraint(expr=-m.x2041*m.x1751 + m.x828 == 0)
m.c847 = Constraint(expr=-m.x2042*m.x1751 + m.x829 == 0)
m.c848 = Constraint(expr=-m.x2040*m.x1752 + m.x830 == 0)
m.c849 = Constraint(expr=-m.x2041*m.x1752 + m.x831 == 0)
m.c850 = Constraint(expr=-m.x2042*m.x1752 + m.x832 == 0)
m.c851 = Constraint(expr=-m.x2040*m.x1753 + m.x833 == 0)
m.c852 = Constraint(expr=-m.x2041*m.x1753 + m.x834 == 0)
m.c853 = Constraint(expr=-m.x2042*m.x1753 + m.x835 == 0)
m.c854 = Constraint(expr=-m.x2040*m.x1754 + m.x836 == 0)
m.c855 = Constraint(expr=-m.x2041*m.x1754 + m.x837 == 0)
m.c856 = Constraint(expr=-m.x2042*m.x1754 + m.x838 == 0)
m.c857 = Constraint(expr=-m.x2043*m.x1755 + m.x839 == 0)
m.c858 = Constraint(expr=-m.x2044*m.x1755 + m.x840 == 0)
m.c859 = Constraint(expr=-m.x2045*m.x1755 + m.x841 == 0)
m.c860 = Constraint(expr=-m.x2043*m.x1756 + m.x842 == 0)
m.c861 = Constraint(expr=-m.x2044*m.x1756 + m.x843 == 0)
m.c862 = Constraint(expr=-m.x2045*m.x1756 + m.x844 == 0)
m.c863 = Constraint(expr=-m.x2043*m.x1757 + m.x845 == 0)
m.c864 = Constraint(expr=-m.x2044*m.x1757 + m.x846 == 0)
m.c865 = Constraint(expr=-m.x2045*m.x1757 + m.x847 == 0)
m.c866 = Constraint(expr=-m.x2043*m.x1758 + m.x848 == 0)
m.c867 = Constraint(expr=-m.x2044*m.x1758 + m.x849 == 0)
m.c868 = Constraint(expr=-m.x2045*m.x1758 + m.x850 == 0)
m.c869 = Constraint(expr=-m.x2043*m.x1759 + m.x851 == 0)
m.c870 = Constraint(expr=-m.x2044*m.x1759 + m.x852 == 0)
m.c871 = Constraint(expr=-m.x2045*m.x1759 + m.x853 == 0)
m.c872 = Constraint(expr=-m.x2043*m.x1760 + m.x854 == 0)
m.c873 = Constraint(expr=-m.x2044*m.x1760 + m.x855 == 0)
m.c874 = Constraint(expr=-m.x2045*m.x1760 + m.x856 == 0)
m.c875 = Constraint(expr=-m.x2043*m.x1761 + m.x857 == 0)
m.c876 = Constraint(expr=-m.x2044*m.x1761 + m.x858 == 0)
m.c877 = Constraint(expr=-m.x2045*m.x1761 + m.x859 == 0)
m.c878 = Constraint(expr=-m.x2043*m.x1762 + m.x860 == 0)
m.c879 = Constraint(expr=-m.x2044*m.x1762 + m.x861 == 0)
m.c880 = Constraint(expr=-m.x2045*m.x1762 + m.x862 == 0)
m.c881 = Constraint(expr=-m.x2043*m.x1763 + m.x863 == 0)
m.c882 = Constraint(expr=-m.x2044*m.x1763 + m.x864 == 0)
m.c883 = Constraint(expr=-m.x2045*m.x1763 + m.x865 == 0)
m.c884 = Constraint(expr=-m.x2043*m.x1764 + m.x866 == 0)
m.c885 = Constraint(expr=-m.x2044*m.x1764 + m.x867 == 0)
m.c886 = Constraint(expr=-m.x2045*m.x1764 + m.x868 == 0)
m.c887 = Constraint(expr=-m.x2043*m.x1765 + m.x869 == 0)
m.c888 = Constraint(expr=-m.x2044*m.x1765 + m.x870 == 0)
m.c889 = Constraint(expr=-m.x2045*m.x1765 + m.x871 == 0)
m.c890 = Constraint(expr=-m.x2043*m.x1766 + m.x872 == 0)
m.c891 = Constraint(expr=-m.x2044*m.x1766 + m.x873 == 0)
m.c892 = Constraint(expr=-m.x2045*m.x1766 + m.x874 == 0)
m.c893 = Constraint(expr=-m.x2043*m.x1767 + m.x875 == 0)
m.c894 = Constraint(expr=-m.x2044*m.x1767 + m.x876 == 0)
m.c895 = Constraint(expr=-m.x2045*m.x1767 + m.x877 == 0)
m.c896 = Constraint(expr=-m.x2043*m.x1768 + m.x878 == 0)
m.c897 = Constraint(expr=-m.x2044*m.x1768 + m.x879 == 0)
m.c898 = Constraint(expr=-m.x2045*m.x1768 + m.x880 == 0)
m.c899 = Constraint(expr=-m.x2046*m.x1769 + m.x881 == 0)
m.c900 = Constraint(expr=-m.x2047*m.x1769 + m.x882 == 0)
m.c901 = Constraint(expr=-m.x2048*m.x1769 + m.x883 == 0)
m.c902 = Constraint(expr=-m.x2046*m.x1770 + m.x884 == 0)
m.c903 = Constraint(expr=-m.x2047*m.x1770 + m.x885 == 0)
m.c904 = Constraint(expr=-m.x2048*m.x1770 + m.x886 == 0)
m.c905 = Constraint(expr=-m.x2046*m.x1771 + m.x887 == 0)
m.c906 = Constraint(expr=-m.x2047*m.x1771 + m.x888 == 0)
m.c907 = Constraint(expr=-m.x2048*m.x1771 + m.x889 == 0)
m.c908 = Constraint(expr=-m.x2046*m.x1772 + m.x890 == 0)
m.c909 = Constraint(expr=-m.x2047*m.x1772 + m.x891 == 0)
m.c910 = Constraint(expr=-m.x2048*m.x1772 + m.x892 == 0)
m.c911 = Constraint(expr=-m.x2046*m.x1773 + m.x893 == 0)
m.c912 = Constraint(expr=-m.x2047*m.x1773 + m.x894 == 0)
m.c913 = Constraint(expr=-m.x2048*m.x1773 + m.x895 == 0)
m.c914 = Constraint(expr=-m.x2046*m.x1774 + m.x896 == 0)
m.c915 = Constraint(expr=-m.x2047*m.x1774 + m.x897 == 0)
m.c916 = Constraint(expr=-m.x2048*m.x1774 + m.x898 == 0)
m.c917 = Constraint(expr=-m.x2046*m.x1775 + m.x899 == 0)
m.c918 = Constraint(expr=-m.x2047*m.x1775 + m.x900 == 0)
m.c919 = Constraint(expr=-m.x2048*m.x1775 + m.x901 == 0)
m.c920 = Constraint(expr=-m.x2046*m.x1776 + m.x902 == 0)
m.c921 = Constraint(expr=-m.x2047*m.x1776 + m.x903 == 0)
m.c922 = Constraint(expr=-m.x2048*m.x1776 + m.x904 == 0)
m.c923 = Constraint(expr=-m.x2046*m.x1777 + m.x905 == 0)
m.c924 = Constraint(expr=-m.x2047*m.x1777 + m.x906 == 0)
m.c925 = Constraint(expr=-m.x2048*m.x1777 + m.x907 == 0)
m.c926 = Constraint(expr=-m.x2046*m.x1778 + m.x908 == 0)
m.c927 = Constraint(expr=-m.x2047*m.x1778 + m.x909 == 0)
m.c928 = Constraint(expr=-m.x2048*m.x1778 + m.x910 == 0)
m.c929 = Constraint(expr=-m.x2046*m.x1779 + m.x911 == 0)
m.c930 = Constraint(expr=-m.x2047*m.x1779 + m.x912 == 0)
m.c931 = Constraint(expr=-m.x2048*m.x1779 + m.x913 == 0)
m.c932 = Constraint(expr=-m.x2046*m.x1780 + m.x914 == 0)
m.c933 = Constraint(expr=-m.x2047*m.x1780 + m.x915 == 0)
m.c934 = Constraint(expr=-m.x2048*m.x1780 + m.x916 == 0)
m.c935 = Constraint(expr=-m.x2046*m.x1781 + m.x917 == 0)
m.c936 = Constraint(expr=-m.x2047*m.x1781 + m.x918 == 0)
m.c937 = Constraint(expr=-m.x2048*m.x1781 + m.x919 == 0)
m.c938 = Constraint(expr=-m.x2046*m.x1782 + m.x920 == 0)
m.c939 = Constraint(expr=-m.x2047*m.x1782 + m.x921 == 0)
m.c940 = Constraint(expr=-m.x2048*m.x1782 + m.x922 == 0)
m.c941 = Constraint(expr=-m.x2046*m.x1783 + m.x923 == 0)
m.c942 = Constraint(expr=-m.x2047*m.x1783 + m.x924 == 0)
m.c943 = Constraint(expr=-m.x2048*m.x1783 + m.x925 == 0)
m.c944 = Constraint(expr=-m.x2046*m.x1784 + m.x926 == 0)
m.c945 = Constraint(expr=-m.x2047*m.x1784 + m.x927 == 0)
m.c946 = Constraint(expr=-m.x2048*m.x1784 + m.x928 == 0)
m.c947 = Constraint(expr=-m.x2046*m.x1785 + m.x929 == 0)
m.c948 = Constraint(expr=-m.x2047*m.x1785 + m.x930 == 0)
m.c949 = Constraint(expr=-m.x2048*m.x1785 + m.x931 == 0)
m.c950 = Constraint(expr=-m.x2046*m.x1786 + m.x932 == 0)
m.c951 = Constraint(expr=-m.x2047*m.x1786 + m.x933 == 0)
m.c952 = Constraint(expr=-m.x2048*m.x1786 + m.x934 == 0)
m.c953 = Constraint(expr=-m.x2046*m.x1787 + m.x935 == 0)
m.c954 = Constraint(expr=-m.x2047*m.x1787 + m.x936 == 0)
m.c955 = Constraint(expr=-m.x2048*m.x1787 + m.x937 == 0)
m.c956 = Constraint(expr=-m.x2046*m.x1788 + m.x938 == 0)
m.c957 = Constraint(expr=-m.x2047*m.x1788 + m.x939 == 0)
m.c958 = Constraint(expr=-m.x2048*m.x1788 + m.x940 == 0)
m.c959 = Constraint(expr=-m.x2046*m.x1789 + m.x941 == 0)
m.c960 = Constraint(expr=-m.x2047*m.x1789 + m.x942 == 0)
m.c961 = Constraint(expr=-m.x2048*m.x1789 + m.x943 == 0)
m.c962 = Constraint(expr=-m.x2046*m.x1790 + m.x944 == 0)
m.c963 = Constraint(expr=-m.x2047*m.x1790 + m.x945 == 0)
m.c964 = Constraint(expr=-m.x2048*m.x1790 + m.x946 == 0)
m.c965 = Constraint(expr=-m.x2046*m.x1791 | |
= float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (Z): %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'OutlinePoints':
obj_ = OutlinePointsType.factory()
obj_.build(child_)
self.OutlinePoints = obj_
obj_.original_tagname_ = 'OutlinePoints'
# end class BoundingBoxType
class PointType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, _derived=None, _id=None, _real_archetype=None, _desynched_atts=None, _subtype=None, _instances=None, _archetype=None, Y=None, X=None, Z=None):
self.original_tagname_ = None
self._derived = _cast(None, _derived)
self._id = _cast(None, _id)
self._real_archetype = _cast(bool, _real_archetype)
self._desynched_atts = _cast(None, _desynched_atts)
self._subtype = _cast(bool, _subtype)
self._instances = _cast(None, _instances)
self._archetype = _cast(None, _archetype)
self.Y = _cast(float, Y)
self.X = _cast(float, X)
self.Z = _cast(float, Z)
def factory(*args_, **kwargs_):
if PointType.subclass:
return PointType.subclass(*args_, **kwargs_)
else:
return PointType(*args_, **kwargs_)
factory = staticmethod(factory)
def get__derived(self): return self._derived
def set__derived(self, _derived): self._derived = _derived
def get__id(self): return self._id
def set__id(self, _id): self._id = _id
def get__real_archetype(self): return self._real_archetype
def set__real_archetype(self, _real_archetype): self._real_archetype = _real_archetype
def get__desynched_atts(self): return self._desynched_atts
def set__desynched_atts(self, _desynched_atts): self._desynched_atts = _desynched_atts
def get__subtype(self): return self._subtype
def set__subtype(self, _subtype): self._subtype = _subtype
def get__instances(self): return self._instances
def set__instances(self, _instances): self._instances = _instances
def get__archetype(self): return self._archetype
def set__archetype(self, _archetype): self._archetype = _archetype
def get_Y(self): return self.Y
def set_Y(self, Y): self.Y = Y
def get_X(self): return self.X
def set_X(self, X): self.X = X
def get_Z(self): return self.Z
def set_Z(self, Z): self.Z = Z
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='PointType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='PointType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='PointType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PointType'):
if self._derived is not None and '_derived' not in already_processed:
already_processed.add('_derived')
outfile.write(' _derived=%s' % (self.gds_format_string(quote_attrib(self._derived).encode(ExternalEncoding), input_name='_derived'), ))
if self._id is not None and '_id' not in already_processed:
already_processed.add('_id')
outfile.write(' _id=%s' % (self.gds_format_string(quote_attrib(self._id).encode(ExternalEncoding), input_name='_id'), ))
if self._real_archetype is not None and '_real_archetype' not in already_processed:
already_processed.add('_real_archetype')
outfile.write(' _real_archetype="%s"' % self.gds_format_boolean(self._real_archetype, input_name='_real_archetype'))
if self._desynched_atts is not None and '_desynched_atts' not in already_processed:
already_processed.add('_desynched_atts')
outfile.write(' _desynched_atts=%s' % (self.gds_format_string(quote_attrib(self._desynched_atts).encode(ExternalEncoding), input_name='_desynched_atts'), ))
if self._subtype is not None and '_subtype' not in already_processed:
already_processed.add('_subtype')
outfile.write(' _subtype="%s"' % self.gds_format_boolean(self._subtype, input_name='_subtype'))
if self._instances is not None and '_instances' not in already_processed:
already_processed.add('_instances')
outfile.write(' _instances=%s' % (self.gds_format_string(quote_attrib(self._instances).encode(ExternalEncoding), input_name='_instances'), ))
if self._archetype is not None and '_archetype' not in already_processed:
already_processed.add('_archetype')
outfile.write(' _archetype=%s' % (self.gds_format_string(quote_attrib(self._archetype).encode(ExternalEncoding), input_name='_archetype'), ))
if self.Y is not None and 'Y' not in already_processed:
already_processed.add('Y')
outfile.write(' Y="%s"' % self.gds_format_double(self.Y, input_name='Y'))
if self.X is not None and 'X' not in already_processed:
already_processed.add('X')
outfile.write(' X="%s"' % self.gds_format_double(self.X, input_name='X'))
if self.Z is not None and 'Z' not in already_processed:
already_processed.add('Z')
outfile.write(' Z="%s"' % self.gds_format_double(self.Z, input_name='Z'))
def exportChildren(self, outfile, level, namespace_='', name_='PointType', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='PointType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self._derived is not None and '_derived' not in already_processed:
already_processed.add('_derived')
showIndent(outfile, level)
outfile.write('_derived="%s",\n' % (self._derived,))
if self._id is not None and '_id' not in already_processed:
already_processed.add('_id')
showIndent(outfile, level)
outfile.write('_id="%s",\n' % (self._id,))
if self._real_archetype is not None and '_real_archetype' not in already_processed:
already_processed.add('_real_archetype')
showIndent(outfile, level)
outfile.write('_real_archetype=%s,\n' % (self._real_archetype,))
if self._desynched_atts is not None and '_desynched_atts' not in already_processed:
already_processed.add('_desynched_atts')
showIndent(outfile, level)
outfile.write('_desynched_atts="%s",\n' % (self._desynched_atts,))
if self._subtype is not None and '_subtype' not in already_processed:
already_processed.add('_subtype')
showIndent(outfile, level)
outfile.write('_subtype=%s,\n' % (self._subtype,))
if self._instances is not None and '_instances' not in already_processed:
already_processed.add('_instances')
showIndent(outfile, level)
outfile.write('_instances="%s",\n' % (self._instances,))
if self._archetype is not None and '_archetype' not in already_processed:
already_processed.add('_archetype')
showIndent(outfile, level)
outfile.write('_archetype="%s",\n' % (self._archetype,))
if self.Y is not None and 'Y' not in already_processed:
already_processed.add('Y')
showIndent(outfile, level)
outfile.write('Y=%e,\n' % (self.Y,))
if self.X is not None and 'X' not in already_processed:
already_processed.add('X')
showIndent(outfile, level)
outfile.write('X=%e,\n' % (self.X,))
if self.Z is not None and 'Z' not in already_processed:
already_processed.add('Z')
showIndent(outfile, level)
outfile.write('Z=%e,\n' % (self.Z,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('_derived', node)
if value is not None and '_derived' not in already_processed:
already_processed.add('_derived')
self._derived = value
value = find_attr_value_('_id', node)
if value is not None and '_id' not in already_processed:
already_processed.add('_id')
self._id = value
value = find_attr_value_('_real_archetype', node)
if value is not None and '_real_archetype' not in already_processed:
already_processed.add('_real_archetype')
if value in ('true', '1'):
self._real_archetype = True
elif value in ('false', '0'):
self._real_archetype = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('_desynched_atts', node)
if value is not None and '_desynched_atts' not in already_processed:
already_processed.add('_desynched_atts')
self._desynched_atts = value
value = find_attr_value_('_subtype', node)
if value is not None and '_subtype' not in already_processed:
already_processed.add('_subtype')
if value in ('true', '1'):
self._subtype = True
elif value in ('false', '0'):
self._subtype = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('_instances', node)
if value is not None and '_instances' not in already_processed:
already_processed.add('_instances')
self._instances = value
value = find_attr_value_('_archetype', node)
if value is not None and '_archetype' not in already_processed:
already_processed.add('_archetype')
self._archetype = value
value = find_attr_value_('Y', node)
if value is not None and 'Y' not in already_processed:
already_processed.add('Y')
try:
self.Y = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (Y): %s' % exp)
value = find_attr_value_('X', node)
if value is not None and 'X' not in already_processed:
already_processed.add('X')
try:
self.X = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (X): %s' % exp)
value = find_attr_value_('Z', node)
if value is not None and 'Z' not in already_processed:
already_processed.add('Z')
try:
self.Z = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (Z): %s' % exp)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # PointType carries only attributes (X/Y/Z etc.); it has no child
        # elements, so this generated hook is a deliberate no-op.
        pass
# end class PointType
class TranslationType(GeneratedsSuper):
    # generateDS.py-generated binding — presumably for a <Translation> XML
    # element (attribute-only: X/Y/Z floats plus bookkeeping attributes).
    # `subclass`/`superclass` are generateDS extension hooks: assigning a
    # class to `subclass` makes factory() build that class instead.
    subclass = None
    superclass = None
    def __init__(self, _derived=None, _id=None, _real_archetype=None, _desynched_atts=None, _subtype=None, _instances=None, _archetype=None, Y=None, X=None, Z=None):
        # Every argument defaults to None (attribute absent in the XML).
        # The generateDS `_cast` helper coerces the value: a type of None
        # stores it unchanged, bool/float request conversion to that type.
        self.original_tagname_ = None
        self._derived = _cast(None, _derived)
        self._id = _cast(None, _id)
        self._real_archetype = _cast(bool, _real_archetype)
        self._desynched_atts = _cast(None, _desynched_atts)
        self._subtype = _cast(bool, _subtype)
        self._instances = _cast(None, _instances)
        self._archetype = _cast(None, _archetype)
        self.Y = _cast(float, Y)
        self.X = _cast(float, X)
        self.Z = _cast(float, Z)
def factory(*args_, **kwargs_):
if TranslationType.subclass:
return TranslationType.subclass(*args_, **kwargs_)
else:
return TranslationType(*args_, **kwargs_)
factory = staticmethod(factory)
def get__derived(self): return self._derived
def set__derived(self, _derived): self._derived = _derived
def get__id(self): return self._id
def set__id(self, _id): self._id = _id
def get__real_archetype(self): return self._real_archetype
def set__real_archetype(self, _real_archetype): self._real_archetype = _real_archetype
def get__desynched_atts(self): return self._desynched_atts
def set__desynched_atts(self, _desynched_atts): self._desynched_atts = _desynched_atts
def get__subtype(self): return self._subtype
def set__subtype(self, _subtype): self._subtype = _subtype
def get__instances(self): return self._instances
def set__instances(self, _instances): self._instances = _instances
def get__archetype(self): return self._archetype
def set__archetype(self, _archetype): self._archetype = _archetype
def get_Y(self): return self.Y
def set_Y(self, Y): self.Y = Y
def get_X(self): return self.X
def set_X(self, X): self.X = X
def get_Z(self): return self.Z
def set_Z(self, Z): self.Z = Z
def hasContent_(self):
if (
):
return True
else:
return False
    def export(self, outfile, level, namespace_='', name_='TranslationType', namespacedef_='', pretty_print=True):
        # Serialize this element to `outfile` as XML at indent `level`.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # An element parsed under a different tag name round-trips under it.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='TranslationType')
        # hasContent_() is always False for this attribute-only type, so in
        # practice the self-closing branch below is the one taken.
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='TranslationType', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TranslationType'):
if self._derived is not None and '_derived' not in already_processed:
already_processed.add('_derived')
outfile.write(' _derived=%s' % (self.gds_format_string(quote_attrib(self._derived).encode(ExternalEncoding), input_name='_derived'), ))
if self._id is not None and '_id' not in already_processed:
already_processed.add('_id')
outfile.write(' _id=%s' % (self.gds_format_string(quote_attrib(self._id).encode(ExternalEncoding), input_name='_id'), ))
if self._real_archetype is not None and '_real_archetype' not in already_processed:
already_processed.add('_real_archetype')
outfile.write(' _real_archetype="%s"' % self.gds_format_boolean(self._real_archetype, input_name='_real_archetype'))
if self._desynched_atts is not None and '_desynched_atts' not | |
cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "vegetation_modis.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def vegetation_modis_r(self):
cname = "vegetation_modis_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "vegetation_modis.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def wgne15(self):
cname = "wgne15"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "wgne15.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def wgne15_r(self):
cname = "wgne15_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "wgne15.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def wh_bl_gr_ye_re(self):
cname = "wh_bl_gr_ye_re"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "wh_bl_gr_ye_re.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def wh_bl_gr_ye_re_r(self):
cname = "wh_bl_gr_ye_re_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "wh_bl_gr_ye_re.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def wind_17lev(self):
cname = "wind_17lev"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "wind_17lev.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def wind_17lev_r(self):
cname = "wind_17lev_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "wind_17lev.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def wxpEnIR(self):
cname = "wxpEnIR"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "wxpEnIR.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def wxpEnIR_r(self):
cname = "wxpEnIR_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "wxpEnIR.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def acton(self):
cname = "acton"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "acton.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def acton_r(self):
cname = "acton_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "acton.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def bamako(self):
cname = "bamako"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "bamako.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def bamako_r(self):
cname = "bamako_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "bamako.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def batlow(self):
cname = "batlow"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "batlow.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def batlow_r(self):
cname = "batlow_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "batlow.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def berlin(self):
cname = "berlin"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "berlin.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def berlin_r(self):
cname = "berlin_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "berlin.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def bilbao(self):
cname = "bilbao"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "bilbao.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def bilbao_r(self):
cname = "bilbao_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "bilbao.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def broc(self):
cname = "broc"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "broc.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def broc_r(self):
cname = "broc_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "broc.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def buda(self):
cname = "buda"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "buda.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def buda_r(self):
cname = "buda_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "buda.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def cork(self):
cname = "cork"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "cork.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def cork_r(self):
cname = "cork_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "cork.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def davos(self):
cname = "davos"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "davos.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def davos_r(self):
cname = "davos_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "davos.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def devon(self):
cname = "devon"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "devon.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def devon_r(self):
cname = "devon_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "devon.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def grayc(self):
cname = "grayc"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "grayc.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def grayc_r(self):
cname = "grayc_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "grayc.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def hawaii(self):
cname = "hawaii"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "hawaii.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def hawaii_r(self):
cname = "hawaii_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "hawaii.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def imola(self):
cname = "imola"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "imola.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def imola_r(self):
cname = "imola_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "imola.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def lajolla(self):
cname = "lajolla"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "lajolla.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def lajolla_r(self):
cname = "lajolla_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "lajolla.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def lapaz(self):
cname = "lapaz"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "lapaz.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def lapaz_r(self):
cname = "lapaz_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "lapaz.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def lisbon(self):
cname = "lisbon"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "lisbon.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def lisbon_r(self):
cname = "lisbon_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "lisbon.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def nuuk(self):
cname = "nuuk"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "nuuk.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def nuuk_r(self):
cname = "nuuk_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "nuuk.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oleron(self):
cname = "oleron"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "oleron.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oleron_r(self):
cname = "oleron_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "oleron.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oslo(self):
cname = "oslo"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "oslo.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oslo_r(self):
cname = "oslo_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "oslo.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def roma(self):
cname = "roma"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "roma.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def roma_r(self):
cname = "roma_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "roma.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def tofino(self):
cname = "tofino"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "scientific", "tofino.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def tofino_r(self):
cname = "tofino_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
| |
players in playerlist:
p = bot.get_user(int(players[0])) # ユーザーID
if not p: continue
user = p
if not user.id in kekka:
if not user.id in rongaina:
kekka[user.id] = [c + 1]
c += 1
return kekka[user_id][0]
@bot.command(name='inquiry', aliases=['inq'], pass_context=True, description='チャンネルのバトルの状態を確認する')
async def inquiry(ctx):
    """チャンネルのバトルの状態を確認する"""
    # Report the channel's current boss, its HP, the channel's world rank and
    # the party members currently in battle.
    channel_id = ctx.message.channel.id
    boss_level, boss_hp = get_boss_level_and_hp(channel_id)
    # One connection for the whole command.  The original opened a fresh
    # connection per query, never closed any, used the sqlite-style "?"
    # placeholder (psycopg2 requires "%s") and chained .fetchone() onto
    # cursor.execute() — which returns None per DB-API, an AttributeError.
    con = psycopg2.connect(os.environ.get("DATABASE_URL"))
    try:
        c = con.cursor()
        def _monster_number():
            # Per-channel monster index from channel_status.
            c.execute("SELECT monster FROM channel_status WHERE channel_id=%s", (channel_id,))
            return c.fetchone()[0]
        # Base monster: channel-specific specials first, otherwise the table.
        if channel_id in special_monster:
            monster = special_monster[channel_id]
        elif channel_id in very_special_monster:
            monster = very_special_monster[channel_id]
        else:
            monster = monsters[_monster_number()]
        # Milestone levels override the base monster (every MONSTER_NUM
        # levels a "super boss", every 5 levels a "strong enemy") — same
        # precedence order as the original code.
        if boss_level % MONSTER_NUM == 0:
            monster = tyoukyouteki[_monster_number()]
        elif boss_level % 5 == 0:
            monster = kyouteki[_monster_number()]
        c.execute("""SELECT in_battle.user_id, player.experience, in_battle.player_hp
        FROM in_battle, player WHERE in_battle.channel_id=%s AND player.user_id=in_battle.user_id""",
                  (channel_id,))
        in_battles = c.fetchall()
        # World rank = 1 + number of channels with a strictly higher boss level.
        c.execute("""SELECT
        (SELECT Count(0) FROM channel_status WHERE channel_status.boss_level > channel_status1.boss_level) + 1 AS rank
        FROM channel_status AS channel_status1 WHERE channel_id=%s""", (channel_id,))
        rank = c.fetchone()[0]
    finally:
        con.close()
    rank_say = 'このチャンネルの世界ランキングは「{}位」だ!'.format(rank)
    if in_battles:
        # Player level is the integer square root of accumulated experience.
        members = "\n ".join("<@{}> Lv.{} 残りHP: {}".format(
            in_battle[0], int(math.sqrt(in_battle[1])), in_battle[2]) for in_battle in in_battles)
        embed = discord.Embed(
            description="{0}\n\nLv:{1}の{2}と戦闘中だ!\n{2}のHP:{3}/{4}\n\n戦闘中のメンバー:\n{5}".format(
                rank_say, boss_level, monster["name"], boss_hp, boss_level * 10 + 50, members),
            color=0x36393f)
    else:
        embed = discord.Embed(
            description="{0}\n\nLv: {1}の{2}が待ち構えている。\n{2}のHP:{3}\n".format(
                rank_say, boss_level, monster["name"], boss_level * 10 + 50),
            color=0x36393f)
    embed.set_image(url="{}".format(monster["img"]))
    await ctx.send(embed=embed)
@bot.command(name='reset', aliases=['re'], pass_context=True, description='戦いをやり直す')
async def reset(ctx):
    """戦いをやり直す"""
    # Restart the battle in this channel, if one is in progress.
    if "true" in kidou:
        embed = discord.Embed(description="現在起動中です。しばらくお待ちください。",
                              color=0xff0000)
        return await ctx.send(embed=embed)
    # Original chained .fetchone() onto cursor.execute() (returns None in
    # DB-API → AttributeError) and used sqlite "?" placeholders; fixed to the
    # psycopg2 "%s" style and the connection is now closed.
    con = psycopg2.connect(os.environ.get("DATABASE_URL"))
    try:
        c = con.cursor()
        c.execute("SELECT 0 FROM in_battle WHERE channel_id=%s", (ctx.message.channel.id,))
        resee = c.fetchone()
    finally:
        con.close()
    if resee:
        await reset_battle(ctx, ctx.message.channel.id, False)
    else:
        await ctx.send("このチャンネルでは戦いは行われていないようだ。")
@bot.command(pass_context=True, description='四字熟語の読み方をひらがなで入力し、正解すると経験値がもらえるぞ。')
async def t(ctx):
    """トレーニングをする"""
    # Training quiz: show a four-character idiom, wait 12 s for its reading.
    import asyncio  # local import so this fix needs no top-of-file change
    if ctx.message.author.id == 279506636828311553:
        return await ctx.send("セルフですよね....?")
    if "true" in kidou:
        embed = discord.Embed(description="現在起動中です。しばらくお待ちください。",
                              color=0xff0000)
        return await ctx.send(embed=embed)
    user = ctx.message.author
    q_id = random.randint(0, 619)
    await ctx.send("「{}」の読み方をひらがなで答えなさい。".format(training_set[q_id][0]))
    answer = training_set[q_id][1]
    exp = math.ceil(get_player_level(ctx, user.id))
    # bot.wait_for raises asyncio.TimeoutError on timeout — it never returns
    # None — so the original `if kaitou is None` timeout branch was
    # unreachable and a timeout crashed the command instead.
    try:
        kaitou = await bot.wait_for('message', timeout=12.0,
                                    check=lambda messages: messages.author.id == user.id)
    except asyncio.TimeoutError:
        await ctx.send('時間切れだ。正解は「{}」だ。'.format(answer))
        return
    if kaitou.content == answer:
        # NOTE(review): exp * 4 is awarded but the message reports exp —
        # presumably intentional; confirm against experiment()'s contract.
        comment = experiment(ctx, user.id, exp * 4)
        # Rare drops on a correct answer.
        if random.random() < 0.005:
            comment += "\n`エリクサー`を手に入れた!"
            obtain_an_item(user.id, 1)
        if random.random() < 0.1:
            comment += "\n`ファイアボールの書`を手に入れた!"
            obtain_an_item(user.id, 2)
        if random.random() < 0.1:
            comment += "\n`祈りの書`を手に入れた!"
            obtain_an_item(user.id, 3)
        conn.commit()
        await ctx.send('正解だ!{}の経験値を得た。\n{}'.format(exp, comment))
    else:
        await ctx.send('残念!正解は「{}」だ。'.format(answer))
@bot.command(pass_context=True, description='クイズに解答し、正解すると経験値がもらえるぞ。')
async def q(ctx):
    """トレーニングをする"""
    # The external quiz API is dead, so the command short-circuits here.
    embed = discord.Embed(description="クイズのapi死亡のため使用できません")
    return await ctx.send(embed=embed)
    # ------------------------------------------------------------------
    # DEAD CODE: everything below the return above is unreachable. Kept
    # in case the API comes back. Known issues to fix before re-enabling:
    #  - bot.wait_for raises asyncio.TimeoutError instead of returning
    #    None, so the `if guess is None` branch never runs.
    #  - experiment(user.id, ...) is called without ctx here, while t()
    #    calls experiment(ctx, user.id, ...) — confirm the real signature.
    # ------------------------------------------------------------------
    # author_name = bot.get_user(ctx.message.author.id)
    # embed = discord.Embed(
    # description=f"""```コマンド:[!!q]\n発言鯖:{ctx.message.guild.name} | チャンネル名:{ctx.message.channel.name}\n発言者:{author_name} | ID:{ctx.message.id}```""",
    # color=0x1d1d1d)
    # channel = bot.get_channel(661122218847109130)
    # await channel.send(embed=embed)
    user = ctx.message.author
    resp = requests.get(url='http://24th.jp/test/quiz/api_quiz.php')
    quiz_xml = ElementTree.fromstring(resp.text.encode('utf-8'))[1]
    # Element [2] is the correct answer; [3..5] are distractors.
    quiz_set = [quiz_xml[2].text, quiz_xml[3].text, quiz_xml[4].text, quiz_xml[5].text]
    random.shuffle(quiz_set)
    await ctx.send("Q. {}\n 1. {}\n 2. {}\n 3. {}\n 4. {}".format(quiz_xml[1].text, *quiz_set))
    answer_num = quiz_set.index(quiz_xml[2].text) + 1
    exp = math.ceil(get_player_level(ctx, user.id) / 10)
    guess = await bot.wait_for('message', timeout=12.0, check=lambda messages: messages.author.id == user.id)
    if guess is None:
        await ctx.send('時間切れだ。正解は「{}」だ。'.format(quiz_xml[2].text))
        return
    if guess.content.isdigit() and int(guess.content) == answer_num:
        comment = experiment(user.id, exp * 4)
        if random.random() < 0.07:
            comment += "\n`エリクサー`を手に入れた!"
            obtain_an_item(user.id, 1)
        if random.random() < 0.4:
            comment += "\n`ファイアボールの書`を手に入れた!"
            obtain_an_item(user.id, 2)
        if random.random() < 0.4:
            comment += "\n`祈りの書`を手に入れた!"
            obtain_an_item(user.id, 3)
        conn.commit()
        await ctx.send('正解だ!{}の経験値を得た。\n{}'.format(exp, comment))
    else:
        await ctx.send('残念!正解は「{}」だ。'.format(quiz_xml[2].text))
# Item-id -> display-name table. Negative ids look like non-consumable
# "badges"; positive ids are the usable items dispatched by _item().
items = {-10: "論外の証", -9: "loginの証", -8: "古参の証", 1: "エリクサー", 2: "ファイアボールの書", 3: "祈りの書", }
# User-facing help text wired into the !!item command description — the
# string content is runtime output, keep it byte-identical.
item_description = """アイテムの説明
エリクサー:チャンネルの全員を全回復させる。
ファイアボールの書:遠隔攻撃する。
祈りの書:仲間一人を復活させる。
サポーターの証:MMOくんをサポートしてくれた証だ!
"""
@bot.command(name='exp', aliases=['e'], pass_context=True, description="権限無し人間は使えません(/・ω・)/")
async def exp(ctx, mentions, addexp=''):
    """不正でEXPを付与する ※権限無し人間は使えません(/・ω・)/"""
    # Admin-only: grant `addexp` experience to the first mentioned user.
    # NOTE(review): the admin user ids were redacted to the literal
    # "<PASSWORD>" in the original source (a syntax error). Fill them in.
    ADMIN_IDS = set()  # TODO: restore the real admin Discord user ids
    if ctx.message.author.id not in ADMIN_IDS:
        return
    user_id = ctx.message.mentions[0].id
    player_exp = get_player_exp(user_id)
    current_level = int(math.sqrt(player_exp))
    afterexp = player_exp + int(addexp)
    # psycopg2 uses "%s" placeholders (the original sqlite-style "?" fails).
    con = psycopg2.connect(os.environ.get("DATABASE_URL"))
    try:
        c = con.cursor()
        c.execute("UPDATE player SET experience=%s WHERE user_id=%s", (afterexp, user_id,))
        con.commit()
    finally:
        con.close()
    next_exp = afterexp
    next_level = int(math.sqrt(next_exp))
    # The original tested `<` then `>` and silently sent nothing when
    # next_exp hit the threshold exactly; reaching it IS a level-up.
    if next_exp < (current_level + 1) ** 2:
        return await ctx.send("<@{}>は`{}exp`を得た!".format(user_id, addexp))
    await ctx.send(
        "<@{}>は`{}exp`を得たそしてレベルアップした!`Lv.{} -> Lv.{}`".format(user_id, addexp, current_level, next_level))
@bot.command(name='item', aliases=['i'], pass_context=True, description=item_description)
async def item(ctx, item_name=""):
    """アイテムを使う"""
    # Entry point for item use; delegates to _item() under a simple
    # per-channel re-entrancy guard.
    channel_id = ctx.message.channel.id
    if "true" in kidou:
        busy = discord.Embed(description="現在起動中です。しばらくお待ちください。",
                             color=0xff0000)
        return await ctx.send(embed=busy)
    if channel_id in channel_in_transaction:
        return await ctx.send("`アイテム使用失敗。ゆっくりコマンドを打ってね。`")
    channel_in_transaction.append(channel_id)
    try:
        await _item(ctx, ctx.message.author.id, channel_id, item_name, ctx.message.mentions)
        conn.commit()
    finally:
        # Always release the guard, even if _item raised.
        channel_in_transaction.remove(channel_id)
async def _item(ctx, user_id, channel_id, item_name, mentions):
    """Dispatch one item use.

    Empty ``item_name`` lists the caller's inventory; otherwise the name (or
    its one-letter alias) selects elixir / fireball / pray.  The duplicated
    full-name and alias branches of the original are merged with ``in``.
    """
    if not item_name:
        # Inventory listing. The original chained .fetchall() onto
        # cursor.execute() (returns None → AttributeError) and used sqlite
        # "?" placeholders; psycopg2 needs "%s", and we close the connection.
        con = psycopg2.connect(os.environ.get("DATABASE_URL"))
        try:
            c = con.cursor()
            c.execute("SELECT item_id, count FROM item WHERE user_id=%s ORDER BY item_id",
                      (user_id,))
            my_items = c.fetchall()
        finally:
            con.close()
        item_list = "\n".join("{} : {}個".format(items[i[0]], i[1]) for i in my_items)
        user_name = bot.get_user(user_id).name
        up = discord.Color(random.randint(0, 0xFFFFFF))
        embed = discord.Embed(
            title=f"{user_name}が所有するアイテム",
            description=f"{item_list}",
            color=up
        )
        return await ctx.send(embed=embed)
    if item_name in ("エリクサー", "e"):
        up = discord.Color(random.randint(0, 0xFFFFFF))
        embed = discord.Embed(
            description=f"{elixir(user_id, channel_id)}",
            color=up
        )
        return await ctx.send(embed=embed)
    if item_name in ("ファイアボールの書", "f"):
        return await fireball(ctx, user_id, channel_id)
    if item_name in ("祈りの書", "i"):
        embed = discord.Embed(
            description=f"{pray(user_id, channel_id, mentions)}",
            color=0xff0000
        )
        return await ctx.send(embed=embed)
def elixir(user_id, channel_id):
    """Consume one elixir and fully heal every party member in the channel.

    Returns the message to show the user (also on failure).
    """
    if not consume_an_item(user_id, 1):
        return "<@{}>はエリクサーを持っていない!".format(user_id)
    # One connection for the whole heal. The original opened a new connection
    # per member inside the loop, chained .fetchall() onto cursor.execute()
    # (returns None → AttributeError) and used sqlite "?" placeholders.
    con = psycopg2.connect(os.environ.get("DATABASE_URL"))
    try:
        c = con.cursor()
        c.execute(
            "SELECT player.user_id, player.experience FROM in_battle, player WHERE in_battle.channel_id=%s AND player.user_id=in_battle.user_id",
            (channel_id,))
        in_battles = c.fetchall()
        for member_id, experience in in_battles:
            # Max HP = level * 5 + 50, level = isqrt(experience).
            full_hp = int(math.sqrt(experience)) * 5 + 50
            c.execute("UPDATE in_battle SET player_hp=%s WHERE user_id=%s", (full_hp, member_id,))
        con.commit()
    finally:
        con.close()
    return "<@{}>はエリクサーを使った!このチャンネルの仲間全員が全回復した!".format(user_id)
def _fireball_monster_name(channel_id, boss_level):
    """Resolve the display name of the channel's current monster.

    Mirrors the precedence of the original inline lookup: milestone levels
    (every MONSTER_NUM levels a "super boss", every 5 a "strong enemy")
    override the channel-specific specials and the normal monster table.
    """
    if boss_level % MONSTER_NUM == 0:
        table = tyoukyouteki
    elif boss_level % 5 == 0:
        table = kyouteki
    else:
        if channel_id in special_monster:
            return special_monster[channel_id]["name"]
        if channel_id in very_special_monster:
            return very_special_monster[channel_id]["name"]
        table = monsters
    con = psycopg2.connect(os.environ.get("DATABASE_URL"))
    try:
        c = con.cursor()
        # psycopg2 "%s" placeholder; original sqlite-style "?" would fail,
        # and .fetchone() was chained onto execute() (which returns None).
        c.execute("SELECT monster FROM channel_status WHERE channel_id=%s", (channel_id,))
        monster_num = c.fetchone()
    finally:
        con.close()
    return table[monster_num[0]]["name"]
async def fireball(ctx, user_id, channel_id):
    """Consume a fireball scroll and deal ranged damage to the boss."""
    player_hp, error_message = into_battle(ctx, user_id, channel_id)
    if error_message:
        embed = discord.Embed(description=error_message,
                              color=0xff0000)
        return await ctx.send(embed=embed)
    if not consume_an_item(user_id, 2):
        return await ctx.send("<@{}>はファイアボールの書を持っていない!".format(user_id))
    player_level = get_player_level(ctx, user_id)
    boss_level, boss_hp = get_boss_level_and_hp(channel_id)
    # Damage: 1x–2x of level/10 (integer).
    player_attack = int(player_level * (1 + random.random()) / 10)
    boss_hp = boss_hp - player_attack
    monster_name = _fireball_monster_name(channel_id, boss_level)
    user_name = bot.get_user(user_id).name
    attack_message = "ファイアボール!{}は{}に{}のダメージを与えた!".format(user_name, monster_name, player_attack)
    if boss_hp <= 0:
        win_message = win_process(ctx, channel_id, boss_level, monster_name)
        up = discord.Color(random.randint(0, 0xFFFFFF))
        embedwin = discord.Embed(title="戦闘結果",
                                 description=f"```{attack_message}```\n\n{win_message}",
                                 color=up)
        await ctx.send(embed=embedwin)
        await reset_battle(ctx, channel_id, level_up=True)
    else:
        # The original wrote through a global sqlite-style `conn.execute`
        # with "?" placeholders; persist via psycopg2 like the reads above.
        con = psycopg2.connect(os.environ.get("DATABASE_URL"))
        try:
            c = con.cursor()
            c.execute("UPDATE channel_status SET boss_hp=%s WHERE channel_id=%s", (boss_hp, channel_id,))
            con.commit()
        finally:
            con.close()
        up = discord.Color(random.randint(0, 0xFFFFFF))
        embed = discord.Embed(
            description="```{}\n{}のHP:{}/{}```".format(attack_message, monster_name, boss_hp, boss_level * 10 + 50),
            color=up
        )
        await ctx.send(embed=embed)
def pray(user_id, channel_id, mentions):
    """Revive a fallen party member using a 祈りの書 (book of prayer) item.

    Parameters
    ----------
    user_id : int
        Discord id of the user casting the prayer.
    channel_id : int
        Discord channel the battle is taking place in.
    mentions : list
        Message mentions; the first mentioned user is the revival target.

    Returns
    -------
    str
        A user-facing message: usage help, an error, or the success text.
    """
    if not mentions:
        # Usage help when no target is mentioned (no placeholder here, so
        # the original no-op .format(user_id) was dropped).
        return "祈りの書は仲間を復活させます。祈る相手を指定して使います。\n例)!!item 祈りの書 @ユーザー名"
    prayed_user_id = mentions[0].id
    con = psycopg2.connect(os.environ.get("DATABASE_URL"))
    c = con.cursor()
    # psycopg2 uses the %s paramstyle; '?' placeholders raise an error.
    # Also, cursor.execute() returns None, so the previous chained
    # `.execute(...).fetchone()` raised AttributeError.
    c.execute("SELECT player_hp FROM in_battle WHERE channel_id=%s and user_id=%s",
              (channel_id, prayed_user_id,))
    con.commit()
    prayed_user = c.fetchone()
    if not prayed_user:
        return "<@{}>は戦闘に参加していない!".format(prayed_user_id)
    if prayed_user[0] != 0:
        return "<@{}>はまだ生きている!".format(prayed_user_id)
    # NOTE(review): `ctx` is not defined in this function's scope, so this
    # call raises NameError at runtime. The caller's context must be passed
    # in -- TODO confirm the intended signature.
    player_hp, error_message = into_battle(ctx, user_id, channel_id)
    if error_message:
        return error_message
    if not consume_an_item(user_id, 3):
        return "<@{}>は祈りの書を持っていない!".format(user_id)
    con = psycopg2.connect(os.environ.get("DATABASE_URL"))
    c = con.cursor()
    c.execute("UPDATE in_battle SET player_hp=1 WHERE user_id=%s", (prayed_user_id,))
    con.commit()
    return "<@{0}>は祈りを捧げ、<@{1}>は復活した!\n<@{1}> 残りHP: 1".format(user_id, prayed_user_id)
@bot.command(description='上位10サーバーのランキングを表示する')
async def srank(ctx):
    """上位10サーバーのランキングを表示する"""
    # Legacy command: the per-guild boss-level ranking implementation that
    # used to live here (walking channel_status and grouping by guild) was
    # retired in favour of `!!ranking`; the dead commented-out code has been
    # removed. This now only points users at the replacement command.
    embed = Embed(description="`!!ranking`を使ってください")
    await ctx.send(embed=embed)
the value
rvkeys[rvals1_str].append(rvals2)
# create a map of combined keys
common_keys = {}
for lvkey in lvkeys.keys():
if (lvkey in rvkeys.keys()):
common_keys[lvkey] = 1
# for each type of join, merge the values
new_header_fields = []
# create the keys
for lkey in lkeys:
new_header_fields.append(lkey)
# print message for rkeys that are ignored
for rkey in rkeys:
if (rkey not in new_header_fields):
print ("INFO: rkey ignored from output:", rkey)
# add the left side columns
for i in range(len(self.header_fields)):
if (self.header_fields[i] not in lkeys):
if (lsuffix != ""):
new_header_fields.append(self.header_fields[i] + ":" + lsuffix)
else:
new_header_fields.append(self.header_fields[i])
# add the right side columns
for i in range(len(that.header_fields)):
if (that.header_fields[i] not in rkeys):
if (rsuffix != ""):
new_header_fields.append(that.header_fields[i] + ":" + rsuffix)
else:
if (that.header_fields[i] not in new_header_fields):
new_header_fields.append(that.header_fields[i])
else:
raise Exception("Duplicate key names found. Use lsuffix or rsuffix:", that.header_fields[i])
# construct new_header
new_header = "\t".join(new_header_fields)
# define the default lvalues
default_lvals = []
for h in self.header_fields:
if (h not in lkeys):
if (def_val_map != None and h in def_val_map.keys()):
default_lvals.append(def_val_map[h])
else:
default_lvals.append(default_val)
#default_lvals_str = "\t".join(default_lvals)
# define the default rvalues
default_rvals = []
for h in that.header_fields:
if (h not in rkeys):
if (def_val_map != None and h in def_val_map.keys()):
default_rvals.append(def_val_map[h])
else:
default_rvals.append(default_val)
#default_rvals_str = "\t".join(default_rvals)
# generate output by doing join
new_data = []
# iterate over left side
for line in self.data:
fields = line.split("\t")
lvals1 = []
for lkey in lkeys:
lval = fields[self.header_map[lkey]]
lvals1.append(lval)
lvals1_str = "\t".join(lvals1)
lvals2_arr = lvkeys[lvals1_str]
rvals2_arr = [default_rvals]
if (lvals1_str in rvkeys.keys()):
rvals2_arr = rvkeys[lvals1_str]
# do a MxN merge of left side values and right side values
for lvals2 in lvals2_arr:
for rvals2 in rvals2_arr:
# construct the new line
new_line = "\t".join(utils.merge_arrays([[lvals1_str], lvals2, rvals2]))
# take care of different join types
if (join_type == "inner"):
if (lvals1_str in common_keys.keys()):
new_data.append(new_line)
elif (join_type == "left_outer" or join_type == "left"):
new_data.append(new_line)
elif (join_type == "right_outer" or join_type == "right"):
if (lvals1_str in common_keys.keys()):
new_data.append(new_line)
elif (join_type == "full_outer" or join_type == "outer"):
new_data.append(new_line)
else:
raise Exception("Unknown join type:", join_type)
# iterate over right side
for line in that.data:
fields = line.split("\t")
rvals1 = []
for rkey in rkeys:
rval = fields[that.header_map[rkey]]
rvals1.append(rval)
rvals1_str = "\t".join(rvals1)
rvals2_arr = rvkeys[rvals1_str]
lvals2_arr = [default_lvals]
if (rvals1_str in lvkeys.keys()):
lvals2_arr = lvkeys[rvals1_str]
# MxN loop for multiple rows on left and right side
for lvals2 in lvals2_arr:
for rvals2 in rvals2_arr:
# construct the new line
new_line = "\t".join(utils.merge_arrays([[rvals1_str], lvals2, rvals2]))
# take care of different join types
if (join_type == "inner"):
pass
elif (join_type == "left_outer" or join_type == "left"):
pass
elif (join_type == "right_outer" or join_type == "right"):
if (rvals1_str not in common_keys.keys()):
new_data.append(new_line)
elif (join_type == "full_outer" or join_type == "outer"):
if (rvals1_str not in common_keys.keys()):
new_data.append(new_line)
else:
raise Exception("Unknown join type:", join_type)
return TSV(new_header, new_data)
# TODO: check this implementation
def natural_join(self, that):
    """Join this TSV with `that` on every column name the two share.

    The result has the shared (grouping) columns first, followed by the
    non-key columns of self and then those of that. Both sides must have
    the same number of distinct grouping-key rows.

    NOTE(review): if a key present in `that` is absent from `self`, the
    combine step below raises KeyError -- the row-count check does not
    guarantee the key *sets* match. TODO confirm intended behaviour.
    """
    # find the list of columns that are common
    grouping_cols = []
    for k in self.header.split("\t"):
        if (k in that.header_map.keys()):
            grouping_cols.append(k)
    # create a set for O(1) membership checks below
    grouping_cols_set = set(grouping_cols)
    # validation
    if (len(grouping_cols) == 0):
        raise Exception("No grouping columns found:", self.header_fields, that.header_fields)
    # number of rows should be unique
    uniq_rows_1 = self.select(grouping_cols, inherit_message = "natural_join:this").distinct().num_rows()
    uniq_rows_2 = that.select(grouping_cols, inherit_message = "natural_join:that").distinct().num_rows()
    if (uniq_rows_1 != uniq_rows_2):
        raise Exception("Number of rows with grouping keys should be exactly the same:", uniq_rows_1, uniq_rows_2)
    # append the cols: keys first, then self's value columns, then that's
    new_header_fields = []
    for k in grouping_cols:
        new_header_fields.append(k)
    for h in self.header.split("\t"):
        if (h not in grouping_cols_set):
            new_header_fields.append(h)
    for h in that.header.split("\t"):
        if (h not in grouping_cols_set):
            new_header_fields.append(h)
    new_header = "\t".join(new_header_fields)
    # convert both tsvs to hashmaps (one dict per row)
    maps_1 = self.__convert_to_maps__()
    maps_2 = that.__convert_to_maps__()
    # join
    combined = {}
    # iterate over all rows of maps_1: tab-joined key -> tab-joined values
    for mp in maps_1:
        keys = []
        vs = []
        for k in self.header_fields:
            if (k in grouping_cols_set):
                keys.append(str(mp[k]))
            else:
                vs.append(str(mp[k]))
        keys_str = "\t".join(keys)
        vs_str = "\t".join(vs)
        combined[keys_str] = vs_str
    # iterate over all rows of maps_2, appending that's values to the entry
    for mp in maps_2:
        keys = []
        vs = []
        for k in that.header_fields:
            if (k in grouping_cols_set):
                keys.append(str(mp[k]))
            else:
                vs.append(str(mp[k]))
        keys_str = "\t".join(keys)
        vs_str = "\t".join(vs)
        # FIXME: this is prone to empty strings
        combined[keys_str] = combined[keys_str] + "\t" + vs_str
    # iterate over combined and emit one output line per key
    new_data = []
    for k, v in combined.items():
        new_data.append(k + "\t" + v)
    return TSV(new_header, new_data)
def cumulative_sum(self, col, new_col, as_int = True):
    """Append a running total of `col` as a new column named `new_col`.

    The total is accumulated as a float; when `as_int` is True each emitted
    value is truncated to int. Returns a new TSV.
    """
    # validate that the source column exists and the target does not
    if (col not in self.header_map.keys()):
        raise Exception("Column not found:", str(col), str(self.header_fields))
    if (new_col in self.header_map.keys()):
        raise Exception("New column already exists:", str(new_col), str(self.header_fields))
    # extended header with the new column appended
    new_header = self.header + "\t" + new_col
    source_index = self.header_map[col]
    running_total = 0
    output_rows = []
    for row in self.data:
        running_total += float(row.split("\t")[source_index])
        rendered = str(int(running_total)) if as_int == True else str(running_total)
        output_rows.append(row + "\t" + rendered)
    return TSV(new_header, output_rows)
def replicate_rows(self, col, new_col = None, max_repl = 0):
    """Duplicate each row int(row[col]) times.

    Every replica gets "1" in the new column `new_col` (defaulting to
    "<col>:replicate_rows"). If max_repl > 0, a replication count above it
    raises. Returns a new TSV.
    """
    # source column must exist
    if (col not in self.header_map.keys()):
        raise Exception("Column not found:", str(col), str(self.header_fields))
    # derive the new column name when not supplied
    if (new_col == None):
        new_col = "{}:replicate_rows".format(col)
    # the new column must not clash with an existing one
    if (new_col in self.header_map.keys()):
        raise Exception("New Column already exists:", str(new_col), str(self.header_fields))
    new_header = self.header + "\t" + new_col
    col_idx = self.header_map[col]
    replicated = []
    for row in self.data:
        repeat_count = int(row.split("\t")[col_idx])
        # guard against runaway replication
        if (max_repl > 0 and repeat_count > max_repl):
            raise Exception("repl_value more than max_repl:", repeat_count, max_repl)
        replicated.extend([row + "\t" + "1"] * repeat_count)
    return TSV(new_header, replicated)
# TODO: Need better naming. The suffix semantics have been changed.
def explode(self, cols, exp_func, prefix, default_val = None, collapse = True, inherit_message = ""):
# get matching column and indexes
matching_cols = self.__get_matching_cols__(cols)
indexes = self.__get_col_indexes__(matching_cols)
# iterate
exploded_values = []
counter = 0
for line in self.data:
# progress
counter = counter + 1
utils.report_progress("explode: [1/2] calling explode functions", inherit_message, counter, len(self.data))
# process data
fields = line.split("\t")
col_values_map = {}
for i in indexes:
col_values_map[self.header_fields[i]] = fields[i]
exploded_values.append(exp_func(col_values_map))
# get the list of keys
exploded_keys = {}
for exploded_value_list_map in exploded_values:
for evm in exploded_value_list_map:
for k, v in evm.items():
# validation
if (len(k) == 0):
raise Exception("Invalid key in the hashmap:{}: {}".format(k, v))
exploded_keys[k] = 1
# create an ordered list of keys
exploded_keys_sorted = sorted(list(exploded_keys.keys()))
# new header and data
new_data = []
# create header
new_header_fields = []
if (collapse == True):
for j in range(len(self.header_fields)):
if (j not in indexes):
new_header_fields.append(self.header_fields[j])
else:
# take care of not referencing self.header_fields
for h in self.header_fields:
new_header_fields.append(h)
# create new names based on suffix
exploded_keys_new_names = []
# append new names to the exploded keys
for e in exploded_keys_sorted:
exploded_keys_new_names.append(prefix + ":" + e)
# check if any of new keys clash with old columns
for k in exploded_keys_new_names:
if (k in self.get_header_fields()):
raise Exception("Column already exist:", k, str(self.header_fields))
# append to the new_header_fields
for h in exploded_keys_new_names:
new_header_fields.append(h)
new_header = "\t".join(new_header_fields)
# iterate and generate new data
utils.print_code_todo_warning("explode: Verify this logic is not breaking anything. check TODO")
counter = 0
for i in range(len(self.data)):
# progress
counter = counter + 1
utils.report_progress("explode: [2/2] generating data", inherit_message, counter, len(self.data))
# process data
line = self.data[i]
fields = line.split("\t")
# get the new list of fields
new_fields = []
if (collapse == True):
for j in range(len(fields)):
if (j not in indexes):
new_fields.append(fields[j])
else:
# take care of not messing up with old fields
for f in fields:
new_fields.append(f)
# take care of this as the new_fields can be empty. TODO
#new_line = "\t".join(new_fields) | |
# <gh_stars>1-10  (dataset artifact -- not valid Python; commented out)
from __future__ import absolute_import
"""
API operations for Workflows
"""
import logging
from sqlalchemy import desc
from galaxy import util
from galaxy import web
from galaxy import model
from galaxy.tools.parameters import visit_input_values, DataToolParameter, RuntimeValue
from galaxy.web.base.controller import BaseAPIController, url_for
from galaxy.workflow.modules import module_factory, ToolModule
from galaxy.jobs.actions.post import ActionBox
from galaxy.model.item_attrs import UsesAnnotations
from ..controllers.workflow import attach_ordered_steps
log = logging.getLogger(__name__)
class WorkflowsAPIController(BaseAPIController, UsesAnnotations):
@web.expose_api
def index(self, trans, **kwd):
    """
    GET /api/workflows
    Displays a collection of workflows.
    """
    results = []
    # Workflows owned by the current user, newest first.
    owned = trans.sa_session.query(trans.app.model.StoredWorkflow).filter_by(
        user=trans.user, deleted=False).order_by(
        desc(trans.app.model.StoredWorkflow.table.c.update_time)).all()
    for wf in owned:
        item = wf.get_api_value(value_mapper={'id': trans.security.encode_id})
        item['url'] = url_for('workflow', id=trans.security.encode_id(wf.id))
        results.append(item)
    # Workflows shared with the current user, newest first.
    shared = trans.sa_session.query(trans.app.model.StoredWorkflowUserShareAssociation).filter_by(
        user=trans.user).join('stored_workflow').filter(
        trans.app.model.StoredWorkflow.deleted == False).order_by(
        desc(trans.app.model.StoredWorkflow.update_time)).all()
    for share in shared:
        item = share.stored_workflow.get_api_value(value_mapper={'id': trans.security.encode_id})
        item['url'] = url_for('workflow', id=trans.security.encode_id(share.stored_workflow.id))
        results.append(item)
    return results
@web.expose_api
def show(self, trans, id, **kwd):
    """
    GET /api/workflows/{encoded_workflow_id}
    Displays information needed to run a workflow from the command line.
    """
    workflow_id = id
    try:
        decoded_workflow_id = trans.security.decode_id(workflow_id)
    except TypeError:
        trans.response.status = 400
        return "Malformed workflow id ( %s ) specified, unable to decode." % str(workflow_id)
    stored_workflow = trans.sa_session.query(trans.app.model.StoredWorkflow).get(decoded_workflow_id)
    # Explicit None check replaces the previous bare `except:` which relied
    # on an AttributeError to detect a missing workflow -- and silently
    # swallowed every other error (including the permission 400) as well.
    if stored_workflow is None:
        trans.response.status = 400
        return "That workflow does not exist."
    # Access control: owner, admin, or explicitly shared-with user.
    if stored_workflow.user != trans.user and not trans.user_is_admin():
        if trans.sa_session.query(trans.app.model.StoredWorkflowUserShareAssociation).filter_by(user=trans.user, stored_workflow=stored_workflow).count() == 0:
            trans.response.status = 400
            return ("Workflow is not owned by or shared with current user")
    item = stored_workflow.get_api_value(view='element', value_mapper={'id': trans.security.encode_id})
    item['url'] = url_for('workflow', id=workflow_id)
    latest_workflow = stored_workflow.latest_workflow
    # Expose the data-input steps so a CLI caller knows what to supply.
    inputs = {}
    for step in latest_workflow.steps:
        if step.type == 'data_input':
            inputs[step.id] = {'label': step.tool_inputs['name'], 'value': ""}
        # Eventually, allow regular tool parameters to be inserted and
        # modified at runtime.
        # p = step.get_required_parameters()
    item['inputs'] = inputs
    # Describe each step and how its inputs connect to other steps.
    steps = {}
    for step in latest_workflow.steps:
        steps[step.id] = {'id': step.id,
                          'type': step.type,
                          'tool_id': step.tool_id,
                          'input_steps': {}}
        for conn in step.input_connections:
            steps[step.id]['input_steps'][conn.input_name] = {'source_step': conn.output_step_id,
                                                              'step_output': conn.output_name}
    item['steps'] = steps
    return item
@web.expose_api
def create(self, trans, payload, **kwd):
    """
    POST /api/workflows
    We're not creating workflows from the api. Just execute for now.
    However, we will import them if installed_repository_file is specified
    """
    # ------------------------------------------------------------------------------- #
    ### RPARK: dictionary containing which workflows to change and edit ###
    # Maps tool_id -> {'param': <name>, 'value': <new value>} for runtime
    # parameter overrides applied to matching tool steps below.
    param_map = {};
    if (payload.has_key('parameters') ):
        param_map = payload['parameters'];
    # ------------------------------------------------------------------------------- #

    if 'workflow_id' not in payload:
        # create new
        if 'installed_repository_file' in payload:
            workflow_controller = trans.webapp.controllers[ 'workflow' ]
            result = workflow_controller.import_workflow( trans=trans,
                                                          cntrller='api',
                                                          **payload)
            return result
        trans.response.status = 403
        return "Either workflow_id or installed_repository_file must be specified"
    if 'installed_repository_file' in payload:
        trans.response.status = 403
        return "installed_repository_file may not be specified with workflow_id"
    # Fetch the stored workflow and verify ownership / sharing.
    stored_workflow = trans.sa_session.query(self.app.model.StoredWorkflow).get(
        trans.security.decode_id(payload['workflow_id']))
    if stored_workflow.user != trans.user and not trans.user_is_admin():
        if trans.sa_session.query(trans.app.model.StoredWorkflowUserShareAssociation).filter_by(user=trans.user, stored_workflow=stored_workflow).count() == 0:
            trans.response.status = 400
            return("Workflow is not owned by or shared with current user")
    workflow = stored_workflow.latest_workflow
    # Target history: either an existing one ("hist_id=<encoded id>") or a
    # brand new history named by the payload.
    if payload['history'].startswith('hist_id='):
        #Passing an existing history to use.
        history = trans.sa_session.query(self.app.model.History).get(
            trans.security.decode_id(payload['history'][8:]))
        if history.user != trans.user and not trans.user_is_admin():
            trans.response.status = 400
            return "Invalid History specified."
    else:
        history = self.app.model.History(name=payload['history'], user=trans.user)
        trans.sa_session.add(history)
        trans.sa_session.flush()
    # Resolve every input dataset (src 'ldda', 'ld' or 'hda') into an HDA,
    # checking dataset access permissions via assertions.
    ds_map = payload['ds_map']
    add_to_history = 'no_add_to_history' not in payload
    for k in ds_map:
        try:
            if ds_map[k]['src'] == 'ldda':
                ldda = trans.sa_session.query(self.app.model.LibraryDatasetDatasetAssociation).get(
                    trans.security.decode_id(ds_map[k]['id']))
                assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset )
                hda = ldda.to_history_dataset_association(history, add_to_history=add_to_history)
            elif ds_map[k]['src'] == 'ld':
                ldda = trans.sa_session.query(self.app.model.LibraryDataset).get(
                    trans.security.decode_id(ds_map[k]['id'])).library_dataset_dataset_association
                assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset )
                hda = ldda.to_history_dataset_association(history, add_to_history=add_to_history)
            elif ds_map[k]['src'] == 'hda':
                # Get dataset handle, add to dict and history if necessary
                hda = trans.sa_session.query(self.app.model.HistoryDatasetAssociation).get(
                    trans.security.decode_id(ds_map[k]['id']))
                assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), hda.dataset )
            else:
                trans.response.status = 400
                return "Unknown dataset source '%s' specified." % ds_map[k]['src']
            if add_to_history and hda.history != history:
                hda = hda.copy()
                history.add_dataset(hda)
            ds_map[k]['hda'] = hda
        except AssertionError:
            trans.response.status = 400
            return "Invalid Dataset '%s' Specified" % ds_map[k]['id']
    # Sanity-check the workflow itself before attempting to run it.
    if not workflow:
        trans.response.status = 400
        return "Workflow not found."
    if len( workflow.steps ) == 0:
        trans.response.status = 400
        return "Workflow cannot be run because it does not have any steps"
    if workflow.has_cycles:
        trans.response.status = 400
        return "Workflow cannot be run because it contains cycles"
    if workflow.has_errors:
        trans.response.status = 400
        return "Workflow cannot be run because of validation errors in some steps"
    # Build the state for each step
    rval = {}
    for step in workflow.steps:
        step_errors = None
        if step.type == 'tool' or step.type is None:
            step.module = module_factory.from_workflow_step( trans, step )
            # Check for missing parameters
            step.upgrade_messages = step.module.check_and_update_state()
            # Any connected input needs to have value DummyDataset (these
            # are not persisted so we need to do it every time)
            step.module.add_dummy_datasets( connections=step.input_connections )
            step.state = step.module.state
            ####################################################
            ####################################################
            # RPARK: IF TOOL_NAME IN PARAMETER MAP #
            # Apply the runtime parameter override requested in the payload.
            if step.tool_id in param_map:
                change_param = param_map[step.tool_id]['param'];
                change_value = param_map[step.tool_id]['value'];
                step.state.inputs[change_param] = change_value;
            ####################################################
            ####################################################
            if step.tool_errors:
                trans.response.status = 400
                return "Workflow cannot be run because of validation errors in some steps: %s" % step_errors
            if step.upgrade_messages:
                trans.response.status = 400
                return "Workflow cannot be run because of step upgrade messages: %s" % step.upgrade_messages
        else:
            # This is an input step. Make sure we have an available input.
            if step.type == 'data_input' and str(step.id) not in ds_map:
                trans.response.status = 400
                return "Workflow cannot be run because an expected input step '%s' has no input dataset." % step.id
            step.module = module_factory.from_workflow_step( trans, step )
            step.state = step.module.get_runtime_state()
        step.input_connections_by_name = dict( ( conn.input_name, conn ) for conn in step.input_connections )
    # Run each step, connecting outputs to inputs
    workflow_invocation = self.app.model.WorkflowInvocation()
    workflow_invocation.workflow = workflow
    outputs = util.odict.odict()
    rval['history'] = trans.security.encode_id(history.id)
    rval['outputs'] = []
    for i, step in enumerate( workflow.steps ):
        job = None
        if step.type == 'tool' or step.type is None:
            tool = self.app.toolbox.get_tool( step.tool_id )

            # Map each connected data input to the output of the upstream step.
            def callback( input, value, prefixed_name, prefixed_label ):
                if isinstance( input, DataToolParameter ):
                    if prefixed_name in step.input_connections_by_name:
                        conn = step.input_connections_by_name[ prefixed_name ]
                        return outputs[ conn.output_step.id ][ conn.output_name ]
            visit_input_values( tool.inputs, step.state.inputs, callback )
            job, out_data = tool.execute( trans, step.state.inputs, history=history)
            outputs[ step.id ] = out_data
            # Immediate post-job actions run now; the rest attach to the job.
            for pja in step.post_job_actions:
                if pja.action_type in ActionBox.immediate_actions:
                    ActionBox.execute(self.app, trans.sa_session, pja, job, replacement_dict=None)
                else:
                    job.add_post_job_action(pja)
            for v in out_data.itervalues():
                rval['outputs'].append(trans.security.encode_id(v.id))
        else:
            #This is an input step. Use the dataset inputs from ds_map.
            job, out_data = step.module.execute( trans, step.state)
            outputs[step.id] = out_data
            outputs[step.id]['output'] = ds_map[str(step.id)]['hda']
        # Record each step of the invocation for provenance.
        workflow_invocation_step = self.app.model.WorkflowInvocationStep()
        workflow_invocation_step.workflow_invocation = workflow_invocation
        workflow_invocation_step.workflow_step = step
        workflow_invocation_step.job = job
    trans.sa_session.add( workflow_invocation )
    trans.sa_session.flush()
    return rval
# ---------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------------------------- #
# ---- RPARK EDITS ---- #
# ---------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------------------------- #
@web.expose_api
# NOTE(review): a corrupted line ("<EMAIL>") stood here -- it was not valid
# Python and appears to be a redacted decorator or author tag; commented out
# so the module can be parsed. TODO: recover the original line.
def workflow_dict(self, trans, workflow_id, **kwd):
    """
    GET /api/workflows/{encoded_workflow_id}/download
    Returns a selected workflow as a json dictionary.
    """
    try:
        stored_workflow = trans.sa_session.query(self.app.model.StoredWorkflow).get(trans.security.decode_id(workflow_id))
    except Exception as e:
        return ("Workflow with ID='%s' can not be found\n Exception: %s") % (workflow_id, str(e))
    # check to see if user has permissions to selected workflow
    if stored_workflow.user != trans.user and not trans.user_is_admin():
        if trans.sa_session.query(trans.app.model.StoredWorkflowUserShareAssociation).filter_by(user=trans.user, stored_workflow=stored_workflow).count() == 0:
            trans.response.status = 400
            return ("Workflow is not owned by or shared with current user")
    return self._workflow_to_dict(trans, stored_workflow)
@web.expose_api
def delete(self, trans, id, **kwd):
    """
    DELETE /api/workflows/{encoded_workflow_id}
    Deletes a specified workflow
    Author: rpark
    copied from galaxy.web.controllers.workflows.py (delete)
    """
    workflow_id = id
    try:
        stored_workflow = trans.sa_session.query(self.app.model.StoredWorkflow).get(trans.security.decode_id(workflow_id))
    except Exception as e:
        return ("Workflow with ID='%s' can not be found\n Exception: %s") % (workflow_id, str(e))
    # check to see if user has permissions to selected workflow
    if stored_workflow.user != trans.user and not trans.user_is_admin():
        if trans.sa_session.query(trans.app.model.StoredWorkflowUserShareAssociation).filter_by(user=trans.user, stored_workflow=stored_workflow).count() == 0:
            trans.response.status = 400
            return ("Workflow is not owned by or shared with current user")
    # Mark the workflow as deleted (soft delete -- the row is kept).
    stored_workflow.deleted = True
    trans.sa_session.flush()
    # TODO: Unsure of response message to let api know that a workflow was successfully deleted
    return ("Workflow '%s' successfully deleted" % stored_workflow.name)
@web.expose_api
def import_new_workflow(self, trans, payload, **kwd):
    """
    POST /api/workflows/upload
    Importing dynamic workflows from the api. Return newly generated workflow id.
    Author: rpark
    # currently assumes payload['workflow'] is a json representation of a workflow to be inserted into the database
    """
    data = payload['workflow']
    workflow, missing_tool_tups = self._workflow_from_dict(trans, data, source="API")
    # api-encoded id of the newly created workflow
    encoded_id = trans.security.encode_id(workflow.id)
    # Build the response item (the previous `rval` list was constructed but
    # never returned -- dead code, removed).
    item = workflow.get_api_value(value_mapper={'id': trans.security.encode_id})
    item['url'] = url_for('workflow', id=encoded_id)
    return item
def _workflow_from_dict( self, trans, | |
struct.s_snapshot_id = 0
struct.s_snapshot_r_blocks_count = 0
struct.s_snapshot_list = 0
struct.s_error_count = 0
struct.s_first_error_time = 0
struct.s_first_error_ino = 0
struct.s_first_error_block = 0
struct.s_first_error_func = 0
struct.s_first_error_line = 0
struct.s_last_error_time = 0
struct.s_last_error_ino = 0
struct.s_last_error_line = 0
struct.s_last_error_block = 0
struct.s_last_error_func = 0
struct.s_mount_opts = 0
struct.s_usr_quota_inum = 0
struct.s_grp_quota_inum = 0
struct.s_overhead_blocks = 0
struct.s_backup_bgs = 0
struct.s_encrypt_algos = 0
struct.s_encrypt_pw_salt = 0
struct.s_lpf_ino = 0
struct.s_prj_quota_inum = 0
struct.s_checksum_seed = 0
struct.s_reserved = 0
struct.s_checksum = 0
if (struct.s_feature_incompat & ext4_superblock.INCOMPAT_64BIT) == 0:
struct.s_desc_size = ext4_superblock.EXT2_DESC_SIZE
return struct
class ext4_xattr_entry(ext4_struct):
    """On-disk layout of one extended-attribute entry (struct ext4_xattr_entry).

    The fixed part is 0x10 bytes; the attribute name ("e_name") follows
    immediately after it, which is why _from_buffer_copy reads it separately.
    """
    _fields_ = [
        ("e_name_len", ctypes.c_ubyte),    # 0x00
        ("e_name_index", ctypes.c_ubyte),  # 0x01
        ("e_value_offs", ctypes.c_ushort), # 0x02
        ("e_value_inum", ctypes.c_uint),   # 0x04
        ("e_value_size", ctypes.c_uint),   # 0x08
        ("e_hash", ctypes.c_uint)          # 0x0C
        # Variable length field "e_name" missing at 0x10
    ]

    # NOTE: invoked as ext4_xattr_entry._from_buffer_copy(raw, offset, ...);
    # in Python 3 a plain function attribute behaves like a staticmethod here.
    def _from_buffer_copy(raw, offset=0, platform64=True):
        struct = ext4_xattr_entry.from_buffer_copy(raw, offset)
        # e_name starts right after the fixed 0x10-byte header.
        struct.e_name = raw[offset + 0x10: offset + 0x10 + struct.e_name_len]
        return struct

    @property
    def _size(self): return 4 * ((ctypes.sizeof(type(self)) + self.e_name_len + 3) // 4) # 4-byte alignment
class ext4_xattr_header(ext4_struct):
    """Header of an extended-attribute block (struct ext4_xattr_header)."""
    _fields_ = [
        ("h_magic", ctypes.c_uint),        # 0x0, Must be 0xEA020000
        ("h_refcount", ctypes.c_uint),     # 0x4
        ("h_blocks", ctypes.c_uint),       # 0x8
        ("h_hash", ctypes.c_uint),         # 0xC
        ("h_checksum", ctypes.c_uint),     # 0x10
        ("h_reserved", ctypes.c_uint * 3), # 0x14
    ]
class ext4_xattr_ibody_header(ext4_struct):
    """Header preceding in-inode extended attributes (struct ext4_xattr_ibody_header)."""
    _fields_ = [
        ("h_magic", ctypes.c_uint)  # 0x0, Must be 0xEA020000
    ]
class InodeType:
    """File-type constants as used by directory entries in this module."""
    UNKNOWN = 0x0           # Unknown file type
    FILE = 0x1              # Regular file
    DIRECTORY = 0x2         # Directory
    CHARACTER_DEVICE = 0x3  # Character device
    BLOCK_DEVICE = 0x4      # Block device
    FIFO = 0x5              # FIFO
    SOCKET = 0x6            # Socket
    SYMBOLIC_LINK = 0x7     # Symbolic link
    CHECKSUM = 0xDE         # Checksum entry; not really a file type, but a type of directory entry
# ----------------------------- HIGH LEVEL ------------------------------
class MappingEntry:
    """A contiguous run of file blocks mapped onto disk blocks.

    Describes `block_count` consecutive blocks starting at `file_block_idx`
    (file-relative) stored at the run beginning at `disk_block_idx`
    (volume-relative).
    """

    def __init__(self, file_block_idx, disk_block_idx, block_count=1):
        self.file_block_idx = file_block_idx
        self.disk_block_idx = disk_block_idx
        self.block_count = block_count

    def __iter__(self):
        # Unpackable as (file_block_idx, disk_block_idx, block_count).
        return iter((self.file_block_idx, self.disk_block_idx, self.block_count))

    def __repr__(self):
        return "{type:s}({file_block_idx!r:s}, {disk_block_idx!r:s}, {blocK_count!r:s})".format(
            blocK_count=self.block_count,
            disk_block_idx=self.disk_block_idx,
            file_block_idx=self.file_block_idx,
            type=type(self).__name__
        )

    def copy(self):
        # Shallow duplicate with the same three indices.
        return MappingEntry(self.file_block_idx, self.disk_block_idx, self.block_count)
def create_mapping(*entries):
    """Build a list of MappingEntry from (disk_block_idx, block_count) pairs.

    File block indices are assigned sequentially, so each pair describes the
    run that starts right where the previous one ended.
    """
    mapping = []
    next_file_block = 0
    for disk_block_idx, block_count in entries:
        mapping.append(MappingEntry(next_file_block, disk_block_idx, block_count))
        next_file_block += block_count
    return mapping
def optimize(entries):
    """Coalesce adjacent mapping entries in place.

    Sorts `entries` by file_block_idx, then repeatedly folds the next entry
    into the current one whenever both its file run and its disk run continue
    exactly where the current entry ends.
    """
    entries.sort(key=lambda e: e.file_block_idx)
    pos = 0
    while pos < len(entries):
        cur = entries[pos]
        while pos + 1 < len(entries):
            nxt = entries[pos + 1]
            if (cur.file_block_idx + cur.block_count != nxt.file_block_idx
                    or cur.disk_block_idx + cur.block_count != nxt.disk_block_idx):
                break
            # Merge the neighbour into the current entry and drop it.
            cur.block_count += entries.pop(pos + 1).block_count
        pos += 1
class Volume:
    """High-level, read-only view of an ext4 volume backed by a byte stream."""

    # Inode number of the root directory in ext filesystems.
    ROOT_INODE = 2

    def __init__(self, stream, offset=0, ignore_flags=False, ignore_magic=False):
        """Parse the superblock and group descriptor table from `stream`.

        `offset` is the byte position of the volume within the stream;
        `ignore_magic` skips the 0xEF53 superblock magic check.
        """
        self.ignore_flags = ignore_flags
        self.ignore_magic = ignore_magic
        self.offset = offset
        self.platform64 = True # Initial value needed for Volume.read_struct
        self.stream = stream

        # Superblock lives at byte offset 0x400 from the volume start.
        self.superblock = self.read_struct(ext4_superblock, 0x400)
        # 64-bit structures are used only when INCOMPAT_64BIT is set.
        self.platform64 = (self.superblock.s_feature_incompat & ext4_superblock.INCOMPAT_64BIT) != 0

        if not ignore_magic and self.superblock.s_magic != 0xEF53:
            raise MagicError("Invalid magic value in superblock: 0x{magic:04X} (expected 0xEF53)".format(
                magic=self.superblock.s_magic))

        # Group descriptors: one table entry per block group, located in the
        # first block after the superblock.
        self.group_descriptors = [None] * (self.superblock.s_inodes_count // self.superblock.s_inodes_per_group)

        group_desc_table_offset = (0x400 // self.block_size + 1) * self.block_size # First block after superblock
        for group_desc_idx in range(len(self.group_descriptors)):
            group_desc_offset = group_desc_table_offset + group_desc_idx * self.superblock.s_desc_size
            self.group_descriptors[group_desc_idx] = self.read_struct(ext4_group_descriptor, group_desc_offset)

    def __repr__(self):
        return "{type_name:s}(volume_name = {volume_name!r:s}, uuid = {uuid!r:s}, last_mounted = {last_mounted!r:s})".format(
            last_mounted=self.superblock.s_last_mounted,
            type_name=type(self).__name__,
            uuid=self.uuid,
            volume_name=self.superblock.s_volume_name
        )

    @property
    def block_size(self):
        # s_log_block_size stores log2(block size) - 10.
        return 1 << (10 + self.superblock.s_log_block_size)

    def get_inode(self, inode_idx, file_type=InodeType.UNKNOWN):
        """Return the Inode with number `inode_idx` (1-based, ext convention)."""
        group_idx, inode_table_entry_idx = self.get_inode_group(inode_idx)
        inode_table_offset = self.group_descriptors[group_idx].bg_inode_table * self.block_size
        inode_offset = inode_table_offset + inode_table_entry_idx * self.superblock.s_inode_size
        return Inode(self, inode_offset, inode_idx, file_type)

    def get_inode_group(self, inode_idx):
        """Map an inode number to (block group index, index within the group's inode table)."""
        group_idx = (inode_idx - 1) // self.superblock.s_inodes_per_group
        inode_table_entry_idx = (inode_idx - 1) % self.superblock.s_inodes_per_group
        return (group_idx, inode_table_entry_idx)

    def read(self, offset, byte_len):
        """Read `byte_len` bytes at volume-relative `offset`, seeking only when needed."""
        if self.offset + offset != self.stream.tell():
            self.stream.seek(self.offset + offset, io.SEEK_SET)
        return self.stream.read(byte_len)

    def read_struct(self, structure, offset, platform64=None):
        """Read and parse `structure` at `offset`, honoring a custom _from_buffer_copy parser."""
        raw = self.read(offset, ctypes.sizeof(structure))

        if hasattr(structure, "_from_buffer_copy"):
            return structure._from_buffer_copy(raw, platform64=platform64 if platform64 != None else self.platform64)
        else:
            return structure.from_buffer_copy(raw)

    @property
    def root(self):
        # The root directory inode ("/").
        return self.get_inode(Volume.ROOT_INODE, InodeType.DIRECTORY)

    @property
    def uuid(self):
        """Volume UUID formatted in the canonical 8-4-4-4-12 hexadecimal form."""
        uuid = self.superblock.s_uuid
        uuid = [uuid[:4], uuid[4: 6], uuid[6: 8], uuid[8: 10], uuid[10:]]
        return "-".join("".join("{0:02X}".format(c) for c in part) for part in uuid)
class Inode:
    def __init__(self, volume, offset, inode_idx, file_type=InodeType.UNKNOWN):
        """Parse the ext4_inode structure at ``offset`` within ``volume``.

        ``inode_idx`` is the 1-based inode number; ``file_type`` is the type
        reported by the directory entry that referenced this inode.
        """
        self.inode_idx = inode_idx
        self.offset = offset
        self.volume = volume
        self.file_type = file_type
        self.inode = volume.read_struct(ext4_inode, offset)
    def __len__(self):
        # File size in bytes as recorded in the inode structure.
        return self.inode.i_size
def __repr__(self):
if self.inode_idx != None:
return "{type_name:s}(inode_idx = {inode!r:s}, offset = 0x{offset:X}, volume_uuid = {uuid!r:s})".format(
inode=self.inode_idx,
offset=self.offset,
type_name=type(self).__name__,
uuid=self.volume.uuid
)
else:
return "{type_name:s}(offset = 0x{offset:X}, volume_uuid = {uuid!r:s})".format(
offset=self.offset,
type_name=type(self).__name__,
uuid=self.volume.uuid
)
def _parse_xattrs(self, raw_data, offset, prefix_override={}):
prefixes = {
0: "",
1: "user.",
2: "system.posix_acl_access",
3: "system.posix_acl_default",
4: "trusted.",
6: "security.",
7: "system.",
8: "system.richacl"
}
prefixes.update(prefixes)
# Iterator over ext4_xattr_entry structures
i = 0
while i < len(raw_data):
xattr_entry = ext4_xattr_entry._from_buffer_copy(raw_data, i, platform64=self.volume.platform64)
if (
xattr_entry.e_name_len | xattr_entry.e_name_index | xattr_entry.e_value_offs | xattr_entry.e_value_inum) == 0:
# End of ext4_xattr_entry list
break
if not xattr_entry.e_name_index in prefixes:
raise Ext4Error("Unknown attribute prefix {prefix:d} in inode {inode:d}".format(
inode=self.inode_idx,
prefix=xattr_entry.e_name_index
))
xattr_name = prefixes[xattr_entry.e_name_index] + xattr_entry.e_name.decode("iso-8859-2")
if xattr_entry.e_value_inum != 0:
# external xattr
xattr_inode = self.volume.get_inode(xattr.e_value_inum, InodeType.FILE)
if not self.volume.ignore_flags and (xattr_inode.inode.i_flags & ext4_inode.EXT4_EA_INODE_FL) != 0:
raise Ext4Error(
"Inode {value_indoe:d} associated with the extended attribute {xattr_name!r:s} of inode {inode:d} is not marked as large extended attribute value.".format(
inode=self.inode_idx,
value_inode=xattr_inode.inode_idx,
xattr_name=xattr_name
))
# TODO Use xattr_entry.e_value_size or xattr_inode.inode.i_size?
xattr_value = xattr_inode.open_read().read()
else:
# internal xattr
xattr_value = raw_data[
xattr_entry.e_value_offs + offset: xattr_entry.e_value_offs + offset + xattr_entry.e_value_size]
yield (xattr_name, xattr_value)
i += xattr_entry._size
def directory_entry_comparator(dir_a, dir_b):
file_name_a, _, file_type_a = dir_a
file_name_b, _, file_type_b = dir_b
if file_type_a == InodeType.DIRECTORY == file_type_b or file_type_a != InodeType.DIRECTORY != file_type_b:
tmp = wcscmp(file_name_a.lower(), file_name_b.lower())
return tmp if tmp != 0 else wcscmp(file_name_a, file_name_b)
else:
return -1 if file_type_a == InodeType.DIRECTORY else 1
directory_entry_key = functools.cmp_to_key(directory_entry_comparator)
def get_inode(self, *relative_path, decode_name=None):
if not self.is_dir:
raise Ext4Error("Inode {inode:d} is not a directory.".format(inode=self.inode_idx))
current_inode = self
for i, part in enumerate(relative_path):
if not self.volume.ignore_flags and not current_inode.is_dir:
current_path = "/".join(relative_path[:i])
raise Ext4Error("{current_path!r:s} (Inode {inode:d}) is not a directory.".format(
current_path=current_path,
inode=inode_idx
))
file_name, inode_idx, file_type = next(
filter(lambda entry: entry[0] == part, current_inode.open_dir(decode_name)), (None, None, None))
if inode_idx == None:
current_path = "/".join(relative_path[:i])
raise FileNotFoundError("{part!r:s} not found in {current_path!r:s} (Inode {inode:d}).".format(
current_path=current_path,
inode=current_inode.inode_idx,
part=part
))
current_inode = current_inode.volume.get_inode(inode_idx, file_type)
return current_inode
@property
def is_dir(self):
if (self.volume.superblock.s_feature_incompat & ext4_superblock.INCOMPAT_FILETYPE) == 0:
return (self.inode.i_mode & ext4_inode.S_IFDIR) != 0
else:
return self.file_type == InodeType.DIRECTORY
@property
def is_file(self):
if (self.volume.superblock.s_feature_incompat & ext4_superblock.INCOMPAT_FILETYPE) == 0:
return (self.inode.i_mode & ext4_inode.S_IFREG) != 0
else:
return self.file_type == InodeType.FILE
@property
def is_symlink(self):
if (self.volume.superblock.s_feature_incompat & ext4_superblock.INCOMPAT_FILETYPE) == 0:
return (self.inode.i_mode & ext4_inode.S_IFLNK) != 0
else:
return self.file_type == InodeType.SYMBOLIC_LINK
    @property
    def is_in_use(self):
        """True when this inode's bit is set in its block group's inode usage bitmap."""
        group_idx, bitmap_bit = self.volume.get_inode_group(self.inode_idx)
        inode_usage_bitmap_offset = self.volume.group_descriptors[group_idx].bg_inode_bitmap * self.volume.block_size
        inode_usage_byte = self.volume.read(inode_usage_bitmap_offset + bitmap_bit // 8, 1)[0]
        # NOTE(review): this reads bits MSB-first within a byte (7 - bit % 8);
        # ext4 bitmaps are commonly LSB-first -- confirm against a known volume.
        return ((inode_usage_byte >> (7 - bitmap_bit % 8)) & 1) != 0
    @property
    def mode_str(self):
        """ls-style mode string (e.g. "drwxr-xr-x") derived from i_mode / file_type."""
        # Renders an execute slot combined with its setuid/setgid/sticky bit:
        # "-"/"x" without the special bit, upper/lower case letter with it.
        special_flag = lambda letter, execute, special: {
            (False, False): "-",
            (False, True): letter.upper(),
            (True, False): "x",
            (True, True): letter.lower()
        }[(execute, special)]
        try:
            if (self.volume.superblock.s_feature_incompat & ext4_superblock.INCOMPAT_FILETYPE) == 0:
                # Type comes from the mode bits when filetype dirents are absent.
                device_type = {
                    ext4_inode.S_IFIFO: "p",
                    ext4_inode.S_IFCHR: "c",
                    ext4_inode.S_IFDIR: "d",
                    ext4_inode.S_IFBLK: "b",
                    ext4_inode.S_IFREG: "-",
                    ext4_inode.S_IFLNK: "l",
                    ext4_inode.S_IFSOCK: "s",
                }[self.inode.i_mode & 0xF000]
            else:
                device_type = {
                    InodeType.FILE: "-",
                    InodeType.DIRECTORY: "d",
                    InodeType.CHARACTER_DEVICE: "c",
                    InodeType.BLOCK_DEVICE: "b",
                    InodeType.FIFO: "p",
                    InodeType.SOCKET: "s",
                    InodeType.SYMBOLIC_LINK: "l"
                }[self.file_type]
        except KeyError:
            # Unknown or unmapped type.
            device_type = "?"
        return "".join([
            device_type,
            "r" if (self.inode.i_mode & ext4_inode.S_IRUSR) != 0 else "-",
            "w" if (self.inode.i_mode & ext4_inode.S_IWUSR) != 0 else "-",
            special_flag("s", (self.inode.i_mode & ext4_inode.S_IXUSR) != 0,
                         (self.inode.i_mode & ext4_inode.S_ISUID) != 0),
            "r" if (self.inode.i_mode & ext4_inode.S_IRGRP) != 0 else "-",
            "w" if (self.inode.i_mode & ext4_inode.S_IWGRP) != 0 else "-",
            special_flag("s", (self.inode.i_mode & ext4_inode.S_IXGRP) != 0,
                         (self.inode.i_mode & ext4_inode.S_ISGID) != 0),
            "r" if (self.inode.i_mode & ext4_inode.S_IROTH) != 0 else "-",
            "w" if (self.inode.i_mode & ext4_inode.S_IWOTH) != 0 else "-",
            special_flag("t", (self.inode.i_mode & ext4_inode.S_IXOTH) != 0,
                         (self.inode.i_mode & ext4_inode.S_ISVTX) != 0),
        ])
def open_dir(self, decode_name=None):
# Parse args
if decode_name == None:
decode_name = lambda raw: raw.decode("utf8")
if not self.volume.ignore_flags and not self.is_dir:
raise Ext4Error("Inode ({inode:d}) is not a directory.".format(inode=self.inode_idx))
# # Hash trees are compatible with linear arrays
| |
<gh_stars>1000+
#!/usr/bin/env python3
"""
GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
Code generator for wrapping a C++ module with Pybind11
Author: <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
"""
# pylint: disable=too-many-arguments, too-many-instance-attributes, no-self-use, no-else-return, too-many-arguments, unused-format-string-argument, line-too-long
import re
from pathlib import Path
import gtwrap.interface_parser as parser
import gtwrap.template_instantiator as instantiator
class PybindWrapper:
"""
Class to generate binding code for Pybind11 specifically.
"""
def __init__(self,
module_name,
top_module_namespaces='',
use_boost=False,
ignore_classes=(),
module_template=""):
self.module_name = module_name
self.top_module_namespaces = top_module_namespaces
self.use_boost = use_boost
self.ignore_classes = ignore_classes
self._serializing_classes = []
self.module_template = module_template
self.python_keywords = [
'lambda', 'False', 'def', 'if', 'raise', 'None', 'del', 'import',
'return', 'True', 'elif', 'in', 'try', 'and', 'else', 'is',
'while', 'as', 'except', 'lambda', 'with', 'assert', 'finally',
'nonlocal', 'yield', 'break', 'for', 'not', 'class', 'from', 'or',
'continue', 'global', 'pass'
]
# amount of indentation to add before each function/method declaration.
self.method_indent = '\n' + (' ' * 8)
def _py_args_names(self, args):
"""Set the argument names in Pybind11 format."""
names = args.names()
if names:
py_args = []
for arg in args.list():
if arg.default is not None:
default = ' = {arg.default}'.format(arg=arg)
else:
default = ''
argument = 'py::arg("{name}"){default}'.format(
name=arg.name, default='{0}'.format(default))
py_args.append(argument)
return ", " + ", ".join(py_args)
else:
return ''
def _method_args_signature(self, args):
"""Generate the argument types and names as per the method signature."""
cpp_types = args.to_cpp(self.use_boost)
names = args.names()
types_names = [
"{} {}".format(ctype, name)
for ctype, name in zip(cpp_types, names)
]
return ', '.join(types_names)
def wrap_ctors(self, my_class):
"""Wrap the constructors."""
res = ""
for ctor in my_class.ctors:
res += (
self.method_indent + '.def(py::init<{args_cpp_types}>()'
'{py_args_names})'.format(
args_cpp_types=", ".join(ctor.args.to_cpp(self.use_boost)),
py_args_names=self._py_args_names(ctor.args),
))
return res
    def _wrap_method(self,
                     method,
                     cpp_class,
                     prefix,
                     suffix,
                     method_suffix=""):
        """Wrap a single method of ``cpp_class`` as a Pybind11 .def() entry.

        Special cases:
          - serialize/serializable: emits serialize/deserialize lambdas and
            records the class in self._serializing_classes.
          - pickle: emits a py::pickle handler (class must already be serializable).
          - print: redirects stdout and additionally emits a __repr__ binding.
        ``method_suffix`` is appended to the Python-visible name to
        disambiguate overloads.
        """
        py_method = method.name + method_suffix
        cpp_method = method.to_cpp()
        if cpp_method in ["serialize", "serializable"]:
            if not cpp_class in self._serializing_classes:
                self._serializing_classes.append(cpp_class)
            serialize_method = self.method_indent + \
                ".def(\"serialize\", []({class_inst} self){{ return gtsam::serialize(*self); }})".format(class_inst=cpp_class + '*')
            deserialize_method = self.method_indent + \
                '.def("deserialize", []({class_inst} self, string serialized)' \
                '{{ gtsam::deserialize(serialized, *self); }}, py::arg("serialized"))' \
                .format(class_inst=cpp_class + '*')
            return serialize_method + deserialize_method
        if cpp_method == "pickle":
            if not cpp_class in self._serializing_classes:
                raise ValueError(
                    "Cannot pickle a class which is not serializable")
            pickle_method = self.method_indent + \
                ".def(py::pickle({indent} [](const {cpp_class} &a){{ /* __getstate__: Returns a string that encodes the state of the object */ return py::make_tuple(gtsam::serialize(a)); }},{indent} [](py::tuple t){{ /* __setstate__ */ {cpp_class} obj; gtsam::deserialize(t[0].cast<std::string>(), obj); return obj; }}))"
            return pickle_method.format(cpp_class=cpp_class,
                                        indent=self.method_indent)
        # Add underscore to disambiguate if the method name matches a python keyword
        if py_method in self.python_keywords:
            py_method = py_method + "_"
        is_method = isinstance(method, instantiator.InstantiatedMethod)
        is_static = isinstance(method, parser.StaticMethod)
        return_void = method.return_type.is_void()
        args_names = method.args.names()
        py_args_names = self._py_args_names(method.args)
        args_signature_with_names = self._method_args_signature(method.args)
        # Instance methods call through self->; everything else via Class::.
        caller = cpp_class + "::" if not is_method else "self->"
        function_call = ('{opt_return} {caller}{method_name}'
                         '({args_names});'.format(
                             opt_return='return' if not return_void else '',
                             caller=caller,
                             method_name=cpp_method,
                             args_names=', '.join(args_names),
                         ))
        ret = ('{prefix}.{cdef}("{py_method}",'
               '[]({opt_self}{opt_comma}{args_signature_with_names}){{'
               '{function_call}'
               '}}'
               '{py_args_names}){suffix}'.format(
                   prefix=prefix,
                   cdef="def_static" if is_static else "def",
                   py_method=py_method,
                   opt_self="{cpp_class}* self".format(
                       cpp_class=cpp_class) if is_method else "",
                   opt_comma=', ' if is_method and args_names else '',
                   args_signature_with_names=args_signature_with_names,
                   function_call=function_call,
                   py_args_names=py_args_names,
                   suffix=suffix,
               ))
        # Create __repr__ override
        # We allow all arguments to .print() and let the compiler handle type mismatches.
        if method.name == 'print':
            # Redirect stdout - see pybind docs for why this is a good idea:
            # https://pybind11.readthedocs.io/en/stable/advanced/pycpp/utilities.html#capturing-standard-output-from-ostream
            ret = ret.replace(
                'self->print',
                'py::scoped_ostream_redirect output; self->print')
            # Make __repr__() call .print() internally
            ret += '''{prefix}.def("__repr__",
                    [](const {cpp_class}& self{opt_comma}{args_signature_with_names}){{
                        gtsam::RedirectCout redirect;
                        self.{method_name}({method_args});
                        return redirect.str();
                    }}{py_args_names}){suffix}'''.format(
                prefix=prefix,
                cpp_class=cpp_class,
                opt_comma=', ' if args_names else '',
                args_signature_with_names=args_signature_with_names,
                method_name=method.name,
                method_args=", ".join(args_names) if args_names else '',
                py_args_names=py_args_names,
                suffix=suffix)
        return ret
def wrap_methods(self,
methods,
cpp_class,
prefix='\n' + ' ' * 8,
suffix=''):
"""
Wrap all the methods in the `cpp_class`.
"""
res = ""
for method in methods:
# To avoid type confusion for insert
if method.name == 'insert' and cpp_class == 'gtsam::Values':
name_list = method.args.names()
type_list = method.args.to_cpp(self.use_boost)
# inserting non-wrapped value types
if type_list[0].strip() == 'size_t':
method_suffix = '_' + name_list[1].strip()
res += self._wrap_method(method=method,
cpp_class=cpp_class,
prefix=prefix,
suffix=suffix,
method_suffix=method_suffix)
res += self._wrap_method(
method=method,
cpp_class=cpp_class,
prefix=prefix,
suffix=suffix,
)
return res
def wrap_variable(self,
namespace,
module_var,
variable,
prefix='\n' + ' ' * 8):
"""
Wrap a variable that's not part of a class (i.e. global)
"""
variable_value = ""
if variable.default is None:
variable_value = variable.name
else:
variable_value = variable.default
return '{prefix}{module_var}.attr("{variable_name}") = {namespace}{variable_value};'.format(
prefix=prefix,
module_var=module_var,
variable_name=variable.name,
namespace=namespace,
variable_value=variable_value)
def wrap_properties(self, properties, cpp_class, prefix='\n' + ' ' * 8):
"""Wrap all the properties in the `cpp_class`."""
res = ""
for prop in properties:
res += ('{prefix}.def_{property}("{property_name}", '
'&{cpp_class}::{property_name})'.format(
prefix=prefix,
property="readonly"
if prop.ctype.is_const else "readwrite",
cpp_class=cpp_class,
property_name=prop.name,
))
return res
def wrap_operators(self, operators, cpp_class, prefix='\n' + ' ' * 8):
"""Wrap all the overloaded operators in the `cpp_class`."""
res = ""
template = "{prefix}.def({{0}})".format(prefix=prefix)
for op in operators:
if op.operator == "[]": # __getitem__
res += "{prefix}.def(\"__getitem__\", &{cpp_class}::operator[])".format(
prefix=prefix, cpp_class=cpp_class)
elif op.operator == "()": # __call__
res += "{prefix}.def(\"__call__\", &{cpp_class}::operator())".format(
prefix=prefix, cpp_class=cpp_class)
elif op.is_unary:
res += template.format("{0}py::self".format(op.operator))
else:
res += template.format("py::self {0} py::self".format(
op.operator))
return res
def wrap_enum(self, enum, class_name='', module=None, prefix=' ' * 4):
"""
Wrap an enum.
Args:
enum: The parsed enum to wrap.
class_name: The class under which the enum is defined.
prefix: The amount of indentation.
"""
if module is None:
module = self._gen_module_var(enum.namespaces())
cpp_class = enum.cpp_typename().to_cpp()
if class_name:
# If class_name is provided, add that as the namespace
cpp_class = class_name + "::" + cpp_class
res = '{prefix}py::enum_<{cpp_class}>({module}, "{enum.name}", py::arithmetic())'.format(
prefix=prefix, module=module, enum=enum, cpp_class=cpp_class)
for enumerator in enum.enumerators:
res += '\n{prefix} .value("{enumerator.name}", {cpp_class}::{enumerator.name})'.format(
prefix=prefix, enumerator=enumerator, cpp_class=cpp_class)
res += ";\n\n"
return res
def wrap_enums(self, enums, instantiated_class, prefix=' ' * 4):
"""Wrap multiple enums defined in a class."""
cpp_class = instantiated_class.to_cpp()
module_var = instantiated_class.name.lower()
res = ''
for enum in enums:
res += "\n" + self.wrap_enum(
enum, class_name=cpp_class, module=module_var, prefix=prefix)
return res
    def wrap_instantiated_class(
            self, instantiated_class: instantiator.InstantiatedClass):
        """Emit the full py::class_ binding for an instantiated class.

        Classes listed in self.ignore_classes are skipped. When the class
        defines enums, a named class_ instance is declared first so the enums
        can later be attached to that instance variable.
        """
        module_var = self._gen_module_var(instantiated_class.namespaces())
        cpp_class = instantiated_class.to_cpp()
        if cpp_class in self.ignore_classes:
            return ""
        if instantiated_class.parent_class:
            class_parent = "{instantiated_class.parent_class}, ".format(
                instantiated_class=instantiated_class)
        else:
            class_parent = ''
        if instantiated_class.enums:
            # If class has enums, define an instance and set module_var to the instance
            instance_name = instantiated_class.name.lower()
            class_declaration = (
                '\n    py::class_<{cpp_class}, {class_parent}'
                '{shared_ptr_type}::shared_ptr<{cpp_class}>> '
                '{instance_name}({module_var}, "{class_name}");'
                '\n    {instance_name}').format(
                    shared_ptr_type=('boost' if self.use_boost else 'std'),
                    cpp_class=cpp_class,
                    class_name=instantiated_class.name,
                    class_parent=class_parent,
                    instance_name=instance_name,
                    module_var=module_var)
            module_var = instance_name
        else:
            class_declaration = (
                '\n    py::class_<{cpp_class}, {class_parent}'
                '{shared_ptr_type}::shared_ptr<{cpp_class}>>({module_var}, "{class_name}")'
            ).format(shared_ptr_type=('boost' if self.use_boost else 'std'),
                     cpp_class=cpp_class,
                     class_name=instantiated_class.name,
                     class_parent=class_parent,
                     module_var=module_var)
        return ('{class_declaration}'
                '{wrapped_ctors}'
                '{wrapped_methods}'
                '{wrapped_static_methods}'
                '{wrapped_properties}'
                '{wrapped_operators};\n'.format(
                    class_declaration=class_declaration,
                    wrapped_ctors=self.wrap_ctors(instantiated_class),
                    wrapped_methods=self.wrap_methods(
                        instantiated_class.methods, cpp_class),
                    wrapped_static_methods=self.wrap_methods(
                        instantiated_class.static_methods, cpp_class),
                    wrapped_properties=self.wrap_properties(
                        instantiated_class.properties, cpp_class),
                    wrapped_operators=self.wrap_operators(
                        instantiated_class.operators, cpp_class)))
def wrap_instantiated_declaration(
self, instantiated_decl: instantiator.InstantiatedDeclaration):
"""Wrap the class."""
module_var = self._gen_module_var(instantiated_decl.namespaces())
cpp_class = instantiated_decl.to_cpp()
if cpp_class in self.ignore_classes:
return ""
res = (
'\n py::class_<{cpp_class}, '
'{shared_ptr_type}::shared_ptr<{cpp_class}>>({module_var}, "{class_name}")'
).format(shared_ptr_type=('boost' if self.use_boost else 'std'),
cpp_class=cpp_class,
class_name=instantiated_decl.name,
module_var=module_var)
return res
    def wrap_stl_class(self, stl_class):
        """Wrap STL containers.

        Emits a py::class_ binding with constructors, methods, static methods
        and properties; classes listed in self.ignore_classes are skipped.
        """
        module_var = self._gen_module_var(stl_class.namespaces())
        cpp_class = stl_class.to_cpp()
        if cpp_class in self.ignore_classes:
            return ""
        return (
            '\n    py::class_<{cpp_class}, {class_parent}'
            '{shared_ptr_type}::shared_ptr<{cpp_class}>>({module_var}, "{class_name}")'
            '{wrapped_ctors}'
            '{wrapped_methods}'
            '{wrapped_static_methods}'
            '{wrapped_properties};\n'.format(
                shared_ptr_type=('boost' if self.use_boost else 'std'),
                cpp_class=cpp_class,
                class_name=stl_class.name,
                class_parent=str(stl_class.parent_class) +
                (', ' if stl_class.parent_class else ''),
                module_var=module_var,
                wrapped_ctors=self.wrap_ctors(stl_class),
                wrapped_methods=self.wrap_methods(stl_class.methods,
                                                  cpp_class),
                wrapped_static_methods=self.wrap_methods(
                    stl_class.static_methods, cpp_class),
                wrapped_properties=self.wrap_properties(
                    stl_class.properties, cpp_class),
            ))
    def wrap_functions(self,
                       functions,
                       namespace,
                       prefix='\n' + ' ' * 8,
                       suffix=''):
        """
        Wrap all the global functions.

        Each function becomes a lambda-based .def()/.def_static() entry that
        forwards to {namespace}::{function}.
        """
        res = ""
        for function in functions:
            function_name = function.name
            # Add underscore to disambiguate if the function name matches a python keyword
            python_keywords = self.python_keywords + ['print']
            if function_name in python_keywords:
                function_name = function_name + "_"
            cpp_method = function.to_cpp()
            is_static = isinstance(function, parser.StaticMethod)
            return_void = function.return_type.is_void()
            args_names = function.args.names()
            py_args_names = self._py_args_names(function.args)
            args_signature = self._method_args_signature(function.args)
            caller = namespace + "::"
            function_call = ('{opt_return} {caller}{function_name}'
                             '({args_names});'.format(
                                 opt_return='return'
                                 if not return_void else '',
                                 caller=caller,
                                 function_name=cpp_method,
                                 args_names=', '.join(args_names),
                             ))
            ret = ('{prefix}.{cdef}("{function_name}",'
                   '[]({args_signature}){{'
                   '{function_call}'
                   '}}'
                   '{py_args_names}){suffix}'.format(
                       prefix=prefix,
                       cdef="def_static" if is_static else "def",
                       function_name=function_name,
                       args_signature=args_signature,
                       function_call=function_call,
                       py_args_names=py_args_names,
                       suffix=suffix))
            res += ret
        return res
def _partial_match(self, namespaces1, namespaces2):
for i in range(min(len(namespaces1), len(namespaces2))):
if namespaces1[i] != namespaces2[i]:
return False
return True
def _gen_module_var(self, namespaces):
"""Get the Pybind11 module name from the namespaces."""
# We skip the first | |
import itertools
import math
from . import vector_tile_pb2
# Constants
## Complex Value Type (low-nibble tag of a complex value integer)
CV_TYPE_STRING = 0
CV_TYPE_FLOAT = 1
CV_TYPE_DOUBLE = 2
CV_TYPE_UINT = 3
CV_TYPE_SINT = 4
CV_TYPE_INLINE_UINT = 5
CV_TYPE_INLINE_SINT = 6
CV_TYPE_BOOL_NULL = 7
CV_TYPE_LIST = 8
CV_TYPE_MAP = 9
CV_TYPE_LIST_DOUBLE = 10
## Complex Value Bool/Null Meaning
CV_NULL = 0
CV_BOOL_FALSE = 1
CV_BOOL_TRUE = 2
DEFAULT_SPLINE_DEGREE = 2
# Python 3 compatibility: alias the Python 2 `unicode` and `long` types so the
# rest of the module can test against `other_str` and `long` on either version.
try:
    unicode
    other_str = unicode
except NameError:
    other_str = bytes
    long = int
def zig_zag_encode(val):
    """ZigZag-encode a signed 32-bit integer into a non-negative integer."""
    v = int(val)
    return (v << 1) ^ (v >> 31)
def zig_zag_encode_64(val):
    """ZigZag-encode a signed 64-bit integer into a non-negative integer."""
    v = int(val)
    return (v << 1) ^ (v >> 63)
def zig_zag_decode(val):
    """Invert zig_zag_encode: map a non-negative integer back to a signed one."""
    return (val >> 1) ^ -(val & 1)
def command_integer(cmd_id, count):
    """Pack a geometry command id (low 3 bits) and repeat count into one integer."""
    return (count << 3) | (cmd_id & 0x7)
def command_move_to(count):
    """Command integer for a MoveTo (id 1) repeated ``count`` times."""
    return 0x1 | (count << 3)
def command_line_to(count):
    """Command integer for a LineTo (id 2) repeated ``count`` times."""
    return 0x2 | (count << 3)
def command_close_path():
    """Command integer for a single ClosePath (id 7)."""
    return 0x7 | (1 << 3)
def get_command_id(command_integer):
    """Command id stored in the low 3 bits of a command integer."""
    return command_integer & 0x07
def get_command_count(command_integer):
    """Repeat count stored in the bits above the command id."""
    return command_integer >> 3
def next_command_move_to(command_integer):
    """True when the command integer encodes a MoveTo (command id 1)."""
    return (command_integer & 0x7) == 1
def next_command_line_to(command_integer):
    """True when the command integer encodes a LineTo (command id 2)."""
    return (command_integer & 0x7) == 2
def next_command_close_path(command_integer):
    """True when the command integer encodes a ClosePath (command id 7)."""
    return (command_integer & 0x7) == 7
def get_inline_value_id(complex_value):
    """Value-type tag stored in the low nibble of a complex value integer."""
    return complex_value & 0x0F
def get_inline_value_parameter(complex_value):
    """Inline parameter stored in the bits above the value-type tag."""
    return complex_value >> 4
def complex_value_integer(cmd_id, param):
    """Pack a value-type tag (4 bits) and parameter into one complex value integer."""
    return (param << 4) | (cmd_id & 0x0F)
class Float(float):
    """Float subclass that truncates its value to single precision.

    The value is round-tripped through a protobuf ``Tile.Value.float_value``
    field so the Python object matches what will actually be stored in the
    tile.
    """
    def __new__(self, *args, **kwargs):
        x = float(*args, **kwargs)
        vm = vector_tile_pb2.Tile.Value()
        vm.float_value = x
        return float.__new__(self, vm.float_value)
    def __init__(self, *args, **kwargs):
        float.__init__(*args, **kwargs)
class UInt(long):
    """Integer subclass distinguishing unsigned values from plain ints.

    NOTE(review): presumably consumed by the attribute encoder to pick the
    unsigned wire type -- confirm against the layer's add_attributes logic.
    """
    def __new__(self, *args, **kwargs):
        return long.__new__(self, *args, **kwargs)
    def __init__(self, *args, **kwargs):
        long.__init__(*args, **kwargs)
def scaling_calculation(precision, min_float, max_float):
    """Compute scaling factors to store floats in [min_float, max_float] as integers.

    Returns a dict with 'sF' (encode factor), 'sR' (decode factor, 1/sF) and
    'base' (the range minimum).

    Raises:
        ValueError: if the range is empty, or precision is negative or larger
            than the range. (Was a bare Exception; ValueError is still caught
            by existing `except Exception` callers.)
    """
    if min_float >= max_float:
        raise ValueError("Invalid Float Range")
    if precision > (max_float - min_float):
        raise ValueError("Precision value too large for range")
    if precision < 0:
        raise ValueError("Precision can not be a negative value")
    # Bits required to cover the range at the requested precision (plus sign bit).
    lbits = math.ceil(math.log((max_float - min_float) / precision, 2) + 1.0)
    bPow = int(math.ceil(math.log(max_float - min_float, 2)))
    dPow = lbits - 1
    sF = pow(2.0, (dPow - bPow))
    sR = pow(2.0, (bPow - dPow))
    return {'sF': sF, 'sR': sR, 'base': min_float}
class FloatList(list):
    """List of floats stored in encoded (integer) form via a Scaling object.

    The first constructor argument must be a Scaling (or another FloatList,
    whose scaling is reused); an optional following argument is a list of
    values to encode.
    """
    def __init__(self, *args, **kwargs):
        # Fix: the original tested `len(args) < 0`, which is never true, so a
        # missing first argument surfaced as IndexError instead of this error.
        if len(args) < 1:
            raise Exception("FloatList initialization requires first argument to be Scaling object")
        if isinstance(args[0], FloatList):
            # NOTE(review): when copying another FloatList the stored (already
            # encoded) integers fall through the list branch below and get
            # encoded again -- confirm this is intended for copies.
            self._scaling = args[0]._scaling
        elif isinstance(args[0], Scaling):
            self._scaling = args[0]
            args = tuple(args[1:])
        else:
            raise Exception("Unknown object passed to FloatList, first argument must be a Scaling object")
        # Fix: guard against an empty args tuple (Scaling given with no data),
        # which previously raised IndexError.
        if args and isinstance(args[0], list):
            new_list = []
            for v in args[0]:
                if v is None:
                    new_list.append(v)
                elif isinstance(v, float):
                    new_list.append(self._scaling.encode_value(v))
                elif isinstance(v, int) or isinstance(v, long):
                    new_list.append(self._scaling.encode_value(float(v)))
            new_args = [new_list]
            new_args.extend(args[1:])
            args = tuple(new_args)
        super(FloatList, self).__init__(*args, **kwargs)
    def append_value(self, value):
        """Encode ``value`` and append it (None is stored as-is)."""
        if value is None:
            self.append(None)
        else:
            self.append(self._scaling.encode_value(value))
    def get_value_at(self, index):
        """Return the decoded value at ``index`` (None stays None)."""
        if self[index] is None:
            return self[index]
        return self._scaling.decode_value(self[index])
    def set_value_at(self, index, value):
        """Encode ``value`` and store it at ``index`` (None is stored as-is)."""
        if value is None:
            self[index] = None
        else:
            self[index] = self._scaling.encode_value(value)
    def get_all_values(self):
        """Return a plain list of all decoded values."""
        vals = []
        for v in self:
            if v is None:
                vals.append(None)
            else:
                vals.append(self._scaling.decode_value(v))
        return vals
    @property
    def index(self):
        # Index of the scaling entry this list refers to.
        return self._scaling.index
class FeatureAttributes(object):
    """Dict-like view over a feature's encoded attributes.

    The data lives encoded in the protobuf feature (in ``tags``,
    ``attributes`` or ``geometric_attributes`` depending on layer
    configuration); this class decodes lazily on first access and re-encodes
    after every mutation.
    """
    def __init__(self, feature, layer, is_geometric=False):
        self._feature = feature
        self._layer = layer
        self._attr = {}
        # True while self._attr mirrors the encoded protobuf data.
        self._attr_current = False
        self._is_geometric = is_geometric
    def _encode_attr(self):
        # Write self._attr back into the appropriate protobuf field.
        if self._layer._inline_attributes:
            if self._is_geometric:
                self._feature.geometric_attributes[:] = self._layer.add_attributes(self._attr, True)
            else:
                self._feature.attributes[:] = self._layer.add_attributes(self._attr, False)
        else:
            self._feature.tags[:] = self._layer.add_attributes(self._attr)
        self._attr_current = True
    def _decode_attr(self):
        # Populate self._attr from the protobuf data when it is stale.
        if not self._attr_current:
            if self._layer._inline_attributes:
                if self._is_geometric:
                    if len(self._feature.geometric_attributes) == 0:
                        self._attr = {}
                    else:
                        self._attr = self._layer.get_attributes(self._feature.geometric_attributes, True)
                else:
                    if len(self._feature.attributes) == 0:
                        self._attr = {}
                    else:
                        self._attr = self._layer.get_attributes(self._feature.attributes)
            else:
                if len(self._feature.tags) == 0:
                    self._attr = {}
                else:
                    self._attr = self._layer.get_attributes(self._feature.tags)
            self._attr_current = True
    def __len__(self):
        self._decode_attr()
        return len(self._attr)
    def __getitem__(self, key):
        self._decode_attr()
        if not isinstance(key, str) and not isinstance(key, other_str):
            raise TypeError("Keys must be of type str")
        return self._attr[key]
    def __delitem__(self, key):
        self._decode_attr()
        del self._attr[key]
        self._encode_attr()
    def __setitem__(self, key, value):
        if not isinstance(key, str) and not isinstance(key, other_str):
            raise TypeError("Keys must be of type str or other_str")
        self._decode_attr()
        self._attr[key] = value
        self._encode_attr()
    def __iter__(self):
        self._decode_attr()
        return self._attr.__iter__()
    def __eq__(self, other):
        # Compare against plain dicts or other FeatureAttributes by content.
        self._decode_attr()
        if isinstance(other, dict):
            return self._attr == other
        elif isinstance(other, FeatureAttributes):
            other._decode_attr()
            return self._attr == other._attr
        return False
    def __str__(self):
        self._decode_attr()
        return self._attr.__str__()
    def __contains__(self, key):
        self._decode_attr()
        return self._attr.__contains__(key)
    def set(self, attr):
        """Replace all attributes with a copy of ``attr`` and re-encode."""
        self._attr = dict(attr)
        self._encode_attr()
class Feature(object):
    """Base wrapper around a protobuf tile feature.

    Maintains a geometry cursor (x, y[, elevation]) for delta encoding and
    decoding, and exposes attribute access through FeatureAttributes.
    """
    def __init__(self, feature, layer, has_elevation=None):
        self._feature = feature
        self._layer = layer
        if has_elevation is None:
            # Infer elevation support from any existing elevation data.
            self._has_elevation = len(self._feature.elevation) != 0
        else:
            if has_elevation and self._layer.version < 3:
                raise Exception("Layers of version 1 or 2 can not have elevation data in features")
            self._has_elevation = has_elevation
        self._reset_cursor()
        self._attributes = FeatureAttributes(feature, layer, is_geometric=False)
        if self._layer._inline_attributes:
            self._geometric_attributes = FeatureAttributes(feature, layer, is_geometric=True)
        else:
            self._geometric_attributes = {}
    def _reset_cursor(self):
        # Cursor holds the last absolute position: (x, y) or (x, y, elevation).
        self.cursor = []
        if self._has_elevation:
            self.cursor[:3] = itertools.repeat(0, 3)
        else:
            self.cursor[:2] = itertools.repeat(0, 2)
        self._cursor_at_end = False
    def _encode_point(self, pt, cmd_list, elevation_list):
        """Delta+zigzag encode ``pt`` against the cursor, then advance the cursor."""
        cmd_list.append(zig_zag_encode(int(pt[0]) - self.cursor[0]))
        cmd_list.append(zig_zag_encode(int(pt[1]) - self.cursor[1]))
        self.cursor[0] = int(pt[0])
        self.cursor[1] = int(pt[1])
        if self._has_elevation:
            if self._layer._elevation_scaling is None:
                # Elevation deltas are stored without zigzag encoding.
                elevation_list.append(int(pt[2]) - self.cursor[2])
                self.cursor[2] = int(pt[2])
            else:
                new_pt = self._layer._elevation_scaling.encode_value(pt[2])
                elevation_list.append(new_pt - self.cursor[2])
                self.cursor[2] = new_pt
    def _decode_point(self, integers):
        """Decode delta-encoded ``integers`` against the cursor, advancing it."""
        self.cursor[0] = self.cursor[0] + zig_zag_decode(integers[0])
        self.cursor[1] = self.cursor[1] + zig_zag_decode(integers[1])
        out = [self.cursor[0], self.cursor[1]]
        if len(integers) > 2:
            self.cursor[2] = self.cursor[2] + integers[2]
            if self._layer._elevation_scaling is None:
                out.append(self.cursor[2])
            else:
                out.append(self._layer._elevation_scaling.decode_value(self.cursor[2]))
        return out
    def _points_equal(self, pt1, pt2):
        """Compare two points by value (including elevation when present).

        Bug fix: the original compared coordinates with ``is``/``is not``
        (object identity), which is unreliable for floats and non-interned
        ints; use value equality instead.
        """
        if pt1[0] != pt2[0] or pt1[1] != pt2[1]:
            return False
        if self._has_elevation and pt1[2] != pt2[2]:
            return False
        return True
    @property
    def has_elevation(self):
        """True when this feature carries per-vertex elevation values."""
        return self._has_elevation
    @property
    def attributes(self):
        """Feature attributes as a dict-like FeatureAttributes object."""
        return self._attributes
    @attributes.setter
    def attributes(self, attrs):
        self._attributes.set(attrs)
    @property
    def geometric_attributes(self):
        """Per-geometry attributes (only meaningful for inline-attribute layers)."""
        return self._geometric_attributes
    @geometric_attributes.setter
    def geometric_attributes(self, attrs):
        if not self._layer._inline_attributes:
            raise Exception("Can not set geometric attributes for none inline attributes configured layer.")
        self._geometric_attributes.set(attrs)
    @property
    def id(self):
        """Numeric id, string id, or None when neither is set."""
        if self._feature.HasField('id'):
            return self._feature.id
        elif self._feature.HasField('string_id'):
            return self._feature.string_id
        return None
    @id.setter
    def id(self, id_val):
        if isinstance(id_val, int):
            self._feature.id = id_val
            # Numeric and string ids are mutually exclusive.
            if self._feature.HasField('string_id'):
                self._feature.ClearField('string_id')
        elif self._layer.version >= 3:
            self._feature.string_id = id_val
            if self._feature.HasField('id'):
                self._feature.ClearField('id')
        else:
            raise Exception("Can not set string id for features using version 2 or below of the VT specification")
    def clear_geometry(self):
        """Remove all geometry (and elevation) data and reset the cursor."""
        self.has_geometry = False
        self._reset_cursor()
        self._feature.ClearField('geometry')
        self._feature.ClearField('elevation')
class PointFeature(Feature):
def __init__(self, feature, layer, has_elevation=None):
super(PointFeature, self).__init__(feature, layer, has_elevation)
if feature.type is not vector_tile_pb2.Tile.POINT:
feature.type = vector_tile_pb2.Tile.POINT
self.type = 'point'
self._num_points = 0
def add_points(self, points):
if not isinstance(points, list):
raise Exception("Invalid point geometry")
if not self._cursor_at_end:
# Use geometry retrieval process to move cursor to proper position
pts = self.get_points()
self._num_points = len(pts)
if len(points) < 1:
return
multi_point = isinstance(points[0], list)
if multi_point:
num_commands = len(points)
else:
num_commands = 1
cmd_list = []
if self._has_elevation:
elevation_list = []
else:
elevation_list = None
if self._num_points == 0:
cmd_list.append(command_move_to(num_commands))
try:
if multi_point:
for i in range(num_commands):
self._encode_point(points[i], cmd_list, elevation_list)
else:
self._encode_point(points, cmd_list, elevation_list)
except Exception as e:
self._reset_cursor()
raise e
if self._num_points != 0:
self._num_points = self._num_points + num_commands
self._feature.geometry[0] = command_move_to(self._num_points)
self._feature.geometry.extend(cmd_list)
if elevation_list:
try:
self._feature.elevation.extend(elevation_list)
except ValueError:
raise Exception("Elevation scaling results in value outside of value range of sint32, reduce elevation scaling precision.")
def get_points(self, no_elevation=False):
points = []
self._reset_cursor()
geom = iter(self._feature.geometry)
if self.has_elevation and not no_elevation:
elevation = iter(self._feature.elevation)
try:
current_command = next(geom)
while next_command_move_to(current_command):
for i in range(get_command_count(current_command)):
if self.has_elevation and not no_elevation:
points.append(self._decode_point([next(geom), next(geom), next(elevation)]))
else:
points.append(self._decode_point([next(geom), next(geom)]))
current_command = next(geom)
except StopIteration:
pass
self._cursor_at_end = True
return points
    def get_geometry(self, no_elevation = False):
        """Alias for get_points(): a point feature's geometry is its points."""
        return self.get_points(no_elevation)
class LineStringFeature(Feature):
def __init__(self, feature, layer, has_elevation=None):
super(LineStringFeature, self).__init__(feature, layer, has_elevation)
if feature.type is not vector_tile_pb2.Tile.LINESTRING:
feature.type = vector_tile_pb2.Tile.LINESTRING
self.type = 'line_string'
def add_line_string(self, linestring):
num_commands = len(linestring)
if num_commands < 2:
raise Exception("Error adding linestring, less then 2 points provided")
if not self._cursor_at_end:
# Use geometry retrieval process to move cursor to proper position
self.get_line_strings()
if self._has_elevation:
elevation_list = []
else:
elevation_list = None
try:
cmd_list = []
cmd_list.append(command_move_to(1))
self._encode_point(linestring[0], cmd_list, elevation_list)
cmd_list.append(command_line_to(num_commands - 1))
for i in range(1, num_commands):
self._encode_point(linestring[i], cmd_list, elevation_list)
except Exception as e:
self._reset_cursor()
raise e
self._feature.geometry.extend(cmd_list)
if elevation_list:
try:
self._feature.elevation.extend(elevation_list)
except ValueError:
raise Exception("Elevation scaling results in value outside of value range of sint32, reduce elevation scaling precision.")
def get_line_strings(self, no_elevation=False):
line_strings = []
line_string = []
self._reset_cursor()
geom = iter(self._feature.geometry)
if self.has_elevation and not no_elevation:
elevation = iter(self._feature.elevation)
try:
current_command = next(geom)
while next_command_move_to(current_command):
line_string = []
if get_command_count(current_command) != 1:
raise Exception("Command move_to has command count not equal to 1 in a line string")
if self.has_elevation and not no_elevation:
line_string.append(self._decode_point([next(geom), next(geom), next(elevation)]))
else:
line_string.append(self._decode_point([next(geom), next(geom)]))
current_command = next(geom)
if | |
<filename>pydrive2/test/test_file.py
# -*- coding: utf-8 -*-
import filecmp
import os
import unittest
import pytest
import sys
from io import BytesIO
from tempfile import mkdtemp
from time import time
from six.moves import range
import timeout_decorator
from concurrent.futures import ThreadPoolExecutor, as_completed
from googleapiclient import errors
from pydrive2.auth import GoogleAuth
from pydrive2.drive import GoogleDrive
from pydrive2.files import ApiRequestError, GoogleDriveFile
from pydrive2.test import test_util
from pydrive2.test.test_util import (
pydrive_retry,
setup_credentials,
create_file,
delete_dir,
delete_file,
settings_file_path,
)
class GoogleDriveFileTest(unittest.TestCase):
"""Tests basic file operations of files.GoogleDriveFile.
Upload and download of contents and metadata, and thread-safety checks.
Equivalent to Files.insert, Files.update, Files.patch in Google Drive API.
"""
    @classmethod
    def setup_class(cls):
        # Authenticate once per class run via a service account, and create a
        # scratch directory that holds all generated fixture files.
        setup_credentials()
        cls.tmpdir = mkdtemp()
        cls.ga = GoogleAuth(
            settings_file_path("default.yaml", os.path.join(cls.tmpdir, ""))
        )
        cls.ga.ServiceAuth()
    @classmethod
    def tearDownClass(cls):
        # Remove the scratch directory created in setup_class.
        # NOTE(review): mixes pytest-style setup_class with unittest-style
        # tearDownClass; both are honored under pytest's unittest integration.
        delete_dir(cls.tmpdir)
    @classmethod
    def getTempFile(cls, prefix="", content=""):
        # Build a unique (timestamp-based) path inside the scratch dir; only
        # materialize the file on disk when non-empty content is supplied.
        filename = os.path.join(cls.tmpdir, prefix + str(time()))
        if content:
            create_file(filename, content)
        return filename
    def test_01_Files_Insert(self):
        """Insert a metadata-only file and re-read its title by id."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        filename = self.getTempFile("firsttestfile")
        file1["title"] = filename
        pydrive_retry(file1.Upload) # Files.insert
        self.assertEqual(file1.metadata["title"], filename)
        file2 = drive.CreateFile({"id": file1["id"]}) # Download file from id.
        self.assertEqual(file2["title"], filename)
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_02_Files_Insert_Unicode(self):
        """Insert a file whose title contains non-ASCII (Korean) characters."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        filename = self.getTempFile(u"첫번째 파일")
        file1["title"] = filename
        pydrive_retry(file1.Upload) # Files.insert
        self.assertEqual(file1.metadata["title"], filename)
        file2 = drive.CreateFile({"id": file1["id"]}) # Download file from id.
        self.assertEqual(file2["title"], filename)
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_03_Files_Insert_Content_String(self):
        """Round-trip ASCII string content via SetContentString/GetContentString."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        filename = self.getTempFile("secondtestfile")
        content = "hello world!"
        file1["title"] = filename
        file1.SetContentString(content)
        pydrive_retry(file1.Upload) # Files.insert
        self.assertEqual(file1.GetContentString(), content)
        pydrive_retry(
            file1.FetchContent
        ) # Force download and double check content
        self.assertEqual(file1.metadata["title"], filename)
        self.assertEqual(file1.GetContentString(), content)
        file2 = drive.CreateFile({"id": file1["id"]}) # Download file from id.
        pydrive_retry(file2.FetchContent)
        self.assertEqual(file2.GetContentString(), content)
        self.assertEqual(file2.metadata["title"], filename)
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_04_Files_Insert_Content_Unicode_String(self):
        """Round-trip unicode (Korean) string content through upload/download."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        filename = self.getTempFile(u"두번째 파일")
        content = u"안녕 세상아!"
        file1["title"] = filename
        file1.SetContentString(content)
        pydrive_retry(file1.Upload) # Files.insert
        self.assertEqual(file1.GetContentString(), content)
        self.assertEqual(file1.metadata["title"], filename)
        pydrive_retry(
            file1.FetchContent
        ) # Force download and double check content.
        self.assertEqual(file1.GetContentString(), content)
        file2 = drive.CreateFile({"id": file1["id"]}) # Download file from id.
        pydrive_retry(file2.FetchContent)
        self.assertEqual(file2.GetContentString(), content)
        self.assertEqual(file2.metadata["title"], filename)
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_05_Files_Insert_Content_File(self):
        """Upload content from a local file and verify both downloads match it."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        filename = self.getTempFile("filecontent")
        file1["title"] = filename
        contentFile = self.getTempFile("actual_content", "some string")
        file1.SetContentFile(contentFile)
        pydrive_retry(file1.Upload) # Files.insert
        self.assertEqual(file1.metadata["title"], filename)
        pydrive_retry(
            file1.FetchContent
        ) # Force download and double check content.
        fileOut = self.getTempFile()
        pydrive_retry(file1.GetContentFile, fileOut)
        self.assertEqual(filecmp.cmp(contentFile, fileOut), True)
        file2 = drive.CreateFile({"id": file1["id"]}) # Download file from id.
        fileOut = self.getTempFile()
        pydrive_retry(file2.GetContentFile, fileOut)
        self.assertEqual(filecmp.cmp(contentFile, fileOut), True)
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_06_Files_Patch(self):
        """Rename a file via a metadata patch and verify the change remotely."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        filename = self.getTempFile("prepatchtestfile")
        newfilename = self.getTempFile("patchtestfile")
        file1["title"] = filename
        pydrive_retry(file1.Upload) # Files.insert
        self.assertEqual(file1.metadata["title"], filename)
        file1["title"] = newfilename
        pydrive_retry(file1.Upload) # Files.patch
        self.assertEqual(file1.metadata["title"], newfilename)
        file2 = drive.CreateFile({"id": file1["id"]}) # Download file from id.
        pydrive_retry(file2.FetchMetadata)
        self.assertEqual(file2.metadata["title"], newfilename)
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_07_Files_Patch_Skipping_Content(self):
        """Patch the title and confirm previously uploaded content is untouched."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        filename = self.getTempFile("prepatchtestfile")
        newfilename = self.getTempFile("patchtestfile")
        content = "hello world!"
        file1["title"] = filename
        file1.SetContentString(content)
        pydrive_retry(file1.Upload) # Files.insert
        self.assertEqual(file1.metadata["title"], filename)
        file1["title"] = newfilename
        pydrive_retry(file1.Upload) # Files.patch
        self.assertEqual(file1.metadata["title"], newfilename)
        self.assertEqual(file1.GetContentString(), content)
        # NOTE(review): duplicated assertion kept from the original --
        # presumably exercising the cached content read twice; confirm intent.
        self.assertEqual(file1.GetContentString(), content)
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_08_Files_Update_String(self):
        """Update both title and string content, then verify the new values."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        filename = self.getTempFile("preupdatetestfile")
        newfilename = self.getTempFile("updatetestfile")
        content = "hello world!"
        newcontent = "hello new world!"
        file1["title"] = filename
        file1.SetContentString(content)
        pydrive_retry(file1.Upload) # Files.insert
        self.assertEqual(file1.metadata["title"], filename)
        self.assertEqual(file1.GetContentString(), content)
        pydrive_retry(
            file1.FetchContent
        ) # Force download and double check content.
        self.assertEqual(file1.GetContentString(), content)
        file1["title"] = newfilename
        file1.SetContentString(newcontent)
        pydrive_retry(file1.Upload) # Files.update
        self.assertEqual(file1.metadata["title"], newfilename)
        self.assertEqual(file1.GetContentString(), newcontent)
        # NOTE(review): duplicated assertion kept from the original -- confirm intent.
        self.assertEqual(file1.GetContentString(), newcontent)
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_09_Files_Update_File(self):
        """Replace a file's content with a second local file and verify."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        filename = self.getTempFile("preupdatetestfile")
        newfilename = self.getTempFile("updatetestfile")
        contentFile = self.getTempFile("actual_content", "some string")
        contentFile2 = self.getTempFile("actual_content_2", "some string")
        file1["title"] = filename
        file1.SetContentFile(contentFile)
        pydrive_retry(file1.Upload) # Files.insert
        self.assertEqual(file1.metadata["title"], filename)
        pydrive_retry(
            file1.FetchContent
        ) # Force download and double check content.
        fileOut = self.getTempFile()
        pydrive_retry(file1.GetContentFile, fileOut)
        self.assertEqual(filecmp.cmp(contentFile, fileOut), True)
        file1["title"] = newfilename
        file1.SetContentFile(contentFile2)
        pydrive_retry(file1.Upload) # Files.update
        self.assertEqual(file1.metadata["title"], newfilename)
        fileOut = self.getTempFile()
        pydrive_retry(file1.GetContentFile, fileOut)
        self.assertEqual(filecmp.cmp(contentFile2, fileOut), True)
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_10_Files_Download_Service(self):
        """
        Tests that a fresh GoogleDrive object can correctly authenticate
        and download from a file ID.
        """
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        filename = self.getTempFile("prepatchtestfile")
        content = "hello world!"
        file1["title"] = filename
        file1.SetContentString(content)
        pydrive_retry(file1.Upload) # Files.insert
        self.assertEqual(file1.metadata["title"], filename)
        fileOut1 = self.getTempFile()
        pydrive_retry(file1.GetContentFile, fileOut1)
        # fresh download-only instance
        auth = GoogleAuth(
            settings_file_path("default.yaml", os.path.join(self.tmpdir, ""))
        )
        auth.ServiceAuth()
        drive2 = GoogleDrive(auth)
        file2 = drive2.CreateFile({"id": file1["id"]})
        fileOut2 = self.getTempFile()
        pydrive_retry(file2.GetContentFile, fileOut2)
        # Downloads via the original and the fresh session must be identical.
        self.assertEqual(filecmp.cmp(fileOut1, fileOut2), True)
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_11_Files_Get_Content_Buffer(self):
        """Read content back as an IO buffer, both raw bytes and ascii-decoded."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        filename = self.getTempFile()
        content = "hello world!\ngoodbye, cruel world!"
        file1["title"] = filename
        file1.SetContentString(content)
        pydrive_retry(file1.Upload) # Files.insert
        buffer1 = pydrive_retry(file1.GetContentIOBuffer)
        self.assertEqual(file1.metadata["title"], filename)
        self.assertEqual(len(buffer1), len(content))
        self.assertEqual(b"".join(iter(buffer1)).decode("ascii"), content)
        buffer2 = pydrive_retry(file1.GetContentIOBuffer, encoding="ascii")
        self.assertEqual(len(buffer2), len(content))
        self.assertEqual("".join(iter(buffer2)), content)
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_12_Upload_Download_Empty_File(self):
        """An empty local file uploads and downloads back as zero bytes."""
        filename = os.path.join(self.tmpdir, str(time()))
        create_file(filename, "")
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        file1.SetContentFile(filename)
        pydrive_retry(file1.Upload)
        fileOut1 = self.getTempFile()
        pydrive_retry(file1.GetContentFile, fileOut1)
        self.assertEqual(os.path.getsize(fileOut1), 0)
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_13_Upload_Download_Empty_String(self):
        """An empty content string survives upload and all download paths."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        file1.SetContentString("")
        pydrive_retry(file1.Upload)
        self.assertEqual(pydrive_retry(file1.GetContentString), "")
        # Force download and double check content
        pydrive_retry(file1.FetchContent)
        self.assertEqual(file1.GetContentString(), "")
        # Download file from id
        file2 = drive.CreateFile({"id": file1["id"]})
        pydrive_retry(file2.FetchContent)
        self.assertEqual(file2.GetContentString(), "")
        self.DeleteUploadedFiles(drive, [file1["id"]])
# Tests for Trash/UnTrash/Delete.
# ===============================
    def test_Files_Trash_File(self):
        """Trash a file and verify the trashed flag locally and remotely."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        pydrive_retry(file1.Upload)
        self.assertFalse(file1.metadata[u"labels"][u"trashed"])
        # Download to verify non-trashed state on GDrive.
        file2 = drive.CreateFile({"id": file1["id"]})
        pydrive_retry(file2.FetchMetadata)
        self.assertFalse(file2.metadata[u"labels"][u"trashed"])
        pydrive_retry(file1.Trash)
        self.assertTrue(file1.metadata[u"labels"][u"trashed"])
        pydrive_retry(file2.FetchMetadata)
        self.assertTrue(file2.metadata[u"labels"][u"trashed"])
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_Files_Trash_File_Just_ID(self):
        """Trash a file through an id-only handle without fetching metadata first."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        pydrive_retry(file1.Upload)
        self.assertFalse(file1.metadata[u"labels"][u"trashed"])
        # Trash file by ID.
        file2 = drive.CreateFile({"id": file1["id"]})
        pydrive_retry(file2.Trash)
        # Verify trashed by downloading metadata.
        pydrive_retry(file1.FetchMetadata)
        self.assertTrue(file1.metadata[u"labels"][u"trashed"])
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_Files_UnTrash_File(self):
        """Trash then un-trash a file, checking the flag locally and remotely."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        pydrive_retry(file1.Upload)
        pydrive_retry(file1.Trash)
        self.assertTrue(file1.metadata[u"labels"][u"trashed"])
        # Verify that file is trashed by downloading metadata.
        file2 = drive.CreateFile({"id": file1["id"]})
        pydrive_retry(file2.FetchMetadata)
        self.assertTrue(file2.metadata[u"labels"][u"trashed"])
        # Un-trash the file, and assert local metadata is updated correctly.
        pydrive_retry(file1.UnTrash)
        self.assertFalse(file1.metadata[u"labels"][u"trashed"])
        # Re-fetch the metadata, and assert file un-trashed on GDrive.
        pydrive_retry(file2.FetchMetadata)
        self.assertFalse(file2.metadata[u"labels"][u"trashed"])
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_Files_UnTrash_File_Just_ID(self):
        """Un-trash a file through an id-only handle without prior metadata."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        pydrive_retry(file1.Upload)
        pydrive_retry(file1.Trash)
        self.assertTrue(file1.metadata[u"labels"][u"trashed"])
        file2 = drive.CreateFile({"id": file1["id"]})
        pydrive_retry(file2.UnTrash) # UnTrash without fetching metadata.
        pydrive_retry(file1.FetchMetadata)
        self.assertFalse(file1.metadata[u"labels"][u"trashed"])
        self.DeleteUploadedFiles(drive, [file1["id"]])
    def test_Files_Delete_File(self):
        """Delete a file and expect metadata fetches on it to fail afterwards."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        pydrive_retry(file1.Upload)
        file2 = drive.CreateFile({"id": file1["id"]})
        pydrive_retry(file1.Delete)
        try:
            pydrive_retry(file2.FetchMetadata)
            self.fail("File not deleted correctly.")
        except ApiRequestError:
            pass
    def test_Files_Delete_File_Just_ID(self):
        """Delete a file via an id-only handle and expect later fetches to fail."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        pydrive_retry(file1.Upload)
        file2 = drive.CreateFile({"id": file1["id"]})
        pydrive_retry(file2.Delete)
        try:
            pydrive_retry(file1.FetchMetadata)
            self.fail("File not deleted correctly.")
        except ApiRequestError:
            pass
# Tests for Permissions.
# ======================
    def test_Files_FetchMetadata_Fields(self):
        """FetchMetadata with an explicit fields argument pulls that field in."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        pydrive_retry(file1.Upload)
        self.assertFalse("permissions" in file1)
        pydrive_retry(file1.FetchMetadata, "permissions")
        self.assertTrue("permissions" in file1)
        pydrive_retry(file1.Delete)
    def test_Files_FetchAllMetadata_Fields(self):
        """fetch_all=True retrieves extended metadata fields in one call."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        pydrive_retry(file1.Upload)
        pydrive_retry(file1.FetchMetadata, fetch_all=True)
        self.assertTrue("hasThumbnail" in file1)
        self.assertTrue("thumbnailVersion" in file1)
        self.assertTrue("permissions" in file1)
        pydrive_retry(file1.Delete)
    def test_Files_Insert_Permission(self):
        """Insert an 'anyone' reader permission and verify locally and remotely."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        pydrive_retry(file1.Upload)
        # Verify only one permission before inserting permission.
        permissions = pydrive_retry(file1.GetPermissions)
        self.assertEqual(len(permissions), 1)
        self.assertEqual(len(file1["permissions"]), 1)
        # Insert the permission.
        permission = pydrive_retry(
            file1.InsertPermission,
            {"type": "anyone", "value": "anyone", "role": "reader"},
        )
        self.assertTrue(permission)
        self.assertEqual(len(file1["permissions"]), 2)
        self.assertEqual(file1["permissions"][0]["type"], "anyone")
        permissions = pydrive_retry(file1.GetPermissions)
        self.assertEqual(len(file1["permissions"]), 2)
        self.assertEqual(file1["permissions"][0]["type"], "anyone")
        self.assertEqual(permissions[0]["type"], "anyone")
        # Verify remote changes made.
        file2 = drive.CreateFile({"id": file1["id"]})
        permissions = pydrive_retry(file2.GetPermissions)
        self.assertEqual(len(permissions), 2)
        self.assertEqual(permissions[0]["type"], "anyone")
        pydrive_retry(file1.Delete)
    def test_Files_Get_Permissions(self):
        """GetPermissions populates the 'permissions' key on the file object."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        pydrive_retry(file1.Upload)
        self.assertFalse("permissions" in file1)
        permissions = pydrive_retry(file1.GetPermissions)
        self.assertTrue(permissions is not None)
        self.assertTrue("permissions" in file1)
        pydrive_retry(file1.Delete)
    def test_Files_Delete_Permission(self):
        """Delete a permission by id and verify the count drops remotely."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        pydrive_retry(file1.Upload)
        pydrive_retry(
            file1.InsertPermission,
            {"type": "anyone", "value": "anyone", "role": "reader"},
        )
        permissions = pydrive_retry(file1.GetPermissions)
        self.assertEqual(len(permissions), 2)
        self.assertEqual(len(file1["permissions"]), 2)
        pydrive_retry(file1.DeletePermission, permissions[0]["id"])
        self.assertEqual(len(file1["permissions"]), 1)
        # Verify remote changes made.
        file2 = drive.CreateFile({"id": file1["id"]})
        permissions = pydrive_retry(file2.GetPermissions)
        self.assertEqual(len(permissions), 1)
        pydrive_retry(file1.Delete)
    def test_Files_Delete_Permission_Invalid(self):
        """Deleting a nonexistent permission id raises ApiRequestError."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        pydrive_retry(file1.Upload)
        try:
            pydrive_retry(file1.DeletePermission, "invalid id")
            self.fail("Deleting invalid permission not raising exception.")
        except ApiRequestError:
            pass
        pydrive_retry(file1.Delete)
    def test_ApiRequestError_HttpError_Propagation(self):
        """ApiRequestError wraps the underlying googleapiclient HttpError."""
        file = GoogleDrive(self.ga).CreateFile()
        pydrive_retry(file.Upload)
        try:
            pydrive_retry(file.DeletePermission, "invalid id")
            self.fail("Deleting invalid permission not raising exception.")
        except ApiRequestError as exc:
            self.assertTrue(
                exc.args and isinstance(exc.args[0], errors.HttpError)
            )
            self.assertTrue(exc.error is not None)
            # Validating for HttpError 404 "Permission not found: invalid id"
            self.assertTrue(exc.error["code"] == 404)
        finally:
            pydrive_retry(file.Delete)
    def test_GFile_Conversion_Lossless_String(self):
        """Convert to Google Doc format and download losslessly as plain text."""
        drive = GoogleDrive(self.ga)
        file1 = drive.CreateFile()
        # Upload a string, and convert into Google Doc format.
        test_string = "Generic, non-exhaustive ASCII test string."
        file1.SetContentString(test_string)
        pydrive_retry(file1.Upload, {"convert": True})
        # Download string as plain text.
        downloaded_string = file1.GetContentString(mimetype="text/plain")
        self.assertEqual(
            test_string, downloaded_string, "Strings do not match"
        )
        # Download content into file and ensure that file content matches original
        # content string.
        downloaded_file_name = "_tmp_downloaded_file_name.txt"
        pydrive_retry(
            file1.GetContentFile,
            downloaded_file_name,
            mimetype="text/plain",
            remove_bom=True,
        )
        downloaded_string = open(downloaded_file_name).read()
        self.assertEqual(
            test_string, downloaded_string, "Strings do not match"
        )
        # Delete temp file.
        delete_file(downloaded_file_name)
# Tests for GDrive conversion.
# ============================
def | |
Returns correctPercent as a float"""
correctPercent = correctCountVar / self.iterationNum * 100
return correctPercent
def updateScoreStrVar(self, scorePercent):
"""Takes a percent value and converts it to a string rounded to 2 decimal places.
Accepts scorePercent as a float
Returns tempScoreString as a string."""
if scorePercent > 100: # deal with edge cases
tempScoreString = '100.00%'
elif scorePercent <= 0: # deal with edge cases
tempScoreString = '0.00%'
else:
tempScoreString = str(round(scorePercent, 2)) + '%'
return tempScoreString
def updateScoreBarPixel(self, scorePercent):
"""Takes a percent value and calculates the width of a score bar for tkinter canvas rectangle (for cpu score bar)
Accepts scorePercent as a float
Returns tempScoreWidth as an integer."""
if scorePercent > 100: # deal with edge cases
tempScoreWidth = 410
elif scorePercent < 0.5: # deal with edge cases
tempScoreWidth = 5
else:
tempScoreWidth = round(scorePercent / 100 * 410)
return tempScoreWidth
def updateScoreBarPosition(self, scorePercent, oldScorePercent):
"""Calculates x positions for tkinter canvas rectangles based on changes in score percent (for user score bar)
Accepts scorePercent as a float
oldScorePercent as a float
Returns scoreBlackBarX2Pos as an integer
scoreColourBarX2Pos as an integer
significantChange as a boolean"""
if scorePercent > 100: # deal with edge cases
scoreWidthDefault = 410
elif scorePercent < (100 / 410): # deal with edge cases
scoreWidthDefault = 1
else:
scoreWidthDefault = round(scorePercent / 100 * 410)
if abs(scorePercent - oldScorePercent) >= (100 / 410): # check for change in percent between old and new percent
significantChange = True
if scorePercent > oldScorePercent: # check if score has increased/decreased
scoreBlackBarX2Pos = round(oldScorePercent / 100 * 410)
scoreColourBarX2Pos = round(scorePercent / 100 * 410)
else:
scoreBlackBarX2Pos = round(scorePercent / 100 * 410)
scoreColourBarX2Pos = round(oldScorePercent / 100 * 410)
else: # assigns x-position values if score percentage stays at 100% or 0%
significantChange = False
scoreBlackBarX2Pos = scoreWidthDefault
scoreColourBarX2Pos = scoreWidthDefault + 1
return scoreBlackBarX2Pos, scoreColourBarX2Pos, significantChange
def incrementHexVal(self, hexStr, increment):
"""Changes an input hex value by a step value of increment
Accepts hexStr as a string in format '#FFFFFF'
increment as an integer
Returns newHexStr as a string in format '#FFFFFF'"""
# deconstruct hex to red green blue
rgbVal = [''] * 3
rgbVal[0] = hexStr[1:3] # red
rgbVal[1] = hexStr[3:5] # green
rgbVal[2] = hexStr[5:] # blue
for i in range(3):
if rgbVal[i] == '00': # check if value already 0
continue
# convert hex to int
rgbVal[i] = int(rgbVal[i], 16)
if rgbVal[i] < 26: # verify valid rgb value
rgbVal[i] = '00'
elif rgbVal[i] > 255:
rgbVal[i] = 'ff'
else:
rgbVal[i] += increment
rgbVal[i] = hex(rgbVal[i])
rgbVal[i] = rgbVal[i][2:] # remove the '0x' prefix in returned hex() method string
newHexStr = '#' + rgbVal[0] + rgbVal[1] + rgbVal[2]
return newHexStr
def cosineHexVal(self, hexStr, radians):
"""Multiplies a normalized hex color value with a cosine function.
Accepts hexStr as a string in format '#FFFFFF'
radians as a float
Returns newHexStr as a string in format '#FFFFFF'"""
# deconstruct hex to red green blue
rgbValue = [''] * 3
rgbValue[0] = hexStr[1:3] # red
rgbValue[1] = hexStr[3:5] # green
rgbValue[2] = hexStr[5:] # blue
for i in range(3):
if rgbValue[i] == '00': # check if value already 0
continue
# convert hex to int
rgbValue[i] = int(rgbValue[i], 16)
if round(rgbValue[i] * (math.cos(radians) + 1) / 2) < 26: # verify valid rgb value
rgbValue[i] = '00'
elif rgbValue[i] > 255:
rgbValue[i] = 'ff'
else:
rgbValue[i] = round(rgbValue[i] * (math.cos(radians) + 1) / 2)
rgbValue[i] = hex(rgbValue[i])
rgbValue[i] = rgbValue[i][2:] # remove the '0x' prefix in returned hex() method string
newHexStr = '#' + rgbValue[0] + rgbValue[1] + rgbValue[2]
return newHexStr
def advCosineHexVal(self, hexStr, endHexStr, radians):
"""Uses a cosine function normalized to be non-negative that is multiplied with the
difference between two corresponding hex string values. This is summed with hexStr
and returns as a combined hex string value.
Accepts hexStr as a string in format '#FFFFFF'
radians as a float in radians for cos()
endHexStr as a string in format '#FFFFFF'
Returns newHexStr as a string in format '#FFFFFF'"""
# deconstruct hexStr to red green blue
hexStrRGB = [''] * 3
hexStrRGB[0] = hexStr[1:3] # red
hexStrRGB[1] = hexStr[3:5] # green
hexStrRGB[2] = hexStr[5:] # blue
# deconstruct endHexStr to red green blue
endHexStrRGB = [''] * 3
endHexStrRGB[0] = endHexStr[1:3] # red
endHexStrRGB[1] = endHexStr[3:5] # green
endHexStrRGB[2] = endHexStr[5:] # blue
tempRGB = [0] * 3
for i in range(3):
# convert hex to int
hexStrRGB[i] = int(hexStrRGB[i], 16)
endHexStrRGB[i] = int(endHexStrRGB[i], 16)
# do the cosine magic
tempRGB[i] = round(hexStrRGB[i] + (endHexStrRGB[i] - hexStrRGB[i]) * (math.cos(radians) + 1) / 2)
# convert back to hex
tempRGB[i] = hex(tempRGB[i])
if len(tempRGB[i]) == 3: # check if hex conversion results in single digit hex value
tempRGB[i] = tempRGB[i].replace('0x', '0x0')
tempRGB[i] = tempRGB[i][2:] # remove '0x' prefix
newHexStr = '#' + tempRGB[0] + tempRGB[1] + tempRGB[2]
return newHexStr
    def userScorePercentColorFade(self, startHexVal, increment):
        """Increments hex colour value of self.userScorePercentLabel to change to black in a gradient.
        Accepts startHexVal as a string in format '#FFFFFF'
        increment as an integer/float if calling functions incrementHexVal/cosineHexVal"""
        # extract individual hex values
        red = startHexVal[1:3]
        green = startHexVal[3:5]
        # NOTE(review): only red/green are checked to end the fade -- assumes
        # the fade colours carry no blue component; confirm.
        if (red != '00') or green != '00':
            newHexVal = self.cosineHexVal(startHexVal, increment)
            self.radiansTextCounter += math.pi/75 # comment when using self.incrementHexVal -- denominator controls num of iterations needed
            self.userScorePercentLabel['fg'] = newHexVal
            # self.userScorePercentLabel.after(self.changeSpeed, lambda : self.userScorePercentColorFade(newHexVal, increment)) # comment when using self.cosineHexVal()
            # Re-schedule itself on the Tk event loop until the fade reaches black.
            self.userScorePercentLabel.after(self.changeSpeed, lambda : self.userScorePercentColorFade(newHexVal, self.radiansTextCounter)) # comment when using self.incrementHexVal()
        else:
            return
    def userScoreBarColorFade(self, startHexValz, endHexValz, incrementz):
        """Fades the user score bar's colour from the starting hex value to the
        end hex value as a gradient, re-scheduling itself on the Tk event loop.
        Accepts startHexValz as a string in format '#FFFFFF'
        endHexValz as a string in format '#FFFFFF'
        incrementz as an integer/float if calling functions incrementHexVal/advCosineHexVal"""
        # Stop once start and end colours match (case-insensitive compare).
        if startHexValz.upper() != endHexValz.upper():
            newHexVal = self.advCosineHexVal(startHexValz, endHexValz, incrementz)
            self.radiansBarCounter += math.pi/75 # comment when using self.incrementHexVal -- denominator controls num of iterations needed
            self.userScoreCanvas.itemconfig(self.userColourBar, fill = newHexVal, outline = newHexVal)
            # self.userScorePercentLabel.after(self.changeSpeed, lambda : self.userScorePercentColorFade(newHexVal, increment)) # comment when using self.advCosineHexVal()
            self.userScoreCanvas.after(self.changeSpeed, lambda : self.userScoreBarColorFade(newHexVal, endHexValz, self.radiansBarCounter)) # comment when using self.incrementHexVal()
        else:
            return
def updateTkinter(self, userChoice):
"""Accepts userChoice as an integer"""
self.iterationNum += 1
# get user and cpu predictions
correctChoice = self.cnnModel.labels
cpuChoice = self.cnnModel.pred
print(self.labelList[correctChoice])
# check if user and cpu made correct choice
if userChoice == correctChoice:
self.userCorrect += 1
self.correctChoice = True
else:
self.correctChoice = False
if cpuChoice == correctChoice:
self.cpuCorrect += 1
# change color of self.userScorePercentLabel on right (green) or wrong (red) choice
if self.correctChoice:
self.userScorePercentLabel['fg'] = self.correctAnsColor
# self.userScorePercentColorFade(self.correctAnsColor, self.increment) # uncomment if using self.incrementHexVal()
self.radiansTextCounter = 0 # uncomment if using self.cosineHexVal()
self.userScorePercentColorFade(self.correctAnsColor, self.radiansTextCounter) # uncomment if using self.cosineHexVal()
else:
self.userScorePercentLabel['fg'] = self.wrongAnsColor
# self.userScorePercentColorFade(self.wrongAnsColor, self.increment) # uncomment if using self.incrementHexVal()
self.radiansTextCounter = 0 # uncomment if using self.cosineHexVal()
self.userScorePercentColorFade(self.wrongAnsColor, self.radiansTextCounter) # uncomment if using self.cosineHexVal()
# update self.infoFrame labels
self.userInfoDynamicLabel['text'] = str(int(self.userCorrect))
self.cpuInfoDynamicLabel['text'] = str(int(self.cpuCorrect))
self.numImagesInfoDynamicLabel['text'] = str(int(self.iterationNum))
# re-calculate user and cpu correct percentages
self.userCorrectPercent = self.userCorrect / self.iterationNum * 100
self.cpuCorrectPercent = self.cpuCorrect / self.iterationNum * 100
# update correct percentages
self.userScorePercentLabel['text'] = self.updateScoreStrVar(self.userCorrectPercent)
self.cpuScorePercentLabel['text'] = self.updateScoreStrVar(self.cpuCorrectPercent)
# re-calculate score bar pixel width for user and cpu
newUserBlackBarX2Pos, newUserColorX2Pos, significantChanges = self.updateScoreBarPosition(self.userCorrectPercent, self.oldUserPercent)
self.userScoreCanvas.coords(self.userBlackBar, 0, 21, newUserBlackBarX2Pos, 57)
self.userScoreCanvas.coords(self.userColourBar, newUserBlackBarX2Pos, 21, newUserColorX2Pos, 57)
self.cpuScoreCanvas.coords(self.cpuScoreBar, 0, 21, self.updateScoreBarPixel(self.cpuCorrectPercent), 57)
self.oldUserPercent = self.userCorrectPercent
print(self.userScoreCanvas.coords(self.userBlackBar))
print(self.userScoreCanvas.coords(self.userColourBar))
# change color of self.userColorBar on right (green) or wrong (red) choice
if significantChanges:
if self.correctChoice:
self.userScoreCanvas.itemconfig(self.userColourBar, fill = self.correctAnsColor, outline = self.correctAnsColor)
self.radiansBarCounter = math.pi
self.userScoreBarColorFade(self.correctAnsColor, self.blackColor, self.radiansBarCounter)
else:
self.userScoreCanvas.itemconfig(self.userColourBar, fill = self.wrongAnsColor, outline = self.wrongAnsColor)
self.radiansBarCounter = math.pi
self.userScoreBarColorFade(self.wrongAnsColor, self.windowBGColor, self.radiansBarCounter)
# update lastCorrectLabelVar with the correct choice for the image
self.lastCorrectLabelVar['text'] = ' ' + self.labelList[self.cnnModel.labels]
# update model and image for next iteration
self.count += | |
#!/usr/bin/python
import main
from numpy import *
from matplotlib import pyplot
def smooth (x, radius, iters = 1):
    '''Recursively box-filter the columns of x.

    Each pass averages length-`radius` windows via a cumulative-sum
    difference, shrinking the first axis by `radius` per iteration.
    iters = 0 returns x unchanged.'''
    if iters == 0:
        return x
    sums = cumsum(x, 0)
    averaged = (sums[radius:, :] - sums[:-radius]) / radius
    return smooth(averaged, radius, iters - 1)
#----( beat functions )-------------------------------------------------------
def beat_fun_exp (angle):
    'exponentiated sine: a smooth, strictly positive beat function'
    phase = sin(angle)
    return exp(phase)
def beat_fun_sqr (angle):
    'a factor of 2/3 allows the largest 1:1 stair to be about 1 octave wide'
    lifted = sin(angle) + 1
    return 2.0 / 3.0 * lifted ** 2
def beat_fun_sqr_weak (angle):
    'a factor of 1/2 allows the largest 1:1 stair to be about 1/2 octave wide'
    lifted = sin(angle) + 1
    return 0.5 * lifted ** 2
def beat_fun_sqr_weaker (angle):
    'a factor of 1/3 allows the largest 1:1 stair to be about 1/4 octave wide'
    lifted = sin(angle) + 1
    return 1.0 / 3.0 * lifted ** 2
def beat_fun_pow (exponent):
    'build a beat function: a sine normalized to [0, 1], raised to the given power'
    def fun (angle):
        base = sin(angle) / 2 + 0.5
        return 2 * sqrt(exponent) * base ** exponent
    return fun
def beat_fun_box (angle, radius):
    'box beat functions have many tongues'
    distance = abs(angle - pi / 2)
    return distance < radius
def beat_fun_box_highpass (angle, radius):
    'box beat function with its circular mean (radius / pi) subtracted'
    inside = abs(angle - pi / 2) < radius
    return inside - (radius / pi)
def beat_fun_box_strong (strength = 10):
    'used to determine strength for given tongue width'
    def fun (angle):
        inside = abs(angle - pi / 2) < pi / 4
        return sqrt(strength) * inside
    return fun
'''
Empirically, each strength unit absorbs
~1/2 semitone = log(2) / 24 radius around 1:1
'''
# Preset box-beat strengths: tempo locking tolerates a wider tongue than pitch.
beat_fun_box_tempo = beat_fun_box_strong(10)
beat_fun_box_pitch = beat_fun_box_strong(5)
# Default beat function used by the rest of the module.
beat_fun = beat_fun_box_pitch
'''
Pitch and tempo have different acuity, hence different radius
'''
tempo_radius_circ = pi * 3 / 8
pitch_radius_circ = pi * 1 / 6
def beat_fun_circ (angle1, angle2, radius = pitch_radius_circ, strength = 1.5):
    '''
    Circular beat regions have even more tongues than square regions,
    but are not separable
    '''
    # Indicator of the disc of the given radius centered at (radius, radius).
    inside_disc = ( (angle1 - radius) ** 2
                  + (angle2 - radius) ** 2
                  < radius ** 2
                  )
    return strength / radius * inside_disc
'''
The piecewise biquadratic beta function acts much like the circular beat fun,
but is separable and continuous.
'''
def beat_fun_biquad (t):
    'Biquadratic bump 1 - (1 - t)^2, masked to zero for t >= 2.'
    falloff = (1 - t) ** 2
    return (t < 2) * (1 - falloff)
# Half-widths (in radians, before rescaling by t = angle / radius).
tempo_radius_biquad = pi / 3
pitch_radius_biquad = pi / 7
def beat_fun_pair_biquad (angle1, angle2, radius = pitch_radius_biquad, strength = 2.5):
    'Separable 2-D beat kernel: product of two biquadratic bumps.'
    gain = strength / radius
    return gain * beat_fun_biquad(angle1 / radius) * beat_fun_biquad(angle2 / radius)
def beat_fun_pair_biquad_highpass (angle1, angle2, radius = pitch_radius_biquad, strength = 2.0):
    'Like beat_fun_pair_biquad, but the first factor has its mean removed.'
    gain = strength / radius
    first = beat_fun_biquad(angle1 / radius) - radius * 4 / 3 / (2 * pi)
    second = beat_fun_biquad(angle2 / radius)
    return gain * first * second
# Half-widths of the triangular (bilinear) kernels for tempo vs. pitch.
tempo_radius_bilin = pi / 2
pitch_radius_bilin = pi / 4
def beat_fun_bilin (angle, radius = pitch_radius_bilin):
    'Triangular bump: 1 at angle 0, falling to 0 at |angle / radius| = 1.'
    tent = 1 - abs(angle / radius)
    return maximum(0, tent)
def beat_fun_pair_bilin (angle1, angle2, radius = pitch_radius_bilin, strength = 8.0):
    'Separable 2-D beat kernel: product of two triangular bumps.'
    gain = strength / radius
    return gain * beat_fun_bilin(angle1, radius) * beat_fun_bilin(angle2, radius)
def beat_fun_bicos (angle, radius = pitch_radius_biquad):
    'Clipped, renormalized cosine bump: 1 at angle 0, 0 outside |angle| < radius.'
    floor = cos(radius)
    return maximum(0, cos(angle) - floor) / (1 - floor)
def beat_fun_bicos_highpass (angle, radius = pitch_radius_biquad):
    'Zero-mean (over a full period) variant of beat_fun_bicos.'
    cos_r = cos(radius)
    lobe = maximum(0, cos(angle) - cos_r)
    # Mean of the clipped-cosine lobe over [0, 2*pi).
    mean = (2 * sin(radius) - 2 * radius * cos_r) / (2 * pi)
    return (lobe - mean) / (1 - cos_r)
def beat_fun_pair_bicos (angle1, angle2, radius = pitch_radius_biquad, strength = 2.0):
    'Separable 2-D beat kernel: product of two clipped-cosine bumps.'
    gain = strength / radius
    return gain * beat_fun_bicos(angle1, radius) * beat_fun_bicos(angle2, radius)
def beat_fun_pair_bicos_highpass (angle1, angle2, radius = pitch_radius_biquad, strength = 2.0):
    'As beat_fun_pair_bicos, but the first factor is the zero-mean variant.'
    gain = strength / radius
    return gain * beat_fun_bicos_highpass(angle1, radius) * beat_fun_bicos(angle2, radius)
def beat_fun_pair_box (angle1, angle2, radius = pitch_radius_biquad, strength = 1.0):
    'Separable 2-D box kernel; the first factor is mean-removed.'
    gain = strength / radius
    return gain * beat_fun_box_highpass(angle1, radius) * beat_fun_box(angle2, radius)
def beat_fun_bicos1 (angle, radius = pitch_radius_biquad):
    'Bump built from (1 + cos)^2, renormalized to 1 at angle 0 and clipped at 0.'
    def cos1 (x):
        return (1 + cos(x)) ** 2
    ratio = (cos1(angle) - cos1(radius)) / (cos1(0) - cos1(radius))
    return maximum(0, ratio)
def beat_fun_bicos2 (angle, radius = pitch_radius_biquad, tol=1e-10):
    'Bump based on the squared direction cosine of (max(0, cos), sin).'
    x = maximum(0, cos(angle))
    y = sin(angle)
    # tol**2 keeps the quotient finite when both components vanish.
    r2 = x ** 2 + y ** 2
    u2 = x ** 2 / (r2 + tol ** 2)
    U2 = cos(radius) ** 2
    return maximum(0, (u2 - U2) / (1 - U2))
# Module-wide defaults: biquadratic radii and the bicos pair kernel.
tempo_radius = tempo_radius_biquad
pitch_radius = pitch_radius_biquad
beat_fun_pair = beat_fun_pair_bicos
@main.command
def beats ():
    'Plots example separable beat functions'
    T = arange(0, 4 * pi, 0.01)
    pyplot.figure()
    # Plot each kernel over two full periods, labeled by its function name.
    for fun in [
            beat_fun_exp,
            beat_fun_bilin,
            beat_fun_bicos,
            beat_fun_bicos_highpass,
            beat_fun_bicos1,
            beat_fun_bicos2,
            ]:
        pyplot.plot(T, fun(T), label = fun.__name__)
    pyplot.title('Beat functions')
    pyplot.xlabel('phase')
    # Fixed typo in the axis label: was 'syncrhonizing'.
    pyplot.ylabel('synchronizing')
    pyplot.legend()
    pyplot.savefig('beats.pdf')
    pyplot.show()
@main.command
def beat_response (ratio = 64.0, samples = 2000):
    'Plots convolution of two beat functions'
    # Midpoints of the unit interval, mapped to log-spaced frequency ratios
    # u : v symmetric about 1:1.
    grid = (array(range(samples)) + 0.5) / samples
    u = pow(ratio, grid - 0.5)
    v = 1 / u
    periods = sqrt(samples)
    t = 2 * pi * grid * periods
    pairs = [
        (beat_fun_bicos, beat_fun_bicos),
        (beat_fun_bicos, beat_fun_bicos_highpass),
        (beat_fun_bicos_highpass, beat_fun_bicos),
        (beat_fun_bicos_highpass, beat_fun_bicos_highpass),
        ]
    def name (f):
        return f.__name__.replace('beat_fun_','')
    pyplot.figure()
    for f, g in pairs:
        # Mean of the product of the two kernels, sampled along the time axis.
        conv = sum(f(outer(u, t)) * g(outer(v, t)), 1) / samples
        assert conv.shape == (samples,)
        pyplot.plot(u / v, conv, label = '%s * %s' % (name(f), name(g)))
    pyplot.title('Correlation of beat function pairs')
    pyplot.legend()
    pyplot.xlabel('frequency ratio')
    pyplot.ylabel('mean beat response')
    pyplot.xscale('log')
    pyplot.xlim(1 / ratio, ratio)
    pyplot.savefig('beat_response.pdf')
    pyplot.show()
#----( beat strength normalization )----
def max_bend (acuity):
    'returns optimal angle and bend strength at optimal angle'
    from scipy.optimize import fminbound
    # Second-order Taylor approximation of cos(pi / acuity).
    U = 1 - (pi / acuity) ** 2 / 2
    def f (angle):
        # Negated bend strength, so the bounded minimizer finds its maximum.
        x = cos(angle)
        beat = (x - U) / (1 - U)
        return - beat ** 2 * sin(2 * angle)
    LB, UB = 0.0, pi / max(1.0, acuity)
    # fminbound returns a plain float in current SciPy (and a 1-element array
    # in some older releases); the original `(angle,) = ...` unpacking raises
    # TypeError on a float. atleast_1d handles both forms.
    angle = float(atleast_1d(fminbound(f, LB, UB))[0])
    return angle, -f(angle)
def beat_scale (acuity):
    'Normalizing coefficient: inverse square root of the peak bend strength.'
    angle, bend = max_bend(acuity)
    return 1.0 / bend ** 0.5
@main.command
def beat_strength (min_acuity = 3, max_acuity = 7):
    'Plots beat strength for a pair of oscillators'
    pyplot.figure()
    for acuity in range(min_acuity, max_acuity + 1):
        dangle = arange(0, 2 * pi / acuity, 0.01)
        angle = dangle / 2
        # Clipped-cosine beat response at this acuity...
        floor = cos(pi / acuity)
        beat = (cos(angle) - floor) / (1 - floor)
        # ...weighted by the phase-difference bend term.
        bend = beat ** 2 * sin(dangle)
        pyplot.plot(angle, bend, label='acuity = %i' % acuity)
        # Mark the numerically-optimized maximum for this acuity.
        best_angle, best_bend = max_bend(acuity)
        pyplot.plot([best_angle], [best_bend], 'ko')
    pyplot.title('Bend strength')
    pyplot.xlabel('angle difference')
    pyplot.ylabel('beat strength')
    pyplot.legend(loc = 'upper right')
    pyplot.savefig('beat_strength.png')
    pyplot.show()
@main.command
def beat_rescale (min_acuity = 1, max_acuity = 24):
    'Prints & plots beat rescaling coefficients for various acuities'
    # print() and list(map(...)) keep this command runnable on both Python 2
    # and Python 3; the original Py2-only print statements fail to parse on 3.
    print('acuity\tscale')
    print('-' * 32)
    acuity = range(min_acuity, max_acuity + 1)
    scale = list(map(beat_scale, acuity))
    for a, s in zip(acuity, scale):
        print('%i\t%s' % (a, s))
    pyplot.title('Beat function rescaling function')
    pyplot.plot(acuity, scale)
    pyplot.xlabel('acuity')
    pyplot.ylabel('beat scale')
    pyplot.show()
@main.command
def beat_rescale_log2 (min_log2_acuity = -3, max_log2_acuity = 4, size = 20):
    'Prints & log-log-plots beat rescaling coefficients for various acuities'
    # NOTE(review): the `size` parameter is accepted but unused; kept for
    # interface compatibility.
    print('acuity\tscale')
    print('-' * 32)
    # Half-integer grid of log2(acuity) values.
    acuity = 0.5 * array(range(2 * min_log2_acuity, 1 + 2 * max_log2_acuity))
    # list() is required on Python 3, where map() returns a lazy iterator that
    # numpy's log() cannot consume. (Also dropped an unused `import scipy`.)
    scale = log(list(map(beat_scale, exp(acuity)))) / log(2)
    for a, s in zip(acuity, scale):
        print('%s\t%s' % (a, s))
    pyplot.title('Beat function rescaling function')
    pyplot.plot(acuity, scale, 'o')
    pyplot.xlabel('log2(acuity)')
    pyplot.ylabel('log2(beat scale)')
    pyplot.show()
class BeatFun:
    '''Zero-mean, unit-variance clipped-cosine beat function of given acuity.'''
    def __init__ (self, acuity):
        half_width = pi / acuity
        c = cos(half_width)
        s = sin(half_width)
        # Moments of f(theta) = max(0, cos(theta) - cos(a)) over a full period.
        mean = (s - half_width * c) / pi
        second_moment = (half_width - 3 * s * c + 2 * half_width * c ** 2) / (2 * pi)
        variance = second_moment - mean ** 2
        self.floor = c
        self.shift = -mean
        self.scale = 1 / sqrt(variance)
    def __call__ (self, theta):
        lobe = maximum(0.0, cos(theta) - self.floor)
        return (lobe + self.shift) * self.scale
@main.command
def standard_beat (min_acuity = 1.0, max_acuity = 20.0, count = 12):
    'Plots zero-mean unit-variance beat functions at various acuity'
    # print() keeps this command runnable on both Python 2 and Python 3; the
    # original Py2-only print statements fail to parse on 3.
    print('acuity\t\tmean\t\tvariance')
    print('-' * 8 * 6)
    angle = arange(0, 2 * pi, 0.01)
    for i in range(count):
        # Geometric spacing of acuities between min_acuity and max_acuity.
        acuity = min_acuity * pow(max_acuity / min_acuity, (i + 0.5) / count)
        fun = BeatFun(acuity)
        beat = fun(angle)
        # Empirical moments, printed to check the zero-mean/unit-variance claim.
        Ef = sum(beat) / len(beat)
        Ef2 = sum(beat ** 2) / len(beat)
        Vf = Ef2 - Ef ** 2
        print('%g\t%g\t%g' % (acuity, Ef, Vf))
        pyplot.plot(angle, beat)
    pyplot.title('Beat function rescaling function')
    pyplot.xlabel('theta')
    pyplot.ylabel('beat(theta)')
    pyplot.show()
#----( frequency plots )------------------------------------------------------
@main.command
def sim_strength (size = 6, steps = 40000, period = 40):
'''
Plots frequencies of pitch-coupled set, varying coupling strength
This motivates choice of strength = sqrt(2 / size),
so that strength = 1 for a coupled pair.
'''
dt = 1.0 / period
# intialize
phase = random.uniform(0,1,size)
golden = False
if golden:
phi = (sqrt(5) - 1) / 2
freq0 = pow(phi, array(range(size)) - | |
id's + the (complete) word of which we want to find the index
# Output: the index of the first piece in the word piece.
# Example for finding the indices of the relevant tokens:
# word_to_find = 'broccoli'
# tokens = "[CLS] ' mom ##will send you gifts bro ##cco ##li and will ##send you bro ##cco ##li fruits for the return journey , ' shouted the quartermaster . [SEP] ' mom will send you gifts and bro ##cco ##li for the return journey , ' shouted the quartermaster . [SEP] ".split()
# input_ids =[101, 18, 202, 313, 414, 515, 616, 717, 838, 949, 111, 222, 333, 444, 555, 666, 18, 8231, 313, 101, 18, 8231, 313, 101, 18, 8231, 313, 101, 18, 8231, 313, 101, 18, 8231, 313, ]
# old_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 18 ]
# new_ind= [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
# Result: [7, 14]
"""
def get_offset_new(tokens_a_new, model_type):
# offset from location in tokens_a_new to location here, so we can activate new2old_ind[] on it.
if model_type == 'bert':
# ['[CLS]'] + tokens_a_new + ['[SEP]']+ tokens_b_new+ ['[SEP]']
return len(tokens_a_new) + 2
if model_type == 'roberta':
# ['<s>'] + tokens_a_new + ['</s>', '</s>'] + tokens_b_new+ ['</s>']
return len(tokens_a_new) + 3
def validate_new_ids(word, input_ids, word_ind, new2old_ind, offset):
retrieved_token = tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[word_ind + offset]]])[0]
relevant_word_piece = word[:len(retrieved_token)]
if retrieved_token != relevant_word_piece and wnu.to_singular(retrieved_token) != wnu.to_singular(relevant_word_piece):
my_logger(f"index of wordpiece doesn't match the index of original sentence: '{retrieved_token}' != '{relevant_word_piece}'")
my_logger(f"\nExample:\nPremise:{example.text_a}\nHypothesis:{example.text_b}\n")
print('********************************************************')
def validate_new_ids_for_country(country, country_adj, input_ids, word_ind, new2old_ind, offset):
retrieved_token = tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[word_ind + offset]]])[0]
country_cond = retrieved_token == country[:len(retrieved_token)]
country_adj_cond = retrieved_token == country_adj[:len(retrieved_token)]
if not (country_cond or country_adj_cond):
my_logger(f"\nindex of wordpiece doesn't match the index of original sentence: '{retrieved_token}' != '{country[:len(retrieved_token)]}' and != '{country_adj[:len(retrieved_token)]}'")
my_logger(f"\nExample:\nPremise:{example.text_a}\nHypothesis:{example.text_b}\n")
print('********************************************************')
def country2adj_func(country):
country = country[0].upper() + country[1:] # capitalizing country string
if country in country2adj:
return country2adj[country]
else:
return country
global item2class, class2ind
ext_emb = [0] * len(input_ids)
# returning 0 vector when not using any knowledge enhancement method
if use_ext_embeddings == False:
return ext_emb
if ee_gen_method == 'four_phenomena': # adding ext_id=1 to any hypernym and =2 to its hyponym, using wordnet - but exclude nouns that appear in both sentences
s_new, new2old_ind = word_piece_connected(tokens, input_ids, model_type) # s_new is the connected tokens list.
tokens_a_new, tokens_b_new = word_piece_connected(tokens_a, [1]*len(tokens_a), model_type)[0], word_piece_connected(tokens_b, [1]*len(tokens_b), model_type)[0] # connecting the word_pieces together
offset_new = get_offset_new(tokens_a_new, model_type) # offset from location in tokens_a_new to location here: ['[CLS]']+ tokens_a_new + ['[SEP]']+ tokens_b_new+ ['[SEP]'] , so we can activate new2old_ind[] on it.
relation2id_vocab = dict(location_head=1, location_tail=2, color_head=3, color_tail=4, trademarks_head=5, trademarks_tail=6, hypernymy_head=7, hypernymy_tail=8)
## Location
pairs = wnu.find_location_country_pairs(example.text_a, example.text_b, tokens_a_new, tokens_b_new, local_wiki, is_print=False, filter_repeat_word=True)
# {'alabama': {'location': ['trussville'],
# 'location_ind': [1],
# 'country_ind': [0]}}
offset = (1, offset_new)
for country in pairs: # hypo in premise -> country in hypothesis
for location, location_i in zip(pairs[country]['location'], pairs[country]['location_ind']):
ext_emb[new2old_ind[location_i + offset[0]]] = relation2id_vocab['location_tail']
if not (tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[location_i + offset[0]]]])[0] == location[:len(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[location_i + offset[0]]]])[0])]):
warnings.warn(f"index of wordpiece doesn't match the index of original sentence: '{tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[location_i + offset[0]]]])[0]}' != '{location[:len(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[location_i + offset[0]]]])[0])]}'")
my_logger(f"=2= Warning! index of wordpiece doesn't match the index of original sentence: '{tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[location_i + offset[0]]]])[0]}' != '{location[:len(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[location_i + offset[0]]]])[0])]}'")
my_logger(f"\nExample:\nPremise:{example.text_a}\nHypothesis:{example.text_b}\n")
print('********************************************************')
for country_i in pairs[country]['country_ind']:
ext_emb[new2old_ind[country_i + offset[1]]] = relation2id_vocab['location_head']
if not (tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[country_i + offset[1]]]])[0] == country[:len(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[country_i + offset[1]]]])[0])]):
my_logger(f"index of wordpiece doesn't match the index of original sentence: '{tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[country_i + offset[1]]]])[0]}' != '{country[:len(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[country_i + offset[1]]]])[0])]}'")
my_logger(f"\nExample:\nPremise:{example.text_a}\nHypothesis:{example.text_b}\n")
print('********************************************************')
## Color
pairs = wnu.find_color_pairs(example.text_a, example.text_b, tokens_a_new, tokens_b_new, local_wiki_features, is_print=False, filter_repeat_word=True)
# pairs = {'grey': {'noun': ['steel'],
# 'noun_ind': [6],
# 'feature_ind': [9],
# 'type': 'color'}}
offset = (1, offset_new)
for color in pairs: # hypo in premise -> country in hypothesis
for noun, noun_i in zip(pairs[color]['noun'], pairs[color]['noun_ind']):
ext_emb[new2old_ind[noun_i + offset[0]]] = relation2id_vocab['color_tail']
validate_new_ids(noun, input_ids, noun_i, new2old_ind, offset[0])
for feature_i in pairs[color]['feature_ind']:
ext_emb[new2old_ind[feature_i + offset[1]]] = relation2id_vocab['color_head']
validate_new_ids(color, input_ids, feature_i, new2old_ind, offset[1])
## Trademarks
pairs = wnu.find_trademark_country_pairs(example.text_a, example.text_b, tokens_a_new, tokens_b_new, local_wiki, is_print=False, filter_repeat_word=True)
# pairs = {'grey': {'noun': ['steel'],
# 'noun_ind': [6],
# 'feature_ind': [9],
# 'type': 'company'}}
offset = (1, offset_new)
for country in pairs: # hypo in premise -> country in hypothesis
for noun, noun_i in zip(pairs[country]['company'], pairs[country]['company_ind']):
ext_emb[new2old_ind[noun_i + offset[0]]] = relation2id_vocab['trademarks_tail']
validate_new_ids(noun, input_ids, noun_i, new2old_ind, offset[0])
for feature_i in pairs[country]['country_ind']:
ext_emb[new2old_ind[feature_i + offset[1]]] = relation2id_vocab['trademarks_head']
validate_new_ids_for_country(country, country2adj_func(country).lower(), input_ids, feature_i, new2old_ind, offset[1])
## Hypernymy
pairs, _ = wnu.find_hypernymy_pairs(example.text_a, example.text_b, tokens_a_new, tokens_b_new, filter_repeat_word=True, mode=args.hypernymy_mode)
offset = (1, offset_new)
for hyper in pairs: # hypo in premise -> hyper in hypothesis
for hypo, hypo_i in zip(pairs[hyper]['hypo'], pairs[hyper]['hypo_ind']):
ext_emb[new2old_ind[hypo_i + offset[0]]] = relation2id_vocab['hypernymy_tail']
validate_new_ids(hypo, input_ids, hypo_i, new2old_ind, offset[0])
for hyper_i in pairs[hyper]['hyper_ind']:
ext_emb[new2old_ind[hyper_i + offset[1]]] = relation2id_vocab['hypernymy_head']
validate_new_ids(hyper, input_ids, hyper_i, new2old_ind, offset[1])
elif ee_gen_method == 'tell_label_at_cls': # WORKS. cheating and 'telling' the model the lable, to see if it reaches 100% accuracy using ext. embeddings
loc = 1
ext_emb[loc] = label_id
elif ee_gen_method == 'all_zeros': #
pass
elif ee_gen_method == 'hypernym_old_pairs_wordnet_filtered_clean': # extracting only pairs with Relation=Hypernymy. Adding ext_id=1 to any hypernym and =2 to its hyponym, using wordnet - but exclude nouns that appear in both sentences
# ext_emb = list(np.random.randint(3, 999, size=len(ext_emb)))
s_new, new2old_ind = word_piece_connected(tokens, input_ids, model_type) # s_new is the connected tokens list.
tokens_a_new, tokens_b_new = word_piece_connected(tokens_a, [1]*len(tokens_a), model_type)[0], word_piece_connected(tokens_b, [1]*len(tokens_b), model_type)[0] # connecting the word_pieces together
offset_new = get_offset_new(tokens_a_new, model_type) # offset from location in tokens_a_new to location here: ['[CLS]']+ tokens_a_new + ['[SEP]']+ tokens_b_new+ ['[SEP]'] , so we can activate new2old_ind[] on it.
(p_h_pairs, h_p_pairs), (doc1, doc2) = wnu.find_hyper_hypo_pairs(example.text_a, example.text_b, tokens_a_new, tokens_b_new, filter_repeat_word=True)
for pairs, offset in zip([p_h_pairs, h_p_pairs], [(1, offset_new), (offset_new, 1)]):
for hyper in pairs: # hypo in premise -> hyper in hypothesis
for hypo, hypo_i in zip(pairs[hyper]['hypo'], pairs[hyper]['hypo_ind']):
ext_emb[new2old_ind[hypo_i + offset[0]]] = 2
if not (wnu.to_singular(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[hypo_i + offset[0]]]])[0]) == wnu.to_singular(hypo[:len(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[hypo_i + offset[0]]]])[0])])):
warnings.warn(f"index of wordpiece doesn't match the index of original sentence: '{tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[hypo_i + offset[0]]]])[0]}' != '{hypo[:len(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[hypo_i + offset[0]]]])[0])]}'")
my_logger(f"Warning! index of wordpiece doesn't match the index of original sentence: '{tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[hypo_i + offset[0]]]])[0]}' != '{hypo[:len(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[hypo_i + offset[0]]]])[0])]}'")
my_logger(f"\nExample:\nPremise:{example.text_a}\nHypothesis:{example.text_b}\n")
(p_h_pairs_, h_p_pairs_), (doc1_, doc2_) = wnu.find_hyper_hypo_pairs(example.text_a, example.text_b, tokens_a_new, tokens_b_new)
print('********************************************************')
for hyper_i in pairs[hyper]['hyper_ind']:
ext_emb[new2old_ind[hyper_i + offset[1]]] = 1
if not (wnu.to_singular(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[hyper_i + offset[1]]]])[0]) == wnu.to_singular(hyper[:len(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[hyper_i + offset[1]]]])[0])])):
my_logger(f"index of wordpiece doesn't match the index of original sentence: '{tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[hyper_i + offset[1]]]])[0]}' != '{hyper[:len(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[hyper_i + offset[1]]]])[0])]}'")
my_logger(f"\nExample:\nPremise:{example.text_a}\nHypothesis:{example.text_b}\n")
(p_h_pairs_, h_p_pairs_), (doc1_, doc2_) = wnu.find_hyper_hypo_pairs(example.text_a, example.text_b, tokens_a_new, tokens_b_new)
print('********************************************************')
elif ee_gen_method == 'location_wiki_filtered_clean': # extracting only pairs with Relation=Location.
s_new, new2old_ind = word_piece_connected(tokens, input_ids, model_type) # s_new is the connected tokens list.
tokens_a_new, tokens_b_new = word_piece_connected(tokens_a, [1]*len(tokens_a), model_type)[0], word_piece_connected(tokens_b, [1]*len(tokens_b), model_type)[0] # connecting the word_pieces together
offset_new = get_offset_new(tokens_a_new, model_type) # offset from location in tokens_a_new to location here: ['[CLS]']+ tokens_a_new + ['[SEP]']+ tokens_b_new+ ['[SEP]'] , so we can activate new2old_ind[] on it.
pairs = wnu.find_location_country_pairs(example.text_a, example.text_b, tokens_a_new, tokens_b_new, local_wiki, is_print=False, filter_repeat_word=True)
# {'alabama': {'location': ['trussville'],
# 'location_ind': [1],
# 'country_ind': [0]}}
offset = (1, offset_new)
for country in pairs: # hypo in premise -> country in hypothesis
for location, location_i in zip(pairs[country]['location'], pairs[country]['location_ind']):
ext_emb[new2old_ind[location_i + offset[0]]] = 2
if not (tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[location_i + offset[0]]]])[0] == location[:len(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[location_i + offset[0]]]])[0])]):
warnings.warn(f"index of wordpiece doesn't match the index of original sentence: '{tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[location_i + offset[0]]]])[0]}' != '{location[:len(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[location_i + offset[0]]]])[0])]}'")
my_logger(f"Warning! index of wordpiece doesn't match the index of original sentence: '{tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[location_i + offset[0]]]])[0]}' != '{location[:len(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[location_i + offset[0]]]])[0])]}'")
my_logger(f"\nExample:\nPremise:{example.text_a}\nHypothesis:{example.text_b}\n")
print('********************************************************')
for country_i in pairs[country]['country_ind']:
ext_emb[new2old_ind[country_i + offset[1]]] = 1
if not (tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[country_i + offset[1]]]])[0] == country[:len(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[country_i + offset[1]]]])[0])]):
my_logger(f"index of wordpiece doesn't match the index of original sentence: '{tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[country_i + offset[1]]]])[0]}' != '{country[:len(tokenizer.convert_ids_to_tokens([input_ids[new2old_ind[country_i + offset[1]]]])[0])]}'")
my_logger(f"\nExample:\nPremise:{example.text_a}\nHypothesis:{example.text_b}\n")
print('********************************************************')
elif ee_gen_method == 'color_wiki_filtered_clean': # extracting only pairs with Relation=Color.
s_new, new2old_ind = word_piece_connected(tokens, input_ids, model_type) # s_new is the connected tokens list.
tokens_a_new, tokens_b_new = word_piece_connected(tokens_a, [1]*len(tokens_a), model_type)[0], word_piece_connected(tokens_b, [1]*len(tokens_b), model_type)[0] # connecting the word_pieces together
offset_new | |
[u'q'] ,
u'籔' : [u's'] ,
u'衜' : [u'd'] ,
u'佞' : [u'n'] ,
u'珡' : [u'q'] ,
u'杮' : [u'b', u'f'] ,
u'韹' : [u'h'] ,
u'㙸' : [u'p', u'b'] ,
u'嫻' : [u'x'] ,
u'罾' : [u'z'] ,
u'讆' : [u'w'] ,
u'予' : [u'y'] ,
u'琏' : [u'l'] ,
u'耗' : [u'h', u'm'] ,
u'䜙' : [u'a'] ,
u'暘' : [u'y'] ,
u'頧' : [u'd'] ,
u'弩' : [u'n'] ,
u'纨' : [u'w'] ,
u'誰' : [u's'] ,
u'䦲' : [u'q', u'y'] ,
u'眹' : [u'z'] ,
u'荁' : [u'h'] ,
u'䙃' : [u'p', u'k', u'r', u'd'] ,
u'懂' : [u'd'] ,
u'魑' : [u'c'] ,
u'幓' : [u's'] ,
u'秒' : [u'm'] ,
u'藚' : [u'x'] ,
u'䣜' : [u'c'] ,
u'癣' : [u'x'] ,
u'艫' : [u'l'] ,
u'鷪' : [u'y'] ,
u'䅭' : [u'p'] ,
u'惬' : [u'q'] ,
u'驻' : [u'z'] ,
u'好' : [u'h'] ,
u'磼' : [u'z'] ,
u'抁' : [u'y'] ,
u'蔄' : [u'm'] ,
u'嘎' : [u'g'] ,
u'窑' : [u'y'] ,
u'鴔' : [u'f'] ,
u'蚙' : [u'q'] ,
u'渞' : [u'q'] ,
u'垣' : [u'y'] ,
u'麩' : [u'f'] ,
u'䬰' : [u's'] ,
u'澳' : [u'a', u'y'] ,
u'鈶' : [u'y'] ,
u'捀' : [u'f'] ,
u'䓅' : [u'q'] ,
u'鏋' : [u'm'] ,
u'筐' : [u'k'] ,
u'峕' : [u's'] ,
u'蝘' : [u'y'] ,
u'偢' : [u'c'] ,
u'瓥' : [u'l'] ,
u'齨' : [u'j'] ,
u'胭' : [u'y'] ,
u'桲' : [u'p', u'b'] ,
u'凷' : [u'k'] ,
u'飽' : [u'b'] ,
u'䖄' : [u'r'] ,
u'猋' : [u'b'] ,
u'貊' : [u'm'] ,
u'嶔' : [u'q'] ,
u'霣' : [u'y'] ,
u'疤' : [u'b'] ,
u'㪦' : [u'y'] ,
u'怭' : [u'b'] ,
u'膬' : [u'c'] ,
u'劶' : [u'k'] ,
u'馼' : [u'w'] ,
u'㴿' : [u'd'] ,
u'葅' : [u'j', u'z'] ,
u'櫆' : [u'k'] ,
u'問' : [u'w'] ,
u'鱕' : [u'f'] ,
u'䟘' : [u'h'] ,
u'浟' : [u'y', u'd'] ,
u'軞' : [u'm'] ,
u'忨' : [u'w'] ,
u'䩱' : [u's'] ,
u'酷' : [u'k'] ,
u'矸' : [u'g'] ,
u'㓺' : [u'j'] ,
u'萀' : [u'h'] ,
u'掅' : [u'q'] ,
u'鰐' : [u'e'] ,
u'弒' : [u's'] ,
u'箕' : [u'j'] ,
u'辝' : [u'c'] ,
u'眢' : [u'y'] ,
u'䘬' : [u'r'] ,
u'抯' : [u'z'] ,
u'帼' : [u'g'] ,
u'窿' : [u'l'] ,
u'軇' : [u'd'] ,
u'䧉' : [u'l'] ,
u'癌' : [u'a', u'y'] ,
u'懙' : [u'y'] ,
u'鉤' : [u'q', u'g'] ,
u'嵦' : [u'a'] ,
u'秩' : [u'z'] ,
u'跱' : [u's', u'z'] ,
u'當' : [u'd'] ,
u'䒀' : [u'f'] ,
u'渇' : [u'k'] ,
u'醎' : [u'm'] ,
u'㴑' : [u's'] ,
u'岐' : [u'q'] ,
u'訟' : [u's', u'r'] ,
u'唡' : [u'l'] ,
u'璠' : [u'f'] ,
u'㮪' : [u'q'] ,
u'洱' : [u'e'] ,
u'邸' : [u'd'] ,
u'㰻' : [u'y'] ,
u'厺' : [u'q'] ,
u'襉' : [u'j'] ,
u'呋' : [u'f'] ,
u'毊' : [u'x'] ,
u'㫔' : [u'g'] ,
u'汛' : [u'x'] ,
u'韢' : [u'h'] ,
u'勤' : [u'q'] ,
u'衳' : [u'z'] ,
u'䭵' : [u'f'] ,
u'櫴' : [u'l'] ,
u'䰆' : [u'r'] ,
u'梉' : [u'z'] ,
u'霌' : [u'c'] ,
u'搖' : [u'y'] ,
u'䶛' : [u'l'] ,
u'钡' : [u'b'] ,
u'簦' : [u'd'] ,
u'㤨' : [u'q', u'g'] ,
u'斫' : [u'z'] ,
u'飯' : [u'f'] ,
u'耮' : [u'l'] ,
u'儸' : [u'l'] ,
u'綻' : [u'z'] ,
u'頾' : [u'z'] ,
u'臃' : [u'y'] ,
u'楈' : [u'x'] ,
u'囍' : [u'x'] ,
u'駓' : [u'p'] ,
u'䉚' : [u'm'] ,
u'滝' : [u's', u'l'] ,
u'镠' : [u'l'] ,
u'婪' : [u'l'] ,
u'䏯' : [u'z'] ,
u'諵' : [u'n'] ,
u'牺' : [u'x', u's'] ,
u'寿' : [u's'] ,
u'愃' : [u'x'] ,
u'蚂' : [u'm'] ,
u'垌' : [u'd', u't'] ,
u'礓' : [u'j'] ,
u'麒' : [u'q'] ,
u'㨕' : [u'y'] ,
u'贛' : [u'z', u'g'] ,
u'澜' : [u'l'] ,
u'別' : [u'b'] ,
u'䂮' : [u'l'] ,
u'樵' : [u'q'] ,
u'讴' : [u'o'] ,
u'㜷' : [u'x', u'm', u'n'] ,
u'墾' : [u'k'] ,
u'佇' : [u'z'] ,
u'降' : [u'x', u'j'] ,
u'烎' : [u'y'] ,
u'㷐' : [u'h'] ,
u'杗' : [u'm'] ,
u'蓖' : [u'b'] ,
u'嗠' : [u'l'] ,
u'罧' : [u's', u'l'] ,
u'鳦' : [u'y'] ,
u'㡩' : [u'l'] ,
u'药' : [u'y'] ,
u'淰' : [u's', u'l', u'n'] ,
u'偹' : [u'b'] ,
u'魿' : [u'l'] ,
u'計' : [u'j'] ,
u'䤊' : [u'r'] ,
u'疍' : [u'd'] ,
u'䒗' : [u'q'] ,
u'愚' : [u'y'] ,
u'醥' : [u'p'] ,
u'岧' : [u't'] ,
u'礪' : [u'l'] ,
u'贲' : [u'b', u'f'] ,
u'䠴' : [u'q', u'y', u'z'] ,
u'璷' : [u'l'] ,
u'恄' : [u'x'] ,
u'郏' : [u'j'] ,
u'忑' : [u't'] ,
u'硔' : [u'h'] ,
u'豜' : [u'j'] ,
u'䭞' : [u'y'] ,
u'矡' : [u'j'] ,
u'䛫' : [u'z'] ,
u'据' : [u'j'] ,
u'鏹' : [u'q'] ,
u'廻' : [u'h'] ,
u'签' : [u'q'] ,
u'辆' : [u'l'] ,
u'䪈' : [u'q'] ,
u'瀏' : [u'l'] ,
u'萗' : [u'c'] ,
u'䌙' : [u'h'] ,
u'折' : [u's', u'z'] ,
u'㖢' : [u'q', u'j'] ,
u'鰧' : [u't'] ,
u'嬩' : [u'y'] ,
u'窨' : [u'y', u'x'] ,
u'躰' : [u't'] ,
u'䶲' : [u'r', u'n'] ,
u'猹' : [u'z'] ,
u'蝁' : [u'e'] ,
u'䉃' : [u'j'] ,
u'旂' : [u'q'] ,
u'齑' : [u'j'] ,
u'婓' : [u'f'] ,
u'緒' : [u'x'] ,
u'臚' : [u'l'] ,
u'䳜' : [u't'] ,
u'牣' : [u'r'] ,
u'虫' : [u'h', u'c'] ,
u'駪' : [u's'] ,
u'䕭' : [u'q'] ,
u'擬' : [u'n'] ,
u'㟶' : [u'y'] ,
u'鹻' : [u'j'] ,
u'嵽' : [u'd'] ,
u'糼' : [u'g'] ,
u'暁' : [u'x'] ,
u'脄' : [u'm'] ,
u'刎' : [u'w'] ,
u'纑' : [u'l'] ,
u'㮓' : [u'g'] ,
u'餔' : [u'b'] ,
u'芙' : [u'f'] ,
u'樞' : [u's'] ,
u'厣' : [u'y'] ,
u'骩' : [u'w'] ,
u'估' : [u'g'] ,
u'殳' : [u's'] ,
u'阶' : [u'j'] ,
u'杀' : [u's'] ,
u'韋' : [u'h', u'w'] ,
u'罐' : [u'g'] ,
u'壕' : [u'h'] ,
u'荘' : [u'z'] ,
u'呢' : [u'n'] ,
u'魨' : [u't'] ,
u'蓭' : [u'a'] ,
u'汲' : [u'j'] ,
u'嗷' : [u'a'] ,
u'鳽' : [u'j'] ,
u'䆄' : [u's'] ,
u'看' : [u'k'] ,
u'袊' : [u'l'] ,
u'妔' : [u'k'] ,
u'䰝' : [u'z'] ,
u'錣' : [u'z'] ,
u'熤' : [u'y'] ,
u'㺦' : [u'l'] ,
u'搭' : [u'd', u't'] ,
u'薬' : [u'y'] ,
u'嚶' : [u'y'] ,
u'簽' : [u'q'] ,
u'鶼' : [u'q', u'j'] ,
u'聅' : [u'c'] ,
u'滆' : [u'h', u'g'] ,
u'兏' : [u'c'] ,
u'顕' : [u'x'] ,
u'䏘' : [u'p'] ,
u'楟' : [u't'] ,
u'諞' : [u'p'] ,
u'寨' : [u'z'] ,
u'乱' : [u'l'] ,
u'長' : [u'c', u'z'] ,
u'珸' : [u'w'] ,
u'蠀' : [u'c'] ,
u'䬂' : [u'y', u'x'] ,
u'澅' : [u'h'] ,
u'㺏' : [u'l'] ,
u'逐' : [u'z', u'd', u't'] ,
u'匒' : [u'd'] ,
u'瞕' : [u'z'] ,
u'莝' : [u'c'] ,
u'䚟' : [u'd'] ,
u'笢' : [u'm'] ,
u'䨬' : [u'l'] ,
u'溯' : [u's'] ,
u'㶹' : [u'h'] ,
u'鼺' : [u'l'] ,
u'刼' : [u'j'] ,
u'皿' : [u'm'] ,
u'苇' : [u'w'] ,
u'穌' : [u's'] ,
u'䥖' : [u't'] ,
u'淙' : [u'c', u's'] ,
u'鹤' : [u'h'] ,
u'兦' : [u'w'] ,
u'痩' : [u's'] ,
u'臱' : [u'm'] ,
u'䓳' : [u'h'] ,
u'祶' : [u'd'] ,
u'䢀' : [u'q'] ,
u'戇' : [u'z', u'g'] ,
u'傐' : [u'h'] ,
u'蘟' : [u'y'] ,
u'夡' : [u'q'] ,
u'碠' : [u'd'] ,
u'㞪' : [u'n'] ,
u'愱' : [u'j'] ,
u'鲸' : [u'q', u'j'] ,
u'徺' : [u'j'] ,
u'蕉' : [u'q', u'j'] ,
u'塋' : [u'y'] ,
u'柊' : [u'z'] ,
u'㛔' : [u'p'] ,
u'恛' : [u'h'] ,
u'鯢' : [u'n'] ,
u'㽥' : [u'r'] ,
u'葳' : [u'w'] ,
u'䝵' : [u'b'] ,
u'更' : [u'g'] ,
u'䀆' : [u'j'] ,
u'撉' : [u'd'] ,
u'鬌' : [u'd'] ,
u'栖' : [u'q', u'x'] ,
u'䆛' : [u'c', u'z'] ,
u'颡' : [u's'] ,
u'瀦' : [u'z'] ,
u'㔨' : [u'b'] ,
u'榫' : [u's'] ,
u'谮' : [u'z', u'j'] ,
u'崸' : [u'd'] ,
u'熻' : [u'x'] ,
u'鐾' : [u'b'] ,
u'跃' : [u'y'] ,
u'效' : [u'x'] ,
u'嫍' : [u't'] ,
u'闓' : [u'k'] ,
u'乚' : [u'y'] ,
u'拝' : [u'b'] ,
u'饠' : [u'l'] ,
u'噪' : [u'z'] ,
u'俯' : [u'f'] ,
u'蛵' : [u'x'] ,
u'繺' : [u's'] ,
u'埿' : [u'b', u'n'] ,
u'洃' : [u'h'] ,
u'誂' : [u't', u'd'] ,
u'完' : [u'w'] ,
u'甓' : [u'p'] ,
u'銒' : [u'x', u'j'] ,
u'㘕' : [u'h', u'l'] ,
u'脛' : [u'k', u'j'] ,
u'掜' : [u'y', u'n'] ,
u'帥' : [u's'] ,
u'䲮' : [u'y'] ,
u'昵' : [u'z', u'n'] ,
u'螴' : [u'c'] ,
u'咾' : [u'l'] ,
u'䍇' : [u't'] ,
u'驍' : [u'x'] ,
u'糎' : [u'l'] ,
u'歗' : [u'x'] ,
u'裖' : [u'z'] ,
u'姠' : [u's'] ,
u'獧' : [u'j'] ,
u'郦' : [u'z', u'l'] ,
u'㑩' : [u'l'] ,
u'软' : [u'r'] ,
u'懰' : [u'l'] ,
u'屹' : [u'y'] ,
u'靿' : [u'y'] ,
u'蘈' : [u't'] ,
u'禍' : [u'h'] ,
u'䢗' : [u'q', u'h'] ,
u'洚' : [u'h', u'j'] ,
u'鶥' : [u'm'] ,
u'㰤' : [u'q', u'k'] ,
u'傧' : [u'b'] ,
u'甪' : [u'l'] ,
u'脲' : [u'n'] ,
u'䐴' : [u'g'] ,
u'䯁' : [u'l'] ,
u'汄' : [u'z'] ,
u'鳏' : [u'y', u'g'] ,
u'㽎' : [u'd', u't'] ,
u'发' : [u'b', u'f'] ,
u'瑔' : [u'q'] ,
u'聜' : [u'd'] ,
u'䝞' : [u'h'] ,
u'篡' : [u'c'] ,
u'䫫' : [u'l'] ,
u'潮' : [u'c'] ,
u'㹸' : [u'n'] ,
u'勻' : [u'y'] ,
u'睾' : [u'h', u'g'] | |
IPv6 address on the Internet.
addr = makeIP6InterfaceAddress(nodePrefix, macAddr=interface.macAddress, prefixLen=128)
# Record a global route that directs traffic for the delegated prefix to the node.
self.ip6PrefixRoutes.append(
IPRoute(
dest = nodePrefix,
interface = None,
via = addr.ip
)
)
# Return a single auto-config response containing the node address and delegated prefix.
return [
IP6AutoConfigInfo(
addresses = [ addr ],
delegatedPrefixes = [ nodePrefix ],
defaultGateway = None,
dnsServers = [ ]
)
]
def requestIP4AdvertisedRoutes(self, interface):
routes = []
if self.ip4Subnet != None:
# Add
routes.append(
IPRoute(
dest = self.ip4Subnet,
interface = interface,
via = None
)
)
return routes
def requestIP6AdvertisedRoutes(self, interface):
routes = []
if self.ip6Prefix != None:
# Add a global route for the entire Internet that declares it 'on-link' on the requesting
# node's interface.
routes.append(
IPRoute(
dest = self.ip6Prefix,
interface = interface,
via = None
)
)
# Add prefix routes for each of the nodes on the Internet, directing traffic destined to internal
# networks to the respective gateway. However exclude any routes that point to the node that is
# making the route request.
nodeAddresses = [ a.ip for a in interface.ip6Addresses ]
routes += [ p for p in self.ip6PrefixRoutes if p.via not in nodeAddresses ]
return routes
    def summary(self, prefix=''):
        # Build on the generic Network summary, appending the IPv4 subnet and
        # IPv6 prefix lines when those are configured.
        s = Network.summary(self, prefix)
        if self.ip4Subnet:
            s += '%s IPv4 Subnet: %s\n' % (prefix, self.ip4Subnet)
        if self.ip6Prefix:
            s += '%s IPv6 Prefix: %s\n' % (prefix, self.ip6Prefix)
        return s
class WiFiNetwork(Network):
    '''A WiFi network: auto-configuration information is obtained by polling
    the other nodes attached to the network.'''
    def __init__(self, name):
        Network.__init__(self, name)
    def requestIP4AutoConfig(self, interface):
        '''Ask other attached nodes for IPv4 auto-config info; return the
        first answer, or None if no node provides one.'''
        if interface.network != self:
            raise ConfigException('Attempt to auto-configure IPv4 interface %s which is not on network %s' % (interface.ifName, self.name))
        for attached in self.attachedInterfaces:
            # A node cannot auto-configure itself.
            if attached.node == interface.node:
                continue
            autoConfig = attached.node.requestIP4AutoConfig(interface)
            if autoConfig:
                return autoConfig
        return None
    def requestIP6AutoConfig(self, interface):
        '''Ask every other attached node for IPv6 auto-config info; return the
        list of all answers received.'''
        if interface.network != self:
            raise ConfigException('Attempt to auto-configure IPv6 interface %s which is not on network %s' % (interface.ifName, self.name))
        collected = []
        for attached in self.attachedInterfaces:
            # A node cannot auto-configure itself.
            if attached.node == interface.node:
                continue
            autoConfig = attached.node.requestIP6AutoConfig(interface)
            if autoConfig:
                collected.append(autoConfig)
        return collected
    def requestIP4AdvertisedRoutes(self, interface):
        # TODO: implement this
        return []
    def requestIP6AdvertisedRoutes(self, interface):
        # TODO: implement this
        return []
class ThreadNetwork(Network):
    """A Thread mesh network identified by a fixed /64 mesh-local prefix.

    Thread networks are IPv6-only and address nodes by 64-bit MACs.
    """
    def __init__(self, name, meshLocalPrefix):
        Network.__init__(self, name)
        self.meshLocalPrefix = toIP6Prefix(meshLocalPrefix, 'Unable to initialize thread network %s: ' % name)
        # The mesh-local prefix must parse and must be exactly a /64.
        if not self.meshLocalPrefix or self.meshLocalPrefix.prefixlen != 64:
            raise ConfigException('Invalid mesh local prefix specified for network %s' % name)
        self.ip4Supported = False
        self.usesMAC64 = True
    def requestIP6AutoConfig(self, interface):
        """Return a single-element list holding the node's mesh-local
        address configuration."""
        if interface.network != self:
            raise ConfigException('Attempt to auto-configure IPv6 interface %s which is not on network %s' % (interface.ifName, self.name))
        # The mesh-local address is the mesh-local /64 prefix plus an IID
        # derived from the node's 64-bit MAC address.
        meshLocalAddr = makeIP6InterfaceAddress(self.meshLocalPrefix, macAddr=interface.macAddress, prefixLen=64, macIs64Bit=True)
        autoConfig = IP6AutoConfigInfo(
            addresses = [ meshLocalAddr ],
            delegatedPrefixes = [ ],
            defaultGateway = None,
            dnsServers = [ ]
        )
        return [ autoConfig ]
    def summary(self, prefix=''):
        """Return the base Network summary plus the mesh-local prefix."""
        s = Network.summary(self, prefix)
        s += '%s  Mesh Local Prefix: %s\n' % (prefix, self.meshLocalPrefix)
        return s
class LegacyThreadNetwork(Network):
    """A legacy Thread network: IPv6-only, using 64-bit MAC addresses."""
    def __init__(self, name):
        Network.__init__(self, name)
        # Legacy Thread does not carry IPv4 traffic and uses EUI-64 MACs.
        self.ip4Supported = False
        self.usesMAC64 = True
class CellularNetwork(WiFiNetwork):
    """A cellular network; for simulation purposes it behaves exactly
    like a WiFiNetwork (peer-answered auto-configuration)."""
    pass
class HostNetwork(Network):
    """A placeholder network backing host-implemented interfaces.

    Host networks are never built or torn down and are hidden from
    summaries; they only exist so host interface objects have a network
    object to reference.
    """
    def __init__(self, name):
        Network.__init__(self, name)
    def buildNetwork(self):
        """No-op: there is no virtual infrastructure to create."""
        pass
    def clearNetwork(self):
        """No-op: there is nothing to tear down."""
        pass
    def summary(self, prefix=''):
        """Return an empty string so host networks never appear in summaries."""
        return ''
#===============================================================================
# Node Classes
#===============================================================================
class Node():
def __init__(self, name, isHostNode=False):
self.name = name
self.isHostNode = isHostNode
if isHostNode:
self.nsName = None
else:
self.nsName = namePrefix + name
self.nodeIndex = len(nodes) + 1 # NOTE: nodeIndexes must be > 0
self.nextInterfaceIndex = 1
self.configState = "incomplete"
self.interfaces = []
self.ip4Enabled = True
self.ip4DefaultGateway = None
self.ip4DefaultInterface = None
self.ip4Routes = []
self.ip4DNSServers = None
self.ip6Enabled = True
self.ip6DefaultGateway = None
self.ip6DefaultInterface = None
self.ip6Routes = []
self.ip6DNSServers= None
self.useTapInterfaces = False
self.hostAliases = []
# Add the new node to the list of nodes associated with the active simnet object.
nodes.append(self)
def addInterface(self, network, name):
# If a network name was specified (verses an actual Network object), find the associated Network object.
if isinstance(network, str):
network = Network.findNetwork(network, 'Unable to add interface to node %s: ' % self.name)
existingInterface = None
# If the node is implemented by the host and an network has NOT been specified...
if self.isHostNode and network == None:
# If the specified interface name is 'host-default', substitute in the name of the host's
# default interface, as determined by examining the host's default IPv4 route.
if name == 'host-default':
name = next((r['dev'] for r in getHostRoutes() if r['dest'] == 'default'), None)
if not name:
raise ConfigException('Unable to determine host default interface')
# Search the list of existing host interfaces for one that matches the specified
# interface name. If a match is found then this interface will be implemented by the
# existing host interface, rather than being a virtual interface.
existingInterface = next((i for i in getHostInterfaces() if i['dev'] == name), None)
# Construct the appropriate type of interface.
if existingInterface:
i = ExistingHostInterface(self, existingInterface)
elif self.useTapInterfaces:
i = TapInterface(self, network, name)
else:
i = VirtualInterface(self, network, name)
return i
def configureAddresses(self):
if self.configState == "complete":
return
if self.configState == "inprogress":
raise ConfigException('Loop detected in automatic address assignment')
self.configState = "inprogress"
self._configureAddresses()
self.configState = "complete"
def _configureAddresses(self):
# Perform configuration for each interface...
for i in self.interfaces:
# If IPv4 is enabled for the interface and the attached network supports IPv4...
if i.ip4Enabled and i.network.ip4Supported:
# If IPv4 automatic configuration is enabled...
if i.autoConfigIP4:
# If an IPv4 address has not been assigned query the associated network for
# IPv4 auto-config information and configure the interface to used the returned
# address. Fail if unsuccessful.
if i.ip4Address == None:
i.ip4Address = i.requestIP4AutoConfig().address
if i.ip4Address == None:
raise ConfigException('Unable to automatically assign IPv4 address to node %s, interface %s' % (self.name, i.ifName))
# Auto configure the IPv4 default gateway if necessary.
if self.ip4DefaultGateway == None:
self.ip4DefaultGateway = i.requestIP4AutoConfig().defaultGateway
self.ip4DefaultInterface = i
# Auto configure the IPv4 DNS servers if necessary.
if self.ip4DNSServers == None:
self.ip4DNSServers = i.requestIP4AutoConfig().dnsServers
# If the assigned IPv4 interface address is a network address (i.e. the node-specific
# bits of the address are all zeros), then assign a host interface address within
# specified network based on the node's index.
if i.ip4Address != None and isNetworkAddress(i.ip4Address):
i.ip4Address = makeIP4IntefaceAddress(i.ip4Address.network, self.nodeIndex)
# If IPv6 is enabled for the interface and the attached network supports IPv6...
if i.ip6Enabled and i.network.ip6Supported:
# Form the interface's link-local address if it hasn't already been specified.
if i.ip6LinkLocalAddress == None:
i.ip6LinkLocalAddress = makeIP6InterfaceAddress(llaPrefix, macAddr=i.macAddress, macIs64Bit=i.usesMAC64, prefixLen=64)
if i.ip6LinkLocalAddress not in i.ip6Addresses:
i.ip6Addresses = [ i.ip6LinkLocalAddress ] + i.ip6Addresses
# If IPv6 automatic configuration is enabled...
if i.autoConfigIP6:
# Query the associated network for IPv6 auto-config information. Delegate to the node subclass
# to filter the responses. For each remaining response...
for autoConfig in self.filterIP6AutoConfig(i, i.requestIP6AutoConfig(i)):
# Add the auto-config addresses to the list of addresses for the | |
<reponame>nakedible/vpnease-l2tp
# orm/dependency.py
# Copyright (C) 2005, 2006, 2007 <NAME> <EMAIL>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Bridge the ``PropertyLoader`` (i.e. a ``relation()``) and the
``UOWTransaction`` together to allow processing of scalar- and
list-based dependencies at flush time.
"""
from sqlalchemy.orm import sync
from sqlalchemy.orm.sync import ONETOMANY,MANYTOONE,MANYTOMANY
from sqlalchemy import sql, util, exceptions
from sqlalchemy.orm import session as sessionlib
def create_dependency_processor(prop):
    """Return the DependencyProcessor instance appropriate for prop.

    Association relations get an AssociationDP regardless of direction;
    otherwise the processor is chosen by the relation's direction.
    """
    if prop.association is not None:
        return AssociationDP(prop)
    dispatch = {
        ONETOMANY: OneToManyDP,
        MANYTOONE: ManyToOneDP,
        MANYTOMANY: ManyToManyDP,
    }
    return dispatch[prop.direction](prop)
class DependencyProcessor(object):
    """Base class for flush-time processors that synchronize parent/child
    data for a single ``relation()``.

    Subclasses implement ``register_dependencies()``,
    ``process_dependencies()``, ``preprocess_dependencies()`` and
    ``_synchronize()`` for each relation direction.
    """
    def __init__(self, prop):
        # Copy the relation's configuration locally so flush-time code does
        # not have to reach back into the PropertyLoader on every call.
        self.prop = prop
        self.cascade = prop.cascade
        self.mapper = prop.mapper
        self.parent = prop.parent
        self.association = prop.association
        self.secondary = prop.secondary
        self.direction = prop.direction
        self.is_backref = prop.is_backref
        self.post_update = prop.post_update
        self.foreign_keys = prop.foreign_keys
        self.passive_deletes = prop.passive_deletes
        self.enable_typechecks = prop.enable_typechecks
        self.key = prop.key
        # Build the synchronization rules once, up front.
        self._compile_synchronizers()
    def _get_instrumented_attribute(self):
        """Return the ``InstrumentedAttribute`` handled by this
        ``DependencyProcessor``.
        """
        return getattr(self.parent.class_, self.key)
    def register_dependencies(self, uowcommit):
        """Tell a ``UOWTransaction`` what mappers are dependent on
        which, with regards to the two or three mappers handled by
        this ``PropertyLoader``.
        Also register itself as a *processor* for one of its mappers,
        which will be executed after that mapper's objects have been
        saved or before they've been deleted.  The process operation
        manages attributes and dependent operations upon the objects
        of one of the involved mappers.
        """
        raise NotImplementedError()
    def whose_dependent_on_who(self, obj1, obj2):
        """Given an object pair assuming `obj2` is a child of `obj1`,
        return a tuple with the dependent object second, or None if
        they are equal.
        Used by objectstore's object-level topological sort (i.e. cyclical
        table dependency).
        """
        if obj1 is obj2:
            return None
        elif self.direction == ONETOMANY:
            # The parent must be processed before its children.
            return (obj1, obj2)
        else:
            # Many-to-one: the child row must exist before the parent
            # row that references it.
            return (obj2, obj1)
    def process_dependencies(self, task, deplist, uowcommit, delete = False):
        """This method is called during a flush operation to
        synchronize data between a parent and child object.
        It is called within the context of the various mappers and
        sometimes individual objects sorted according to their
        insert/update/delete order (topological sort).
        """
        raise NotImplementedError()
    def preprocess_dependencies(self, task, deplist, uowcommit, delete = False):
        """Used before the flushes' topological sort to traverse
        through related objects and ensure every instance which will
        require save/update/delete is properly added to the
        UOWTransaction.
        """
        raise NotImplementedError()
    def _verify_canload(self, child):
        # Type checking can be disabled per-relation via
        # enable_typechecks=False on relation().
        if not self.enable_typechecks:
            return
        if child is not None and not self.mapper.canload(child):
            raise exceptions.FlushError("Attempting to flush an item of type %s on collection '%s', which is handled by mapper '%s' and does not load items of that type. Did you mean to use a polymorphic mapper for this relationship ? Set 'enable_typechecks=False' on the relation() to disable this exception. Mismatched typeloading may cause bi-directional relationships (backrefs) to not function properly." % (child.__class__, self.prop, self.mapper))
    def _synchronize(self, obj, child, associationrow, clearkeys, uowcommit):
        """Called during a flush to synchronize primary key identifier
        values between a parent/child object, as well as to an
        associationrow in the case of many-to-many.
        """
        raise NotImplementedError()
    def _compile_synchronizers(self):
        """Assemble a list of *synchronization rules*, which are
        instructions on how to populate the objects on each side of a
        relationship.  This is done when a ``DependencyProcessor`` is
        first initialized.
        The list of rules is used within commits by the ``_synchronize()``
        method when dependent objects are processed.
        """
        self.syncrules = sync.ClauseSynchronizer(self.parent, self.mapper, self.direction)
        if self.direction == sync.MANYTOMANY:
            # Many-to-many compiles both join conditions: primaryjoin for
            # the parent side, secondaryjoin for the association table side.
            self.syncrules.compile(self.prop.primaryjoin, issecondary=False, foreign_keys=self.foreign_keys)
            self.syncrules.compile(self.prop.secondaryjoin, issecondary=True, foreign_keys=self.foreign_keys)
        else:
            self.syncrules.compile(self.prop.primaryjoin, foreign_keys=self.foreign_keys)
    def get_object_dependencies(self, obj, uowcommit, passive = True):
        """Return the list of objects that are dependent on the given
        object, as according to the relationship this dependency
        processor represents.
        """
        return sessionlib.attribute_manager.get_history(obj, self.key, passive = passive)
    def _conditional_post_update(self, obj, uowcommit, related):
        """Execute a post_update call.
        For relations that contain the post_update flag, an additional
        ``UPDATE`` statement may be associated after an ``INSERT`` or
        before a ``DELETE`` in order to resolve circular row
        dependencies.
        This method will check for the post_update flag being set on a
        particular relationship, and given a target object and list of
        one or more related objects, and execute the ``UPDATE`` if the
        given related object list contains ``INSERT``s or ``DELETE``s.
        """
        if obj is not None and self.post_update:
            # One non-None related object is enough to require the UPDATE.
            for x in related:
                if x is not None:
                    uowcommit.register_object(obj, postupdate=True, post_update_cols=self.syncrules.dest_columns())
                    break
class OneToManyDP(DependencyProcessor):
    """Dependency processor for one-to-many relations: the parent row's
    key values are copied into the foreign key columns of its children."""
    def register_dependencies(self, uowcommit):
        if self.post_update:
            # post_update breaks circular dependencies: a MapperStub is
            # registered so both real mappers are flushed first, then the
            # extra UPDATE runs when the stub is processed.
            if not self.is_backref:
                stub = MapperStub(self.parent, self.mapper, self.key)
                uowcommit.register_dependency(self.mapper, stub)
                uowcommit.register_dependency(self.parent, stub)
                uowcommit.register_processor(stub, self, self.parent)
        else:
            # Parent rows must be saved before their dependent children.
            uowcommit.register_dependency(self.parent, self.mapper)
            uowcommit.register_processor(self.parent, self, self.parent)
    def process_dependencies(self, task, deplist, uowcommit, delete = False):
        #print self.mapper.mapped_table.name + " " + self.key + " " + repr(len(deplist)) + " process_dep isdelete " + repr(delete) + " direction " + repr(self.direction)
        if delete:
            # head object is being deleted, and we manage its list of child objects
            # the child objects have to have their foreign key to the parent set to NULL
            if not self.cascade.delete_orphan or self.post_update:
                for obj in deplist:
                    childlist = self.get_object_dependencies(obj, uowcommit, passive=self.passive_deletes)
                    if childlist is not None:
                        for child in childlist.deleted_items():
                            # Only de-parent children that are no longer
                            # referenced by some other parent.
                            if child is not None and childlist.hasparent(child) is False:
                                self._synchronize(obj, child, None, True, uowcommit)
                                self._conditional_post_update(child, uowcommit, [obj])
                        for child in childlist.unchanged_items():
                            if child is not None:
                                self._synchronize(obj, child, None, True, uowcommit)
                                self._conditional_post_update(child, uowcommit, [obj])
        else:
            for obj in deplist:
                childlist = self.get_object_dependencies(obj, uowcommit, passive=True)
                if childlist is not None:
                    for child in childlist.added_items():
                        # Newly attached children receive the parent's key values.
                        self._synchronize(obj, child, None, False, uowcommit)
                        self._conditional_post_update(child, uowcommit, [obj])
                    for child in childlist.deleted_items():
                        # Detached children get their foreign key cleared,
                        # unless delete-orphan cascade deletes them anyway.
                        if not self.cascade.delete_orphan and not self._get_instrumented_attribute().hasparent(child):
                            self._synchronize(obj, child, None, True, uowcommit)
    def preprocess_dependencies(self, task, deplist, uowcommit, delete = False):
        #print self.mapper.mapped_table.name + " " + self.key + " " + repr(len(deplist)) + " preprocess_dep isdelete " + repr(delete) + " direction " + repr(self.direction)
        if delete:
            # head object is being deleted, and we manage its list of child objects
            # the child objects have to have their foreign key to the parent set to NULL
            if self.post_update:
                # post_update handles the de-referencing in process_dependencies.
                pass
            elif self.cascade.delete_orphan:
                # delete-orphan: children die with the parent, along with
                # anything reachable through their own delete cascades.
                for obj in deplist:
                    childlist = self.get_object_dependencies(obj, uowcommit, passive=self.passive_deletes)
                    if childlist is not None:
                        for child in childlist.deleted_items():
                            if child is not None and childlist.hasparent(child) is False:
                                uowcommit.register_object(child, isdelete=True)
                                for c in self.mapper.cascade_iterator('delete', child):
                                    uowcommit.register_object(c, isdelete=True)
                        for child in childlist.unchanged_items():
                            if child is not None:
                                uowcommit.register_object(child, isdelete=True)
                                for c in self.mapper.cascade_iterator('delete', child):
                                    uowcommit.register_object(c, isdelete=True)
            else:
                # No delete-orphan: children survive, but must be registered
                # so their foreign keys can be updated (nulled).
                for obj in deplist:
                    childlist = self.get_object_dependencies(obj, uowcommit, passive=self.passive_deletes)
                    if childlist is not None:
                        for child in childlist.deleted_items():
                            if child is not None and childlist.hasparent(child) is False:
                                uowcommit.register_object(child)
                        for child in childlist.unchanged_items():
                            if child is not None:
                                uowcommit.register_object(child)
        else:
            for obj in deplist:
                childlist = self.get_object_dependencies(obj, uowcommit, passive=True)
                if childlist is not None:
                    for child in childlist.added_items():
                        if child is not None:
                            uowcommit.register_object(child)
                    for child in childlist.deleted_items():
                        if not self.cascade.delete_orphan:
                            uowcommit.register_object(child, isdelete=False)
                        elif childlist.hasparent(child) is False:
                            # Orphaned child: delete it and cascade.
                            uowcommit.register_object(child, isdelete=True)
                            for c in self.mapper.cascade_iterator('delete', child):
                                uowcommit.register_object(c, isdelete=True)
    def _synchronize(self, obj, child, associationrow, clearkeys, uowcommit):
        # Copy (or clear, when clearkeys is True) the parent's key values
        # into the child's foreign key columns.
        source = obj
        dest = child
        if dest is None or (not self.post_update and uowcommit.is_deleted(dest)):
            return
        self._verify_canload(child)
        self.syncrules.execute(source, dest, obj, child, clearkeys)
class ManyToOneDP(DependencyProcessor):
    def register_dependencies(self, uowcommit):
        """Register flush ordering: the child (self.mapper) must be saved
        before the parent row that holds the foreign key reference."""
        if self.post_update:
            # post_update resolves circular dependencies with a stub that
            # both mappers precede; the extra UPDATE runs at the stub.
            if not self.is_backref:
                stub = MapperStub(self.parent, self.mapper, self.key)
                uowcommit.register_dependency(self.mapper, stub)
                uowcommit.register_dependency(self.parent, stub)
                uowcommit.register_processor(stub, self, self.parent)
        else:
            uowcommit.register_dependency(self.mapper, self.parent)
            uowcommit.register_processor(self.mapper, self, self.parent)
    def process_dependencies(self, task, deplist, uowcommit, delete = False):
        #print self.mapper.mapped_table.name + " " + self.key + " " + repr(len(deplist)) + " process_dep isdelete " + repr(delete) + " direction " + repr(self.direction)
        if delete:
            if self.post_update and not self.cascade.delete_orphan:
                # post_update means we have to update our row to not reference the child object
                # before we can DELETE the row
                for obj in deplist:
                    # clearkeys=True: null out the foreign key reference.
                    self._synchronize(obj, None, None, True, uowcommit)
                    childlist = self.get_object_dependencies(obj, uowcommit, passive=self.passive_deletes)
                    if childlist is not None:
                        self._conditional_post_update(obj, uowcommit, childlist.deleted_items() + childlist.unchanged_items() + childlist.added_items())
        else:
            for obj in deplist:
                childlist = self.get_object_dependencies(obj, uowcommit, passive=True)
                if childlist is not None:
                    for child in childlist.added_items():
                        # Copy the newly-referenced child's key into the parent row.
                        self._synchronize(obj, child, None, False, uowcommit)
                    self._conditional_post_update(obj, uowcommit, childlist.deleted_items() + childlist.unchanged_items() + childlist.added_items())
def preprocess_dependencies(self, task, deplist, uowcommit, delete = False):
#print self.mapper.mapped_table.name + " " + self.key + " " + repr(len(deplist)) + " PRE process_dep isdelete " + repr(delete) + " direction " + repr(self.direction)
if self.post_update:
return
if | |
<reponame>ronichoudhury-work/nci-nanoparticles-vm
#!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Ansible's module magic requires this to be
# 'from ansible.module_utils.basic import *' otherwise it will error out. See:
# https://github.com/ansible/ansible/blob/v1.9.4-1/lib/ansible/module_common.py#L41-L59
# For more information on this magic. For now we noqa to prevent flake8 errors
from ansible.module_utils.basic import * # noqa
from inspect import getmembers, ismethod, getargspec
# girder_client is an optional dependency: record its availability so the
# module can report a clear error at runtime instead of failing on import.
try:
    from girder_client import GirderClient, AuthenticationError, HttpError
    HAS_GIRDER_CLIENT = True
except ImportError:
    HAS_GIRDER_CLIENT = False
DOCUMENTATION = '''
---
module: girder
author: "<NAME> (<EMAIL>)"
version_added: "0.1"
short_description: A module that wraps girder_client
requirements: [ girder_client==1.1.0 ]
description:
- Manage a girder instance using the RESTful API
options:
host:
required: false
default: 'localhost'
description:
- domain or IP of the host running girder
port:
required: false
default: '80' for http, '443' for https
description:
- port the girder instance is running on
apiRoot:
required: false
default: '/api/v1'
description:
- path on server corresponding to the root of Girder REST API
scheme:
required: false
default: 'http'
description:
- A string containing the scheme for the Girder host
dryrun:
required: false
default: None (passed through)
description:
- See GirderClient.__init__()
blacklist:
required: false
default: None (passed through)
description:
- See GirderClient.__init__()
username:
required: true
description:
- Valid username for the system
- Required with password
- must be specified if 'token' is not specified
- (See note on 'user')
password:
required: true
description:
- Valid password for the system
- Required with username
- must be specified if 'token' is not specified
- (See note on 'user')
token:
required: true
description:
- A girder client token
        - Can be retrieved by accessing the 'token' attribute
from a successfully authenticated call to girder in a previous
task.
- Required if 'username' and 'password' are not specified
- (See note on 'user')
state:
required: false
default: "present"
choices: ["present", "absent"]
description:
- Used to indicate the presence or absence of a resource
- e.g., user, plugin, assetstore
user:
required: false
description:
- If using the 'user' task, you are NOT REQUIRED to pass in a
'username' & 'password', or a 'token' attributes. This is because
the first user created on an fresh install of girder is
automatically made an administrative user. Once you are certain
you have an admin user you should use those credentials in all
subsequent tasks that use the 'user' task.
- Takes a mapping of key value pairs
options:
login:
required: true
description:
- The login name of the user
password:
required: true
description:
- The password of the user
firstName:
required: false
default: pass through to girder client
description:
- The first name of the user
lastName:
required: false
default: pass through to girder client
description:
- The last name of the user
email:
required: false
default: pass through to girder client
description:
- The email of the user
admin:
required: false
default: false
description:
- If true, make the user an administrator.
plugin:
required: false
description:
- Specify what plugins should be activated (state: present)
or deactivated (state: absent).
- Takes a list of plugin names, incorrect names are silently
ignored
assetstore:
required: false
description:
- Specifies an assetstore
- Takes many options depending on 'type'
options:
name:
required: true
description:
- Name of the assetstore
type:
required: true
choices: ['filesystem', 'gridfs', 's3', 'hdfs']
description:
- Currently only 'filesystem' has been tested
readOnly:
required: false
default: false
description:
- Should the assetstore be read only?
current:
required: false
default: false
description:
- Should the assetstore be set as the current
assetstore?
options (filesystem):
root:
required: true
description:
- Filesystem path to the assetstore
options (gridfs) (EXPERIMENTAL):
db:
required: true
description:
- database name
mongohost:
required: true
description:
- Mongo host URI
replicaset:
required: false
default: ''
description:
- Replica set name
options (s3) (EXPERIMENTAL):
bucket:
required: true
description:
- The S3 bucket to store data in
prefix:
required: true
description:
- Optional path prefix within the bucket under which
files will be stored
accessKeyId:
required: true
description:
- the AWS access key ID to use for authentication
secret:
required: true
description:
- the AWS secret key to use for authentication
service:
required: false
default: s3.amazonaws.com
description:
- The S3 service host (for S3 type)
- This can be used to specify a protocol and port
- use the form [http[s]://](host domain)[:(port)]
- Do not include the bucket name here
options (hdfs) (EXPERIMENTAL):
host:
required: true
description:
- None
port:
required: true
description:
- None
path:
required: true
description:
- None
user:
required: true
description:
- None
            webHdfsPort:
required: true
description:
- None
'''
EXAMPLES = '''
#############
# Example using 'user'
###
# Ensure "admin" user exists
- name: Create 'admin' User
girder:
user:
firstName: "Chris"
lastName: "Kotfila"
login: "admin"
password: "<PASSWORD>"
email: "<EMAIL>"
admin: yes
state: present
# Ensure a 'foobar' user exists
- name: Create 'foobar' User
girder:
username: "admin"
password: "<PASSWORD>"
user:
firstName: "Foo"
lastName: "Bar"
login: "foobar"
password: "<PASSWORD>"
email: "<EMAIL>"
admin: yes
state: present
# Remove the 'foobar' user
- name: Remove 'foobar' User
  girder:
    username: "admin"
    password: "<PASSWORD>"
    user:
      login: "foobar"
      password: "<PASSWORD>"
    state: absent
#############
# Example using 'plugins'
###
# To enable or disable all plugins you may pass the "*"
# argument. This does not (yet) support arbitrary regexes
- name: Disable all plugins
girder:
username: "admin"
password: "<PASSWORD>"
plugins: "*"
state: absent
- name: Enable thumbnails plugin
girder:
username: "admin"
password: "<PASSWORD>"
port: 8080
plugins:
- thumbnails
state: present
# Note that 'thumbnails' is still enabled from the previous task,
# the 'plugins' task ensures that plugins are enabled or disabled,
# it does NOT define the complete list of enabled or disabled plugins.
- name: Ensure jobs and gravatar plugins are enabled
girder:
username: "admin"
password: "<PASSWORD>"
plugins:
- jobs
- gravatar
state: present
############
# Filesystem Assetstore Tests
#
- name: Create filesystem assetstore
girder:
username: "admin"
password: "<PASSWORD>"
assetstore:
name: "Temp Filesystem Assetstore"
type: "filesystem"
root: "/data/"
current: true
state: present
- name: Delete filesystem assetstore
girder:
username: "admin"
password: "<PASSWORD>"
assetstore:
name: "Temp Filesystem Assetstore"
type: "filesystem"
root: "/tmp/"
state: absent
############
# Examples using get
#
# Get my info
- name: Get users from http://localhost:80/api/v1/users
girder:
username: 'admin'
password: '<PASSWORD>'
get:
path: "users"
register: ret_val
# Prints debugging messages with the emails of the users
# From the last task by accessing 'gc_return' of the registered
# variable 'ret_val'
- name: print emails of users
debug: msg="{{ item['email'] }}"
with_items: "{{ ret_val['gc_return'] }}"
#############
# Advanced usage
#
# Supports get, post, put, delete methods, but does
# not guarantee idempotence on these methods!
- name: Restart the server
girder:
username: "admin"
password: "<PASSWORD>"
put:
path: "system/restart"
# An example of posting an item to Girder
# Note that this is NOT idempotent. Running
# multiple times will create "An Item", "An Item (1)",
# "An Item (2)", etc..
- name: Get Me
girder:
username: "admin"
password: "<PASSWORD>"
get:
path: "user/me"
register: ret
# Show use of 'token' for subsequent authentication
- name: Get my public folder
girder:
token: "{{ ret['token'] }}"
get:
path: "folder"
parameters:
parentType: "user"
parentId: "{{ ret['gc_return']['_id'] }}"
text: "Public"
register: ret
- name: Post an item to my public folder
girder:
host: "data.kitware.com"
scheme: 'https'
token: "{{ ret['token'] }}"
post:
path: "item"
parameters:
folderId: "{{ ret['gc_return'][0]['_id'] }}"
name: "An Item"
'''
def class_spec(cls, include=None):
    """Yield (name, spec) pairs for the selected methods of *cls*.

    spec is a dict {"required": [...], "optional": [...]} splitting the
    method's parameters (minus 'self') by whether they have defaults.

    :param cls: class whose methods are inspected
    :param include: method names to report; None/empty yields nothing
    """
    # Python 3 compatibility: getargspec() was removed in 3.11, and plain
    # methods on a class are functions (not bound methods) in Python 3,
    # so ismethod() alone would match nothing there.
    try:
        from inspect import getfullargspec as _getargspec, isfunction
    except ImportError:  # Python 2 fallback
        from inspect import getargspec as _getargspec, isfunction
    include = include if include is not None else []
    for fn, method in getmembers(cls, predicate=lambda m: ismethod(m) or isfunction(m)):
        if fn not in include:
            continue
        spec = _getargspec(method)
        # spec.args[1:] so we don't include 'self'
        params = spec.args[1:]
        n_defaults = len(spec.defaults) if spec.defaults is not None else 0
        split = len(params) - n_defaults
        yield (fn, {"required": params[:split],
                    "optional": params[split:]})
class GirderClientModule(GirderClient):
# | |
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Optional, Any
class ShowVersionSchema(MetaParser):
    """Schema for show version.

    'os' and 'version' are always produced; 'platform' and 'model' are
    included only when the device output reveals them.
    """
    schema = {
        'os': str,
        'version': str,
        Optional('platform'): str,
        Optional('model'): str,
    }
class ShowVersion(ShowVersionSchema):
"""Parser for show version"""
cli_command = [
'show version',
]
def cli(self, output=None):
if output is None:
output = self.device.execute(self.cli_command[0])
ret_dict = {}
# ********************************************
# * ASA *
# ********************************************
# Cisco Adaptive Security Appliance Software Version 9.8(4)10
asa_os_version_pattern = re.compile(r'^Cisco\s+Adaptive Security Appliance Software Version (?P<version>.+)$')
# Hardware: ASAv, 2048 MB RAM, CPU Xeon E5 series 3491 MHz,
# Hardware: ASA5520, 512 MB RAM, CPU Pentium 4 Celeron 2000 MHz
asa_platform_pattern = re.compile(r'^Hardware:\s+(?P<platform>.*), .*, .*$')
# Model Id: ASAv10
asa_model_pattern = re.compile(r'Model\s+Id\:\s+(?P<model>.+)')
# ********************************************
# * GAIA *
# ********************************************
# Product version Check Point Gaia R80.40
gaia_os_version_pattern = re.compile(r'^Product version Check Point Gaia (?P<version>.*)$')
# ********************************************
# * IOSXE *
# ********************************************
# Cisco IOS Software, IOS-XE Software, Catalyst 4500 L3 Switch Software (cat4500e-UNIVERSALK9-M), Version 03.04.06.SG RELEASE SOFTWARE (fc1)
# Cisco IOS Software, IOS-XE Software, Catalyst L3 Switch Software (CAT3K_CAA-UNIVERSALK9-M), Version 03.06.07E RELEASE SOFTWARE (fc3)
# Cisco IOS XE Software, Version 17.05.01a
# Cisco IOS Software, IOS-XE Software, Catalyst L3 Switch Software (CAT3K_CAA-UNIVERSALK9-M), Version 03.06.07E RELEASE SOFTWARE (fc3)
# Cisco IOS Software [Amsterdam], Virtual XE Software (X86_64_LINUX_IOSD-UNIVERSALK9-M), Version 17.3.1a, RELEASE SOFTWARE (fc3)
# Cisco IOS Software, IOS-XE Software, Catalyst 4500 L3 Switch Software (cat4500e-UNIVERSALK9-M), Version 03.03.02.SG RELEASE SOFTWARE (fc1)
# Cisco IOS Software [Bengaluru], ASR1000 Software (X86_64_LINUX_IOSD-UNIVERSALK9-M), Version 17.5.1a, RELEASE SOFTWARE (fc3)
iosxe_os_version_platform_pattern = re.compile(r'^Cisco IOS.*XE Software(?:.*\((?P<platform>[^\-]+).*\))?,(?: Experimental)? Version (?P<version>[\w\.\(\)\:]+).*$')
# cisco WS-C2940-8TT-S (RC32300) processor (revision H0) with 19868K bytes of memory.
# cisco WS-C3650-48PD (MIPS) processor with 4194304K bytes of physical memory.
# cisco C9500-24Y4C (X86) processor with 2900319K/6147K bytes of memory.
# cisco CSR1000V (VXE) processor (revision VXE) with 715705K/3075K bytes of memory.
iosxe_model_pattern = re.compile(r'^[Cc]isco (?P<model>\S+) \(.*\).* with \S+ bytes of(?: physical)? memory.$')
# Cisco IOS-XE software, Copyright (c) 2005-2017 by cisco Systems, Inc.
iosxe_backup_os_pattern = re.compile(r'^[Cc]isco IOS(?: |-)XE [Ss]oftware.*$')
# Switch Ports Model SW Version SW Image Mode
# ------ ----- ----- ---------- ---------- ----
# * 1 41 C9300-24P 17.07.01 CAT9K_IOSXE INSTALL
iosxe_backup_model_version_pattern = re.compile(r'^\*?\s*\d+\s+\d+\s+(?P<model>[\w\-]+)\s+(?P<version>[\w\-\.]+)\s+\w+\s+\w+$')
# Model Number : C9300-24P
iosxe_backup_model_pattern = re.compile(r'^Model\s+Number\s+\:\s+(?P<model>.+)$')
# ********************************************
# * IOSXR *
# ********************************************
# Cisco IOS XR Software, Version 6.1.4.10I[Default]
# Cisco IOS XR Software, Version 6.2.1.23I[Default]
# Cisco IOS XR Software, Version 6.3.1.15I
# Cisco IOS XR Software, Version 6.4.2[Default]
iosxr_os_version_pattern = re.compile(r'^Cisco IOS XR Software, Version (?P<version>[\w\.]+).*$')
# cisco ASR9K Series (Intel 686 F6M14S4) processor with 6291456K bytes of memory.
# cisco IOS XRv Series (Pentium Celeron Stepping 3) processor with 4193911K bytes of memory.
# cisco IOS-XRv 9000 () processor
# cisco CRS-16/S-B (Intel 686 F6M14S4) processor with 12582912K bytes of memory.
iosxr_platform_pattern = re.compile(r'^^cisco (?P<platform>\S+|IOS(?: |-)XRv ?\d*)(?: Series)? \(.*\) processor.*$')
# ********************************************
# * IOS *
# ********************************************
# Cisco IOS Software, C3750E Software (C3750E-UNIVERSALK9-M), Version 15.2(2)E8, RELEASE SOFTWARE (fc1)
# IOS (tm) C2940 Software (C2940-I6K2L2Q4-M), Version 12.1(22)EA12, RELEASE SOFTWARE (fc1)
# Cisco IOS Software, C2960X Software (C2960X-UNIVERSALK9-M), Version 15.2(2)E7, RELEASE SOFTWARE (fc3)
# Cisco IOS Software, 901 Software (ASR901-UNIVERSALK9-M), Version 15.6(2)SP4, RELEASE SOFTWARE (fc3)
# Cisco IOS Software [Bengaluru], ASR1000 Software (X86_64_LINUX_IOSD-UNIVERSALK9-M), Version 17.5.1a, RELEASE SOFTWARE (fc3)
ios_os_version_platform_pattern = re.compile(r'^(?!.*XE Software.*)(Cisco IOS Software|IOS \(\S+\))(?: \[.*\])?,?\s*(?P<alternate_platform>.+)?\s+Software \((?P<platform>[^\-]+).*\),(?: Experimental)? Version (?P<version>[\w\.\:\(\)]+),?.*$')
# Cisco CISCO1941/K9 (revision 1.0) with 491520K/32768K bytes of memory.
ios_model_pattern = re.compile(r'^[Cc]isco (?P<model>\S+) \(.*\).* with \S+ bytes of(?: physical)? memory.$')
# ********************************************
# * JUNOS *
# ********************************************
# Junos: 18.2R2-S1
junos_os_version_pattern = re.compile(r'^Junos: (?P<version>\S+)$')
# Model: ex4200-24p
junos_model_pattern = re.compile(r'^Model: (?P<model>\S+)$')
# ********************************************
# * NXOS *
# ********************************************
# Cisco Nexus Operating System (NX-OS) Software
nxos_os_pattern = re.compile(r'^.*Nexus Operating System.*$')
# system: version 6.0(2)U6(10)
# NXOS: version 9.3(6uu)I9(1uu) [build 9.3(6)]
nxos_version_pattern = re.compile(r'^(?:system|NXOS):\s+version (?P<version>\S+)(?: \[build (?P<build>.*)\])?$')
# cisco Nexus 3048 Chassis ("48x1GE + 4x10G Supervisor")
# cisco Nexus9000 C9396PX Chassis
nxos_platform_and_model_pattern = re.compile(r'^cisco (?P<platform>Nexus\s?[\d]+) ?(?P<model>\S+)? Chassis.*$')
# ********************************************
# * VIPTELLA *
# ********************************************
# 15.3.3
viptella_os_pattern = re.compile(r'^(?P<version>\d+(?:\.\d+)?(?:\.\d+)?)$')
for line in output.splitlines():
line = line.strip()
# ********************************************
# * ASA *
# ********************************************
# Cisco Adaptive Security Appliance Software Version 9.8(4)10
m = asa_os_version_pattern.match(line)
if m:
ret_dict['os'] = 'asa'
ret_dict['version'] = m.groupdict()['version']
continue
# Hardware: ASAv, 2048 MB RAM, CPU Xeon E5 series 3491 MHz,
# Hardware: ASA5520, 512 MB RAM, CPU Pentium 4 Celeron 2000 MHz
m = asa_platform_pattern.match(line)
if m:
ret_dict['platform'] = m.groupdict()['platform']
continue
# Model Id: ASAv10
m = asa_model_pattern.match(line)
if m:
ret_dict['model'] = m.groupdict()['model'].lower()
continue
# ********************************************
# * GAIA *
# ********************************************
# Product version Check Point Gaia R80.40
m = gaia_os_version_pattern.match(line)
if m:
ret_dict['os'] = 'gaia'
ret_dict['version'] = m.groupdict()['version']
continue
# ********************************************
# * IOSXE *
# ********************************************
# Cisco IOS Software, IOS-XE Software, Catalyst 4500 L3 Switch Software (cat4500e-UNIVERSALK9-M), Version 03.04.06.SG RELEASE SOFTWARE (fc1)
# Cisco IOS Software, IOS-XE Software, Catalyst L3 Switch Software (CAT3K_CAA-UNIVERSALK9-M), Version 03.06.07E RELEASE SOFTWARE (fc3)
# Cisco IOS XE Software, Version 17.05.01a
# Cisco IOS Software, IOS-XE Software, Catalyst L3 Switch Software (CAT3K_CAA-UNIVERSALK9-M), Version 03.06.07E RELEASE SOFTWARE (fc3)
# Cisco IOS Software [Amsterdam], Virtual XE Software (X86_64_LINUX_IOSD-UNIVERSALK9-M), Version 17.3.1a, RELEASE SOFTWARE (fc3)
m = iosxe_os_version_platform_pattern.match(line)
if m:
ret_dict['os'] = 'iosxe'
group = m.groupdict()
if group['platform']:
ret_dict['platform'] = group['platform'].lower()
ret_dict['version'] = group['version']
continue
# cisco WS-C2940-8TT-S (RC32300) processor (revision H0) with 19868K bytes of memory.
# cisco WS-C3650-48PD (MIPS) processor with 4194304K bytes of physical memory.
# cisco C9500-24Y4C (X86) processor with 2900319K/6147K bytes of memory.
m = iosxe_model_pattern.match(line)
if m:
ret_dict['model'] = m.groupdict()['model']
continue
# Cisco IOS-XE software, Copyright (c) 2005-2017 by cisco Systems, Inc.
# Cisco IOS XE software, Copyright (c) 2005-2017 by cisco Systems, Inc.
m = iosxe_backup_os_pattern.match(line)
if m:
ret_dict['os'] = 'iosxe'
continue
# * 1 41 C9300-24P 17.07.01 CAT9K_IOSXE INSTALL
m = iosxe_backup_model_version_pattern.match(line)
if m:
ret_dict['model'] = m.groupdict()['model']
ret_dict['version'] = m.groupdict()['version']
continue
# Model Number : C9300-24P
m = iosxe_backup_model_pattern.match(line)
if m:
ret_dict['model'] = m.groupdict()['model']
continue
# ********************************************
# * IOSXR *
# ********************************************
# Cisco IOS XR Software, Version 6.1.4.10I[Default]
# Cisco IOS XR Software, Version 6.2.1.23I[Default]
# Cisco IOS XR Software, Version 6.3.1.15I
# Cisco IOS XR Software, Version 6.4.2[Default]
m = iosxr_os_version_pattern.match(line)
if m:
ret_dict['os'] = 'iosxr'
ret_dict['version'] = m.groupdict()['version']
continue
# cisco ASR9K Series (Intel 686 F6M14S4) processor with 6291456K bytes of memory.
# cisco IOS XRv Series (Pentium Celeron Stepping 3) processor with 4193911K bytes of memory.
# cisco IOS-XRv 9000 () processor
# cisco CRS-16/S-B (Intel 686 F6M14S4) processor with 12582912K bytes of memory.
m = iosxr_platform_pattern.match(line)
if m:
ret_dict['platform'] = m.groupdict()['platform'].lower()
ret_dict['platform'] = \
re.sub(r'\s|\-', r'', m.groupdict()['platform'].lower())
continue
# ********************************************
# * IOS *
# ********************************************
# Cisco IOS Software, C3750E Software (C3750E-UNIVERSALK9-M), Version 15.2(2)E8, RELEASE SOFTWARE (fc1)
# IOS (tm) C2940 Software (C2940-I6K2L2Q4-M), Version 12.1(22)EA12, RELEASE SOFTWARE (fc1)
# Cisco IOS Software, C2960X Software (C2960X-UNIVERSALK9-M), Version 15.2(2)E7, RELEASE SOFTWARE (fc3)
# Cisco IOS Software, 901 Software (ASR901-UNIVERSALK9-M), Version 15.6(2)SP4, RELEASE SOFTWARE (fc3)
# Cisco IOS Software [Bengaluru], ASR1000 Software (X86_64_LINUX_IOSD-UNIVERSALK9-M), Version 17.5.1a, RELEASE SOFTWARE (fc3)
m = ios_os_version_platform_pattern.match(line)
if m:
group = m.groupdict()
ret_dict['version'] = group['version']
if ret_dict.get('os', None) is None:
ret_dict['os'] = 'ios'
# Clean up platform a bit before adding to ret_dict
if group['platform'].lower().startswith('x86_64_linux') \
and group['alternate_platform']:
group['platform'] = group['alternate_platform']
ret_dict['platform'] = \
re.sub(r'\_(ios).*', r'', group['platform'].lower())
ret_dict['platform'] = \
re.sub(r'cat(\d)\d{3}', r'cat\1k', ret_dict['platform'])
continue
# Cisco CISCO1941/K9 (revision 1.0) with 491520K/32768K bytes of memory.
m = ios_model_pattern.match(line)
if m:
ret_dict['model'] = m.groupdict()['model']
continue
# ********************************************
# * JUNOS *
# ********************************************
# Junos: 18.2R2-S1
m = junos_os_version_pattern.match(line)
if m:
ret_dict['os'] = 'junos'
ret_dict['version'] = m.groupdict()['version']
continue
# Model: ex4200-24p
m = junos_model_pattern.match(line)
if m:
ret_dict['model'] = m.groupdict()['model'].lower()
continue
# ********************************************
# * NXOS *
# ********************************************
# Cisco Nexus Operating System (NX-OS) Software
m = nxos_os_pattern.match(line)
if m:
ret_dict['os'] = 'nxos'
continue
# system: version 6.0(2)U6(10)
# NXOS: version 9.3(6uu)I9(1uu) [build 9.3(6)]
m | |
import matplotlib.image as mpimg
import numpy as np
import cv2
from skimage.feature import hog
from scipy.ndimage.measurements import label
# Define a function to return HOG features and visualization
def get_hog_features(img, orient, pix_per_cell, cell_per_block,
                        vis=False, feature_vec=True):
    """Compute HOG features for a single-channel image.

    Returns (features, hog_image) when vis is True, otherwise just the
    feature array.  Thin wrapper around skimage's hog() with square
    cells/blocks and sqrt transform enabled.
    """
    # Build the common keyword set once so both branches stay in sync.
    hog_kwargs = dict(orientations=orient,
                      pixels_per_cell=(pix_per_cell, pix_per_cell),
                      cells_per_block=(cell_per_block, cell_per_block),
                      transform_sqrt=True,
                      visualise=vis,
                      feature_vector=feature_vec)
    if vis:
        # skimage returns a (features, visualization image) pair here
        features, hog_image = hog(img, **hog_kwargs)
        return features, hog_image
    return hog(img, **hog_kwargs)
# Define a function to compute binned color features
def bin_spatial(img, size=(32, 32)):
    """Downsample *img* to *size* and return it flattened as a 1-D
    spatial-binning feature vector."""
    resized = cv2.resize(img, size)
    return resized.ravel()
# Define a function to compute color histogram features
# FIXED, AFTER imread() of PNGs: NEED TO CHANGE bins_range if reading .png files with mpimg!
def color_hist(img, nbins=32, bins_range=(0, 256)):
    """Return the concatenated per-channel color histogram of a 3-channel
    image.

    The default bins_range of (0, 256) assumes uint8-scaled pixel data;
    PNGs read by mpimg must be rescaled to [0, 255] first (handled by
    the callers in this file).
    """
    # One histogram per color channel, counts only (edges discarded).
    counts = [np.histogram(img[:, :, ch], bins=nbins, range=bins_range)[0]
              for ch in range(3)]
    return np.concatenate(counts)
# Define a function to extract features from a list of images
# Have this function call bin_spatial() and color_hist()
def extract_features(imgs, file_type = 'jpg', color_space='RGB', spatial_size=(32, 32),
                        hist_bins=32, orient=9,
                        pix_per_cell=8, cell_per_block=2, hog_channel=0,
                        spatial_feat=True, hist_feat=True, hog_feat=True):
    """Extract concatenated feature vectors from a list of image files.

    For each file, reads the image, optionally converts color space, and
    concatenates up to three feature groups: spatial bins (bin_spatial),
    color histograms (color_hist) and HOG features (get_hog_features).

    Parameters:
        imgs: iterable of file paths readable by mpimg.imread.
        file_type: 'png' rescales mpimg's float [0, 1] data to uint8
            [0, 255] so histogram bins match JPEG-read images.
        color_space: 'RGB', 'HSV', 'LUV', 'HLS', 'YUV' or 'YCrCb'.
        hog_channel: channel index, or 'ALL' for every channel.
    Returns:
        list of 1-D numpy arrays, one per input image.
    """
    # Map each supported color-space name to its OpenCV conversion code.
    conversions = {
        'HSV': cv2.COLOR_RGB2HSV,
        'LUV': cv2.COLOR_RGB2LUV,
        'HLS': cv2.COLOR_RGB2HLS,
        'YUV': cv2.COLOR_RGB2YUV,
        'YCrCb': cv2.COLOR_RGB2YCrCb,
    }
    features = []
    for file in imgs:
        file_features = []
        image = mpimg.imread(file)
        # FIX for "NEED TO CHANGE bins_range if reading .png files with mpimg!"
        if file_type == 'png':
            image = (image * 255).astype(np.uint8)
        # Bug fix: previously an unrecognized non-RGB color_space left
        # feature_image unassigned (NameError on the first file, silently
        # reusing the previous image afterwards).  Unknown values now fall
        # back to a plain copy, same as 'RGB'.
        code = conversions.get(color_space)
        feature_image = cv2.cvtColor(image, code) if code is not None else np.copy(image)
        if spatial_feat:
            file_features.append(bin_spatial(feature_image, size=spatial_size))
        if hist_feat:
            file_features.append(color_hist(feature_image, nbins=hist_bins))
        if hog_feat:
            if hog_channel == 'ALL':
                hog_features = []
                for channel in range(feature_image.shape[2]):
                    hog_features.append(get_hog_features(
                        feature_image[:, :, channel], orient, pix_per_cell,
                        cell_per_block, vis=False, feature_vec=True))
                hog_features = np.ravel(hog_features)
            else:
                hog_features = get_hog_features(
                    feature_image[:, :, hog_channel], orient, pix_per_cell,
                    cell_per_block, vis=False, feature_vec=True)
            file_features.append(hog_features)
        features.append(np.concatenate(file_features))
    return features
# Define a function that takes
# start and stop positions in both x and y,
# window size (x and y dimensions),
# and overlap fraction (for both x and y)
def slide_window(x_start_stop=[None, None], y_start_stop=[None, None],
                    xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
    """Generate sliding-window corner coordinates over an image region.

    Parameters:
        x_start_stop, y_start_stop: [start, stop] pixel bounds; a None
            entry defaults to 0 / 1280 (x) and 0 / 720 (y), the frame
            size used by this pipeline.
        xy_window: window size (width, height) in pixels.
        xy_overlap: fractional overlap between adjacent windows per axis.
    Returns:
        list of ((startx, starty), (endx, endy)) tuples.

    Fixes over the previous version:
      * no longer writes into the mutable default lists (which persisted
        across calls) or into the caller's lists;
      * uses builtin int() instead of np.int, which NumPy 1.24 removed.
    """
    # Resolve bounds into locals instead of mutating the argument lists.
    x_start = 0 if x_start_stop[0] is None else x_start_stop[0]
    x_stop = 1280 if x_start_stop[1] is None else x_start_stop[1]
    y_start = 0 if y_start_stop[0] is None else y_start_stop[0]
    y_stop = 720 if y_start_stop[1] is None else y_start_stop[1]
    # Span of the region to be searched.
    xspan = x_stop - x_start
    yspan = y_stop - y_start
    # Pixels advanced per step, derived from the overlap fraction.
    nx_pix_per_step = int(xy_window[0] * (1 - xy_overlap[0]))
    ny_pix_per_step = int(xy_window[1] * (1 - xy_overlap[1]))
    # Number of whole windows that fit in each direction.
    nx_buffer = int(xy_window[0] * xy_overlap[0])
    ny_buffer = int(xy_window[1] * xy_overlap[1])
    nx_windows = int((xspan - nx_buffer) / nx_pix_per_step)
    ny_windows = int((yspan - ny_buffer) / ny_pix_per_step)
    window_list = []
    # Looping (rather than vectorizing) keeps this readable; windows are
    # consumed one at a time by the classifier anyway.
    for ys in range(ny_windows):
        for xs in range(nx_windows):
            startx = xs * nx_pix_per_step + x_start
            endx = startx + xy_window[0]
            starty = ys * ny_pix_per_step + y_start
            endy = starty + xy_window[1]
            window_list.append(((startx, starty), (endx, endy)))
    return window_list
# Define a function to draw bounding boxes
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
    """Return a copy of *img* with each bounding box outlined.

    bboxes is an iterable of ((x1, y1), (x2, y2)) corner pairs; the
    input image itself is left unmodified.
    """
    annotated = np.copy(img)
    for top_left, bottom_right in bboxes:
        cv2.rectangle(annotated, top_left, bottom_right, color, thick)
    return annotated
# Define a function to extract features from a single image window
# This function is very similar to extract_features()
# just for a single image rather than list of images
def single_img_features(img, color_space='RGB', spatial_size=(32, 32),
                        hist_bins=32, orient=9,
                        pix_per_cell=8, cell_per_block=2, hog_channel=0,
                        spatial_feat=True, hist_feat=True, hog_feat=True):
    """Extract a concatenated feature vector from a single image.

    Single-image counterpart of extract_features(): optionally converts
    color space, then concatenates spatial bins, color histograms and
    HOG features depending on the *_feat flags.

    Returns a 1-D numpy array.
    """
    img_features = []
    # Map each supported color-space name to its OpenCV conversion code
    # (same dispatch as extract_features, for consistency).
    conversions = {
        'HSV': cv2.COLOR_RGB2HSV,
        'LUV': cv2.COLOR_RGB2LUV,
        'HLS': cv2.COLOR_RGB2HLS,
        'YUV': cv2.COLOR_RGB2YUV,
        'YCrCb': cv2.COLOR_RGB2YCrCb,
    }
    # Bug fix: an unrecognized non-RGB color_space previously left
    # feature_image unassigned and raised NameError; unknown values now
    # fall back to a plain copy, same as 'RGB'.
    code = conversions.get(color_space)
    feature_image = cv2.cvtColor(img, code) if code is not None else np.copy(img)
    # Spatial-binning features.
    if spatial_feat:
        img_features.append(bin_spatial(feature_image, size=spatial_size))
    # Color-histogram features.
    if hist_feat:
        img_features.append(color_hist(feature_image, nbins=hist_bins))
    # HOG features, for one channel or all of them.
    if hog_feat:
        if hog_channel == 'ALL':
            hog_features = []
            for channel in range(feature_image.shape[2]):
                hog_features.extend(get_hog_features(
                    feature_image[:, :, channel], orient, pix_per_cell,
                    cell_per_block, vis=False, feature_vec=True))
        else:
            hog_features = get_hog_features(
                feature_image[:, :, hog_channel], orient, pix_per_cell,
                cell_per_block, vis=False, feature_vec=True)
        img_features.append(hog_features)
    return np.concatenate(img_features)
# Define a function you will pass an image
# and the list of windows to be searched (output of slide_windows())
def search_windows(img, windows, clf, scaler, color_space='RGB',
                    spatial_size=(32, 32), hist_bins=32,
                    hist_range=(0, 256), orient=9,
                    pix_per_cell=8, cell_per_block=2,
                    hog_channel=0, spatial_feat=True,
                    hist_feat=True, hog_feat=True):
    """Classify every candidate window of *img* and return the positives.

    Each window (output of slide_window()) is cropped, resized to the
    classifier's 64x64 training size, featurized with
    single_img_features(), scaled, and predicted with *clf*.  Windows
    predicted as class 1 are returned.
    """
    on_windows = []
    for window in windows:
        (x1, y1), (x2, y2) = window
        # Crop and resize the patch to the classifier's input size.
        patch = cv2.resize(img[y1:y2, x1:x2], (64, 64))
        feats = single_img_features(patch, color_space=color_space,
                                    spatial_size=spatial_size, hist_bins=hist_bins,
                                    orient=orient, pix_per_cell=pix_per_cell,
                                    cell_per_block=cell_per_block,
                                    hog_channel=hog_channel, spatial_feat=spatial_feat,
                                    hist_feat=hist_feat, hog_feat=hog_feat)
        # Apply the same scaling the classifier was trained with.
        scaled_feats = scaler.transform(np.array(feats).reshape(1, -1))
        if clf.predict(scaled_feats) == 1:
            on_windows.append(window)
    return on_windows
def add_heat(heatmap, bbox_list):
    """Increment *heatmap* (in place) over every box in *bbox_list*.

    Each box takes the form ((x1, y1), (x2, y2)); the covered pixel
    region gains +1 per box.  Returns the same heatmap array.
    """
    for (x1, y1), (x2, y2) in bbox_list:
        heatmap[y1:y2, x1:x2] += 1
    return heatmap
def apply_threshold(heatmap, threshold):
    """Zero out (in place) every heatmap pixel at or below *threshold*
    and return the same array."""
    below = heatmap <= threshold
    heatmap[below] = 0
    return heatmap
def draw_labeled_bboxes(img, labels):
    """Draw one bounding box per labeled component onto *img* (in place).

    *labels* is the (label_array, n_labels) pair returned by
    scipy.ndimage's label(); each component gets the tightest box that
    covers all of its pixels, drawn in blue with thickness 6.
    """
    label_array, n_cars = labels[0], labels[1]
    for car_number in range(1, n_cars + 1):
        # Row/column indices of the pixels belonging to this component.
        rows, cols = (label_array == car_number).nonzero()
        ys = np.array(rows)
        xs = np.array(cols)
        top_left = (np.min(xs), np.min(ys))
        bottom_right = (np.max(xs), np.max(ys))
        cv2.rectangle(img, top_left, bottom_right, (0, 0, 255), 6)
    return img
# Define a single function that can extract features using hog sub-sampling and make predictions
def find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins):
draw_img = np.copy(img)
#img = img.astype(np.float32)/255
img_tosearch = img[ystart:ystop,:,:]
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2YCrCb)
if scale != 1:
imshape = ctrans_tosearch.shape
ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale)))
ch1 | |
# from keras.models import load_model
#Keras import
from keras.models import Sequential, Model
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D, MaxPooling3D, UpSampling3D, Conv3D, Conv2DTranspose, Conv1D, UpSampling1D
from keras.layers import Activation, Dropout, Flatten, Dense, Input, Reshape, BatchNormalization
# from keras.layers import Conv3DTranspose as Deconvolution3D
from keras.layers import Deconvolution3D
from keras.optimizers import SGD
from keras import regularizers
from keras.layers import Input, LSTM, RepeatVector, Concatenate,Lambda
from keras import backend as K
from keras.layers.advanced_activations import LeakyReLU
import keras
#Other imports
from trainer.losses import ROI_mean_squared_error_loss,ROI_diff_mse_joint_loss,ROI_diff_temporal_loss,ROI_diff_mse_joint_loss
def diff_ROI_C3D_AE_no_pool(img_width, img_height, win_length, regularizer_list = [],channels=1,lambda_S=1,lambda_T=1,d_type=None):
    """diff-ROI-3DCAE model.

    Builds a 3D convolutional autoencoder (no pooling; striding instead)
    compiled with the joint ROI spatial + temporal-difference MSE loss.

    Parameters:
        img_width, img_height, win_length: input window dimensions.
        regularizer_list: may contain 'BN' and/or 'Dropout' to insert
            those layers between convolutions.
        channels: image channels per frame.
        lambda_S, lambda_T: spatial / temporal loss weights.
        d_type: optional data-type tag used to prefix the decoded layer
            and model names (for stacked models).
    Returns:
        (model, model_name, model_type) tuple.
    """
    input_shape = (win_length, img_width, img_height, channels)
    # Frame differences have one fewer time step than the window.
    input_diff_shape = (win_length - 1, img_width, img_height, channels)
    input_window = Input(shape=input_shape)
    input_mask = Input(shape=input_shape)
    input_diff_mask = Input(shape=input_diff_shape)
    temp_depth = 5

    def _bn(tensor):
        # Insert a BatchNormalization layer only when requested.
        return BatchNormalization()(tensor) if 'BN' in regularizer_list else tensor

    # --- encoder ---
    x = Conv3D(16, (5, 3, 3), activation='relu', padding='same')(input_window)
    x = _bn(x)
    x = Conv3D(8, (5, 3, 3), activation='relu', strides=(1, 2, 2), padding='same')(x)
    if 'Dropout' in regularizer_list:
        x = Dropout(0.25)(x)
    x = _bn(x)
    x = Conv3D(8, (5, 3, 3), activation='relu', strides=(2, 2, 2), padding='same')(x)
    x = _bn(x)
    encoded = Conv3D(8, (5, 3, 3), activation='relu', strides=(2, 2, 2), padding='same')(x)
    # --- decoder ---
    x = Deconvolution3D(8, (temp_depth, 3, 3), strides=(2, 2, 2), activation='relu', padding='same')(encoded)
    x = _bn(x)
    # Double all dimensions if win_length is even; otherwise double H and W
    # and bring T to 2*T-1 via a valid-padding deconvolution
    # (out = stride*input - stride + kernel_size).
    if win_length % 2 == 0:
        x = Deconvolution3D(8, (temp_depth, 3, 3), strides=(2, 2, 2), activation='relu', padding='same')(x)
    else:
        input_temporal_size = int((win_length + 1) / 2)
        x = Deconvolution3D(8, (input_temporal_size, 2, 2), strides=(1, 2, 2), activation='relu', padding='valid')(x)
    x = _bn(x)
    x = Deconvolution3D(16, (temp_depth, 3, 3), strides=(1, 2, 2), activation='relu', padding='same')(x)
    x = _bn(x)
    # Name the output layer/model so they can be located in stacked models.
    layer_name = 'decoded'
    model_name = "R"
    if d_type is not None:
        layer_name = d_type + '_' + layer_name
        model_name = d_type + '_AE'
    decoded = Conv3D(channels, (5, 3, 3), activation='tanh', padding='same', name=layer_name)(x)
    # Bug fix: the model name was hard-coded to "R", silently discarding the
    # d_type-derived model_name; now consistent with ROI_C3D_AE_no_pool.
    autoencoder = Model(inputs=[input_window, input_mask, input_diff_mask],
                        outputs=decoded, name=model_name)
    autoencoder.compile(optimizer='adadelta',
                        loss=ROI_diff_mse_joint_loss(input_mask, input_diff_mask, lambda_S, lambda_T),
                        metrics=[ROI_mean_squared_error_loss(input_mask),
                                 ROI_diff_temporal_loss(input_mask, input_diff_mask)])
    model_type = '3Dconv'
    model_name = 'diff_ROI_C3DAE_no_pool'
    for reg in regularizer_list:
        model_name += '-' + reg
    return autoencoder, model_name, model_type
def ROI_C3D_AE_no_pool(img_width, img_height, win_length, regularizer_list = [],channels=1,d_type=None):
    """ROI-3DCAE model.

    3D convolutional autoencoder (no pooling; striding instead) compiled
    with a region-of-interest masked MSE loss.  Returns
    (model, model_name, model_type).
    """
    def multiply(x):
        # Element-wise masking helper kept from the original; not used below.
        image, mask = x  # could be K.stack([mask]*3, axis=-1) too
        return mask * image

    input_shape = (win_length, img_width, img_height, channels)
    window_in = Input(shape=input_shape)
    mask_in = Input(shape=input_shape)
    kernel_depth = 5

    def _bn(tensor):
        # Insert a BatchNormalization layer only when requested.
        return BatchNormalization()(tensor) if 'BN' in regularizer_list else tensor

    # --- encoder ---
    net = Conv3D(16, (5, 3, 3), activation='relu', padding='same')(window_in)
    net = _bn(net)
    net = Conv3D(8, (5, 3, 3), activation='relu', strides=(1, 2, 2), padding='same')(net)
    if 'Dropout' in regularizer_list:
        net = Dropout(0.25)(net)
    net = _bn(net)
    net = Conv3D(8, (5, 3, 3), activation='relu', strides=(2, 2, 2), padding='same')(net)
    net = _bn(net)
    encoded = Conv3D(8, (5, 3, 3), activation='relu', strides=(2, 2, 2), padding='same')(net)
    # --- decoder ---
    net = Deconvolution3D(8, (kernel_depth, 3, 3), strides=(2, 2, 2), activation='relu', padding='same')(encoded)
    net = _bn(net)
    if win_length % 2 == 0:
        # Even windows: double T, H and W in one striding deconvolution.
        net = Deconvolution3D(8, (kernel_depth, 3, 3), strides=(2, 2, 2), activation='relu', padding='same')(net)
    else:
        # Odd windows: valid padding, out = stride*input - stride + kernel_size,
        # bringing T to 2*T-1 while doubling H and W.
        half_t = int((win_length + 1) / 2)
        net = Deconvolution3D(8, (half_t, 2, 2), strides=(1, 2, 2), activation='relu', padding='valid')(net)
    net = _bn(net)
    net = Deconvolution3D(16, (kernel_depth, 3, 3), strides=(1, 2, 2), activation='relu', padding='same')(net)
    net = _bn(net)
    # Name the output layer/model so they can be located in stacked models.
    layer_name = 'decoded'
    model_name = "R"
    if d_type is not None:
        model_name = d_type + '_AE'
        layer_name = d_type + '_decoded'
    decoded = Conv3D(channels, (5, 3, 3), activation='tanh', padding='same', name=layer_name)(net)
    autoencoder = Model(inputs=[window_in, mask_in], outputs=decoded, name=model_name)
    autoencoder.compile(optimizer='adadelta', loss=ROI_mean_squared_error_loss(mask_in))
    model_type = '3Dconv'
    model_name = 'ROI_C3DAE-no_pool'
    for reg in regularizer_list:
        model_name += '-' + reg
    return autoencoder, model_name, model_type
def C3D_AE_no_pool(img_width, img_height, win_length, regularizer_list = [],channels=1):
    """3DCAE model.

    Plain 3D convolutional autoencoder (no pooling; striding instead)
    compiled with unmasked mean-squared-error loss.  Returns
    (model, model_name, model_type).
    """
    input_shape = (win_length, img_width, img_height, channels)
    window_in = Input(shape=input_shape)
    kernel_depth = 5

    def _bn(tensor):
        # Insert a BatchNormalization layer only when requested.
        return BatchNormalization()(tensor) if 'BN' in regularizer_list else tensor

    # --- encoder ---
    net = Conv3D(16, (5, 3, 3), activation='relu', padding='same')(window_in)
    net = _bn(net)
    net = Conv3D(8, (5, 3, 3), activation='relu', strides=(1, 2, 2), padding='same')(net)
    if 'Dropout' in regularizer_list:
        net = Dropout(0.25)(net)
    net = _bn(net)
    net = Conv3D(8, (5, 3, 3), activation='relu', strides=(2, 2, 2), padding='same')(net)
    net = _bn(net)
    encoded = Conv3D(8, (5, 3, 3), activation='relu', strides=(2, 2, 2), padding='same')(net)
    # --- decoder ---
    net = Deconvolution3D(8, (kernel_depth, 3, 3), strides=(2, 2, 2), activation='relu', padding='same')(encoded)
    net = _bn(net)
    if win_length % 2 == 0:
        # Even windows: double T, H and W; BN applied only on this branch,
        # matching the original layout.
        net = Deconvolution3D(8, (kernel_depth, 3, 3), strides=(2, 2, 2), activation='relu', padding='same')(net)
        net = _bn(net)
    else:
        # Odd windows: valid padding, out = stride*input - stride + kernel_size.
        # The original deliberately skips BatchNormalization on this branch.
        half_t = int((win_length + 1) / 2)
        net = Deconvolution3D(8, (half_t, 2, 2), strides=(1, 2, 2), activation='relu', padding='valid')(net)
    net = Deconvolution3D(16, (kernel_depth, 3, 3), strides=(1, 2, 2), activation='relu', padding='same')(net)
    net = _bn(net)
    decoded = Conv3D(channels, (5, 3, 3), activation='tanh', padding='same', name='decoded')(net)
    autoencoder = Model(window_in, decoded, name="R")
    autoencoder.compile(optimizer='adadelta', loss='mean_squared_error')
    model_type = '3Dconv'
    model_name = 'C3DAE-no_pool'
    for reg in regularizer_list:
        model_name += '-' + reg
    return autoencoder, model_name, model_type
'''
Deepfall- https://arxiv.org/pdf/1809.00977.pdf
Code-https://github.com/JJN123/Fall-Detection/blob/master/models.py
author- <NAME>, <NAME>
'''
def DSTCAE_C3D(img_width, img_height, win_length):
    """DeepFall spatio-temporal 3D convolutional autoencoder.

    int win_length: Length of window of frames.
    Returns (model, model_name, model_type).
    """
    pool_t = 2
    window_in = Input(shape=(win_length, img_width, img_height, 1))
    # --- encoder: three conv + max-pool stages ---
    net = Conv3D(16, (5, 3, 3), activation='relu', padding='same')(window_in)
    net = MaxPooling3D((1, 2, 2), padding='same')(net)
    net = Conv3D(8, (5, 3, 3), activation='relu', padding='same')(net)
    net = MaxPooling3D((pool_t, 2, 2), padding='same')(net)  # temporal dim halved
    net = Dropout(0.25)(net)
    net = Conv3D(8, (5, 3, 3), activation='relu', padding='same')(net)
    encoded = MaxPooling3D((pool_t, 2, 2), padding='same')(net)  # temporal dim quartered
    # --- decoder: mirror of the encoder with upsampling ---
    net = Conv3D(8, (5, 3, 3), activation='relu', padding='same')(encoded)
    net = UpSampling3D((pool_t, 2, 2))(net)
    net = Conv3D(8, (5, 3, 3), activation='relu', padding='same')(net)
    net = UpSampling3D((pool_t, 2, 2))(net)
    net = Conv3D(16, (5, 3, 3), activation='relu', padding='same')(net)
    net = UpSampling3D((1, 2, 2))(net)
    decoded = Conv3D(1, (5, 3, 3), activation='tanh', padding='same')(net)
    autoencoder = Model(window_in, decoded)
    autoencoder.compile(optimizer='adadelta', loss='mean_squared_error')
    model_type = 'conv'
    model_name = 'DSTCAE_C3D'
    return autoencoder, model_name, model_type
'''
CONVLSTM- http://individual.utoronto.ca/shehroz/files/ARIALIJCAI.pdf
Code-https://github.com/JJN123/Fall-Detection/blob/master/models.py
author- <NAME>, <NAME>
'''
def CLSTM_AE(img_width, img_height, win_len):
    """ConvLSTM spatio-temporal autoencoder.

    from https://github.com/yshean/abnormal-spatiotemporal-ae/blob/master/classifier.py
    Returns (model, model_name, model_type).
    """
    from keras.models import Model
    from keras.layers.convolutional import Conv2D, Conv2DTranspose
    from keras.layers.convolutional_recurrent import ConvLSTM2D
    from keras.layers.normalization import BatchNormalization
    from keras.layers.wrappers import TimeDistributed
    from keras.layers.core import Activation
    from keras.layers import Input

    frames_in = Input(shape=(win_len, img_width, img_height, 1))
    # --- spatial encoder ---
    # NOTE(review): the hard-coded 224x224 input_shape below disagrees with
    # img_width/img_height; it appears to be ignored when the layer is
    # called on an existing tensor (presumably leftover from a Sequential
    # version) -- confirm before removing.
    net = TimeDistributed(Conv2D(128, kernel_size=(11, 11), padding='same', strides=(4, 4), name='conv1'),
                          input_shape=(win_len, 224, 224, 1))(frames_in)
    net = TimeDistributed(BatchNormalization())(net)
    net = TimeDistributed(Activation('relu'))(net)
    net = TimeDistributed(Conv2D(64, kernel_size=(5, 5), padding='same', strides=(2, 2), name='conv2'))(net)
    net = TimeDistributed(BatchNormalization())(net)
    net = TimeDistributed(Activation('relu'))(net)
    # --- temporal bottleneck ---
    net = ConvLSTM2D(64, kernel_size=(3, 3), padding='same', return_sequences=True, name='convlstm1')(net)
    net = ConvLSTM2D(32, kernel_size=(3, 3), padding='same', return_sequences=True, name='convlstm2')(net)
    net = ConvLSTM2D(64, kernel_size=(3, 3), padding='same', return_sequences=True, name='convlstm3')(net)
    # --- spatial decoder ---
    net = TimeDistributed(Conv2DTranspose(128, kernel_size=(5, 5), padding='same', strides=(2, 2), name='deconv1'))(net)
    net = TimeDistributed(BatchNormalization())(net)
    net = TimeDistributed(Activation('relu'))(net)
    decoded = TimeDistributed(Conv2DTranspose(1, kernel_size=(11, 11), padding='same', strides=(4, 4), name='deconv2'))(net)

    model = Model(inputs=frames_in, outputs=decoded)
    model.compile(optimizer='adadelta', loss='mean_squared_error')
    model_name = 'CLSTM_AE'
    model_type = 'conv'
    return model, model_name, model_type
"""
Discriminator Models
"""
def C3D_no_pool(img_width, img_height, win_length, regularizer_list=None, channels=1):
    """3DCNN discriminator model without pooling.

    Downsampling is done with strided Conv3D layers instead of pooling.
    Three Conv3D + LeakyReLU blocks feed a flattened sigmoid head that
    outputs a single real/fake likelihood. Compiled with SGD and
    binary cross-entropy.

    Args:
        img_width: frame width in pixels.
        img_height: frame height in pixels.
        win_length: length of the window of frames.
        regularizer_list: optional list of regularizer flags; if it
            contains 'BN', BatchNormalization is inserted after the
            second and third conv blocks. Each flag is also appended
            to the returned model name. (Default changed from a shared
            mutable [] to None for safety; behavior is identical.)
        channels: number of input channels per frame.

    Returns:
        (model, model_name, model_type) with model_type '3Dconv'.
    """
    from keras.layers.normalization import BatchNormalization

    if regularizer_list is None:
        regularizer_list = []

    input_shape = (win_length, img_width, img_height, channels)
    input_window = Input(shape=input_shape)

    x = Conv3D(16, (5, 3, 3), padding='same', strides=(1, 2, 2))(input_window)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv3D(8, (5, 3, 3), padding='same', strides=(1, 2, 2))(x)
    x = LeakyReLU(alpha=0.2)(x)
    if 'BN' in regularizer_list:
        x = BatchNormalization()(x)
    x = Conv3D(8, (3, 3, 3), padding='same', strides=(1, 2, 2))(x)
    x = LeakyReLU(alpha=0.2)(x)
    if 'BN' in regularizer_list:
        x = BatchNormalization()(x)

    encoded = Flatten()(x)
    target_class_likelihood = Dense(1, activation='sigmoid')(encoded)

    model = Model(input_window, target_class_likelihood, name="D")
    sgd = SGD(lr=0.0002, decay=1e-7, momentum=.5)
    model.compile(optimizer=sgd,
                  loss='binary_crossentropy')

    model_type = '3Dconv'
    model_name = '3DCNN-no_pool'
    for reg in regularizer_list:
        model_name += '-' + reg
    return model, model_name, model_type
def Fusion_C3D_no_pool(img_width, img_height, win_length, regularizer_list =[],thermal_channels=1,flow_channels=3):
"""
Fusion Discriminator
"""
from keras.layers.normalization import BatchNormalization
#Input shape
thermal_input_shape = (win_length, img_width, img_height, thermal_channels)
flow_input_shape = (win_length-1, img_width, img_height, flow_channels)
temp_pool = 2
temp_depth = 5
#------------------------------------------
#Thermal 3DCNN
#------------------------------------------
thermal_input_window = Input(shape = thermal_input_shape)
thermal_x = Conv3D(16, (5, 3,3), padding='same', subsample = (1,2,2))(thermal_input_window)
thermal_x = LeakyReLU(alpha=0.2)(thermal_x)
thermal_x = Conv3D(8, (5, 3, 3), padding='same', subsample = (1,2,2))(thermal_x)
thermal_x = LeakyReLU(alpha=0.2)(thermal_x)
if 'BN' in regularizer_list:
thermal_x = BatchNormalization()(thermal_x)
thermal_x = Conv3D(8, (3, 3, 3), padding='same', subsample = (1,2,2))(thermal_x)
thermal_x = LeakyReLU(alpha=0.2)(thermal_x)
if 'BN' in regularizer_list:
thermal_x = BatchNormalization()(thermal_x)
thermal_encoded = Flatten()(thermal_x)
#------------------------------------------
#FLow 3DCNN
#------------------------------------------
flow_input_window = Input(shape = flow_input_shape)
flow_x = Conv3D(16, (5, 3,3), padding='same', subsample = (1,2,2))(flow_input_window)
flow_x = LeakyReLU(alpha=0.2)(flow_x)
| |
oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w2i)and Wboard.w6e==''\
and board.s3h+board.s4g+board.s5f=='':
moves = '2i6e+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w2i)and Wboard.w7d==''\
and board.s3h+board.s4g+board.s5f+board.s6e=='':
moves = '2i7d+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w2i)and Wboard.w8c==''\
and board.s3h+board.s4g+board.s5f+board.s6e+board.s7d=='':
moves = '2i8c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w2i)and Wboard.w9b==''\
and board.s3h+board.s4g+board.s5f+board.s6e+board.s7d+board.s8c=='':
moves = '2i9b+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2i)and Wboard.w4g==''\
and board.s3h=='':
moves = '2i4g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2i)and Wboard.w5f==''\
and board.s3h+board.s4g=='':
moves = '2i5f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2i)and Wboard.w6e==''\
and board.s3h+board.s4g+board.s5f=='':
moves = '2i6e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2i)and Wboard.w7d==''\
and board.s3h+board.s4g+board.s5f+board.s6e=='':
moves = '2i7d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2i)and Wboard.w8c==''\
and board.s3h+board.s4g+board.s5f+board.s6e+board.s7d=='':
moves = '2i8c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2i)and Wboard.w9b==''\
and board.s3h+board.s4g+board.s5f+board.s6e+board.s7d+board.s8c=='':
moves = '2i9b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w3i !='':
if re.match(r'[gk+]',Wboard.w3i)and Wboard.w2i=='':
moves = '3i2i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[gk+]',Wboard.w3i)and Wboard.w4i=='':
moves = '3i4i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[gk+]',Wboard.w3i)and Wboard.w3h=='':
moves = '3i3h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|s|k',Wboard.w3i)and Wboard.w2h=='':
moves = '3i2h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|s|k',Wboard.w3i)and Wboard.w4h=='':
moves = '3i4h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r',Wboard.w3i)and Wboard.w2i=='':
moves = '3i2i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r',Wboard.w3i)and Wboard.w4i=='':
moves = '3i4i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r',Wboard.w3i)and Wboard.w3h=='':
moves = '3i3h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[bs]',Wboard.w3i)and Wboard.w2h=='':
moves = '3i2h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[bs]',Wboard.w3i)and Wboard.w4h=='':
moves = '3i4h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3i)and Wboard.w3g==''\
and board.s3h=='':
moves = '3i3g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w3i)and Wboard.w3g==''\
and board.s3h=='':
moves = '3i3g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3i)and Wboard.w3f==''\
and board.s3h+board.s3g=='':
moves = '3i3f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w3i)and Wboard.w3f==''\
and board.s3h+board.s3g=='':
moves = '3i3f+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3i)and Wboard.w3e==''\
and board.s3h+board.s3g+board.s3f=='':
moves = '3i3e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w3i)and Wboard.w3e==''\
and board.s3h+board.s3g+board.s3f=='':
moves = '3i3e+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3i)and Wboard.w3d==''\
and board.s3h+board.s3g+board.s3f+board.s3e=='':
moves = '3i3d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w3i)and Wboard.w3d==''\
and board.s3h+board.s3g+board.s3f+board.s3e=='':
moves = '3i3d+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3i)and Wboard.w3c==''\
and board.s3h+board.s3g+board.s3f+board.s3e+board.s3d=='':
moves = '3i3c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w3i)and Wboard.w3c==''\
and board.s3h+board.s3g+board.s3f+board.s3e+board.s3d=='':
moves = '3i3c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3i)and Wboard.w3b==''\
and board.s3h+board.s3g+board.s3f+board.s3e+board.s3d+board.s3c=='':
moves = '3i3b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w3i)and Wboard.w3b==''\
and board.s3h+board.s3g+board.s3f+board.s3e+board.s3d+board.s3c=='':
moves = '3i3b+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3i)and Wboard.w3a==''\
and board.s3h+board.s3g+board.s3f+board.s3e+board.s3d+board.s3c+board.s3b=='':
moves = '3i3a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w3i)and Wboard.w3a==''\
and board.s3h+board.s3g+board.s3f+board.s3e+board.s3d+board.s3c+board.s3b=='':
moves = '3i3a+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3i)and Wboard.w1i==''\
and board.s2i=='':
moves = '3i1i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w3i)and Wboard.w1i==''\
and board.s2i=='':
moves = '3i1i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3i)and Wboard.w5i==''\
and board.s4i=='':
moves = '3i5i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w3i)and Wboard.w5i==''\
and board.s4i=='':
moves = '3i5i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3i)and Wboard.w6i==''\
and board.s4i+board.s5i=='':
moves = '3i6i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w3i)and Wboard.w6i==''\
and board.s4i+board.s5i=='':
moves = '3i6i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3i)and Wboard.w7i==''\
and board.s4i+board.s5i+board.s6i=='':
moves = '3i7i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w3i)and Wboard.w7i==''\
and board.s4i+board.s5i+board.s6i=='':
moves = '3i7i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3i)and Wboard.w8i==''\
and board.s4i+board.s5i+board.s6i+board.s7i=='':
moves = '3i8i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w3i)and Wboard.w8i==''\
and board.s4i+board.s5i+board.s6i+board.s7i=='':
moves = '3i8i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3i)and Wboard.w9i==''\
and board.s4i+board.s5i+board.s6i+board.s7i+board.s8i=='':
moves = '3i9i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w3i)and Wboard.w9i==''\
and board.s4i+board.s5i+board.s6i+board.s7i+board.s8i=='':
moves = '3i9i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w3i)and Wboard.w1g==''\
and board.s2h=='':
moves = '3i1g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w3i)and Wboard.w5g==''\
and board.s4h=='':
moves = '3i5g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w3i)and Wboard.w6f==''\
and board.s4h+board.s5g=='':
moves = '3i6f+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w3i)and Wboard.w6e==''\
and board.s4h+board.s5g+board.s6f=='':
moves = '3i7e+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w3i)and Wboard.w7d==''\
and board.s4h+board.s5g+board.s6f+board.s7e=='':
moves = '3i8d+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w3i)and Wboard.w9c==''\
and board.s4h+board.s5g+board.s6f+board.s7e+board.s8d=='':
moves = '3i9c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w3i)and Wboard.w1g==''\
and board.s2h=='':
moves = '3i1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w3i)and Wboard.w5g==''\
and board.s4h=='':
moves = '3i5g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w3i)and Wboard.w6f==''\
and board.s4h+board.s5g=='':
moves = '3i6f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w3i)and Wboard.w6e==''\
and board.s4h+board.s5g+board.s6f=='':
moves = '3i7e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w3i)and Wboard.w7d==''\
and board.s4h+board.s5g+board.s6f+board.s7e=='':
moves = '3i8d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w3i)and Wboard.w9c==''\
and board.s4h+board.s5g+board.s6f+board.s7e+board.s8d=='':
moves = '3i9c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w4i !='':
if re.match(r'[gk+]',Wboard.w4i)and Wboard.w3i=='':
moves = '4i3i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[gk+]',Wboard.w4i)and Wboard.w5i=='':
moves = '4i5i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[gk+]',Wboard.w4i)and Wboard.w4h=='':
moves = '4i4h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|s|k',Wboard.w4i)and Wboard.w3h=='':
moves = '4i3h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|s|k',Wboard.w4i)and Wboard.w5h=='':
moves = '4i5h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r',Wboard.w4i)and Wboard.w3i=='':
moves = '4i3i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r',Wboard.w4i)and Wboard.w5i=='':
moves = '4i5i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r',Wboard.w4i)and Wboard.w4h=='':
moves = '4i4h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[bs]',Wboard.w4i)and Wboard.w3h=='':
moves = '4i3h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[bs]',Wboard.w4i)and Wboard.w5h=='':
moves = '4i5h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w4i)and Wboard.w4g==''\
and board.s4h=='':
moves = '4i4g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w4i)and Wboard.w4g==''\
and board.s4h=='':
moves = '4i4g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w4i)and Wboard.w4f==''\
and board.s4h+board.s4g=='':
moves = '4i4f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w4i)and Wboard.w4f==''\
and board.s4h+board.s4g=='':
moves = '4i4f+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w4i)and Wboard.w4e==''\
and board.s4h+board.s4g+board.s4f=='':
moves = '4i4e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w4i)and Wboard.w4e==''\
and board.s4h+board.s4g+board.s4f=='':
moves = '4i4e+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w4i)and Wboard.w4d==''\
and board.s4h+board.s4g+board.s4f+board.s4e=='':
moves = '4i4d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w4i)and Wboard.w4d==''\
and board.s4h+board.s4g+board.s4f+board.s4e=='':
moves = '4i4d+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w4i)and Wboard.w4c==''\
and board.s4h+board.s4g+board.s4f+board.s4e+board.s4d=='':
moves = '4i4c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w4i)and Wboard.w4c==''\
and board.s4h+board.s4g+board.s4f+board.s4e+board.s4d=='':
moves = '4i4c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w4i)and Wboard.w4b==''\
and board.s4h+board.s4g+board.s4f+board.s4e+board.s4d+board.s4c=='':
moves = '4i4b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w4i)and Wboard.w4b==''\
and board.s4h+board.s4g+board.s4f+board.s4e+board.s4d+board.s4c=='':
moves = '4i4b+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w4i)and Wboard.w4a==''\
and board.s4h+board.s4g+board.s4f+board.s4e+board.s4d+board.s4c+board.s4b=='':
moves = '4i4a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w4i)and Wboard.w4a==''\
and board.s4h+board.s4g+board.s4f+board.s4e+board.s4d+board.s4c+board.s4b=='':
moves = '4i4a+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w4i)and Wboard.w1i==''\
and board.s2i+board.s3i=='':
moves = '4i1i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w4i)and Wboard.w1i==''\
and board.s2i+board.s3i=='':
moves = '4i1i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w4i)and Wboard.w2i==''\
and board.s3i=='':
moves = '4i2i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w4i)and Wboard.w2i==''\
and board.s3i=='':
moves = '4i2i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w4i)and Wboard.w6i==''\
and board.s5i=='':
moves = '4i6i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w4i)and Wboard.w6i==''\
and board.s5i=='':
moves = '4i6i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w4i)and Wboard.w7i==''\
and board.s5i+board.s6i=='':
moves = '4i7i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w4i)and | |
Wboard.w5g)and Wboard.w7e==''\
and board.s6f=='':
moves = '5g7e+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b', Wboard.w5g)and Wboard.w8d==''\
and board.s6f+board.s7e=='':
moves = '5g8d+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b', Wboard.w5g)and Wboard.w9c==''\
and board.s6f+board.s7e+board.s8d=='':
moves = '5g9c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w5g)and Wboard.w2d==''\
and board.s3e+board.s4f=='':
moves = '5g2d+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w5g)and Wboard.w3e==''\
and board.s4f=='':
moves = '5g3e+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w5g)and Wboard.w2d==''\
and board.s3e+board.s4f=='':
moves = '5g2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w5g)and Wboard.w3e==''\
and board.s4f=='':
moves = '5g3e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w5g)and Wboard.w1c==''\
and board.s4f+board.s3e+board.s2d=='':
moves = '5g1c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b', Wboard.w5g)and Wboard.w1c==''\
and board.s4f+board.s3e+board.s2d=='':
moves = '5g1c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w5g)and Wboard.w3i==''\
and board.s4h=='':
moves = '5g3i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w5g)and Wboard.w3i==''\
and board.s4h=='':
moves = '5g3i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w5g)and Wboard.w7i==''\
and board.s6h=='':
moves = '5g7i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w5g)and Wboard.w7i==''\
and board.s6h=='':
moves = '5g7i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w6g !='':
if re.match(r'[sgk+]', Wboard.w6g)and Wboard.w6h=='':
moves = '6g6h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgk+]', Wboard.w6g)and Wboard.w5h=='':
moves = '6g5h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgk+]', Wboard.w6g)and Wboard.w7h=='':
moves = '6g7h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[gk+]', Wboard.w6g)and Wboard.w5g=='':
moves = '6g5g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[gk+]', Wboard.w6g)and Wboard.w7g=='':
moves = '6g7g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[gk+]', Wboard.w6g)and Wboard.w6f=='':
moves = '6g6f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|s|k',Wboard.w6g)and Wboard.w5f=='':
moves = '6g5f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|s|k',Wboard.w6g)and Wboard.w7f=='':
moves = '6g7f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[plsr]', Wboard.w6g)and Wboard.w6h=='':
moves = '6g6h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[bs]', Wboard.w6g)and Wboard.w5h=='':
moves = '6g5h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[bs]', Wboard.w6g)and Wboard.w7h=='':
moves = '6g7h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w6g)and Wboard.w5g=='':
moves = '6g5g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w6g)and Wboard.w7g=='':
moves = '6g7g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w6g)and Wboard.w6f=='':
moves = '6g6f+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[bs]', Wboard.w6g)and Wboard.w5f=='':
moves = '6g5f+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[bs]', Wboard.w6g)and Wboard.w7f=='':
moves = '6g7f+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w6g)and Wboard.w5i=='':
moves = '6g5i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w6g)and Wboard.w7i=='':
moves = '6g7i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w6g)and Wboard.w6i==''\
and board.s6h=='':
moves = '6g6i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w6g)and Wboard.w6i==''\
and board.s6h=='':
moves = '6g6i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w6g)and Wboard.w6e==''\
and board.s6f=='':
moves = '6g6e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w6g)and Wboard.w6e==''\
and board.s6f=='':
moves = '6g6e+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w6g)and Wboard.w6d==''\
and board.s6f+board.s6e=='':
moves = '6g6d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w6g)and Wboard.w6d==''\
and board.s6f+board.s6e=='':
moves = '6g6d+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w6g)and Wboard.w6c==''\
and board.s6f+board.s6e+board.s6d=='':
moves = '6g6c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w6g)and Wboard.w6c==''\
and board.s6f+board.s6e+board.s6d=='':
moves = '6g6c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w6g)and Wboard.w6b==''\
and board.s6f+board.s6e+board.s6d+board.s6c=='':
moves = '6g6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w6g)and Wboard.w6b==''\
and board.s6f+board.s6e+board.s6d+board.s6c=='':
moves = '6g6b+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w6g)and Wboard.w6a==''\
and board.s6f+board.s6e+board.s6d+board.s6c+board.s6b=='':
moves = '6g6a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w6g)and Wboard.w6a==''\
and board.s6f+board.s6e+board.s6d+board.s6c+board.s6b=='':
moves = '6g6a+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w6g)and Wboard.w9g==''\
and board.s8g+board.s7g=='':
moves = '6g9g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w6g)and Wboard.w9g==''\
and board.s8g+board.s7g=='':
moves = '6g9g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w6g)and Wboard.w5g==''\
and board.s7g=='':
moves = '6g8g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w6g)and Wboard.w5g==''\
and board.s7g=='':
moves = '6g8g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w6g)and Wboard.w4g==''\
and board.s5g=='':
moves = '6g4g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w6g)and Wboard.w4g==''\
and board.s5g=='':
moves = '6g4g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w6g)and Wboard.w3g==''\
and board.s5g+board.s4g=='':
moves = '6g3g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w6g)and Wboard.w3g==''\
and board.s5g+board.s4g=='':
moves = '6g3g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w6g)and Wboard.w2g==''\
and board.s5g+board.s4g+board.s3g=='':
moves = '6g2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w6g)and Wboard.w2g==''\
and board.s5g+board.s4g+board.s3g=='':
moves = '6g2g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w6g)and Wboard.w1g==''\
and board.s5g+board.s4g+board.s3g+board.s2g=='':
moves = '6g1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w6g)and Wboard.w1g==''\
and board.s5g+board.s4g+board.s3g+board.s2g=='':
moves = '6g1g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w6g)and Wboard.w4e==''\
and board.s5f=='':
moves = '6g4e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w6g)and Wboard.w3d==''\
and board.s5f+board.s4e=='':
moves = '6g3d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w6g)and Wboard.w2c==''\
and board.s5f+board.s4e+board.s3d=='':
moves = '6g2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w6g)and Wboard.w1b==''\
and board.s5f+board.s4e+board.s3d+board.s2c=='':
moves = '6g1b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B', Wboard.w6g)and Wboard.w4e==''\
and board.s5f=='':
moves = '6g4e+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b', Wboard.w6g)and Wboard.w3d==''\
and board.s5f+board.s4e=='':
moves = '6g3d+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b', Wboard.w6g)and Wboard.w2c==''\
and board.s5f+board.s4e+board.s3d=='':
moves = '6g2c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b', Wboard.w6g)and Wboard.w1b==''\
and board.s5f+board.s4e+board.s3d+board.s2c=='':
moves = '6g1b+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w6g)and Wboard.w9d==''\
and board.s8e+board.s7f=='':
moves = '6g9d+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w6g)and Wboard.w8e==''\
and board.s7f=='':
moves = '6g8e+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w6g)and Wboard.w9d==''\
and board.s8e+board.s7f=='':
moves = '6g9d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w6g)and Wboard.w8e==''\
and board.s7f=='':
moves = '6g8e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w6g)and Wboard.w8i==''\
and board.s7h=='':
moves = '6g8i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w6g)and Wboard.w8i==''\
and board.s7h=='':
moves = '6g8i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w6g)and Wboard.w4i==''\
and board.s5h=='':
moves = '6g4i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w6g)and Wboard.w4i==''\
and board.s5h=='':
moves = '6g4i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w7g !='':
if re.match(r'[sgk+]', Wboard.w7g)and Wboard.w7h=='':
moves = '7g7h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgk+]', Wboard.w7g)and Wboard.w6h=='':
moves = '7g6h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgk+]', Wboard.w7g)and Wboard.w8h=='':
moves = '7g8h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[gk+]', Wboard.w7g)and Wboard.w6g=='':
moves = '7g6g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[gk+]', Wboard.w7g)and Wboard.w8g=='':
moves = '7g8g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[gk+]', Wboard.w7g)and Wboard.w7f=='':
moves = '7g7f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|s|k',Wboard.w7g)and Wboard.w6f=='':
moves = '7g6f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|s|k',Wboard.w7g)and Wboard.w8f=='':
moves = '7g8f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[plsr]', Wboard.w7g)and Wboard.w7h=='':
moves = '7g7h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[bs]', Wboard.w7g)and Wboard.w6h=='':
moves = '7g6h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[bs]', Wboard.w7g)and Wboard.w8h=='':
moves = '7g8h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w7g)and Wboard.w6g=='':
moves = '7g6g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w7g)and Wboard.w8g=='':
moves = '7g8g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('r', Wboard.w7g)and Wboard.w7f=='':
moves = '7g7f+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[bs]', Wboard.w7g)and Wboard.w6f=='':
moves = '7g6f+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[bs]', Wboard.w7g)and Wboard.w8f=='':
moves = '7g8f+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w7g)and Wboard.w6i=='':
moves = '7g6i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w7g)and Wboard.w8i=='':
moves = '7g8i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w7g)and Wboard.w7i==''\
and board.s7h=='':
moves = '7g7i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w7g)and Wboard.w7i==''\
and board.s7h=='':
| |
get_value(..) but returns all values where the
subexperiments match the additional kwargs arguments. if alpha=1.0,
beta=0.01 is given, then only those experiment values are returned,
as a list.
"""
subexps = self.get_exps(exp)
tagvalues = ['%s%s'%(k, convert_param_to_dirname(kwargs[k])) for k in kwargs]
values = [self.get_value(se, rep, tag, which) for se in subexps if all(map(lambda tv: tv in se, tagvalues))]
params = [self.get_params(se) for se in subexps if all(map(lambda tv: tv in se, tagvalues))]
return values, params
def get_histories_fix_params(self, exp, rep, tag, **kwargs):
    """Return histories (and params) of subexperiments matching kwargs.

    Like get_history(..), but restricted to subexperiments of `exp`
    whose names contain every 'key<value>' tag built from the keyword
    arguments; e.g. with alpha=1.0, beta=0.01 only matching experiment
    histories are returned, as a list.

    Args:
        exp: parent experiment name/path passed to get_exps.
        rep: repetition index forwarded to get_history.
        tag: history tag forwarded to get_history.
        **kwargs: parameter name/value pairs a subexperiment name must
            contain to be included.

    Returns:
        (histories, params): parallel lists over matching subexperiments.
    """
    # Format each value as 'key%f' and collapse trailing zeros to a single
    # '0' so the tag matches how values are embedded in directory names
    # (e.g. alpha=1.0 -> 'alpha1.0').
    tagvalues = [re.sub("0+$", '0', '%s%f' % (k, kwargs[k])) for k in kwargs]
    # Filter once (the original evaluated the same all(...) predicate twice,
    # once per output list); behavior is unchanged.
    matching = [se for se in self.get_exps(exp)
                if all(tv in se for tv in tagvalues)]
    histories = [self.get_history(se, rep, tag) for se in matching]
    params = [self.get_params(se) for se in matching]
    return histories, params
def get_histories_over_repetitions(self, exp, tags, aggregate):
    """ this function gets all histories of all repetitions using get_history() on the given
        tag(s), and then applies the function given by 'aggregate' to all corresponding values
        in each history over all iterations. Typical aggregate functions could be 'mean' or
        'max'.

        Returns the aggregated array directly when a single tag is
        requested, otherwise an OrderedDict mapping tag -> aggregated array.
    """
    params = self.get_params(exp)
    # explicitly make tags list in case of 'all'
    if tags == 'all':
        tags = self.get_history(exp, 0, 'all').keys()
    # make list of tags if it is just a string
    if not hasattr(tags, '__iter__'):
        tags = [tags]
    results = OrderedDict()
    for tag in tags:
        # get all histories
        histories = zeros((params['repetitions'], params['iterations']))
        skipped = []
        for i in range(params['repetitions']):
            logging.debug("Getting history over tag {} repetition {}".format(tag, i))
            try:
                histories[i, :] = self.get_history(exp, i, tag)
            except ValueError:
                # shape mismatch: the history is empty, too long or too short
                h = self.get_history(exp, i, tag)
                if len(h) == 0:
                    # history not existent, skip it
                    logging.warning('Exp: %s history %i for tag "%s" has length 0 (expected: %i). all other histories will be truncated.\n'%(exp, i, tag, params['iterations']))
                    skipped.append(i)
                elif len(h) > params['iterations']:
                    # if history too long, crop it
                    logging.warning('Expsuite: history %i has length %i (expected: %i). it will be truncated.\n'%(i, len(h), params['iterations']))
                    h = h[:params['iterations']]
                    histories[i,:] = h
                elif len(h) < params['iterations']:
                    # if history too short, crop everything else
                    # NOTE: this shrinks params['iterations'] for the rest of the loop
                    logging.warning('Exp: %s history %i for tag "%s" has length %i (expected: %i). all other histories will be truncated.\n'%(exp, i, tag, len(h), params['iterations']))
                    params['iterations'] = len(h)
                    histories = histories[:,:params['iterations']]
                    histories[i, :] = h
        # remove all rows that have been skipped
        logging.debug("removing indices {} from histories table {}".format(skipped,histories))
        try:
            histories = delete(histories, skipped, axis=0)
            params['repetitions'] -= len(skipped)
        except:
            # best effort: keep the unfiltered table if deletion fails
            pass
        # calculate result from each column with aggregation function
        aggregated = zeros(params['iterations'])
        for i in range(params['iterations']):
            aggregated[i] = aggregate(histories[:, i])
        # if only one tag is requested, return list immediately, otherwise append to dictionary
        if len(tags) == 1:
            return aggregated
        else:
            results[tag] = aggregated
    return results
def haserror(self, params, rep):
    """ Helper function to identify exceptions on one experiment.

    Inspects the repetition's log file (<rep>.log inside the experiment
    directory) and returns True when its last line contains the marker
    "exception:error"; returns False for missing or empty log files.
    """
    fullpath = os.path.join(params['path'], params['name'])
    logname = os.path.join(fullpath, '%i.log'%rep)
    if not os.path.exists(logname):
        return False
    # context manager guarantees the handle is closed even if reading
    # raises (the previous explicit close() leaked on exceptions)
    with open(logname, 'r') as logfile:
        lines = logfile.readlines()
    if not lines:
        # empty log file: nothing recorded, so no error
        return False
    return "exception:error" in lines[-1]
def browse(self):
    """ go through all subfolders (starting at '.') and return information
        about the existing experiments. if the -B option is given, all
        parameters are shown, -b only displays the most important ones.
        this function does *not* execute any experiments.
    """
    for d in self.get_exps('.'):
        params = self.get_params(d)
        name = params['name']
        basename = name.split('/')[0]
        # if -e option is used, only show requested experiments
        if self.options.experiments and basename not in self.options.experiments:
            continue
        fullpath = os.path.join(params['path'], name)
        # calculate progress
        prog = 0
        for i in range(params['repetitions']):
            prog += progress(params, i)
        prog /= params['repetitions']
        # NOTE(review): `i` is the loop variable left over from the loop
        # above, so only the LAST repetition is checked for a crash --
        # confirm this is intended.
        haserror = self.haserror(params, i)
        # if progress flag is set, only show the progress bars
        if self.options.progress:
            # 25-character bar, one '=' per 4% of progress; '*' marks a crash
            bar = "["
            bar += "="*int(prog/4)
            bar += " "*int(25-prog/4)
            bar += "]"
            if haserror:
                bar += " *"
            print '%3i%% %27s %s'%(prog,bar,d)
            continue
        print '%16s %s'%('experiment', d)
        try:
            # oldest and newest .log/.cfg files give start and end timestamps
            minfile = min(
                (os.path.join(dirname, filename)
                for dirname, dirnames, filenames in os.walk(fullpath)
                for filename in filenames
                if filename.endswith(('.log', '.cfg'))),
                key=lambda fn: os.stat(fn).st_mtime)
            maxfile = max(
                (os.path.join(dirname, filename)
                for dirname, dirnames, filenames in os.walk(fullpath)
                for filename in filenames
                if filename.endswith(('.log', '.cfg'))),
                key=lambda fn: os.stat(fn).st_mtime)
        except ValueError:
            # min()/max() raise ValueError on an empty sequence: not started
            print ' started %s'%'not yet'
        else:
            print ' started %s'%time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(os.stat(minfile).st_mtime))
            if haserror:
                print ' *** crashed %s'%time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(os.stat(maxfile).st_mtime))
            else:
                print ' ended %s'%time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(os.stat(maxfile).st_mtime))
        for k in ['repetitions', 'iterations']:
            print '%16s %s'%(k, params[k])
        print '%16s %i%%'%('progress', prog)
        if self.options.browse_big:
            # more verbose output
            for p in [p for p in params if p not in ('repetitions', 'iterations', 'path', 'name')]:
                print '%16s %s'%(p, params[p])
        print
def expand_param_list(self, paramlist):
    """ expands the parameters list according to one of these schemes:
        grid: every list item is combined with every other list item
        list: every n-th list item of parameter lists are combined

        Returns a flat list of fully-expanded parameter dictionaries, one
        per subexperiment, each with a unique 'name' encoding its values.
    """
    # for one single experiment, still wrap it in list
    # (types.DictType is Python-2-only; this module targets Python 2)
    if type(paramlist) == types.DictType:
        paramlist = [paramlist]
    # get all options that are iteratable and build all combinations (grid) or tuples (list)
    iparamlist = []
    for params in paramlist:
        if ('experiment' in params and params['experiment'] == 'single'):
            # 'single' experiments are never expanded
            iparamlist.append(params)
        else:
            # every iterable, non-dict value spawns subexperiments
            iterparams = [p for p in params if hasattr(params[p], '__iter__') and not isinstance(params[p], dict)]
            if len(iterparams) > 0:
                # write intermediate config file
                self.mkdir(os.path.join(params['path'], params['name']))
                self.write_config_file(params, os.path.join(params['path'], params['name']))
                # create sub experiments (check if grid or list is requested)
                if 'experiment' in params and params['experiment'] == 'list':
                    # izip is Python-2 itertools; pairs the n-th items together
                    iterfunc = itertools.izip
                elif ('experiment' not in params) or ('experiment' in params and params['experiment'] == 'grid'):
                    # default: full cartesian product of all iterable options
                    iterfunc = itertools.product
                else:
                    raise SystemExit("unexpected value '%s' for parameter 'experiment'. Use 'grid', 'list' or 'single'."%params['experiment'])
                for il in iterfunc(*[params[p] for p in iterparams]):
                    par = params.copy()
                    # encode the chosen values into the subexperiment's directory name
                    converted = str(zip(iterparams, map(convert_param_to_dirname, il)))
                    par['name'] = par['name'] + '/' + re.sub("[' \[\],()]", '', converted)
                    for i, ip in enumerate(iterparams):
                        par[ip] = il[i]
                    iparamlist.append(par)
            else:
                iparamlist.append(params)
    return iparamlist
def create_dir(self, params, delete=False):
    """ creates a subdirectory for the experiment, and deletes existing
        files, if the delete flag is true. then writes the current
        experiment.cfg file in the folder.
    """
    # create experiment path and subdir
    fullpath = os.path.join(params['path'], params['name'])
    self.mkdir(fullpath)
    # delete old histories if --del flag is active
    if delete:
        # remove plain files directly instead of shelling out to `rm`:
        # the old os.system('rm %s/*') broke on paths containing spaces
        # or shell metacharacters (and was an injection hazard).
        # Subdirectories are left alone, as with the old non-recursive rm.
        for entry in os.listdir(fullpath):
            entrypath = os.path.join(fullpath, entry)
            if os.path.isfile(entrypath):
                os.remove(entrypath)
    # write a config file for this single exp. in the folder
    self.write_config_file(params, fullpath)
def rerun_recursive(self):
    """Find every experiment.cfg below the current directory and re-run it."""
    cwd = os.getcwd()
    # collect the absolute path of every nested experiment.cfg first
    configs = [
        os.path.join(cwd, root, name)
        for root, dirnames, filenames in os.walk('.')
        for name in fnmatch.filter(filenames, "experiment.cfg")
    ]
    sys.stderr.write("Found nested filenames: \n{}\n\n".format("\n - ".join(configs)))
    for cfg in configs:
        # point the option state at this config, then re-run it
        self.options.config = cfg
        self.options.rerun = self.options.rerun_recursive
        sys.stderr.write("\n*******************\nRunning {}\n\n".format(cfg))
        try:
            self.parse_cfg()
        except IOError:
            sys.stderr.write("Could not read filename {}".format(cfg))
            continue
        self.start()
def start(self):
    """Entry point: run all experiments requested by the config file."""
    opts = self.options
    # browse/progress modes only display information and never run anything
    if opts.browse or opts.browse_big or opts.progress:
        self.browse()
        raise SystemExit
    logging.basicConfig(
        level=logging.DEBUG if opts.debug else logging.WARNING,
        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
        datefmt='%m-%d %H:%M')
    sys.setrecursionlimit(2000)
    # build one parameter dictionary per selected config section
    paramlist = []
    for section in self.cfgparser.sections():
        if opts.experiments and section not in opts.experiments:
            continue
        params = self.items_to_params(self.cfgparser.items(section))
        params['name'] = section
        paramlist.append(params)
    self.do_experiment(paramlist)
def do_experiment(self, params):
""" runs one experiment programatically and returns.
params: either parameter dictionary (for one single experiment) or a list of parameter
dictionaries (for several experiments).
"""
paramlist = self.expand_param_list(params)
# create directories, write config files
for pl in paramlist:
# check for required param keys
if ('name' in pl) and | |
# moldr/scan.py
""" drivers for coordinate scans
"""
import numpy
import automol
import elstruct
import autofile
import moldr
from elstruct.reader._molpro2015.molecule import hess_geometry
def hindered_rotor_scans(
        spc_info, thy_level, cnf_run_fs, cnf_save_fs, script_str, overwrite,
        scan_increment=30., saddle=False, tors_names='', frm_bnd_key=None,
        brk_bnd_key=None, **opt_kwargs):
    """ Perform 1d scans over each of the torsional coordinates

    Works from the minimum-energy conformer: builds one grid per torsion,
    then runs (and saves) a constrained-optimization scan along each grid.
    Does nothing when no minimum-energy conformer is found.

    frm_bnd_key/brk_bnd_key default to empty lists; None sentinels are
    used instead of mutable [] defaults (shared-state pitfall).
    """
    # normalize sentinels so downstream code still receives lists
    if frm_bnd_key is None:
        frm_bnd_key = []
    if brk_bnd_key is None:
        brk_bnd_key = []
    min_cnf_locs = moldr.util.min_energy_conformer_locators(cnf_save_fs)
    if min_cnf_locs:
        min_cnf_run_path = cnf_run_fs.leaf.path(min_cnf_locs)
        min_cnf_save_path = cnf_save_fs.leaf.path(min_cnf_locs)
        scn_run_fs = autofile.fs.scan(min_cnf_run_path)
        scn_save_fs = autofile.fs.scan(min_cnf_save_path)
        geo = cnf_save_fs.leaf.file.geometry.read(min_cnf_locs)
        zma = cnf_save_fs.leaf.file.zmatrix.read(min_cnf_locs)
        val_dct = automol.zmatrix.values(zma)
        # for non-saddle species the torsions come from the geometry itself
        if not saddle:
            tors_names = automol.geom.zmatrix_torsion_coordinate_names(geo)
        if tors_names:
            tors_linspaces = automol.zmatrix.torsional_scan_linspaces(
                zma, tors_names, scan_increment, frm_bnd_key=frm_bnd_key,
                brk_bnd_key=brk_bnd_key)
            # shift each grid so it starts at the conformer's current angle
            tors_grids = [
                numpy.linspace(*linspace) + val_dct[name]
                for name, linspace in zip(tors_names, tors_linspaces)]
            for tors_name, tors_grid in zip(tors_names, tors_grids):
                # save any previously finished points before scanning ...
                save_scan(
                    scn_run_fs=scn_run_fs,
                    scn_save_fs=scn_save_fs,
                    coo_names=[tors_name],
                )
                run_scan(
                    zma=zma,
                    spc_info=spc_info,
                    thy_level=thy_level,
                    grid_dct={tors_name: tors_grid},
                    scn_run_fs=scn_run_fs,
                    scn_save_fs=scn_save_fs,
                    script_str=script_str,
                    overwrite=overwrite,
                    saddle=saddle,
                    **opt_kwargs,
                )
                # ... and save the freshly computed points afterwards
                save_scan(
                    scn_run_fs=scn_run_fs,
                    scn_save_fs=scn_save_fs,
                    coo_names=[tors_name],
                )
def run_scan(
        zma, spc_info, thy_level, grid_dct, scn_run_fs, scn_save_fs,
        script_str, overwrite, update_guess=True,
        reverse_sweep=True, fix_failures=True, saddle=False,
        **kwargs):
    """ run constrained optimization scan

    Dispatches to the 1-d or 2-d scan driver depending on how many
    coordinates grid_dct contains. With reverse_sweep, the grid is
    traversed a second time in reverse order.
    """
    vma = automol.zmatrix.var_(zma)
    # guard against mixing scans of different z-matrices in one filesystem
    if scn_save_fs.trunk.file.vmatrix.exists():
        existing_vma = scn_save_fs.trunk.file.vmatrix.read()
        assert vma == existing_vma
    coo_names = []
    grid_vals = []
    for item in grid_dct.items():
        (coo, coo_grid_vals) = item
        coo_names.append(coo)
        grid_vals.append(coo_grid_vals)
    # for now, running only one-dimensional hindered rotor scans
    scn_save_fs.branch.create([coo_names])
    inf_obj = autofile.system.info.scan_branch(grid_dct)
    scn_save_fs.branch.file.info.write(inf_obj, [coo_names])
    # total number of grid points is the product of the per-axis sizes
    npoint = 1
    for coo_grid_vals in grid_vals:
        npoint *= len(coo_grid_vals)
    grid_idxs = tuple(range(npoint))
    if len(grid_vals) == 1:
        for grid_val in grid_vals[0]:
            scn_run_fs.leaf.create([coo_names, [grid_val]])
        run_prefixes = tuple(scn_run_fs.leaf.path([coo_names, [grid_val]])
                             for grid_val in grid_vals[0])
        _run_1d_scan(
            script_str=script_str,
            run_prefixes=run_prefixes,
            scn_save_fs=scn_save_fs,
            guess_zma=zma,
            coo_name=coo_names[0],
            grid_idxs=grid_idxs,
            grid_vals=grid_vals[0],
            spc_info=spc_info,
            thy_level=thy_level,
            overwrite=overwrite,
            update_guess=update_guess,
            saddle=saddle,
            retry_failed=fix_failures,
            **kwargs
        )
        if reverse_sweep:
            # second pass over the same points in reverse order
            _run_1d_scan(
                script_str=script_str,
                run_prefixes=list(reversed(run_prefixes)),
                scn_save_fs=scn_save_fs,
                guess_zma=zma,
                coo_name=coo_names[0],
                grid_idxs=list(reversed(grid_idxs)),
                grid_vals=list(reversed(grid_vals[0])),
                spc_info=spc_info,
                thy_level=thy_level,
                overwrite=overwrite,
                update_guess=update_guess,
                saddle=saddle,
                **kwargs
            )
    elif len(grid_vals) == 2:
        run_prefixes = []
        for grid_val_i in grid_vals[0]:
            for grid_val_j in grid_vals[1]:
                scn_run_fs.leaf.create([coo_names, [grid_val_i, grid_val_j]])
                run_prefixes.append(scn_run_fs.leaf.path([coo_names, [grid_val_i, grid_val_j]]))
        run_prefixes = tuple(run_prefixes)
        _run_2d_scan(
            script_str=script_str,
            run_prefixes=run_prefixes,
            scn_save_fs=scn_save_fs,
            guess_zma=zma,
            coo_names=coo_names,
            grid_idxs=grid_idxs,
            grid_vals=grid_vals,
            spc_info=spc_info,
            thy_level=thy_level,
            overwrite=overwrite,
            update_guess=update_guess,
            saddle=saddle,
            retry_failed=fix_failures,
            **kwargs
        )
        if reverse_sweep:
            # reversed 2-d sweep: both axes traversed backwards
            run_prefixes = []
            for grid_val_i in grid_vals[0][::-1]:
                for grid_val_j in grid_vals[1][::-1]:
                    run_prefixes.append(scn_run_fs.leaf.path([coo_names, [grid_val_i, grid_val_j]]))
            run_prefixes = tuple(run_prefixes)
            _run_2d_scan(
                script_str=script_str,
                run_prefixes=run_prefixes,
                scn_save_fs=scn_save_fs,
                guess_zma=zma,
                coo_names=coo_names,
                grid_idxs=list(reversed(grid_idxs)),
                grid_vals=[list(reversed(grid_vals[0])), list(reversed(grid_vals[1]))],
                spc_info=spc_info,
                thy_level=thy_level,
                overwrite=overwrite,
                update_guess=update_guess,
                saddle=saddle,
                **kwargs
            )
def run_multiref_rscan(
        formula, high_mul, zma, spc_info, multi_level, dist_name, grid1, grid2,
        scn_run_fs, scn_save_fs, script_str, overwrite, update_guess=True, gradient=False, hessian=False, num_act_elc=None, num_act_orb=None,
        **kwargs):
    """ run constrained optimization scan

    Multireference (CASSCF-based) scan over one distance coordinate.
    The two grids (grid1, grid2) are scanned as consecutive 1-d sweeps
    using program options derived from multi_level.
    """
    vma = automol.zmatrix.var_(zma)
    # guard against mixing scans of different z-matrices in one filesystem
    if scn_save_fs.trunk.file.vmatrix.exists():
        existing_vma = scn_save_fs.trunk.file.vmatrix.read()
        assert vma == existing_vma
    # the combined grid is only used to label the scan branch
    grid = numpy.append(grid1, grid2)
    grid_dct = {dist_name: grid}
    if len(grid_dct) > 1:
        raise NotImplementedError
    coo_names = []
    grid_vals = []
    for item in grid_dct.items():
        (coo, coo_grid_vals) = item
        coo_names.append(coo)
        grid_vals.append(coo_grid_vals)
    scn_save_fs.branch.create([coo_names])
    inf_obj = autofile.system.info.scan_branch(grid_dct)
    scn_save_fs.branch.file.info.write(inf_obj, [coo_names])
    prog = multi_level[0]
    method = multi_level[1]
    _, opt_script_str, _, opt_kwargs = moldr.util.run_qchem_par(prog, method)
    # default active space: one electron/orbital per unpaired spin
    if num_act_elc is None and num_act_orb is None:
        num_act_elc = high_mul - 1
        num_act_orb = num_act_elc
    # build the multireference wavefunction guess at the first grid point
    ref_zma = automol.zmatrix.set_values(zma, {coo_names[0]: grid_vals[0][0]})
    cas_opt = ['', '']
    cas_opt[0], _ = moldr.ts.cas_options_1(spc_info, formula, num_act_elc, num_act_orb, high_mul)
    cas_opt[1], _ = moldr.ts.cas_options_2(spc_info, formula, num_act_elc, num_act_orb, high_mul)
    guess_str = moldr.ts.multiref_wavefunction_guess(
        high_mul, ref_zma, spc_info, multi_level, cas_opt)
    guess_lines = guess_str.splitlines()
    opt_kwargs['casscf_options'] = cas_opt[1]
    opt_kwargs['mol_options'] = ['nosym']
    opt_kwargs['gen_lines'] = {1: guess_lines}
    # first sweep: grid1
    coo_names = []
    grid1_vals = []
    grid1_dct = {dist_name: grid1}
    for item in grid1_dct.items():
        (coo, coo_grid1_vals) = item
        coo_names.append(coo)
        grid1_vals.append(coo_grid1_vals)
    npoint = 1
    for coo_grid1_vals in grid1_vals:
        npoint *= len(coo_grid1_vals)
    grid1_idxs = tuple(range(npoint))
    if len(grid1_vals) == 1:
        for grid1_val in grid1_vals[0]:
            scn_run_fs.leaf.create([coo_names, [grid1_val]])
        run_prefixes = tuple(scn_run_fs.leaf.path([coo_names, [grid1_val]])
                             for grid1_val in grid1_vals[0])
        _run_1d_scan(
            script_str=opt_script_str,
            run_prefixes=run_prefixes,
            scn_save_fs=scn_save_fs,
            guess_zma=zma,
            coo_name=coo_names[0],
            grid_idxs=grid1_idxs,
            grid_vals=grid1_vals[0],
            spc_info=spc_info,
            thy_level=multi_level,
            overwrite=overwrite,
            update_guess=update_guess,
            gradient=gradient,
            hessian=hessian,
            **opt_kwargs,
        )
    # second sweep: grid2
    coo_names = []
    grid2_vals = []
    grid2_dct = {dist_name: grid2}
    for item in grid2_dct.items():
        (coo, coo_grid2_vals) = item
        coo_names.append(coo)
        grid2_vals.append(coo_grid2_vals)
    npoint = 1
    for coo_grid2_vals in grid2_vals:
        npoint *= len(coo_grid2_vals)
    grid2_idxs = tuple(range(npoint))
    if len(grid2_vals) == 1:
        for grid2_val in grid2_vals[0]:
            scn_run_fs.leaf.create([coo_names, [grid2_val]])
        run_prefixes = tuple(scn_run_fs.leaf.path([coo_names, [grid2_val]])
                             for grid2_val in grid2_vals[0])
        _run_1d_scan(
            script_str=opt_script_str,
            run_prefixes=run_prefixes,
            scn_save_fs=scn_save_fs,
            guess_zma=zma,
            coo_name=coo_names[0],
            grid_idxs=grid2_idxs,
            grid_vals=grid2_vals[0],
            spc_info=spc_info,
            thy_level=multi_level,
            overwrite=overwrite,
            update_guess=update_guess,
            gradient=gradient,
            hessian=hessian,
            **opt_kwargs,
        )
def _run_1d_scan(
        script_str, run_prefixes, scn_save_fs, guess_zma, coo_name, grid_idxs, grid_vals,
        spc_info, thy_level, overwrite, errors=(), options_mat=(),
        retry_failed=True, update_guess=True, saddle=False, gradient=False, hessian=False,
        **kwargs):
    """ run 1 dimensional scan with constrained optimization

    Optimizes the structure at each grid value with coo_name frozen;
    optionally follows each optimization with gradient and/or hessian
    jobs at the optimized geometry. With update_guess, each converged
    z-matrix seeds the next point.
    """
    npoints = len(grid_idxs)
    assert len(grid_vals) == len(run_prefixes) == npoints
    for grid_idx, grid_val, run_prefix in zip(grid_idxs, grid_vals, run_prefixes):
        print("Point {}/{}".format(grid_idx+1, npoints))
        zma = automol.zmatrix.set_values(guess_zma, {coo_name: grid_val})
        run_fs = autofile.fs.run(run_prefix)
        # skip points that are already saved, unless overwriting
        if not scn_save_fs.leaf.file.geometry.exists([[coo_name], [grid_val]]) or overwrite:
            moldr.driver.run_job(
                job=elstruct.Job.OPTIMIZATION,
                script_str=script_str,
                run_fs=run_fs,
                geom=zma,
                spc_info=spc_info,
                thy_level=thy_level,
                overwrite=overwrite,
                frozen_coordinates=[coo_name],
                errors=errors,
                options_mat=options_mat,
                retry_failed=retry_failed,
                saddle=saddle,
                **kwargs
            )
            ret = moldr.driver.read_job(job=elstruct.Job.OPTIMIZATION, run_fs=run_fs)
            if ret is not None:
                inf_obj, _, out_str = ret
                prog = inf_obj.prog
                opt_zma = elstruct.reader.opt_zmatrix(prog, out_str)
                if update_guess:
                    # seed the next grid point with this converged geometry
                    guess_zma = opt_zma
                if gradient:
                    moldr.driver.run_job(
                        job=elstruct.Job.GRADIENT,
                        script_str=script_str,
                        run_fs=run_fs,
                        geom=opt_zma,
                        spc_info=spc_info,
                        thy_level=thy_level,
                        overwrite=overwrite,
                        frozen_coordinates=[coo_name],
                        errors=errors,
                        options_mat=options_mat,
                        retry_failed=retry_failed,
                        **kwargs
                    )
                    ret = moldr.driver.read_job(job=elstruct.Job.GRADIENT, run_fs=run_fs)
                if hessian:
                    moldr.driver.run_job(
                        job=elstruct.Job.HESSIAN,
                        script_str=script_str,
                        run_fs=run_fs,
                        geom=opt_zma,
                        spc_info=spc_info,
                        thy_level=thy_level,
                        overwrite=overwrite,
                        frozen_coordinates=[coo_name],
                        errors=errors,
                        options_mat=options_mat,
                        retry_failed=retry_failed,
                        **kwargs
                    )
                    ret = moldr.driver.read_job(job=elstruct.Job.HESSIAN, run_fs=run_fs)
def _run_2d_scan(
        script_str, run_prefixes, scn_save_fs, guess_zma, coo_names, grid_idxs, grid_vals,
        spc_info, thy_level, overwrite, errors=(),
        options_mat=(), retry_failed=True, update_guess=True, saddle=False, **kwargs):
    """ run 2-dimensional scan with constrained optimization

    Optimizes the structure at each (grid_val_i, grid_val_j) pair with
    both coordinates frozen; run_prefixes must follow row-major order
    over grid_vals[0] x grid_vals[1].
    """
    npoints = len(grid_idxs)
    assert len(grid_vals[0]) * len(grid_vals[1]) == len(run_prefixes) == npoints
    idx = 0
    for grid_val_i in grid_vals[0]:
        for grid_val_j in grid_vals[1]:
            grid_idx = grid_idxs[idx]
            run_prefix = run_prefixes[idx]
            print("Point {}/{}".format(grid_idx+1, npoints))
            zma = automol.zmatrix.set_values(
                guess_zma, {coo_names[0]: grid_val_i, coo_names[1]: grid_val_j})
            run_fs = autofile.fs.run(run_prefix)
            idx += 1
            # skip points that are already saved, unless overwriting
            if not scn_save_fs.leaf.file.geometry.exists(
                    [coo_names, [grid_val_i, grid_val_j]]) or overwrite:
                moldr.driver.run_job(
                    job=elstruct.Job.OPTIMIZATION,
                    script_str=script_str,
                    run_fs=run_fs,
                    geom=zma,
                    spc_info=spc_info,
                    thy_level=thy_level,
                    overwrite=overwrite,
                    frozen_coordinates=coo_names,
                    errors=errors,
                    options_mat=options_mat,
                    retry_failed=retry_failed,
                    saddle=saddle,
                    **kwargs
                )
                ret = moldr.driver.read_job(job=elstruct.Job.OPTIMIZATION, run_fs=run_fs)
                if update_guess and ret is not None:
                    # seed the next grid point with this converged geometry
                    inf_obj, _, out_str = ret
                    prog = inf_obj.prog
                    guess_zma = elstruct.reader.opt_zmatrix(prog, out_str)
def save_scan(scn_run_fs, scn_save_fs, coo_names, gradient=False, hessian=False):
    """ save the scans that have been run so far

    Reads every finished optimization under the run filesystem branch,
    writes energy/geometry/z-matrix (and optionally gradient/hessian)
    into the save filesystem, then refreshes the branch trajectory file.
    """
    if not scn_run_fs.branch.exists([coo_names]):
        print("No scan to save. Skipping...")
    else:
        locs_lst = []
        for locs in scn_run_fs.leaf.existing([coo_names]):
            # only numeric grid-point leaves belong to this scan
            if not isinstance(locs[1][0], float):
                continue
            run_path = scn_run_fs.leaf.path(locs)
            run_fs = autofile.fs.run(run_path)
            print("Reading from scan run at {}".format(run_path))
            ret = moldr.driver.read_job(job=elstruct.Job.OPTIMIZATION, run_fs=run_fs)
            if ret:
                inf_obj, inp_str, out_str = ret
                prog = inf_obj.prog
                method = inf_obj.method
                ene = elstruct.reader.energy(prog, method, out_str)
                geo = elstruct.reader.opt_geometry(prog, out_str)
                zma = elstruct.reader.opt_zmatrix(prog, out_str)
                save_path = scn_save_fs.leaf.path(locs)
                print(" - Saving...")
                print(" - Save path: {}".format(save_path))
                scn_save_fs.leaf.create(locs)
                scn_save_fs.leaf.file.geometry_info.write(inf_obj, locs)
                scn_save_fs.leaf.file.geometry_input.write(inp_str, locs)
                scn_save_fs.leaf.file.energy.write(ene, locs)
                scn_save_fs.leaf.file.geometry.write(geo, locs)
                scn_save_fs.leaf.file.zmatrix.write(zma, locs)
                locs_lst.append(locs)
                if gradient:
                    ret = moldr.driver.read_job(job=elstruct.Job.GRADIENT, run_fs=run_fs)
                    if ret:
                        inf_obj, inp_str, out_str = ret
                        prog = inf_obj.prog
                        method = inf_obj.method
                        grad = elstruct.reader.gradient(prog, out_str)
                        scn_save_fs.leaf.file.gradient.write(grad, locs)
                if hessian:
                    ret = moldr.driver.read_job(job=elstruct.Job.HESSIAN, run_fs=run_fs)
                    if ret:
                        inf_obj, inp_str, out_str = ret
                        prog = inf_obj.prog
                        method = inf_obj.method
                        hess = elstruct.reader.hessian(prog, out_str)
                        scn_save_fs.leaf.file.hessian.write(hess, locs)
                        # molpro reports the geometry with the hessian output
                        if prog == 'molpro2015':
                            geo = hess_geometry(out_str)
                            scn_save_fs.leaf.file.geometry.write(geo, locs)
        if locs_lst:
            # rebuild the xyz trajectory of all saved points for this branch
            idxs_lst = [locs[-1] for locs in locs_lst]
            enes = [scn_save_fs.leaf.file.energy.read(locs)
                    for locs in locs_lst]
            geos = [scn_save_fs.leaf.file.geometry.read(locs)
                    for locs in locs_lst]
            traj = []
            for idxs, ene, geo in zip(idxs_lst, enes, geos):
                comment = 'energy: {:>15.10f}, grid idxs: {}'.format(ene, idxs)
                traj.append((comment, geo))
            traj_path = scn_save_fs.branch.file.trajectory.path([coo_names])
            print("Updating scan trajectory file at {}".format(traj_path))
            scn_save_fs.branch.file.trajectory.write(traj, [coo_names])
def infinite_separation_energy(
spc_1_info, spc_2_info, ts_info, high_mul, ref_zma, ini_thy_info, thy_info,
multi_info, run_prefix, save_prefix, scn_run_fs, scn_save_fs, locs, overwrite=False,
num_act_elc=None, num_act_orb=None):
""" Obtain the infinite separation energy from the multireference energy at a given
reference point, the high-spin low-spin splitting at that reference point, and the
high level energy for the high spin state at the reference geometry and for the fragments
"""
# set up all the file systems for the TS
# start with the geo and reference theory info
geo_run_path = scn_run_fs.leaf.path(locs)
geo_save_path = scn_save_fs.leaf.path(locs)
geo = scn_save_fs.leaf.file.geometry.read(locs)
sp_run_fs = autofile.fs.single_point(geo_run_path)
sp_save_fs = autofile.fs.single_point(geo_save_path)
# get the multi reference energy for the low spin state for the reference point on the scan
# file system for low spin multireference calculation
multi_info[0] = 'molpro2015'
multi_info[1] = 'caspt2'
# ultimately the above should be properly passed
prog = multi_info[0]
method = multi_info[1]
# orb_restr = moldr.util.orbital_restriction(ts_info, multi_info)
# multi_lvl = multi_info[0:3]
# multi_lvl.append(orb_restr)
# sp_run_fs.leaf.create(multi_lvl[1:4])
# sp_save_fs.leaf.create(multi_lvl[1:4])
# sp_mr_run_path = sp_run_fs.leaf.path(multi_lvl[1:4])
# sp_mr_save_path = sp_save_fs.leaf.path(multi_lvl[1:4])
# run_mr_fs = autofile.fs.run(sp_mr_run_path)
# mr_script_str, _, mr_kwargs, _ = moldr.util.run_qchem_par(prog, method)
# num_act_elc = high_mul
# num_act_orb = num_act_elc
# ts_formula = automol.geom.formula(automol.zmatrix.geometry(ref_zma))
# cas_opt, _ = moldr.ts.cas_options_2(ts_info, ts_formula, num_act_elc, num_act_orb, high_mul)
# guess_str = moldr.ts.multiref_wavefunction_guess(high_mul, ref_zma, ts_info, multi_lvl, cas_opt)
# guess_lines = guess_str.splitlines()
# mr_kwargs['casscf_options'] = cas_opt
# mr_kwargs['mol_options'] = ['nosym']
# mr_kwargs['gen_lines'] = {1: | |
# Craftint/CSF_TZ
# Copyright (c) 2013, Aakvatech and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe import _, scrub
from frappe.utils import getdate, nowdate, flt, cint, formatdate, cstr, now, time_diff_in_seconds
from collections import OrderedDict
from erpnext.accounts.utils import get_currency_precision
from erpnext.accounts.doctype.accounting_dimension.accounting_dimension import get_accounting_dimensions, get_dimension_with_children
# This report gives a summary of all Outstanding Invoices considering the following
# 1. Invoice can be booked via Sales/Purchase Invoice or Journal Entry
# 2. Report handles both receivable and payable
# 3. Key balances for each row are "Invoiced Amount", "Paid Amount", "Credit/Debit Note Amount", "Outstanding Amount"
# 4. For explicit payment terms in invoice (example: 30% advance, 30% on delivery, 40% post delivery),
# the invoice will be broken up into multiple rows, one for each payment term
# 5. If there are payments after the report date (post dated), these will be updated in additional columns
# for future amount
# 6. Configurable Ageing Groups (0-30, 30-60 etc) can be set via filters
# 7. For overpayment against an invoice with payment terms, there will be an additional row
# 8. Invoice details like Sales Persons, Delivery Notes are also fetched comma separated
# 9. Report amounts are in "Party Currency" if party is selected, or company currency for multi-party
# 10. This report is based on all GL Entries that are made against account_type "Receivable" or "Payable"
def execute(filters=None):
    """Report entry point: run the receivable report for Customer parties."""
    report = ReceivablePayableReport(filters)
    return report.run({
        "party_type": "Customer",
        "naming_by": ["Selling Settings", "cust_master_name"],
    })
class ReceivablePayableReport(object):
def __init__(self, filters=None):
    """Normalize filters and fix the date used for ageing calculations."""
    self.filters = frappe._dict(filters or {})
    self.filters.report_date = getdate(self.filters.report_date or nowdate())
    # age against today when the report date lies in the future,
    # otherwise against the report date itself
    today = getdate(nowdate())
    if self.filters.report_date > today:
        self.age_as_on = today
    else:
        self.age_as_on = self.filters.report_date
def run(self, args):
    """Build columns, rows and chart; return the standard frappe report
    tuple (columns, data, message, chart, report_summary, skip_total_row).
    """
    self.filters.update(args)
    self.set_defaults()
    # party naming convention (name vs. naming series) from settings doctype
    self.party_naming_by = frappe.db.get_value(args.get("naming_by")[0], None, args.get("naming_by")[1])
    self.get_columns()
    self.get_data()
    self.get_chart_data()
    return self.columns, self.data, None, self.chart, None, self.skip_total_row
def set_defaults(self):
    """Initialize company, currency and per-run bookkeeping state."""
    if not self.filters.get("company"):
        self.filters.company = frappe.db.get_single_value('Global Defaults', 'default_company')
    self.company_currency = frappe.get_cached_value('Company', self.filters.get("company"), "default_currency")
    self.currency_precision = get_currency_precision() or 2
    # receivable (Customer) reports track debits, payable reports credits
    self.dr_or_cr = "debit" if self.filters.party_type == "Customer" else "credit"
    self.party_type = self.filters.party_type
    self.party_details = {}
    self.invoices = set()
    self.skip_total_row = 0
    if self.filters.get('group_by_party'):
        # grouped output renders its own per-party subtotals + grand total,
        # so the framework's automatic total row is suppressed
        self.previous_party=''
        self.total_row_map = {}
        self.skip_total_row = 1
def get_data(self):
    """Assemble the report rows from GL entries and auxiliary lookups."""
    self.get_gl_entries()
    self.get_sales_invoices_or_customers_based_on_sales_person()
    self.voucher_balance = OrderedDict()
    self.init_voucher_balance() # invoiced, paid, credit_note, outstanding
    # Build delivery note map against all sales invoices
    self.build_delivery_note_map()
    # Get invoice details like bill_no, due_date etc for all invoices
    self.get_invoice_details()
    # fetch future payments against invoices
    self.get_future_payments()
    # Get return entries
    self.get_return_entries()
    self.data = []
    # fold every GL entry into its voucher's running balances
    for gle in self.gl_entries:
        self.update_voucher_balance(gle)
    self.build_data()
def init_voucher_balance(self):
    """Create one zeroed balance row per (voucher_type, voucher_no, party)."""
    # build all keys, since we want to exclude vouchers beyond the report date
    for gle in self.gl_entries:
        # get the balance object for voucher_type
        key = (gle.voucher_type, gle.voucher_no, gle.party)
        if not key in self.voucher_balance:
            self.voucher_balance[key] = frappe._dict(
                voucher_type = gle.voucher_type,
                voucher_no = gle.voucher_no,
                party = gle.party,
                posting_date = gle.posting_date,
                remarks = gle.remarks,
                account_currency = gle.account_currency,
                invoiced = 0.0,
                paid = 0.0,
                credit_note = 0.0,
                outstanding = 0.0
            )
        self.get_invoices(gle)
        if self.filters.get('group_by_party'):
            self.init_subtotal_row(gle.party)
    if self.filters.get('group_by_party'):
        # grand-total accumulator row for grouped output
        self.init_subtotal_row('Total')
def get_invoices(self, gle):
if gle.voucher_type in ('Sales Invoice', 'Purchase Invoice'):
if self.filters.get("sales_person"):
if gle.voucher_no in self.sales_person_records.get("Sales Invoice", []) \
or gle.party in self.sales_person_records.get("Customer", []):
self.invoices.add(gle.voucher_no)
else:
self.invoices.add(gle.voucher_no)
def init_subtotal_row(self, party):
if not self.total_row_map.get(party):
self.total_row_map.setdefault(party, {
'party': party,
'bold': 1
})
for field in self.get_currency_fields():
self.total_row_map[party][field] = 0.0
def get_currency_fields(self):
return ['invoiced', 'foreign_amount', 'paid', 'credit_note', 'outstanding', 'range1',
'range2', 'range3', 'range4', 'range5']
def update_voucher_balance(self, gle):
    """Fold one GL entry into its voucher's invoiced/paid/credit_note sums."""
    # get the row where this balance needs to be updated
    # if its a payment, it will return the linked invoice or will be considered as advance
    row = self.get_voucher_balance(gle)
    if not row: return
    # gle_balance will be the total "debit - credit" for receivable type reports and
    # and vice-versa for payable type reports
    gle_balance = self.get_gle_balance(gle)
    if gle_balance > 0:
        if gle.voucher_type in ('Journal Entry', 'Payment Entry') and gle.against_voucher:
            # debit against sales / purchase invoice
            row.paid -= gle_balance
        else:
            # invoice
            row.invoiced += gle_balance
    else:
        # payment or credit note for receivables
        if self.is_invoice(gle):
            # stand alone debit / credit note
            row.credit_note -= gle_balance
        else:
            # advance / unlinked payment or other adjustment
            row.paid -= gle_balance
    if row.account_currency != self.company_currency:
        # multi-currency voucher: capture the foreign-currency amount too
        if row.voucher_type in ["Sales Invoice","Purchase Invoice"]:
            row.foreign_currency, row.foreign_amount = frappe.get_value(row.voucher_type, row.voucher_no, ["currency","net_total"]) or ""
        elif row.voucher_type in ["Payment Entry","Journal Entry"]:
            row.foreign_amount = gle.credit_in_account_currency or gle.debit_in_account_currency
            row.foreign_currency = row.account_currency
def update_sub_total_row(self, row, party):
total_row = self.total_row_map.get(party)
for field in self.get_currency_fields():
total_row[field] += row.get(field, 0.0)
def append_subtotal_row(self, party):
sub_total_row = self.total_row_map.get(party)
if sub_total_row:
self.data.append(sub_total_row)
self.data.append({})
self.update_sub_total_row(sub_total_row, 'Total')
def get_voucher_balance(self, gle):
    """Return the balance row this GL entry should update, or None when
    a sales-person filter excludes it.

    Payments resolve to their linked invoice's row; unlinked entries fall
    back to their own voucher's row (treated as advances/stand-alone).
    """
    if self.filters.get("sales_person"):
        against_voucher = gle.against_voucher or gle.voucher_no
        if not (gle.party in self.sales_person_records.get("Customer", []) or \
            against_voucher in self.sales_person_records.get("Sales Invoice", [])):
            return
    voucher_balance = None
    if gle.against_voucher:
        # find invoice
        against_voucher = gle.against_voucher
        # If payment is made against credit note
        # and credit note is made against a Sales Invoice
        # then consider the payment against original sales invoice.
        if gle.against_voucher_type in ('Sales Invoice', 'Purchase Invoice'):
            if gle.against_voucher in self.return_entries:
                return_against = self.return_entries.get(gle.against_voucher)
                if return_against:
                    against_voucher = return_against
        voucher_balance = self.voucher_balance.get((gle.against_voucher_type, against_voucher, gle.party))
    if not voucher_balance:
        # no invoice, this is an invoice / stand-alone payment / credit note
        voucher_balance = self.voucher_balance.get((gle.voucher_type, gle.voucher_no, gle.party))
    return voucher_balance
def build_data(self):
    """Turn the accumulated voucher balances into report rows.

    Computes each row's outstanding amount, drops rows whose outstanding
    rounds to zero at the configured currency precision, optionally splits
    invoices into payment-term rows, and appends per-party subtotals and
    the grand total.
    """
    # set outstanding for all the accumulated balances
    # as we can use this to filter out invoices without outstanding
    for key, row in self.voucher_balance.items():
        row.outstanding = flt(row.invoiced - row.paid - row.credit_note, self.currency_precision)
        row.invoice_grand_total = row.invoiced

        # threshold = half a unit of the smallest representable amount,
        # i.e. treat anything that rounds to zero as settled
        if abs(row.outstanding) > 1.0/10 ** self.currency_precision:
            # non-zero oustanding, we must consider this row
            if self.is_invoice(row) and self.filters.based_on_payment_terms:
                # is an invoice, allocate based on fifo
                # adds a list `payment_terms` which contains new rows for each term
                self.allocate_outstanding_based_on_payment_terms(row)

                if row.payment_terms:
                    # make separate rows for each payment term
                    for d in row.payment_terms:
                        if d.outstanding > 0:
                            self.append_row(d)

                    # if there is overpayment, add another row
                    self.allocate_extra_payments_or_credits(row)
                else:
                    # single/no payment term: keep the invoice as one row
                    self.append_row(row)
            else:
                self.append_row(row)

    if self.filters.get('group_by_party'):
        # flush the subtotal of the last party seen, then the grand total
        self.append_subtotal_row(self.previous_party)
        if self.data:
            self.data.append(self.total_row_map.get('Total'))
def append_row(self, row):
    """Finalize one output row and append it to self.data.

    Enriches the row (future payments, invoice/party details, ageing) and,
    when grouping by party, maintains the running per-party subtotal,
    flushing the previous party's subtotal on a party change.
    NOTE(review): the subtotal logic assumes rows arrive sorted by party —
    confirm against the caller's ordering.
    """
    self.allocate_future_payments(row)
    self.set_invoice_details(row)
    self.set_party_details(row)
    self.set_ageing(row)

    if self.filters.get('group_by_party'):
        self.update_sub_total_row(row, row.party)
        # party changed: emit the previous party's subtotal first
        if self.previous_party and (self.previous_party != row.party):
            self.append_subtotal_row(self.previous_party)
        self.previous_party = row.party

    self.data.append(row)
def set_invoice_details(self, row):
    """Merge prefetched invoice details (due date, PO/bill numbers, sales
    team) into the row, preferring the row's own due_date when set."""
    invoice_details = self.invoice_details.get(row.voucher_no, {})

    if row.due_date:
        # row already carries a due date (e.g. from a payment term);
        # don't let the invoice-level due date overwrite it
        invoice_details.pop("due_date", None)
    row.update(invoice_details)

    if row.voucher_type == 'Sales Invoice':
        if self.filters.show_delivery_notes:
            self.set_delivery_notes(row)

        if self.filters.show_sales_person and row.sales_team:
            # flatten the sales-team list into one display column
            row.sales_person = ", ".join(row.sales_team)
            del row['sales_team']
def set_delivery_notes(self, row):
    """Attach a comma-separated list of Delivery Notes linked to this
    voucher, when any exist (map built by build_delivery_note_map)."""
    linked_notes = self.delivery_notes.get(row.voucher_no, [])
    if not linked_notes:
        return
    row.delivery_notes = ', '.join(linked_notes)
def build_delivery_note_map(self):
    """Build self.delivery_notes: invoice name -> set of linked Delivery
    Notes, covering both link directions (DN referenced on the SI item,
    and SI referenced on the DN item)."""
    if self.invoices and self.filters.show_delivery_notes:
        self.delivery_notes = frappe._dict()

        # delivery note link inside sales invoice
        si_against_dn = frappe.db.sql("""
            select parent, delivery_note
            from `tabSales Invoice Item`
            where docstatus=1 and parent in (%s)
        """ % (','.join(['%s'] * len(self.invoices))), tuple(self.invoices), as_dict=1)

        for d in si_against_dn:
            if d.delivery_note:
                self.delivery_notes.setdefault(d.parent, set()).add(d.delivery_note)

        # reverse link: delivery note rows that reference a sales invoice
        dn_against_si = frappe.db.sql("""
            select distinct parent, against_sales_invoice
            from `tabDelivery Note Item`
            where against_sales_invoice in (%s)
        """ % (','.join(['%s'] * len(self.invoices))), tuple(self.invoices) , as_dict=1)

        for d in dn_against_si:
            self.delivery_notes.setdefault(d.against_sales_invoice, set()).add(d.parent)
def get_invoice_details(self):
    """Prefetch per-invoice details keyed by invoice name, for documents
    dated on or before the report date.

    Customers: Sales Invoice due date / PO number, plus optional sales
    team. Suppliers: Purchase Invoice due date / bill number & date.
    Journal Entries carrying a bill_no are treated as invoices too.
    """
    self.invoice_details = frappe._dict()
    if self.party_type == "Customer":
        si_list = frappe.db.sql("""
            select name, due_date, po_no
            from `tabSales Invoice`
            where posting_date <= %s
        """,self.filters.report_date, as_dict=1)
        for d in si_list:
            self.invoice_details.setdefault(d.name, d)

        # Get Sales Team
        if self.filters.show_sales_person:
            sales_team = frappe.db.sql("""
                select parent, sales_person
                from `tabSales Team`
                where parenttype = 'Sales Invoice'
            """, as_dict=1)
            for d in sales_team:
                self.invoice_details.setdefault(d.parent, {})\
                    .setdefault('sales_team', []).append(d.sales_person)

    if self.party_type == "Supplier":
        for pi in frappe.db.sql("""
            select name, due_date, bill_no, bill_date
            from `tabPurchase Invoice`
            where posting_date <= %s
        """, self.filters.report_date, as_dict=1):
            self.invoice_details.setdefault(pi.name, pi)

    # Invoices booked via Journal Entries
    journal_entries = frappe.db.sql("""
        select name, due_date, bill_no, bill_date
        from `tabJournal Entry`
        where posting_date <= %s
    """, self.filters.report_date, as_dict=1)

    for je in journal_entries:
        if je.bill_no:
            # only JEs that carry a supplier bill number act as invoices
            self.invoice_details.setdefault(je.name, je)
def set_party_details(self, row):
    """Merge the party's display details into the row and pick the row's
    reporting currency."""
    # customer / supplier name
    party_details = self.get_party_details(row.party) or {}
    row.update(party_details)

    # A specific party is filtered (filter key is the scrubbed party type,
    # e.g. "customer"): report in the party's account currency; otherwise
    # fall back to the company currency.
    if self.filters.get(scrub(self.filters.party_type)):
        row.currency = row.account_currency
    else:
        row.currency = self.company_currency
def allocate_outstanding_based_on_payment_terms(self, row):
    """Split an invoice row into its payment terms and distribute the
    invoice's paid / credit-note amounts across them, then order the
    terms by due date.

    Terms are filled by get_payment_terms (sorted paid_amount desc, so
    already-paid terms absorb allocations first — FIFO-style).
    """
    self.get_payment_terms(row)
    for term in row.payment_terms:

        # update "paid" and "outstanding" for this term
        if not term.paid:
            self.allocate_closing_to_term(row, term, 'paid')

        # update "credit_note" and "outstanding" for this term
        if term.outstanding:
            self.allocate_closing_to_term(row, term, 'credit_note')

    row.payment_terms = sorted(row.payment_terms, key=lambda x: x['due_date'])
def get_payment_terms(self, row):
    """Load the invoice's Payment Schedule into row.payment_terms.

    When the schedule has zero or one term the list is left empty so the
    invoice remains a single report row.
    """
    # build payment_terms for row
    # NOTE(review): table name comes from row.voucher_type via .format() —
    # assumed to be an internal doctype name, never user input; confirm.
    payment_terms_details = frappe.db.sql("""
        select
            si.name, si.party_account_currency, si.currency, si.conversion_rate,
            ps.due_date, ps.payment_amount, ps.description, ps.paid_amount
        from `tab{0}` si, `tabPayment Schedule` ps
        where
            si.name = ps.parent and
            si.name = %s
        order by ps.paid_amount desc, due_date
    """.format(row.voucher_type), row.voucher_no, as_dict = 1)

    # snapshot the row before terms mutate it; each term starts as a copy
    original_row = frappe._dict(row)
    row.payment_terms = []

    # If no or single payment terms, no need to split the row
    if len(payment_terms_details) <= 1:
        return

    for d in payment_terms_details:
        term = frappe._dict(original_row)
        self.append_payment_term(row, d, term)
def append_payment_term(self, row, d, term):
    """Append one payment-term sub-row to row.payment_terms and deduct the
    term's already-paid amount from the parent row's paid total."""
    if (self.filters.get("customer") or self.filters.get("supplier")) and d.currency == d.party_account_currency:
        # party filter active and invoice billed in the party's account
        # currency: keep the scheduled amount as-is
        invoiced = d.payment_amount
    else:
        # convert the scheduled amount into company currency
        invoiced = flt(flt(d.payment_amount) * flt(d.conversion_rate), self.currency_precision)

    # NOTE(review): relies on frappe._dict.update returning the dict itself
    # (unlike plain dict.update) — confirm against the frappe version in use.
    row.payment_terms.append(term.update({
        "due_date": d.due_date,
        "invoiced": invoiced,
        "invoice_grand_total": row.invoiced,
        "payment_term": d.description,
        "paid": d.paid_amount,
        "credit_note": 0.0,
        "outstanding": invoiced - d.paid_amount
    }))

    if d.paid_amount:
        # what a term already consumed is no longer available on the parent
        row['paid'] -= d.paid_amount
def allocate_closing_to_term(self, row, term, key):
    """Move as much of row[key] ('paid' or 'credit_note') as possible onto
    this payment term, reducing both the row's pool and the term's
    outstanding by the allocated amount. No-op when the pool is empty."""
    if not row[key]:
        return
    allocated = min(row[key], term.outstanding)
    term[key] = allocated
    row[key] -= allocated
    term.outstanding -= allocated
def allocate_extra_payments_or_credits(self, row):
    # allocate extra payments / credits
    # After term allocation, any leftover paid / credit_note on the parent
    # row is an overpayment: emit one extra row (invoiced = 0) carrying the
    # excess, so its outstanding comes out negative.
    additional_row = None
    for key in ('paid', 'credit_note'):
        if row[key] > 0:
            if not additional_row:
                # copy the parent row once, lazily
                additional_row = frappe._dict(row)
            additional_row.invoiced = 0.0
            additional_row[key] = row[key]

    if additional_row:
        additional_row.outstanding = additional_row.invoiced - additional_row.paid - additional_row.credit_note
        self.append_row(additional_row)
def get_future_payments(self):
    """Collect payments posted after the report date (from Payment Entries
    and Journal Entries) into self.future_payments, keyed by
    (invoice_no, party). Only runs when the show_future_payments filter
    is enabled."""
    if self.filters.show_future_payments:
        self.future_payments = frappe._dict()
        future_payments = list(self.get_future_payments_from_payment_entry())
        future_payments += list(self.get_future_payments_from_journal_entry())
        if future_payments:
            for d in future_payments:
                # skip zero-amount rows and rows not tied to an invoice
                if d.future_amount and d.invoice_no:
                    self.future_payments.setdefault((d.invoice_no, d.party), []).append(d)
def get_future_payments_from_payment_entry(self):
    """Fetch Payment Entry allocations posted after the report date for
    this party type.

    Returns dict rows: invoice_no, party, party_type, future_date,
    future_amount, future_ref. `docstatus < 2` keeps non-cancelled
    documents (NOTE(review): presumably drafts included intentionally —
    confirm).
    """
    return frappe.db.sql("""
        select
            ref.reference_name as invoice_no,
            payment_entry.party,
            payment_entry.party_type,
            payment_entry.posting_date as future_date,
            ref.allocated_amount as future_amount,
            payment_entry.reference_no as future_ref
        from
            `tabPayment Entry` as payment_entry inner join `tabPayment Entry Reference` as ref
        on
            (ref.parent = payment_entry.name)
        where
            payment_entry.docstatus < 2
            and payment_entry.posting_date > %s
            and payment_entry.party_type = %s
        """, (self.filters.report_date, self.party_type), as_dict=1)
def get_future_payments_from_journal_entry(self):
    """Fetch Journal Entry allocations posted after the report date for
    this party type, netted per (journal entry, referenced invoice).

    Returns rows shaped like get_future_payments_from_payment_entry():
    invoice_no, party, party_type, future_date, future_amount, future_ref.
    """
    if self.filters.get('party'):
        # party filter active: report amounts in the party's account currency
        amount_field = ("jea.debit_in_account_currency - jea.credit_in_account_currency"
            if self.party_type == 'Supplier' else "jea.credit_in_account_currency - jea.debit_in_account_currency")
    else:
        # Company currency. BUG FIX: this used to be "jea.debit - " — a
        # dangling operator producing invalid SQL `sum(jea.debit - )`.
        # Net both sides, mirroring the account-currency branch above.
        amount_field = "jea.debit - jea.credit" if self.party_type == 'Supplier' else "jea.credit - jea.debit"

    return frappe.db.sql("""
        select
            jea.reference_name as invoice_no,
            jea.party,
            jea.party_type,
            je.posting_date as future_date,
            sum({0}) as future_amount,
            je.cheque_no as future_ref
        from
            `tabJournal Entry` as je inner join `tabJournal Entry Account` as jea
        on
            (jea.parent = je.name)
        where
            je.docstatus < 2
            and je.posting_date > %s
            and jea.party_type = %s
            and jea.reference_name is not null and jea.reference_name != ''
        group by je.name, jea.reference_name
        having future_amount > 0
        """.format(amount_field), (self.filters.report_date, self.party_type), as_dict=1)
def allocate_future_payments(self, row):
# future payments are captured in additional columns
# this method allocates pending future payments against a voucher to
# the current row (which could be generated from payment terms)
if not self.filters.show_future_payments:
return
row.remaining_balance = row.outstanding
row.future_amount = 0.0
for future in self.future_payments.get((row.voucher_no, row.party), []):
if row.remaining_balance > 0 and future.future_amount:
if future.future_amount > row.outstanding:
row.future_amount = row.outstanding
future.future_amount = future.future_amount - row.outstanding
row.remaining_balance = 0
else:
row.future_amount += future.future_amount
future.future_amount = 0
row.remaining_balance = | |
@property
def _common_path(self):
    # XPath-style config path for this container, anchored on the parent
    # bridge-domain node.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')

    return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:bd-pseudowire-evpns'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return True

def _has_data(self):
    # True when any child EVPN pseudowire entry carries data.
    if not self.is_config():
        return False
    if self.bd_pseudowire_evpn is not None:
        for child_ref in self.bd_pseudowire_evpn:
            if child_ref._has_data():
                return True

    return False

@staticmethod
def _meta_info():
    # Lazy import avoids a module-level import cycle with the meta tables.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowireEvpns']['meta_info']
class IpSourceGuard(object):
    """
    IP Source Guard configuration for a bridge domain.

    .. attribute:: enable

    	Enable IP Source Guard

    	**type**\: :py:class:`Empty<ydk.types.Empty>`

    .. attribute:: logging

    	Enable Logging

    	**type**\: :py:class:`Empty<ydk.types.Empty>`

    """

    _prefix = 'l2vpn-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.enable = None
        self.logging = None

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')

        return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:ip-source-guard'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        # Set when either presence leaf has been assigned.
        return self.enable is not None or self.logging is not None

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
        return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.IpSourceGuard']['meta_info']
class Dai(object):
    """
    Dynamic ARP Inspection (DAI) configuration for a bridge domain.

    .. attribute:: dai_address_validation

    	Address Validation sub-container (always instantiated)

    	**type**\: :py:class:`DaiAddressValidation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Dai.DaiAddressValidation>`

    .. attribute:: enable

    	Enable Dynamic ARP Inspection

    	**type**\: :py:class:`Empty<ydk.types.Empty>`

    .. attribute:: logging

    	Enable Logging

    	**type**\: :py:class:`Empty<ydk.types.Empty>`

    """

    _prefix = 'l2vpn-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.dai_address_validation = L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Dai.DaiAddressValidation()
        self.dai_address_validation.parent = self
        self.enable = None
        self.logging = None

    class DaiAddressValidation(object):
        """
        Address Validation options for DAI.

        .. attribute:: destination_mac_verification

        	Enable Destination MAC Verification

        	**type**\: :py:class:`Empty<ydk.types.Empty>`

        .. attribute:: enable

        	Enable Address Validation

        	**type**\: :py:class:`Empty<ydk.types.Empty>`

        .. attribute:: ipv4_verification

        	Enable IPv4 Verification

        	**type**\: :py:class:`Empty<ydk.types.Empty>`

        .. attribute:: source_mac_verification

        	Enable Source MAC Verification

        	**type**\: :py:class:`Empty<ydk.types.Empty>`

        """

        _prefix = 'l2vpn-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.destination_mac_verification = None
            self.enable = None
            self.ipv4_verification = None
            self.source_mac_verification = None

        @property
        def _common_path(self):
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')

            return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:dai-address-validation'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            # Set when any presence leaf has been assigned.
            leaves = (self.destination_mac_verification, self.enable,
                      self.ipv4_verification, self.source_mac_verification)
            return any(leaf is not None for leaf in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
            return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Dai.DaiAddressValidation']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')

        return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:dai'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        child = self.dai_address_validation
        if child is not None and child._has_data():
            return True
        return self.enable is not None or self.logging is not None

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
        return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Dai']['meta_info']
class RoutedInterfaces(object):
    """
    Bridge Domain Routed Interface Table.

    .. attribute:: routed_interface

    	Bridge Domain Routed Interface

    	**type**\: list of :py:class:`RoutedInterface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.RoutedInterfaces.RoutedInterface>`

    """

    _prefix = 'l2vpn-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.routed_interface = YList()
        self.routed_interface.parent = self
        self.routed_interface.name = 'routed_interface'

    class RoutedInterface(object):
        """
        One routed interface bound to the bridge domain.

        .. attribute:: interface_name  <key>

        	The name of the Routed Interface (IOS-XR interface-name pattern)

        	**type**\: str

        """

        _prefix = 'l2vpn-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.interface_name = None

        @property
        def _common_path(self):
            # Keyed list entry: requires both a parent and the key leaf.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.interface_name is None:
                raise YPYModelError('Key property interface_name is None')

            return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:routed-interface[Cisco-IOS-XR-l2vpn-cfg:interface-name = ' + str(self.interface_name) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return self.interface_name is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
            return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.RoutedInterfaces.RoutedInterface']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')

        return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:routed-interfaces'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        entries = self.routed_interface
        if entries is None:
            return False
        return any(entry._has_data() for entry in entries)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
        return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.RoutedInterfaces']['meta_info']
@property
def _common_path(self):
    # Keyed list entry: requires both a parent node and the 'name' key.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    if self.name is None:
        raise YPYModelError('Key property name is None')

    return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:bridge-domain[Cisco-IOS-XR-l2vpn-cfg:name = ' + str(self.name) + ']'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return True

def _has_data(self):
    # True when the key, any scalar leaf, or any child container carries data.
    if not self.is_config():
        return False
    if self.name is not None:
        return True

    if self.bd_attachment_circuits is not None and self.bd_attachment_circuits._has_data():
        return True

    if self.bd_pseudowire_evpns is not None and self.bd_pseudowire_evpns._has_data():
        return True

    if self.bd_pseudowires is not None and self.bd_pseudowires._has_data():
        return True

    if self.bd_storm_controls is not None and self.bd_storm_controls._has_data():
        return True

    if self.bridge_domain_evis is not None and self.bridge_domain_evis._has_data():
        return True

    if self.bridge_domain_mac is not None and self.bridge_domain_mac._has_data():
        return True

    if self.bridge_domain_mtu is not None:
        return True

    if self.bridge_domain_pbb is not None and self.bridge_domain_pbb._has_data():
        return True

    if self.coupled_mode is not None:
        return True

    if self.dai is not None and self.dai._has_data():
        return True

    if self.dhcp is not None:
        return True

    if self.flooding is not None:
        return True

    if self.flooding_unknown_unicast is not None:
        return True

    if self.igmp_snooping is not None:
        return True

    if self.igmp_snooping_disable is not None:
        return True

    if self.ip_source_guard is not None and self.ip_source_guard._has_data():
        return True

    if self.member_vnis is not None and self.member_vnis._has_data():
        return True

    if self.mld_snooping is not None:
        return True

    if self.nv_satellite is not None and self.nv_satellite._has_data():
        return True

    if self.routed_interfaces is not None and self.routed_interfaces._has_data():
        return True

    if self.shutdown is not None:
        return True

    if self.transport_mode is not None:
        return True

    if self.vfis is not None and self.vfis._has_data():
        return True

    return False

@staticmethod
def _meta_info():
    # Lazy import avoids a module-level import cycle with the meta tables.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain']['meta_info']
@property
def _common_path(self):
    # Container path, anchored on the parent bridge-domain-group node.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')

    return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:bridge-domains'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return True

def _has_data(self):
    # True when any child bridge-domain entry carries data.
    if not self.is_config():
        return False
    if self.bridge_domain is not None:
        for child_ref in self.bridge_domain:
            if child_ref._has_data():
                return True

    return False

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains']['meta_info']
@property
def _common_path(self):
    # Absolute path (top-level anchored), so no parent is needed — only
    # the 'name' key must be set.
    if self.name is None:
        raise YPYModelError('Key property name is None')

    return '/Cisco-IOS-XR-l2vpn-cfg:l2vpn/Cisco-IOS-XR-l2vpn-cfg:database/Cisco-IOS-XR-l2vpn-cfg:bridge-domain-groups/Cisco-IOS-XR-l2vpn-cfg:bridge-domain-group[Cisco-IOS-XR-l2vpn-cfg:name = ' + str(self.name) + ']'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return True

def _has_data(self):
    # True when the key is set or the child container carries data.
    if not self.is_config():
        return False
    if self.name is not None:
        return True

    if self.bridge_domains is not None and self.bridge_domains._has_data():
        return True

    return False

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup']['meta_info']
@property
def _common_path(self):
    # Absolute path: this container sits directly under the l2vpn database.
    return '/Cisco-IOS-XR-l2vpn-cfg:l2vpn/Cisco-IOS-XR-l2vpn-cfg:database/Cisco-IOS-XR-l2vpn-cfg:bridge-domain-groups'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return True

def _has_data(self):
    # True when any child bridge-domain-group entry carries data.
    if not self.is_config():
        return False
    if self.bridge_domain_group is not None:
        for child_ref in self.bridge_domain_group:
            if child_ref._has_data():
                return True

    return False

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.Database.BridgeDomainGroups']['meta_info']
class PseudowireClasses(object):
"""
List of pseudowire classes
.. attribute:: pseudowire_class
Pseudowire class
**type**\: list of :py:class:`PseudowireClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.PseudowireClasses.PseudowireClass>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.pseudowire_class = YList()
self.pseudowire_class.parent = self
self.pseudowire_class.name = 'pseudowire_class'
class PseudowireClass(object):
"""
Pseudowire class
.. attribute:: name <key>
Name of the pseudowire class
**type**\: str
**length:** 0..32
.. attribute:: backup_disable_delay
Back Up Pseudowire class
**type**\: :py:class:`BackupDisableDelay <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.PseudowireClasses.PseudowireClass.BackupDisableDelay>`
.. attribute:: enable
Enable pseudowire class
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: l2tpv3_encapsulation
L2TPv3 encapsulation
**type**\: :py:class:`L2Tpv3Encapsulation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation>`
.. attribute:: mac_withdraw
| |
import logging
import math
import random
from pajbot.managers.db import DBManager
from pajbot.managers.handler import HandlerManager
from pajbot.models.command import Command
from pajbot.models.command import CommandExample
from pajbot.models.user import User
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
from pajbot.streamhelper import StreamHelper
log = logging.getLogger(__name__)
def generate_winner_list(winners):
    """Combine a sequence of winner users into one comma-separated string of names."""
    names = [winner.name for winner in winners]
    return ", ".join(names)
def format_win(points_amount):
    """Describe a point delta as "won N" for non-negative amounts, "lost N" otherwise."""
    verb, magnitude = ("won", points_amount) if points_amount >= 0 else ("lost", -points_amount)
    return f"{verb} {magnitude}"
class RaffleModule(BaseModule):
# Multi-raffle payout tuning. NOTE(review): these constants are consumed by
# multi-raffle payout logic outside this excerpt — semantics inferred from
# the names; confirm against the winner-selection code.
MULTI_RAFFLE_MIN_WIN_POINTS_AMOUNT = 100  # presumably the minimum per-winner payout
MULTI_RAFFLE_MAX_WINNERS_RATIO = 0.26  # presumably max winners as a fraction of entrants
MULTI_RAFFLE_MAX_WINNERS_AMOUNT = 200  # presumably an absolute cap on winner count

# Module registration metadata (BaseModule conventions).
ID = __name__.split(".")[-1]  # last dotted component of the module path
NAME = "Raffle"
DESCRIPTION = "Users can participate in a raffle to win points."
CATEGORY = "Game"
SETTINGS = [
ModuleSetting(
key="message_start",
label="Start message | Available arguments: {length}, {points}",
type="text",
required=True,
placeholder=".me A raffle has begun for {points} points. type !join to join the raffle! The raffle will end in {length} seconds",
default=".me A raffle has begun for {points} points. type !join to join the raffle! The raffle will end in {length} seconds",
constraints={"min_str_len": 10, "max_str_len": 400},
),
ModuleSetting(
key="message_running",
label="Running message | Available arguments: {length}, {points}",
type="text",
required=True,
placeholder=".me The raffle for {points} points ends in {length} seconds! Type !join to join the raffle!",
default=".me The raffle for {points} points ends in {length} seconds! Type !join to join the raffle!",
constraints={"min_str_len": 10, "max_str_len": 400},
),
ModuleSetting(
key="message_start_multi",
label="Start message (multi) | Available arguments: {length}, {points}",
type="text",
required=True,
placeholder=".me A multi-raffle has begun for {points} points. type !join to join the raffle! The raffle will end in {length} seconds",
default=".me A multi-raffle has begun for {points} points. type !join to join the raffle! The raffle will end in {length} seconds",
constraints={"min_str_len": 10, "max_str_len": 400},
),
ModuleSetting(
key="message_running_multi",
label="Running message (multi) | Available arguments: {length}, {points}",
type="text",
required=True,
placeholder=".me The multi-raffle for {points} points ends in {length} seconds! Type !join to join the raffle!",
default=".me The multi-raffle for {points} points ends in {length} seconds! Type !join to join the raffle!",
constraints={"min_str_len": 10, "max_str_len": 400},
),
ModuleSetting(
key="single_max_points",
label="Max points for a single raffle",
type="number",
required=True,
placeholder="",
default=3000,
constraints={"min_value": 0, "max_value": 35000},
),
ModuleSetting(
key="max_length",
label="Max length for a single raffle in seconds",
type="number",
required=True,
placeholder="",
default=120,
constraints={"min_value": 0, "max_value": 1200},
),
ModuleSetting(
key="allow_negative_raffles", label="Allow negative raffles", type="boolean", required=True, default=True
),
ModuleSetting(
key="max_negative_points",
label="Max negative points for a single raffle",
type="number",
required=True,
placeholder="",
default=3000,
constraints={"min_value": 1, "max_value": 35000},
),
ModuleSetting(
key="multi_enabled",
label="Enable multi-raffles (!multiraffle/!mraffle)",
type="boolean",
required=True,
default=True,
),
ModuleSetting(
key="multi_max_points",
label="Max points for a multi raffle",
type="number",
required=True,
placeholder="",
default=100000,
constraints={"min_value": 0, "max_value": 1000000},
),
ModuleSetting(
key="multi_max_length",
label="Max length for a multi raffle in seconds",
type="number",
required=True,
placeholder="",
default=600,
constraints={"min_value": 0, "max_value": 1200},
),
ModuleSetting(
key="multi_allow_negative_raffles",
label="Allow negative multi raffles",
type="boolean",
required=True,
default=True,
),
ModuleSetting(
key="multi_max_negative_points",
label="Max negative points for a multi raffle",
type="number",
required=True,
placeholder="",
default=10000,
constraints={"min_value": 1, "max_value": 100000},
),
ModuleSetting(
key="multi_raffle_on_sub",
label="Start a multi raffle when someone subscribes",
type="boolean",
required=True,
default=False,
),
ModuleSetting(
key="default_raffle_type",
label="Default raffle (What raffle type !raffle should invoke)",
type="options",
required=True,
default="Single Raffle",
options=["Single Raffle", "Multi Raffle"],
),
ModuleSetting(
key="show_on_clr", label="Show raffles on the clr overlay", type="boolean", required=True, default=True
),
]
def __init__(self, bot):
    """Initialize the module with no raffle running."""
    super().__init__(bot)
    # Live raffle state:
    self.raffle_running = False  # is a raffle currently open for joins?
    self.raffle_users = set()  # ids of users who typed !join
    self.raffle_points = 0  # prize pool of the current raffle
    self.raffle_length = 0  # raffle duration in seconds
def load_commands(self, **options):
    """Register the module's chat commands.

    Always registers !singleraffle/!sraffle and !join; registers
    !multiraffle/!mraffle when multi-raffles are enabled. !raffle aliases
    whichever variant the default_raffle_type setting selects.
    """
    self.commands["singleraffle"] = Command.raw_command(
        self.raffle,
        delay_all=0,
        delay_user=0,
        level=500,
        description="Start a raffle for points",
        command="raffle",
        examples=[
            CommandExample(
                None,
                "Start a raffle for 69 points",
                chat="user:!raffle 69\n"
                "bot:A raffle has begun for 69 points. Type !join to join the raffle! The raffle will end in 60 seconds.",
                description="Start a 60-second raffle for 69 points",
            ).parse(),
            CommandExample(
                None,
                "Start a raffle with a different length",
                chat="user:!raffle 69 30\n"
                "bot:A raffle has begun for 69 points. Type !join to join the raffle! The raffle will end in 30 seconds.",
                description="Start a 30-second raffle for 69 points",
            ).parse(),
        ],
    )
    self.commands["sraffle"] = self.commands["singleraffle"]
    self.commands["join"] = Command.raw_command(
        self.join,
        delay_all=0,
        delay_user=5,
        description="Join a running raffle",
        examples=[
            CommandExample(
                None,
                "Join a running raffle",
                chat="user:!join",
                description="You don't get confirmation whether you joined the raffle or not.",
            ).parse()
        ],
    )
    if self.settings["multi_enabled"]:
        self.commands["multiraffle"] = Command.raw_command(
            self.multi_raffle,
            delay_all=0,
            delay_user=0,
            level=500,
            description="Start a multi-raffle for points",
            command="multiraffle",
            examples=[
                CommandExample(
                    None,
                    "Start a multi-raffle for 69 points",
                    chat="user:!multiraffle 69\n"
                    "bot:A multi-raffle has begun for 69 points. Type !join to join the raffle! The raffle will end in 60 seconds.",
                    description="Start a 60-second raffle for 69 points",
                ).parse(),
                CommandExample(
                    None,
                    "Start a multi-raffle with a different length",
                    chat="user:!multiraffle 69 30\n"
                    "bot:A multi-raffle has begun for 69 points. Type !join to join the raffle! The raffle will end in 30 seconds.",
                    description="Start a 30-second multi-raffle for 69 points",
                ).parse(),
            ],
        )
        self.commands["mraffle"] = self.commands["multiraffle"]

    # !raffle points at the configured default variant
    if self.settings["default_raffle_type"] == "Multi Raffle" and self.settings["multi_enabled"]:
        self.commands["raffle"] = self.commands["multiraffle"]
    else:
        self.commands["raffle"] = self.commands["singleraffle"]
def raffle(self, bot, source, message, **rest):
    """Start a single-winner raffle.

    Optional message arguments: "<points> [length_seconds]". Defaults are
    100 points / 60 seconds; both are clamped to the module's configured
    maximums. Announces the raffle, schedules progress reminders at 25/50/75%
    of its duration, and schedules end_raffle.
    """
    if self.raffle_running is True:
        bot.say(f"{source}, a raffle is already running OMGScoots")
        return False

    self.raffle_users = set()
    self.raffle_running = True
    self.raffle_points = 100
    self.raffle_length = 60

    # Parse the points argument; negative values only when allowed by settings.
    # Bad/missing input silently keeps the defaults.
    try:
        if message is not None and self.settings["allow_negative_raffles"] is True:
            self.raffle_points = int(message.split()[0])
        if message is not None and self.settings["allow_negative_raffles"] is False:
            if int(message.split()[0]) >= 0:
                self.raffle_points = int(message.split()[0])
    except (IndexError, ValueError, TypeError):
        pass

    # Parse the optional length argument (minimum 5 seconds).
    try:
        if message is not None:
            if int(message.split()[1]) >= 5:
                self.raffle_length = int(message.split()[1])
    except (IndexError, ValueError, TypeError):
        pass

    # Clamp points and length to configured limits.
    if self.raffle_points >= 0:
        self.raffle_points = min(self.raffle_points, self.settings["single_max_points"])
    if self.raffle_points <= -1:
        self.raffle_points = max(self.raffle_points, -self.settings["max_negative_points"])

    self.raffle_length = min(self.raffle_length, self.settings["max_length"])

    if self.settings["show_on_clr"]:
        bot.websocket_manager.emit("notification", {"message": "A raffle has been started!"})
        bot.execute_delayed(0.75, bot.websocket_manager.emit, "notification", {"message": "Type !join to enter!"})

    # Announce now, then remind at 25/50/75% elapsed with remaining time.
    arguments = {"length": self.raffle_length, "points": self.raffle_points}
    bot.say(self.get_phrase("message_start", **arguments))

    arguments = {"length": round(self.raffle_length * 0.75), "points": self.raffle_points}
    bot.execute_delayed(self.raffle_length * 0.25, bot.say, self.get_phrase("message_running", **arguments))

    arguments = {"length": round(self.raffle_length * 0.50), "points": self.raffle_points}
    bot.execute_delayed(self.raffle_length * 0.50, bot.say, self.get_phrase("message_running", **arguments))

    arguments = {"length": round(self.raffle_length * 0.25), "points": self.raffle_points}
    bot.execute_delayed(self.raffle_length * 0.75, bot.say, self.get_phrase("message_running", **arguments))

    bot.execute_delayed(self.raffle_length, self.end_raffle)
def join(self, source, **rest):
    """Enter the calling user into the running raffle.

    Returns False (with no chat feedback) when no raffle is active or the
    user already joined; otherwise records the user's id.
    """
    if not self.raffle_running or source.id in self.raffle_users:
        return False
    # Added user to the raffle
    self.raffle_users.add(source.id)
def end_raffle(self):
if not self.raffle_running:
return False
self.raffle_running = False
if len(self.raffle_users) == 0:
self.bot.me("Wow, no one joined the raffle DansGame")
return False
with DBManager.create_session_scope() as db_session:
winner_id = random.choice(list(self.raffle_users))
winner = User.find_by_id(db_session, winner_id)
if winner is None:
return False
self.raffle_users = set()
if self.settings["show_on_clr"]:
self.bot.websocket_manager.emit(
"notification", {"message": f"{winner} {format_win(self.raffle_points)} points in the raffle!"}
)
self.bot.me(f"The raffle has finished! {winner} {format_win(self.raffle_points)} points! PogChamp")
winner.points += self.raffle_points
HandlerManager.trigger("on_raffle_win", winner=winner, points=self.raffle_points)
def multi_start_raffle(self, points, length):
if self.raffle_running:
return False
self.raffle_users = set()
self.raffle_running = True
self.raffle_points = points
self.raffle_length = length
if self.raffle_points >= 0:
self.raffle_points = min(self.raffle_points, self.settings["multi_max_points"])
if self.raffle_points <= -1:
self.raffle_points = max(self.raffle_points, -self.settings["multi_max_negative_points"])
self.raffle_length = min(self.raffle_length, self.settings["multi_max_length"])
if self.settings["show_on_clr"]:
self.bot.websocket_manager.emit("notification", {"message": "A raffle has been started!"})
self.bot.execute_delayed(
0.75, self.bot.websocket_manager.emit, "notification", {"message": "Type !join to enter!"}
)
arguments = {"length": self.raffle_length, "points": self.raffle_points}
self.bot.say(self.get_phrase("message_start_multi", **arguments))
arguments = {"length": round(self.raffle_length * 0.75), "points": self.raffle_points}
self.bot.execute_delayed(
self.raffle_length * 0.25, self.bot.say, self.get_phrase("message_running_multi", **arguments)
)
arguments = {"length": round(self.raffle_length * 0.50), "points": self.raffle_points}
self.bot.execute_delayed(
self.raffle_length * 0.50, self.bot.say, self.get_phrase("message_running_multi", **arguments)
)
arguments = {"length": round(self.raffle_length * 0.25), "points": self.raffle_points}
self.bot.execute_delayed(
self.raffle_length * 0.75, self.bot.say, self.get_phrase("message_running_multi", **arguments)
)
self.bot.execute_delayed(self.raffle_length, self.multi_end_raffle)
def multi_raffle(self, bot, source, message, **rest):
if self.raffle_running is True:
bot.say(f"{source}, a raffle is already running OMGScoots")
return False
points = 100
try:
if message is not None and self.settings["multi_allow_negative_raffles"] is True:
points = int(message.split()[0])
if message is not None and self.settings["multi_allow_negative_raffles"] is False:
if int(message.split()[0]) >= 0:
points = int(message.split()[0])
except (IndexError, ValueError, TypeError):
pass
length = 60
try:
if message is not None:
if int(message.split()[1]) >= 5:
length = int(message.split()[1])
except (IndexError, ValueError, TypeError):
pass
self.multi_start_raffle(points, length)
def multi_end_raffle(self):
if not self.raffle_running:
return False
self.raffle_running = False
if len(self.raffle_users) == 0:
self.bot.me("Wow, no one joined the raffle DansGame")
return False
num_participants = len(self.raffle_users)
# start out with the theoretical maximum: everybody wins
num_winners = num_participants
# we want to impose three limits on the winner picking:
# - a winner should get 100 points at minimum,
num_winners = min(num_winners, math.floor(abs(self.raffle_points) / self.MULTI_RAFFLE_MIN_WIN_POINTS_AMOUNT))
# - winner percentage | |
11, 29)),
"DAU": pnp.Vendor("Daou Tech Inc", "DAU", datetime.date(1996, 11, 29)),
"HCA": pnp.Vendor("DAT", "HCA", datetime.date(2001, 3, 15)),
"DAX": pnp.Vendor("Data Apex Ltd", "DAX", datetime.date(1996, 11, 29)),
"DDI": pnp.Vendor("Data Display AG", "DDI", datetime.date(2002, 7, 17)),
"DXP": pnp.Vendor("Data Expert Corporation", "DXP", datetime.date(1996, 11, 29)),
"EXP": pnp.Vendor("Data Export Corporation", "EXP", datetime.date(1996, 11, 29)),
"DMO": pnp.Vendor("Data Modul AG", "DMO", datetime.date(2013, 12, 3)),
"EBH": pnp.Vendor("Data Price Informatica", "EBH", datetime.date(2001, 5, 24)),
"DRI": pnp.Vendor("Data Race Inc", "DRI", datetime.date(1997, 7, 30)),
"DRC": pnp.Vendor("Data Ray Corp.", "DRC", datetime.date(2001, 11, 30)),
"DTX": pnp.Vendor("Data Translation", "DTX", datetime.date(1996, 11, 29)),
"DVT": pnp.Vendor("Data Video", "DVT", datetime.date(2007, 2, 13)),
"DBK": pnp.Vendor("Databook Inc", "DBK", datetime.date(1996, 11, 29)),
"DCD": pnp.Vendor("Datacast LLC", "DCD", datetime.date(1997, 12, 2)),
"TRN": pnp.Vendor("Datacommunicatie Tron B.V.", "TRN", datetime.date(1996, 11, 29)),
"DQB": pnp.Vendor("Datacube Inc", "DQB", datetime.date(1996, 11, 29)),
"DDT": pnp.Vendor("Datadesk Technologies Inc", "DDT", datetime.date(1998, 11, 27)),
"DKY": pnp.Vendor("Datakey Inc", "DKY", datetime.date(1998, 4, 6)),
"LJX": pnp.Vendor("Datalogic Corporation", "LJX", datetime.date(1996, 11, 29)),
"DTN": pnp.Vendor("Datang Telephone Co", "DTN", datetime.date(1998, 9, 23)),
"DII": pnp.Vendor("Dataq Instruments Inc", "DII", datetime.date(1996, 11, 29)),
"DDE": pnp.Vendor("Datasat Digital Entertainment", "DDE", datetime.date(2011, 11, 18)),
"DCV": pnp.Vendor("Datatronics Technology Inc", "DCV", datetime.date(1997, 1, 2)),
"DAT": pnp.Vendor("Datel Inc", "DAT", datetime.date(1996, 11, 29)),
"MSD": pnp.Vendor("Datenerfassungs- und Informationssysteme", "MSD", datetime.date(1998, 3, 16)),
"DAV": pnp.Vendor("Davicom Semiconductor Inc", "DAV", datetime.date(1997, 1, 15)),
"DAS": pnp.Vendor("DAVIS AS", "DAS", datetime.date(1998, 2, 3)),
"DBN": pnp.Vendor("DB Networks Inc", "DBN", datetime.date(1997, 12, 1)),
"HWC": pnp.Vendor("DBA <NAME>", "HWC", datetime.date(1999, 3, 20)),
"DCM": pnp.Vendor("DCM Data Products", "DCM", datetime.date(1996, 11, 29)),
"DGT": pnp.Vendor("Dearborn Group Technology", "DGT", datetime.date(1997, 11, 11)),
"DXD": pnp.Vendor("DECIMATOR DESIGN PTY LTD", "DXD", datetime.date(2012, 3, 6)),
"DCR": pnp.Vendor("Decros Ltd", "DCR", datetime.date(1996, 11, 29)),
"MLD": pnp.Vendor("Deep Video Imaging Ltd", "MLD", datetime.date(2003, 8, 14)),
"DFT": pnp.Vendor("DEI Holdings dba Definitive Technology", "DFT", datetime.date(2011, 12, 9)),
"DEI": pnp.Vendor("Deico Electronics", "DEI", datetime.date(1996, 11, 29)),
"DLL": pnp.Vendor("Dell Inc", "DLL", datetime.date(2009, 3, 27)),
"DEL": pnp.Vendor("Dell Inc.", "DEL", datetime.date(2009, 12, 9)),
"DPH": pnp.Vendor("Delphi Automotive LLP", "DPH", datetime.date(2013, 10, 15)),
"DPC": pnp.Vendor("Delta Electronics Inc", "DPC", datetime.date(1996, 11, 29)),
"DDV": pnp.Vendor("Delta Information Systems, Inc", "DDV", datetime.date(2012, 1, 3)),
"DTA": pnp.Vendor("DELTATEC", "DTA", datetime.date(2009, 3, 13)),
"FPS": pnp.Vendor("Deltec Corporation", "FPS", datetime.date(1996, 11, 29)),
"DON": pnp.Vendor("DENON, Ltd.", "DON", datetime.date(2004, 4, 1)),
"DHD": pnp.Vendor("Dension Audio Systems", "DHD", datetime.date(2013, 3, 4)),
"DEN": pnp.Vendor("Densitron Computers Ltd", "DEN", datetime.date(1999, 9, 13)),
"DTT": pnp.Vendor("Design & Test Technology, Inc.", "DTT", datetime.date(2010, 9, 30)),
"LPI": pnp.Vendor("Design Technology", "LPI", datetime.date(1996, 11, 29)),
"DNI": pnp.Vendor("Deterministic Networks Inc.", "DNI", datetime.date(2000, 4, 19)),
"BCQ": pnp.Vendor("Deutsche Telekom Berkom GmbH", "BCQ", datetime.date(1997, 8, 12)),
"DTO": pnp.Vendor("Deutsche <NAME>", "DTO", datetime.date(2007, 6, 14)),
"DVL": pnp.Vendor("Devolo AG", "DVL", datetime.date(2002, 5, 30)),
"DXL": pnp.Vendor("Dextera Labs Inc", "DXL", datetime.date(2009, 12, 9)),
"DFI": pnp.Vendor("DFI", "DFI", datetime.date(1996, 11, 29)),
"DHP": pnp.Vendor("DH Print", "DHP", datetime.date(1996, 11, 29)),
"DIA": pnp.Vendor("Diadem", "DIA", datetime.date(1996, 11, 29)),
"DGS": pnp.Vendor("Diagsoft Inc", "DGS", datetime.date(1996, 11, 29)),
"DCO": pnp.Vendor("Dialogue Technology Corporation", "DCO", datetime.date(2004, 6, 16)),
"DCS": pnp.Vendor("Diamond Computer Systems Inc", "DCS", datetime.date(1996, 11, 29)),
"DLC": pnp.Vendor("Diamond Lane Comm. Corporation", "DLC", datetime.date(1996, 11, 29)),
"DNV": pnp.Vendor("DiCon", "DNV", datetime.date(2004, 12, 15)),
"DVD": pnp.Vendor("Dictaphone Corporation", "DVD", datetime.date(1998, 4, 3)),
"DBD": pnp.Vendor("Diebold Inc.", "DBD", datetime.date(2006, 9, 5)),
"DAE": pnp.Vendor("Digatron Industrie Elektronik GmbH", "DAE", datetime.date(1997, 2, 24)),
"DGI": pnp.Vendor("DIGI International", "DGI", datetime.date(1996, 11, 29)),
"DBI": pnp.Vendor("DigiBoard Inc", "DBI", datetime.date(1996, 11, 29)),
"DIG": pnp.Vendor("Digicom S.p.A.", "DIG", datetime.date(1996, 11, 29)),
"DMB": pnp.Vendor("Digicom Systems Inc", "DMB", datetime.date(1998, 3, 13)),
"DGP": pnp.Vendor("Digicorp European sales S.A.", "DGP", datetime.date(1997, 5, 22)),
"DGA": pnp.Vendor("Digiital Arts Inc", "DGA", datetime.date(2007, 6, 14)),
"DXC": pnp.Vendor("Digipronix Control Systems", "DXC", datetime.date(1999, 7, 16)),
"DAC": pnp.Vendor("Digital Acoustics Corporation", "DAC", datetime.date(2000, 5, 24)),
"DAL": pnp.Vendor("Digital Audio Labs Inc", "DAL", datetime.date(1996, 11, 29)),
"DCA": pnp.Vendor("Digital Communications Association", "DCA", datetime.date(1996, 11, 29)),
"SHR": pnp.Vendor("Digital Discovery", "SHR", datetime.date(1997, 9, 24)),
"PRF": pnp.Vendor("Schneider Electric Japan Holdings, Ltd.", "PRF", datetime.date(2003, 1, 2)),
"DEC": pnp.Vendor("Digital Equipment Corporation", "DEC", datetime.date(1996, 11, 29)),
"DPS": pnp.Vendor("Digital Processing Systems", "DPS", datetime.date(1996, 11, 29)),
"DPL": pnp.Vendor("Digital Projection Limited", "DPL", datetime.date(2002, 7, 9)),
"DRD": pnp.Vendor("DIGITAL REFLECTION INC.", "DRD", datetime.date(2000, 2, 21)),
"DVS": pnp.Vendor("Digital Video System", "DVS", datetime.date(1996, 11, 29)),
"DPA": pnp.Vendor("DigiTalk Pro AV", "DPA", datetime.date(2000, 10, 23)),
"DLG": pnp.Vendor("Digital-Logic GmbH", "DLG", datetime.date(2003, 9, 2)),
"DSI": pnp.Vendor("Digitan Systems Inc", "DSI", datetime.date(1996, 11, 29)),
"DLT": pnp.Vendor("Digitelec Informatique Park Cadera", "DLT", datetime.date(1996, 11, 29)),
"DTE": pnp.Vendor("Dimension Technologies, Inc.", "DTE", datetime.date(2010, 5, 3)),
"DMM": pnp.Vendor("Dimond Multimedia Systems Inc", "DMM", datetime.date(1996, 11, 29)),
"DIS": pnp.Vendor("Diseda S.A.", "DIS", datetime.date(1996, 11, 29)),
"DMT": pnp.Vendor("Distributed Management Task Force, Inc. (DMTF)", "DMT", datetime.date(2009, 3, 31)),
"DTI": pnp.Vendor("Diversified Technology, Inc.", "DTI", datetime.date(1996, 11, 29)),
"ABO": pnp.Vendor("D-Link Systems Inc", "ABO", datetime.date(1996, 11, 29)),
"DLK": pnp.Vendor("D-Link Systems Inc", "DLK", datetime.date(1996, 11, 29)),
"DNA": pnp.Vendor("DNA Enterprises, Inc.", "DNA", datetime.date(1998, 9, 1)),
"AUO": pnp.Vendor("DO NOT USE - AUO", "AUO", datetime.date(2008, 9, 16)),
"LPL": pnp.Vendor("DO NOT USE - LPL", "LPL", datetime.date(2008, 9, 16)),
"PHI": pnp.Vendor("DO NOT USE - PHI", "PHI", datetime.date(1996, 11, 29)),
"PTW": pnp.Vendor("DO NOT USE - PTW", "PTW", datetime.date(2009, 9, 9)),
"PVC": pnp.Vendor("DO NOT USE - PVC", "PVC", datetime.date(2009, 9, 9)),
"RTK": pnp.Vendor("DO NOT USE - RTK", "RTK", datetime.date(2009, 9, 9)),
"SEG": pnp.Vendor("DO NOT USE - SEG", "SEG", datetime.date(2009, 9, 9)),
"TNJ": pnp.Vendor("DO NOT USE - TNJ", "TNJ", datetime.date(2009, 9, 9)),
"UND": pnp.Vendor("DO NOT USE - UND", "UND", datetime.date(1996, 11, 29)),
"UNE": pnp.Vendor("DO NOT USE - UNE", "UNE", datetime.date(1996, 11, 29)),
"UNF": pnp.Vendor("DO NOT USE - UNF", "UNF", datetime.date(1996, 11, 29)),
"WAN": pnp.Vendor("DO NOT USE - WAN", "WAN", datetime.date(2009, 9, 9)),
"XER": pnp.Vendor("DO NOT USE - XER", "XER", datetime.date(2009, 9, 9)),
"XOC": pnp.Vendor("DO NOT USE - XOC", "XOC", datetime.date(2009, 9, 9)),
"DBL": pnp.Vendor("Doble Engineering Company", "DBL", datetime.date(1996, 11, 29)),
"DPI": pnp.Vendor("DocuPoint", "DPI", datetime.date(1996, 11, 29)),
"DLB": pnp.Vendor("Dolby Laboratories Inc.", "DLB", datetime.date(2010, 1, 27)),
"DOL": pnp.Vendor("Dolman Technologies Group Inc", "DOL", datetime.date(1997, 11, 11)),
"DSP": pnp.Vendor("Domain Technology Inc", "DSP", datetime.date(1996, 11, 29)),
"DMS": pnp.Vendor("DOME imaging systems", "DMS", datetime.date(2000, 10, 23)),
"DOM": pnp.Vendor("Dome Imaging Systems", "DOM", datetime.date(1996, 11, 29)),
"AIK": pnp.Vendor("Dongguan Alllike Electronics Co., Ltd.", "AIK", datetime.date(2015, 4, 11)),
"DUA": pnp.Vendor("Dosch & Amand GmbH & Company KG", "DUA", datetime.date(1997, 12, 2)),
"DOT": pnp.Vendor("Dotronic Mikroelektronik GmbH", "DOT", datetime.date(2002, 6, 28)),
"DIM": pnp.Vendor("dPict Imaging, Inc.", "DIM", datetime.date(2008, 2, 12)),
"DPX": pnp.Vendor("DpiX, Inc.", "DPX", datetime.date(1998, 9, 23)),
"DPT": pnp.Vendor("DPT", "DPT", datetime.date(1996, 11, 29)),
"DRB": pnp.Vendor("Dr. Bott KG", "DRB", datetime.date(2002, 4, 25)),
"DNT": pnp.Vendor("Dr. Neuhous Telekommunikation GmbH", "DNT", datetime.date(1996, 11, 29)),
"DIT": pnp.Vendor("Dragon Information Technology", "DIT", datetime.date(1996, 11, 29)),
"DRS": pnp.Vendor("DRS Defense Solutions, LLC", "DRS", datetime.date(2011, 10, 18)),
"DSD": pnp.Vendor("DS Multimedia Pte Ltd", "DSD", datetime.date(2006, 2, 14)),
"DSM": pnp.Vendor("DSM Digital Services GmbH", "DSM", datetime.date(1996, 11, 29)),
"DCE": pnp.Vendor("dSPACE GmbH", "DCE", datetime.date(1996, 12, 16)),
"DTC": pnp.Vendor("DTC Tech Corporation", "DTC", datetime.date(1996, 11, 29)),
"DGK": pnp.Vendor("DugoTech Co., LTD", "DGK", datetime.date(2007, 6, 14)),
"DMC": pnp.Vendor("Dune Microsystems Corporation", "DMC", datetime.date(1996, 11, 29)),
"DYC": pnp.Vendor("Dycam Inc", "DYC", datetime.date(1998, 1, 8)),
"DYM": pnp.Vendor("Dymo-CoStar Corporation", "DYM", datetime.date(1998, 12, 28)),
"DCL": pnp.Vendor("Dynamic Controls Ltd", "DCL", datetime.date(2000, 5, 24)),
"DTK": pnp.Vendor("Dynax Electronics (HK) Ltd", "DTK", datetime.date(1996, 11, 29)),
"DYX": pnp.Vendor("Dynax Electronics (HK) Ltd", "DYX", datetime.date(1996, 11, 29)),
"EDC": pnp.Vendor("e.Digital Corporation", "EDC", datetime.date(2000, 10, 23)),
"EEP": pnp.Vendor("E.E.P.D. GmbH", "EEP", datetime.date(2007, 6, 14)),
"EGL": pnp.Vendor("Eagle Technology", "EGL", datetime.date(1996, 11, 29)),
"KOD": pnp.Vendor("Eastman Kodak Company", "KOD", datetime.date(2000, 5, 24)),
"EKC": pnp.Vendor("Eastman Kodak Company", "EKC", datetime.date(1996, 11, 29)),
"TWI": pnp.Vendor("Easytel oy", "TWI", datetime.date(1999, 7, 16)),
"EBS": pnp.Vendor("EBS Euchner Büro- und Schulsysteme GmbH", "EBS", datetime.date(2013, 2, 5)),
"ECO": pnp.Vendor("Echo Speech Corporation", "ECO", datetime.date(1996, 11, 29)),
"ETI": pnp.Vendor("Eclipse Tech Inc", "ETI", datetime.date(1996, 11, 29)),
"ECM": pnp.Vendor("E-Cmos Tech Corporation", "ECM", datetime.date(1996, 11, 29)),
"ESC": pnp.Vendor("Eden Sistemas de Computacao S/A", "ESC", datetime.date(1996, 11, 29)),
"EDI": pnp.Vendor("Edimax Tech. Company Ltd", "EDI", datetime.date(1996, 11, 29)),
"EDM": pnp.Vendor("EDMI", "EDM", datetime.date(1998, 7, 16)),
"ELI": pnp.Vendor("Edsun Laboratories", "ELI", datetime.date(1996, 11, 29)),
"EES": pnp.Vendor("EE Solutions, Inc.", "EES", datetime.date(2003, 4, 16)),
"EEH": pnp.Vendor("EEH Datalink GmbH", "EEH", datetime.date(1997, 7, 3)),
"ENI": pnp.Vendor("Efficient Networks", "ENI", datetime.date(1996, 11, 29)),
"EGN": pnp.Vendor("Egenera, Inc.", "EGN", datetime.date(2002, 10, 8)),
"EIC": pnp.Vendor("Eicon Technology Corporation", "EIC", datetime.date(1996, 11, 29)),
"EGD": pnp.Vendor("EIZO GmbH Display Technologies", "EGD", datetime.date(2009, 2, 13)),
"ENC": pnp.Vendor("Eizo Nanao Corporation", "ENC", datetime.date(1998, 12, 28)),
"EKS": pnp.Vendor("<NAME>", "EKS", datetime.date(2002, 4, 25)),
"ELA": pnp.Vendor("ELAD srl", "ELA", datetime.date(2002, 4, 25)),
"ETD": pnp.Vendor("ELAN MICROELECTRONICS CORPORATION", "ETD", | |
# chandra_aca/aca_image.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from math import floor
from itertools import count, chain
from copy import deepcopy
from pathlib import Path
import six
from six.moves import zip
import numba
import numpy as np
from astropy.utils.compat.misc import override__dir__
# Public API of this module.
__all__ = ['ACAImage', 'centroid_fm', 'AcaPsfLibrary', 'EIGHT_LABELS']
# 8x8 grid of two-character pixel labels, indexed as [row, col]: the letter
# cycles A..H / I..P along each row pair and the digit (1..4) advances every
# two rows.
EIGHT_LABELS = np.array([['A1', 'B1', 'C1', 'D1', 'E1', 'F1', 'G1', 'H1'],
                         ['I1', 'J1', 'K1', 'L1', 'M1', 'N1', 'O1', 'P1'],
                         ['A2', 'B2', 'C2', 'D2', 'E2', 'F2', 'G2', 'H2'],
                         ['I2', 'J2', 'K2', 'L2', 'M2', 'N2', 'O2', 'P2'],
                         ['A3', 'B3', 'C3', 'D3', 'E3', 'F3', 'G3', 'H3'],
                         ['I3', 'J3', 'K3', 'L3', 'M3', 'N3', 'O3', 'P3'],
                         ['A4', 'B4', 'C4', 'D4', 'E4', 'F4', 'G4', 'H4'],
                         ['I4', 'J4', 'K4', 'L4', 'M4', 'N4', 'O4', 'P4']])
"""Constant for labeling ACA image pixels using the EQ-278 spec format.
Pixel A1 has the lowest values of row and column; pixel H1 has the lowest
row and highest col; pixel I4 has the highest row and lowest column."""
def _operator_factory(operator, inplace=False):
    """
    Generate data model methods like __add__(self, other) and
    __iadd__(self, other). These always operate in the coordinate
    system of the left and right operands. If both are in ACA
    coordinates then any non-overlapping pixels are ignored.

    :param operator: ndarray operator name without underscores (e.g. 'add')
    :param inplace: if True the generated method mutates ``self`` in place
    :returns: function implementing the operator for ACAImage
    """
    # Define the operator and the in-place version (which might be the
    # same if op is already in-place)
    op = getattr(np.ndarray, '__{}__'.format(operator))
    inplace_op = op if inplace else getattr(np.ndarray, '__i{}__'.format(operator))
    def _operator(self, other):
        if isinstance(other, ACAImage) and (other._aca_coords or self._aca_coords):
            # If inplace then work on the original self, else use a copy
            out = self if inplace else self.copy()
            sz_r0, sz_c0 = self.shape
            sz_r1, sz_c1 = other.shape
            # If images overlap do this process, else return unmodified ``out``.
            # (All four differences positive <=> the bounding boxes intersect.)
            if all(diff > 0 for diff in [self.row0 + sz_r0 - other.row0,
                                         self.col0 + sz_c0 - other.col0,
                                         other.row0 + sz_r1 - self.row0,
                                         other.col0 + sz_c1 - self.col0]):
                # Offset of ``other`` relative to ``self`` ...
                dr = other.row0 - self.row0
                dc = other.col0 - self.col0
                # ... clipped to the overlap window [r_min:r_max, c_min:c_max]
                # within ``other``'s own (relative) pixel indices.
                r_min, r_max = -min(0, dr), min(sz_r1, sz_r0 - dr)
                c_min, c_max = -min(0, dc), min(sz_c1, sz_c0 - dc)
                # Absolute (aca) origin of the overlap region.
                row0 = max(self.row0, other.row0)
                col0 = max(self.col0, other.col0)
                sz_r = r_max - r_min
                sz_c = c_max - c_min
                # Zero ACAImage used purely as an aca-coordinate slice selector.
                section = ACAImage(shape=(sz_r, sz_c), row0=row0, col0=col0)
                # Always use the inplace operator, but remember that ``out`` is a copy of
                # self for inplace=False (thus mimicking the non-inplace version).
                inplace_op(out[section], other.view(np.ndarray)[r_min:r_max, c_min:c_max])
        else:
            out = op(self, other)  # returns self for inplace ops
        return out
    return _operator
class ACAImage(np.ndarray):
"""
ACAImage is an ndarray subclass that supports functionality for the Chandra
ACA. Most importantly it allows image indexing and slicing in absolute
"aca" coordinates, where the image lower left coordinate is specified
by object ``row0`` and ``col0`` attributes.
It also provides a ``meta`` dict that can be used to store additional useful
information. Any keys which are all upper-case will be exposed as object
attributes, e.g. ``img.BGDAVG`` <=> ``img.meta['BGDAVG']``. The ``row0``
attribute is a proxy for ``img.meta['IMGROW0']``, and likewise for ``col0``.
When initializing an ``ACAImage``, additional ``*args`` and ``**kwargs`` are
used to try initializing via ``np.array(*args, **kwargs)``. If this fails
then ``np.zeros(*args, **kwargs)`` is tried. In this way one can either
initialize from array data or create a new array of zeros.
Examples::
>>> import numpy as np
>>> from chandra_aca.aca_image import ACAImage
>>> dat = np.random.uniform(size=(1024, 1024))
>>> a = ACAImage(dat, row0=-512, col0=-512)
>>> a = ACAImage([[1,2], [3,4]], meta={'BGDAVG': 5.2})
>>> a = ACAImage(shape=(1024, 1024), row0=-512, col0=-512)
:param row0: row coordinate of lower left image pixel (int, default=0)
:param col0: col coordinate of lower left image pixel (int, default=0)
:param meta: dict of object attributes
:param ``*args``: additional args passed to np.array() or np.zeros()
:param ``**kwargs``: additional kwargs passed to np.array() or np.zeros()
"""
@property
def aca(self):
"""
Return a light copy (same data) of self but with the _aca_coords
attribute switched on so that indexing is absolute.
"""
obj = self.view(self.__class__)
obj.meta = self.meta
obj._aca_coords = True
return obj
def __new__(cls, *args, **kwargs):
meta = kwargs.pop('meta', {})
# Set default row0 and col0 to 0 (if not already in meta), and
# then override with like-named kwargs. row0 attribute => meta['IMGROW0']
for ax in ('row0', 'col0'):
imgax = 'IMG' + ax.upper()
meta.setdefault(imgax, 0)
if ax in kwargs:
meta[imgax] = np.int64(kwargs.pop(ax))
try:
arr = np.array(*args, **kwargs)
except Exception:
arr = np.zeros(*args, **kwargs)
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = arr.view(cls)
if obj.ndim != 2:
raise ValueError('{} must be 2-d'.format(cls.__name__))
# add the new attribute to the created instance
obj.meta = meta
obj._aca_coords = False
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
# see InfoArray.__array_finalize__ for comments
if obj is None:
return
self.meta = deepcopy(getattr(obj, 'meta', {}))
self._aca_coords = getattr(obj, '_aca_coords', False)
    # Arithmetic dunder methods generated by _operator_factory: when either
    # operand is in ACA coordinates, the operation is applied only over the
    # overlapping pixel region.
    __add__ = _operator_factory('add')
    __sub__ = _operator_factory('sub')
    __mul__ = _operator_factory('mul')
    if not six.PY3:
        # Python 2 only: classic division operator.
        __div__ = _operator_factory('div')
    __truediv__ = _operator_factory('truediv')
    __floordiv__ = _operator_factory('floordiv')
    __mod__ = _operator_factory('mod')
    __pow__ = _operator_factory('pow')
    # In-place variants mutate ``self`` directly (inplace=True).
    __iadd__ = _operator_factory('iadd', inplace=True)
    __isub__ = _operator_factory('isub', inplace=True)
    __imul__ = _operator_factory('imul', inplace=True)
    if not six.PY3:
        # Python 2 only: classic in-place division.
        __idiv__ = _operator_factory('idiv', inplace=True)
    __itruediv__ = _operator_factory('itruediv', inplace=True)
    __ifloordiv__ = _operator_factory('ifloordiv', inplace=True)
    __imod__ = _operator_factory('imod', inplace=True)
    __ipow__ = _operator_factory('ipow', inplace=True)
def _adjust_item(self, item):
"""
This is the money method that does all the work of manipulating
an item and subsequent row0/col0 when accessing and slicing.
"""
# Allow slicing via an existing ACAImage object
aca_coords = self._aca_coords
if isinstance(item, ACAImage):
item = (slice(item.row0, item.row0 + item.shape[0]),
slice(item.col0, item.col0 + item.shape[1]))
aca_coords = True
out_rc = [None, None] # New [row0, col0]
if isinstance(item, (int, np.int)):
item = (item,)
if isinstance(item, tuple):
if aca_coords:
# Interpret input `item` indices as being expressed in absolute
# terms and subtract row0/col0 as needed.
item = list(item)
for i, it, rc0 in zip(count(), item, (self.row0, self.col0)):
if isinstance(it, slice):
start = None if it.start is None else it.start - rc0
stop = None if it.stop is None else it.stop - rc0
item[i] = slice(start, stop, it.step)
else:
item[i] = it - rc0
item = tuple(item)
# Compute new row0, col0 (stored in out_rc) based on input item
for i, it, rc0 in zip(count(), item, (self.row0, self.col0)):
if isinstance(it, slice):
if it.start is not None:
out_rc[i] = rc0 + it.start
else:
out_rc[i] = rc0 + it
return item, out_rc[0], out_rc[1]
def __getitem__(self, item):
item, row0, col0 = self._adjust_item(item)
out = super(ACAImage, self).__getitem__(item)
if isinstance(out, ACAImage):
if row0 is not None:
out.row0 = row0
if col0 is not None:
out.col0 = col0
out._aca_coords = False
return out
def __setitem__(self, item, value):
item, row0, col0 = self._adjust_item(item)
aca_coords = self._aca_coords
try:
self._aca_coords = False
super(ACAImage, self).__setitem__(item, value)
finally:
self._aca_coords = aca_coords
def __repr__(self):
# Make an integerized version for viewing more nicely
outarr = np.asarray(np.round(self)).astype(int)
out = '<{} row0={} col0={}\n{}>'.format(self.__class__.__name__,
self.row0, self.col0,
outarr.__repr__())
return out
def __getattr__(self, attr):
if attr.isupper():
try:
return self.meta[attr]
except KeyError:
pass
return super(ACAImage, self).__getattribute__(attr)
def __setattr__(self, attr, value):
if attr.isupper():
self.meta[attr] = value
else:
super(ACAImage, self).__setattr__(attr, value)
def centroid_fm(self, bgd=None, pix_zero_loc='center', norm_clip=None):
"""
First moment centroid of ``self`` using 6x6 mousebitten image for input
6x6 or 8x8 images.
Note that the returned ``norm`` is the sum of the background-subtracted 6x6
mousebitten image, not the entire image.
:param bgd: background to subtract, scalar or NxN ndarray (float)
:param pix_zero_loc: row/col coords are integral at 'edge' or 'center'
:param norm_clip: clip image norm at this min value (default is None and
implies Exception for non-positive norm)
:returns: row, col, norm float
"""
row, col, norm = centroid_fm(self, bgd=bgd, pix_zero_loc=pix_zero_loc,
norm_clip=norm_clip)
if self._aca_coords:
row += self.row0
col += self.col0
return row, col, norm
    @override__dir__
    def __dir__(self):
        # Advertise meta keys (e.g. BGDAVG) for introspection/tab-completion.
        return list(self.meta)
    @property
    def row0(self):
        # Absolute row coordinate of the lower-left pixel (proxy for meta).
        return self.meta['IMGROW0']
    @row0.setter
    def row0(self, value):
        # Coerce to np.int64 so coordinate arithmetic stays integral.
        self.meta['IMGROW0'] = np.int64(value)
    @property
    def col0(self):
        # Absolute column coordinate of the lower-left pixel (proxy for meta).
        return self.meta['IMGCOL0']
    @col0.setter
    def col0(self, value):
        # Coerce to np.int64 so coordinate arithmetic stays integral.
        self.meta['IMGCOL0'] = np.int64(value)
@classmethod
def _read_flicker_cdfs(cls):
"""Read flickering pixel model cumulative distribution functions
and associated metadata. Set up class variables accordingly.
The flicker_cdf file here was created using:
/proj/sot/ska/www/ASPECT/ipynb/chandra_aca/flickering-pixel-model.ipynb
| |
async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int interface_type_id: The ID number of the interface type to describe. (required)
:param str api_version: The version of the api being called. (required)
:return: InterfaceType
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_id', 'interface_type_id', 'api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method describe_interface_type" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params or
params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `describe_interface_type`") # noqa: E501
# verify the required parameter 'interface_type_id' is set
if ('interface_type_id' not in params or
params['interface_type_id'] is None):
raise ValueError("Missing the required parameter `interface_type_id` when calling `describe_interface_type`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `describe_interface_type`") # noqa: E501
if 'policy_id' in params and not re.search('\\d+', str(params['policy_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `policy_id` when calling `describe_interface_type`, must conform to the pattern `/\\d+/`") # noqa: E501
if 'interface_type_id' in params and not re.search('\\d+', str(params['interface_type_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `interface_type_id` when calling `describe_interface_type`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'policy_id' in params:
path_params['policyID'] = params['policy_id'] # noqa: E501
if 'interface_type_id' in params:
path_params['interfaceTypeID'] = params['interface_type_id'] # noqa: E501
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/policies/{policyID}/interfacetypes/{interfaceTypeID}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InterfaceType', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_interface_types(self, policy_id, api_version, **kwargs): # noqa: E501
"""List Interface Types # noqa: E501
Lists all interface types. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_interface_types(policy_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param str api_version: The version of the api being called. (required)
:return: InterfaceTypes
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_interface_types_with_http_info(policy_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.list_interface_types_with_http_info(policy_id, api_version, **kwargs) # noqa: E501
return data
def list_interface_types_with_http_info(self, policy_id, api_version, **kwargs): # noqa: E501
"""List Interface Types # noqa: E501
Lists all interface types. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_interface_types_with_http_info(policy_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param str api_version: The version of the api being called. (required)
:return: InterfaceTypes
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_id', 'api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_interface_types" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params or
params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `list_interface_types`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `list_interface_types`") # noqa: E501
if 'policy_id' in params and not re.search('\\d+', str(params['policy_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `policy_id` when calling `list_interface_types`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'policy_id' in params:
path_params['policyID'] = params['policy_id'] # noqa: E501
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/policies/{policyID}/interfacetypes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InterfaceTypes', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def modify_interface_type(self, policy_id, interface_type_id, interface_type, api_version, **kwargs):  # noqa: E501
    """Modify an Interface Type  # noqa: E501

    Modify an interface type by ID. Any unset elements will be left unchanged.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.modify_interface_type(policy_id, interface_type_id, interface_type, api_version, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int policy_id: The ID number of the policy. (required)
    :param int interface_type_id: The ID number of the interface type to modify. (required)
    :param InterfaceType interface_type: The settings of the interface type to modify. (required)
    :param str api_version: The version of the api being called. (required)
    :return: InterfaceType
             If the method is called asynchronously,
             returns the request thread.
    """
    # This wrapper always wants just the response body, never the full
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # The delegate already returns the right thing in both modes: a thread
    # handle when async_req is set, the deserialized body otherwise -- so a
    # single call/return covers both branches.
    return self.modify_interface_type_with_http_info(
        policy_id, interface_type_id, interface_type, api_version, **kwargs)  # noqa: E501
def modify_interface_type_with_http_info(self, policy_id, interface_type_id, interface_type, api_version, **kwargs): # noqa: E501
"""Modify an Interface Type # noqa: E501
Modify an interface type by ID. Any unset elements will be left unchanged. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_interface_type_with_http_info(policy_id, interface_type_id, interface_type, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int interface_type_id: The ID number of the interface type to modify. (required)
:param InterfaceType interface_type: The settings of the interface type to modify. (required)
:param str api_version: The version of the api being called. (required)
:return: InterfaceType
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_id', 'interface_type_id', 'interface_type', 'api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method modify_interface_type" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params or
params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `modify_interface_type`") # noqa: E501
# verify the required parameter 'interface_type_id' is set
if ('interface_type_id' not in params or
params['interface_type_id'] is None):
raise ValueError("Missing the required parameter `interface_type_id` when calling `modify_interface_type`") # noqa: E501
# verify the required parameter 'interface_type' is set
if ('interface_type' not in params or
params['interface_type'] is None):
raise ValueError("Missing the required parameter `interface_type` when calling `modify_interface_type`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `modify_interface_type`") # noqa: E501
if 'policy_id' in params and not re.search('\\d+', str(params['policy_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `policy_id` when calling `modify_interface_type`, must conform to the pattern `/\\d+/`") # noqa: E501
if 'interface_type_id' in params and not re.search('\\d+', str(params['interface_type_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `interface_type_id` when calling `modify_interface_type`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'policy_id' in params:
path_params['policyID'] = params['policy_id'] # noqa: E501
| |
Constraint(expr= - m.x2045 + m.x2445 + m.x4292 == 70.268084)
# NOTE(review): auto-generated Pyomo flow-balance equality constraints; the
# variable index sets and the numeric right-hand sides come from the model
# generator -- do not hand-edit, regenerate the model instead.
m.c2158 = Constraint(expr= - m.x2046 + m.x2446 + m.x4293 == 17.535931)
m.c2159 = Constraint(expr= - m.x2047 + m.x2447 + m.x4237 == 75.702325)
m.c2160 = Constraint(expr= - m.x2048 + m.x2448 == 68.860513)
m.c2161 = Constraint(expr= - m.x2049 + m.x2449 + m.x4266 + m.x4294 == 215.8066469789)
m.c2162 = Constraint(expr= - m.x2050 + m.x2450 + m.x4267 + m.x4295 == 18.0140415244236)
m.c2163 = Constraint(expr= - m.x2051 + m.x2451 + m.x4238 + m.x4268 + m.x4296 == 96.3472245412095)
m.c2164 = Constraint(expr= - m.x2052 + m.x2452 + m.x4239 + m.x4297 == 16.12215585051)
m.c2165 = Constraint(expr= - m.x2053 + m.x2453 + m.x4240 + m.x4298 == 21.223035453376)
m.c2166 = Constraint(expr= - m.x2054 + m.x2454 + m.x4241 + m.x4269 + m.x4299 == 25.67510048824)
m.c2167 = Constraint(expr= - m.x2055 + m.x2455 + m.x4242 + m.x4270 + m.x4300 == 35.8239721864888)
m.c2168 = Constraint(expr= - m.x2056 + m.x2456 == 58.3304919073372)
m.c2169 = Constraint(expr= - m.x2057 + m.x2457 + m.x4243 + m.x4271 + m.x4301 == 72.072587270004)
m.c2170 = Constraint(expr= - m.x2058 + m.x2458 + m.x4244 + m.x4272 + m.x4302 == 4.368431796)
m.c2171 = Constraint(expr= - m.x2059 + m.x2459 + m.x4245 + m.x4273 + m.x4303 == 21.810494297966)
m.c2172 = Constraint(expr= - m.x2060 + m.x2460 + m.x4246 + m.x4274 + m.x4304 == 30.4998399993058)
m.c2173 = Constraint(expr= - m.x2061 + m.x2461 + m.x4247 + m.x4275 + m.x4305 == 55.0112298075122)
m.c2174 = Constraint(expr= - m.x2062 + m.x2462 + m.x4248 + m.x4276 + m.x4306 == 12.691095059)
m.c2175 = Constraint(expr= - m.x2063 + m.x2463 + m.x4249 + m.x4277 + m.x4307 == 37.39906375)
m.c2176 = Constraint(expr= - m.x2064 + m.x2464 + m.x4250 + m.x4278 + m.x4308 == 21.8983642214437)
m.c2177 = Constraint(expr= - m.x2065 + m.x2465 + m.x4251 + m.x4279 + m.x4309 == 52.77677299665)
m.c2178 = Constraint(expr= - m.x2066 + m.x2466 + m.x4252 + m.x4280 + m.x4310 == 51.47314962823)
m.c2179 = Constraint(expr= - m.x2067 + m.x2467 + m.x4253 + m.x4281 + m.x4311 == 29.8326493030813)
m.c2180 = Constraint(expr= - m.x2068 + m.x2468 == 48.547749096)
m.c2181 = Constraint(expr= - m.x2069 + m.x2469 == 149.23057111)
m.c2182 = Constraint(expr= - m.x2070 + m.x2470 == 27.47191645805)
m.c2183 = Constraint(expr= - m.x2071 + m.x2471 + m.x4254 == 47.187816)
m.c2184 = Constraint(expr= - m.x2072 + m.x2472 + m.x4255 == 278.56948)
m.c2185 = Constraint(expr= - m.x2073 + m.x2473 + m.x4256 == 254.81257)
m.c2186 = Constraint(expr= - m.x2074 + m.x2474 == 117.202966)
m.c2187 = Constraint(expr= - m.x2075 + m.x2475 + m.x4257 == 20.038874)
m.c2188 = Constraint(expr= - m.x2076 + m.x2476 + m.x4258 == 32.388255)
m.c2189 = Constraint(expr= - m.x2077 + m.x2477 + m.x4259 == 46.311428)
m.c2190 = Constraint(expr= - m.x2078 + m.x2478 + m.x4260 == 119.829036912)
m.c2191 = Constraint(expr= - m.x2079 + m.x2479 == 54.5829056)
m.c2192 = Constraint(expr= - m.x2080 + m.x2480 + m.x4261 == 23.136576696)
m.c2193 = Constraint(expr= - m.x2081 + m.x2481 + m.x4262 == 11.398734)
m.c2194 = Constraint(expr= - m.x2082 + m.x4067 + m.x4077 + m.x4090 + m.x4108 + m.x4123 + m.x4136 + m.x4149 + m.x4160
                          + m.x4173 + m.x4191 + m.x4211 + m.x4223 == 133.671263941387)
m.c2195 = Constraint(expr= - m.x2083 + m.x4068 + m.x4078 + m.x4091 + m.x4109 + m.x4124 + m.x4137 + m.x4161 + m.x4174
                          + m.x4192 + m.x4212 + m.x4224 == 115.737915970578)
m.c2196 = Constraint(expr= - m.x2084 + m.x4069 + m.x4079 + m.x4092 + m.x4110 + m.x4125 + m.x4138 + m.x4150 + m.x4162
                          + m.x4175 + m.x4193 + m.x4213 + m.x4225 == 96.8913016661464)
m.c2197 = Constraint(expr= - m.x2085 + m.x4080 + m.x4093 + m.x4111 + m.x4126 + m.x4139 + m.x4151 + m.x4163 + m.x4176
                          + m.x4194 + m.x4214 + m.x4226 == 130.803845459431)
m.c2198 = Constraint(expr= - m.x2086 + m.x4070 + m.x4081 + m.x4094 + m.x4112 + m.x4127 + m.x4140 + m.x4152 + m.x4164
                          + m.x4177 + m.x4195 + m.x4215 + m.x4227 == 28.3983640089884)
m.c2199 = Constraint(expr= - m.x2087 + m.x4071 + m.x4082 + m.x4095 + m.x4113 + m.x4128 + m.x4141 + m.x4153 + m.x4165
                          + m.x4178 + m.x4196 + m.x4216 + m.x4228 == 15.7478032090063)
m.c2200 = Constraint(expr= - m.x2088 + m.x4083 + m.x4096 + m.x4114 + m.x4129 + m.x4142 + m.x4166 + m.x4179 + m.x4197
                          + m.x4217 + m.x4229 == 8.34516172547079)
m.c2201 = Constraint(expr= - m.x2089 + m.x4084 + m.x4097 + m.x4115 + m.x4130 + m.x4143 + m.x4154 + m.x4167 + m.x4180
                          + m.x4198 + m.x4218 + m.x4230 == 11.4163569396134)
m.c2202 = Constraint(expr= - m.x2090 + m.x4234 == 704.195604713805)
m.c2203 = Constraint(expr= - m.x2091 + m.x2495 + m.x2536 + m.x2559 + m.x2586 + m.x2603 + m.x2620 + m.x2650 + m.x2668
                          + m.x2692 + m.x2731 + m.x2749 + m.x2786 + m.x2835 + m.x2851 + m.x2864 + m.x2894 + m.x2941
                          + m.x2995 + m.x3057 + m.x3084 + m.x3185 + m.x3234 + m.x3347 + m.x3441 + m.x3468 + m.x3492
                          + m.x3508 + m.x3559 + m.x3611 + m.x3639 + m.x3657 + m.x3698 + m.x4098 + m.x4181 + m.x4199
                          == 6.95367819652136)
m.c2204 = Constraint(expr= - m.x2092 + m.x2496 + m.x2516 + m.x2537 + m.x2560 + m.x2587 + m.x2604 + m.x2621 + m.x2651
                          + m.x2669 + m.x2693 + m.x2732 + m.x2750 + m.x2787 + m.x2836 + m.x2852 + m.x2865 + m.x2879
                          + m.x2895 + m.x2942 + m.x2996 + m.x3058 + m.x3085 + m.x3130 + m.x3186 + m.x3235 + m.x3348
                          + m.x3442 + m.x3469 + m.x3493 + m.x3509 + m.x3560 + m.x3612 + m.x3640 + m.x3658 + m.x3699
                          + m.x4099 + m.x4182 + m.x4200 + m.x4312 == 68.611061605179)
m.c2205 = Constraint(expr= - m.x2093 + m.x2497 + m.x2517 + m.x2538 + m.x2561 + m.x2588 + m.x2605 + m.x2622 + m.x2652
                          + m.x2670 + m.x2694 + m.x2733 + m.x2751 + m.x2788 + m.x2837 + m.x2853 + m.x2866 + m.x2880
                          + m.x2896 + m.x2943 + m.x2997 + m.x3059 + m.x3086 + m.x3131 + m.x3187 + m.x3236 + m.x3349
                          + m.x3443 + m.x3470 + m.x3494 + m.x3510 + m.x3561 + m.x3613 + m.x3641 + m.x3659 + m.x3700
                          + m.x4183 + m.x4201 + m.x4282 + m.x4313 == 149.982358690318)
m.c2206 = Constraint(expr= - m.x2094 + m.x2498 + m.x2518 + m.x2539 + m.x2562 + m.x2589 + m.x2606 + m.x2623 + m.x2653
                          + m.x2671 + m.x2695 + m.x2734 + m.x2752 + m.x2789 + m.x2838 + m.x2854 + m.x2867 + m.x2881
                          + m.x2897 + m.x2944 + m.x2998 + m.x3060 + m.x3087 + m.x3132 + m.x3188 + m.x3237 + m.x3350
                          + m.x3444 + m.x3471 + m.x3495 + m.x3511 + m.x3562 + m.x3614 + m.x3642 + m.x3660 + m.x3701
                          + m.x4072 + m.x4085 + m.x4100 + m.x4184 + m.x4202 + m.x4283 + m.x4314 == 175.844560388705)
m.c2207 = Constraint(expr= - m.x2095 + m.x2499 + m.x2519 + m.x2540 + m.x2563 + m.x2590 + m.x2607 + m.x2624 + m.x2654
                          + m.x2672 + m.x2696 + m.x2735 + m.x2753 + m.x2790 + m.x2810 + m.x2839 + m.x2855 + m.x2868
                          + m.x2882 + m.x2898 + m.x2945 + m.x2999 + m.x3061 + m.x3088 + m.x3133 + m.x3189 + m.x3238
                          + m.x3351 + m.x3445 + m.x3472 + m.x3496 + m.x3512 + m.x3563 + m.x3615 + m.x3643 + m.x3661
                          + m.x3702 == 10.1522671595645)
m.c2208 = Constraint(expr= - m.x2096 + m.x2500 + m.x2520 + m.x2541 + m.x2564 + m.x2591 + m.x2608 + m.x2625 + m.x2655
                          + m.x2673 + m.x2697 + m.x2736 + m.x2754 + m.x2791 + m.x2811 + m.x2840 + m.x2856 + m.x2869
                          + m.x2883 + m.x2899 + m.x2946 + m.x3000 + m.x3062 + m.x3089 + m.x3134 + m.x3190 + m.x3239
                          + m.x3352 + m.x3446 + m.x3473 + m.x3497 + m.x3513 + m.x3564 + m.x3616 + m.x3644 + m.x3662
                          + m.x3703 == 121.104830353398)
m.c2209 = Constraint(expr= - m.x2097 + m.x2501 + m.x2521 + m.x2542 + m.x2565 + m.x2592 + m.x2609 + m.x2626 + m.x2656
                          + m.x2674 + m.x2698 + m.x2737 + m.x2755 + m.x2792 + m.x2812 + m.x2841 + m.x2857 + m.x2870
                          + m.x2884 + m.x2900 + m.x2947 + m.x3001 + m.x3063 + m.x3090 + m.x3135 + m.x3191 + m.x3240
                          + m.x3353 + m.x3447 + m.x3474 + m.x3498 + m.x3514 + m.x3565 + m.x3617 + m.x3645 + m.x3663
                          + m.x3704 + m.x4073 + m.x4086 + m.x4101 + m.x4116 + m.x4185 + m.x4203 + m.x4315
                          == 8.00892516581441)
m.c2210 = Constraint(expr= - m.x2098 + m.x2502 + m.x2522 + m.x2543 + m.x2566 + m.x2593 + m.x2610 + m.x2627 + m.x2657
+ m.x2675 + m.x2699 + m.x2738 + m.x2756 + m.x2793 + m.x2813 + m.x2842 + m.x2858 + m.x2871
+ m.x2885 + | |
import sys
import re
import os
from ccp_util import _IPV6_REGEX_STR_COMPRESSED1, _IPV6_REGEX_STR_COMPRESSED2
from ccp_util import _IPV6_REGEX_STR_COMPRESSED3
from ccp_util import IPv4Obj, IPv6Obj
from ccp_abc import BaseCfgLine
### HUGE UGLY WARNING:
### Anything in models_cisco.py could change at any time, until I remove this
### warning. I have good reason to believe that these methods are stable and
### function correctly, but I've been wrong before. There are no unit tests
### for this functionality yet, so I consider all this code alpha quality.
###
### Use models_cisco.py at your own risk. You have been warned :-)
""" models_cisco.py - Parse, Query, Build, and Modify IOS-style configurations
Copyright (C) 2014-2015 <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
If you need to contact the author, you can do so by emailing:
mike [~at~] pennington [/dot\] net
"""
##
##------------- IOS Configuration line object
##
class IOSCfgLine(BaseCfgLine):
    """An object for a parsed IOS-style configuration line.

    :class:`~models_cisco.IOSCfgLine` objects contain references to other
    parent and child :class:`~models_cisco.IOSCfgLine` objects.

    .. note::

       Originally, :class:`~models_cisco.IOSCfgLine` objects were only
       intended for advanced ciscoconfparse users.  As of ciscoconfparse
       version 0.9.10, *all users* are strongly encouraged to prefer the
       methods directly on :class:`~models_cisco.IOSCfgLine` objects;
       scripts which do so are much more efficient than scripts which stick
       strictly to the classic :class:`~ciscoconfparse.CiscoConfParse`
       methods.

    Args:
        - text (str): A text copy of the IOS configuration line.
          :class:`~ciscoconfparse.CiscoConfParse` will automatically
          identify the parent and children (if any) when it parses the
          configuration.
        - comment_delimiter (str): The string considered a comment for the
          configuration format; defaults to ``!`` for Cisco IOS-style
          configurations.

    Attributes:
        - text (str): The parsed IOS configuration statement
        - linenum (int): The line number of this statement in the original
          config; -1 when first initialized.
        - parent (:class:`~models_cisco.IOSCfgLine`): The parent of this
          object; defaults to ``self``.
        - children (list): ``IOSCfgLine()`` objects which are children of
          this object.
        - child_indent (int): The indentation of this object's children
        - indent (int): The indentation of this object's ``text``
        - oldest_ancestor (bool): Whether this is the oldest ancestor in a
          family
        - is_comment (bool): Whether this is a comment

    Returns:
        - An instance of :class:`~models_cisco.IOSCfgLine`.
    """

    def __init__(self, *args, **kwargs):
        """Accept an IOS line number and initialize family relationship
        attributes"""
        super(IOSCfgLine, self).__init__(*args, **kwargs)

    @classmethod
    def is_object_for(cls, line="", re=re):
        ## Default (catch-all) object for any IOS line, for now; more
        ## specific subclasses override this and claim lines first.
        return True

    @property
    def is_intf(self):
        """Return True if this line is an interface; subinterfaces also
        return True.

        Returns:
            - bool.

        Example::

            >>> parse = CiscoConfParse(['interface Serial1/0',
            ...     'interface ATM2/0.100 point-to-point'])
            >>> parse.find_objects(r'^interface\\sSerial')[0].is_intf
            True
        """
        # Plain string comparison is much cheaper than a regex here.  The
        # line must start with exactly 'interface ' and the next character
        # must begin the interface name (i.e. not another space).  The
        # length guard fixes an IndexError the original raised on a
        # degenerate bare 'interface ' line (len(text) == 10).
        text = self.text
        if len(text) > 10 and text[0:10] == 'interface ' and text[10] != ' ':
            return True
        return False

    @property
    def is_subintf(self):
        """Return True if this line is a subinterface (e.g. ATM2/0.100).

        Returns:
            - bool.

        Example::

            >>> parse = CiscoConfParse(['interface ATM2/0.100 point-to-point'])
            >>> parse.find_objects(r'^interface')[0].is_subintf
            True
        """
        # A subinterface name is '<name>.<digits>' immediately after the
        # 'interface' keyword.
        intf_regex = r'^interface\s+(\S+?\.\d+)'
        if self.re_match(intf_regex):
            return True
        return False

    # Interface types with no physical port behind them.
    _VIRTUAL_INTF_REGEX_STR = r"""^interface\s+(Loopback|Tunnel|Dialer|Virtual-Template|Port-[Cc]hannel)"""
    _VIRTUAL_INTF_REGEX = re.compile(_VIRTUAL_INTF_REGEX_STR)

    @property
    def is_virtual_intf(self):
        """Return True if this line is a virtual interface (Loopback,
        Tunnel, Dialer, Virtual-Template or Port-channel)."""
        if self.re_match(self._VIRTUAL_INTF_REGEX):
            return True
        return False

    @property
    def is_loopback_intf(self):
        """Return True if this line is a loopback interface.

        Returns:
            - bool.

        Example::

            >>> parse = CiscoConfParse(['interface Loopback0'])
            >>> parse.find_objects(r'^interface\\sLoop')[0].is_loopback_intf
            True
        """
        # \S before 'oopback' deliberately accepts any leading non-space
        # character, so both 'Loopback0' and 'loopback0' match.
        intf_regex = r'^interface\s+(\Soopback)'
        if self.re_match(intf_regex):
            return True
        return False

    @property
    def is_ethernet_intf(self):
        """Return True if this line is an ethernet interface.  Any ethernet
        interface (10M through 10G) is considered an ethernet interface.

        Returns:
            - bool.

        Example::

            >>> parse = CiscoConfParse(['interface FastEthernet1/0'])
            >>> parse.find_objects(r'^interface\\sFast')[0].is_ethernet_intf
            True
        """
        # Matches any '...thernet' interface name ('Ethernet',
        # 'FastEthernet', 'GigabitEthernet', ...); \S keeps the match
        # case-tolerant on the character before 'thernet'.
        intf_regex = r'^interface\s+(.*?\Sthernet)'
        if self.re_match(intf_regex):
            return True
        return False
##
##------------- IOS Interface ABC
##
# Valid method name substitutions:
# switchport -> switch
# spanningtree -> stp
# interfce -> intf
# address -> addr
# default -> def
class BaseIOSIntfLine(IOSCfgLine):
def __init__(self, *args, **kwargs):
    """Initialize an IOS interface line and seed interface-specific state."""
    super(BaseIOSIntfLine, self).__init__(*args, **kwargs)
    self.ifindex = None # Optional, for user use
    # Sentinel meaning "no ip address configured"; __repr__ and friends
    # compare ipv4_addr_object against this default to detect that state.
    self.default_ipv4_addr_object = IPv4Obj('127.0.0.1/32',
        strict=False)
def __repr__(self):
    """Return a compact debug string: line number, interface name, and
    either the IPv4 address, 'No IPv4', or 'switchport'."""
    # Switchports carry no L3 address -- report them and bail out early.
    if self.is_switchport:
        return "<%s # %s '%s' info: 'switchport'>" % (self.classname, self.linenum, self.name)
    # Routed port: show 'addr/prefixlen', or 'No IPv4' when the address is
    # still the sentinel default.
    if self.ipv4_addr_object == self.default_ipv4_addr_object:
        addr = "No IPv4"
    else:
        addr = "{0}/{1}".format(str(self.ipv4_addr_object.ip),
                                str(self.ipv4_addr_object.prefixlen))
    return "<%s # %s '%s' info: '%s'>" % (self.classname,
        self.linenum, self.name, addr)
def _build_abbvs(self):
    """Build a set of valid abbreviations (lowercased) for the interface.

    Every prefix of the port type ('g', 'gi', ..., 'gigabitethernet'),
    with and without a space before the interface number, is accepted.
    """
    port_type = self.port_type.lower()
    number = self.subinterface_number
    return set(
        '{0}{1}{2}'.format(port_type[:prefix_len], sep, number)
        for sep in ('', ' ')
        for prefix_len in range(1, len(port_type) + 1)
    )
def reset(self, atomic=True):
    """Default this interface by inserting its 'default ...' command
    (see build_reset_string) before this line in the config."""
    # Insert build_reset_string() before this line...
    self.insert_before(self.build_reset_string(), atomic=atomic)
def build_reset_string(self):
    """Return the IOS command which defaults this interface."""
    # IOS interfaces are reset to factory config with 'default <intf line>'
    return "default %s" % self.text
@property
def verbose(self):
    """Return a detailed repr-style string including family bookkeeping
    (child_indent, child count, family_endpoint) for debugging."""
    if not self.is_switchport:
        return "<%s # %s '%s' info: '%s' (child_indent: %s / len(children): %s / family_endpoint: %s)>" % (self.classname, self.linenum, self.text, self.ipv4_addr_object or "No IPv4", self.child_indent, len(self.children), self.family_endpoint)
    else:
        return "<%s # %s '%s' info: 'switchport' (child_indent: %s / len(children): %s / family_endpoint: %s)>" % (self.classname, self.linenum, self.text, self.child_indent, len(self.children), self.family_endpoint)
@classmethod
def is_object_for(cls, line="", re=re):
    # Abstract interface base: never claims a config line itself;
    # concrete subclasses override this.
    return False
##------------- Basic interface properties
@property
def abbvs(self):
    """A python set of valid abbreviations (lowercased) for the interface"""
    # Delegates to _build_abbvs(); recomputed on every access.
    return self._build_abbvs()
# Captures the interface name after 'interface ': a word followed by
# slot/port digits, dots, slashes and optional spaces (e.g. 'Serial 1/0.5').
_INTF_NAME_RE_STR = r'^interface\s+(\S+[0-9\/\.\s]+)\s*'
_INTF_NAME_REGEX = re.compile(_INTF_NAME_RE_STR)
@property
def name(self):
"""Return the interface name as a string, such as 'GigabitEthernet0/1'
Returns:
- str. The interface name as a string, or '' if the object is not an interface.
This example illustrates use of the method.
.. code-block:: python
:emphasize-lines: 17,20,23
>>> config = [
... '!',
... 'interface FastEthernet1/0',
... ' ip address 1.1.1.1 255.255.255.252',
... '!',
... 'interface ATM2/0',
... ' no ip | |
the volume.</ul>
"""
return self._status
@status.setter
def status(self, val):
    """Validate (when non-None) and store the sis status."""
    # 'is not None' is the idiomatic identity test (PEP 8); '!= None'
    # relies on __eq__ and can misbehave for overriding types.
    if val is not None:
        self.validate('status', val)
    self._status = val
# Backing field for the 'schedule' property.
_schedule = None

@property
def schedule(self):
    """
    The schedule for sis operation on the volume.  See sis-set-config
    for the format of the schedule.
    Attributes: non-creatable, modifiable
    """
    return self._schedule

@schedule.setter
def schedule(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('schedule', val)
    self._schedule = val
# Backing field for the 'changelog_size' property.
_changelog_size = None

@property
def changelog_size(self):
    """
    Size of changelog in bytes.
    Returned only if verbose option is set.
    This parameter is not supported on Infinite Volumes.
    """
    return self._changelog_size

@changelog_size.setter
def changelog_size(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('changelog_size', val)
    self._changelog_size = val
# Backing field for the 'checkpoint_stage' property.
_checkpoint_stage = None

@property
def checkpoint_stage(self):
    """
    Checkpoint stage information.
    This parameter is not supported on Infinite Volumes.
    Attributes: non-creatable, non-modifiable
    Possible values:
    "Gathering" (scanning the volume for fingerprints),
    "Sorting" (sorting the gathered fingerprints),
    "Compress_preproc" (preprocessing volume data for compression),
    "Compressing" (compressing the volume data),
    "Saving_pass1" (duplicate list from newly gathered fingerprints),
    "Saving_pass2" (duplicate list from the fingerprint database),
    "Saving_sharing" (creating shared data structures in the volume),
    "Saving_end" (completing sis operations),
    "Checking_pass0" / "Checking_pass1" / "Checking_pass2"
    (organizing the fingerprint database for block sharing),
    "Unknown_stage" (invalid stage).
    """
    return self._checkpoint_stage

@checkpoint_stage.setter
def checkpoint_stage(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('checkpoint_stage', val)
    self._checkpoint_stage = val
# Backing field for the 'last_success_op_end_timestamp' property.
_last_success_op_end_timestamp = None

@property
def last_success_op_end_timestamp(self):
    """
    End timestamp of the last successful sis operation, in seconds
    since January 1, 1970.  Returned only if verbose option is set and
    there is a last successfully completed operation.
    This parameter is not supported on Infinite Volumes.
    """
    return self._last_success_op_end_timestamp

@last_success_op_end_timestamp.setter
def last_success_op_end_timestamp(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('last_success_op_end_timestamp', val)
    self._last_success_op_end_timestamp = val
# Backing field for the 'vault_transfer_log_size' property.
_vault_transfer_log_size = None

@property
def vault_transfer_log_size(self):
    """
    Size of vault transfer log in bytes.
    Returned only if verbose option is set.
    This parameter is not supported on Infinite Volumes.
    """
    return self._vault_transfer_log_size

@vault_transfer_log_size.setter
def vault_transfer_log_size(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('vault_transfer_log_size', val)
    self._vault_transfer_log_size = val
# Backing field for the 'logical_data_size' property.
_logical_data_size = None

@property
def logical_data_size(self):
    """
    The size of logical data in the volume in bytes, calculated as
    [size-saved + size-used + compressed-data bytes].
    This parameter is not supported on Infinite Volumes.
    """
    return self._logical_data_size

@logical_data_size.setter
def logical_data_size(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('logical_data_size', val)
    self._logical_data_size = val
# Backing field for the 'path' property.
_path = None

@property
def path(self):
    """
    Volume for which sis information is returned.
    Path is of the format /vol/<vol_name>.
    Attributes: key, non-creatable, non-modifiable
    """
    return self._path

@path.setter
def path(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('path', val)
    self._path = val
# Backing field for the 'checkpoint_sub_stage' property.
_checkpoint_sub_stage = None

@property
def checkpoint_sub_stage(self):
    """
    Checkpoint sub-stage information.
    This parameter is not supported on Infinite Volumes.
    Attributes: non-creatable, non-modifiable
    Possible values:
    "-" (no sub stage check point present),
    "Sort_pass1" (sorting the fingerprints for deduplication),
    "Sort_p1merge" (merging the fingerprints for deduplication),
    "Sort_pass2" (merging the fingerprints for deduplication),
    "Bucket_sort_init" (sorting the fingerprints for deduplication),
    "Bucket_sort" (sorting the fingerprints for deduplication),
    "Bucket_sort_done" (sorting for deduplication completed).
    """
    return self._checkpoint_sub_stage

@checkpoint_sub_stage.setter
def checkpoint_sub_stage(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('checkpoint_sub_stage', val)
    self._checkpoint_sub_stage = val
# Backing field for the 'compression_changelog_size' property.
_compression_changelog_size = None

@property
def compression_changelog_size(self):
    """
    Size of compression log in bytes.
    Returned only if verbose option is set.
    This parameter is not supported on Infinite Volumes.
    """
    return self._compression_changelog_size

@compression_changelog_size.setter
def compression_changelog_size(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('compression_changelog_size', val)
    self._compression_changelog_size = val
# Backing field for the 'last_op_state' property.
_last_op_state = None

@property
def last_op_state(self):
    """
    Completion status for the last operation.
    Possible values: "success", "failure".
    Returned only if verbose option is set and when there is a last
    completed operation.
    This parameter is not supported on Infinite Volumes.
    """
    return self._last_op_state

@last_op_state.setter
def last_op_state(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('last_op_state', val)
    self._last_op_state = val
# Backing field for the 'last_op_begin_timestamp' property.
_last_op_begin_timestamp = None

@property
def last_op_begin_timestamp(self):
    """
    Start timestamp of the last sis operation, in seconds since
    January 1, 1970.  Returned only if verbose option is requested.
    This parameter is not supported on Infinite Volumes.
    Attributes: non-creatable, non-modifiable
    """
    return self._last_op_begin_timestamp

@last_op_begin_timestamp.setter
def last_op_begin_timestamp(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('last_op_begin_timestamp', val)
    self._last_op_begin_timestamp = val
# Backing field for the 'checkpoint_op_type' property.
_checkpoint_op_type = None

@property
def checkpoint_op_type(self):
    """
    Checkpoint operation type.
    This parameter is not supported on Infinite Volumes.
    Attributes: non-creatable, non-modifiable
    Possible values:
    "Scan" (scanning volume for fingerprints),
    "Start" (starting a sis operation),
    "Check" (checking for stale data in the fingerprint database),
    "Undo" (undoing sis on the volume),
    "Downgrade" (downgrading sis metafiles to a previous
    Data ONTAP release).
    """
    return self._checkpoint_op_type

@checkpoint_op_type.setter
def checkpoint_op_type(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('checkpoint_op_type', val)
    self._checkpoint_op_type = val
# Backing field for the 'changelog_used_percent' property.
_changelog_used_percent = None

@property
def changelog_used_percent(self):
    """
    Percentage of changelog used.
    This parameter is not supported on Infinite Volumes.
    """
    return self._changelog_used_percent

@changelog_used_percent.setter
def changelog_used_percent(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('changelog_used_percent', val)
    self._changelog_used_percent = val
# Backing field for the 'last_op_end_timestamp' property.
_last_op_end_timestamp = None

@property
def last_op_end_timestamp(self):
    """
    End timestamp of the last sis operation.
    This parameter is not supported on Infinite Volumes.
    Attributes: non-creatable, non-modifiable
    """
    return self._last_op_end_timestamp

@last_op_end_timestamp.setter
def last_op_end_timestamp(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('last_op_end_timestamp', val)
    self._last_op_end_timestamp = val
# Backing field for the 'blocks_skipped_sharing' property.
_blocks_skipped_sharing = None

@property
def blocks_skipped_sharing(self):
    """
    Number of blocks not considered for sharing because contiguous
    duplicate blocks were less than the value set for
    minimum-blocks-shared.
    This parameter is not supported on Infinite Volumes.
    """
    return self._blocks_skipped_sharing

@blocks_skipped_sharing.setter
def blocks_skipped_sharing(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('blocks_skipped_sharing', val)
    self._blocks_skipped_sharing = val
# Backing field for the 'is_inline_compression_enabled' property.
_is_inline_compression_enabled = None

@property
def is_inline_compression_enabled(self):
    """
    Inline compression state of the volume.
    Attributes: non-creatable, modifiable
    """
    return self._is_inline_compression_enabled

@is_inline_compression_enabled.setter
def is_inline_compression_enabled(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('is_inline_compression_enabled', val)
    self._is_inline_compression_enabled = val
# Backing field for the 'queued_job_type' property.
_queued_job_type = None

@property
def queued_job_type(self):
    """
    Type of sis operation that is queued for the volume.
    Possible values:
    "-" (no sis operation is queued for the volume),
    "Scan", "Start", "Check", "Downgrade".
    This parameter is not supported on Infinite Volumes.
    """
    return self._queued_job_type

@queued_job_type.setter
def queued_job_type(self, val):
    # Identity test per PEP 8; validate only real values, then store.
    if val is not None:
        self.validate('queued_job_type', val)
    self._queued_job_type = val
@staticmethod
def get_api_name():
    # NetApp ZAPI typedef name backing this object.
    return "sis-status-info"
@staticmethod
def get_desired_attrs():
    """Return the hyphenated ZAPI attribute names which map onto this
    object's underscored Python properties."""
    return [
        'logical-data-limit',
        'last-op-error',
        'is-idd-enabled',
        'is-content-available',
        'checkpoint-time',
        'checkpoint-progress',
        'is-compression-enabled',
        'stale-fingerprint-percentage',
        'is-constituent',
        'last-success-op-begin-timestamp',
        'vserver',
        'state',
        'quick-check-fsize',
        'policy',
        'progress',
        'type',
        'minimum-blocks-shared',
        'last-op-size',
        'status',
        'schedule',
        'changelog-size',
        'checkpoint-stage',
        'last-success-op-end-timestamp',
        'vault-transfer-log-size',
        'logical-data-size',
        'path',
        'checkpoint-sub-stage',
        'compression-changelog-size',
        'last-op-state',
        'last-op-begin-timestamp',
        'checkpoint-op-type',
        'changelog-used-percent',
        'last-op-end-timestamp',
        'blocks-skipped-sharing',
        'is-inline-compression-enabled',
        'queued-job-type',
    ]
def describe_properties(self):
return {
'logical_data_limit': { 'class': int, 'is_list': False, 'required': 'optional' },
'last_op_error': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'is_idd_enabled': { 'class': bool, 'is_list': False, 'required': 'optional' },
'is_content_available': { 'class': bool, 'is_list': False, 'required': 'optional' },
'checkpoint_time': { 'class': int, 'is_list': False, 'required': 'optional' },
'checkpoint_progress': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'is_compression_enabled': { 'class': bool, 'is_list': False, 'required': 'optional' },
'stale_fingerprint_percentage': { 'class': int, 'is_list': False, 'required': 'optional' },
'is_constituent': { 'class': bool, 'is_list': False, 'required': 'optional' },
'last_success_op_begin_timestamp': { 'class': int, 'is_list': False, 'required': 'optional' },
'vserver': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'state': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'quick_check_fsize': { 'class': int, 'is_list': False, 'required': 'optional' },
'policy': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'progress': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'type': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'minimum_blocks_shared': { 'class': int, 'is_list': False, 'required': 'optional' },
'last_op_size': { 'class': int, 'is_list': False, 'required': 'optional' },
'status': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'schedule': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'changelog_size': { 'class': int, 'is_list': False, 'required': 'optional' },
'checkpoint_stage': { 'class': basestring, | |
    # Generated by ANTLR: packed, run-length-encoded lookup tables for
    # decision DFA #81. Do not edit by hand.
    DFA81_eof = DFA.unpack(
        u"\152\uffff"
        )

    DFA81_min = DFA.unpack(
        u"\1\5\1\171\4\uffff\1\0\12\uffff\2\0\127\uffff"
        )

    DFA81_max = DFA.unpack(
        u"\1\u0087\1\u0085\4\uffff\1\0\12\uffff\2\0\127\uffff"
        )

    DFA81_accept = DFA.unpack(
        u"\2\uffff\1\2\1\3\3\uffff\1\4\1\5\1\6\1\10\1\11\1\12\1\13\1\14"
        u"\1\15\1\16\27\uffff\1\7\1\1\25\uffff\1\17\24\uffff\1\20\24\uffff"
        u"\1\21"
        )

    DFA81_special = DFA.unpack(
        u"\1\uffff\1\0\4\uffff\1\1\12\uffff\1\2\1\3\127\uffff"
        )

    # One packed transition string per DFA state.
    DFA81_transition = [
        DFA.unpack(u"\1\21\22\uffff\1\22\1\16\1\17\2\6\1\uffff\1\1\36\uffff"
        u"\1\1\1\2\6\uffff\1\10\2\uffff\1\14\3\uffff\1\13\3\uffff\1\3\7\uffff"
        u"\1\1\1\15\2\uffff\1\20\1\uffff\1\3\5\uffff\1\3\37\uffff\1\7\1\uffff"
        u"\1\11\1\12"),
        DFA.unpack(u"\1\50\13\uffff\1\10"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"\1\uffff"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"\1\uffff"),
        DFA.unpack(u"\1\uffff"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"")
    ]

    # class definition for DFA #81
    class DFA81(DFA):
        pass
        # States flagged in DFA81_special depend on semantic predicates
        # (synpredNNN_sol), so their transitions are resolved at runtime here
        # instead of in the static tables above.
        def specialStateTransition(self_, s, input):
            # convince pylint that my self_ magic is ok ;)
            # pylint: disable-msg=E0213

            # pretend we are a member of the recognizer
            # thus semantic predicates can be evaluated
            self = self_.recognizer

            _s = s

            if s == 0:
                LA81_1 = input.LA(1)

                # Save the stream position; predicate evaluation may consume
                # tokens, so it is restored with seek() below.
                index81_1 = input.index()
                input.rewind()

                s = -1
                if (LA81_1 == 133):
                    s = 8

                elif (LA81_1 == 121):
                    s = 40

                elif (self.synpred185_sol()):
                    s = 41

                elif (self.synpred187_sol()):
                    s = 3

                input.seek(index81_1)

                if s >= 0:
                    return s
            elif s == 1:
                LA81_6 = input.LA(1)

                index81_6 = input.index()
                input.rewind()

                s = -1
                if (self.synpred187_sol()):
                    s = 3

                elif (self.synpred199_sol()):
                    s = 63

                input.seek(index81_6)

                if s >= 0:
                    return s
            elif s == 2:
                LA81_17 = input.LA(1)

                index81_17 = input.index()
                input.rewind()

                s = -1
                if (self.synpred187_sol()):
                    s = 3

                elif (self.synpred200_sol()):
                    s = 84

                input.seek(index81_17)

                if s >= 0:
                    return s
            elif s == 3:
                LA81_18 = input.LA(1)

                index81_18 = input.index()
                input.rewind()

                s = -1
                if (self.synpred187_sol()):
                    s = 3

                elif (True):
                    s = 105

                input.seek(index81_18)

                if s >= 0:
                    return s

            # No viable alternative: fail the backtracking attempt, or report
            # a NoViableAltException when not backtracking.
            if self._state.backtracking >0:
                raise BacktrackingFailed

            nvae = NoViableAltException(self_.getDescription(), 81, _s, input)
            self_.error(nvae)
            raise nvae
    # lookup tables for DFA #86
    # Generated by ANTLR; packed, run-length-encoded tables. Do not edit.

    DFA86_eot = DFA.unpack(
        u"\40\uffff"
        )

    DFA86_eof = DFA.unpack(
        u"\1\2\37\uffff"
        )

    DFA86_min = DFA.unpack(
        u"\2\5\32\uffff\1\0\3\uffff"
        )

    DFA86_max = DFA.unpack(
        u"\1\u0089\1\144\32\uffff\1\0\3\uffff"
        )

    DFA86_accept = DFA.unpack(
        u"\2\uffff\1\2\26\uffff\1\1\6\uffff"
        )

    DFA86_special = DFA.unpack(
        u"\34\uffff\1\0\3\uffff"
        )

    DFA86_transition = [
        DFA.unpack(u"\1\2\22\uffff\5\2\1\uffff\1\2\36\uffff\4\2\4\uffff"
        u"\1\1\1\2\1\uffff\1\2\3\uffff\1\2\3\uffff\1\2\7\uffff\2\2\2\uffff"
        u"\1\2\1\uffff\1\2\5\uffff\1\2\37\uffff\1\2\1\uffff\4\2"),
        DFA.unpack(u"\1\31\22\uffff\1\31\2\uffff\2\31\1\uffff\1\34\36\uffff"
        u"\1\34\1\uffff\1\31\6\uffff\1\31\11\uffff\1\31\7\uffff\1\34\5\uffff"
        u"\1\31\5\uffff\1\31"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"\1\uffff"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"")
    ]

    # class definition for DFA #86
    class DFA86(DFA):
        pass
        # The single special state depends on semantic predicate
        # synpred207_sol; resolved at runtime below.
        def specialStateTransition(self_, s, input):
            # convince pylint that my self_ magic is ok ;)
            # pylint: disable-msg=E0213

            # pretend we are a member of the recognizer
            # thus semantic predicates can be evaluated
            self = self_.recognizer

            _s = s

            if s == 0:
                LA86_28 = input.LA(1)

                # Save/restore the stream position around predicate
                # evaluation.
                index86_28 = input.index()
                input.rewind()

                s = -1
                if (self.synpred207_sol()):
                    s = 25

                elif (True):
                    s = 2

                input.seek(index86_28)

                if s >= 0:
                    return s

            if self._state.backtracking >0:
                raise BacktrackingFailed

            nvae = NoViableAltException(self_.getDescription(), 86, _s, input)
            self_.error(nvae)
            raise nvae
    # lookup tables for DFA #87
    # Generated by ANTLR; packed, run-length-encoded tables. Do not edit.

    DFA87_eot = DFA.unpack(
        u"\26\uffff"
        )

    DFA87_eof = DFA.unpack(
        u"\1\2\25\uffff"
        )

    DFA87_min = DFA.unpack(
        u"\1\5\25\uffff"
        )

    DFA87_max = DFA.unpack(
        u"\1\u0087\25\uffff"
        )

    DFA87_accept = DFA.unpack(
        u"\1\uffff\1\1\1\2\23\uffff"
        )

    DFA87_special = DFA.unpack(
        u"\26\uffff"
        )

    DFA87_transition = [
        DFA.unpack(u"\1\2\22\uffff\5\2\1\uffff\1\2\36\uffff\2\2\1\uffff"
        u"\1\2\4\uffff\1\2\2\uffff\1\2\3\uffff\1\2\3\uffff\1\2\7\uffff\2"
        u"\2\2\uffff\1\2\1\uffff\1\2\5\uffff\1\2\37\uffff\1\2\1\1\2\2"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"")
    ]

    # class definition for DFA #87
    # No special states: the table-driven base class suffices.
    class DFA87(DFA):
        pass
    # lookup tables for DFA #90
    # Generated by ANTLR; packed, run-length-encoded tables. Do not edit.

    DFA90_eot = DFA.unpack(
        u"\27\uffff"
        )

    DFA90_eof = DFA.unpack(
        u"\1\1\26\uffff"
        )

    DFA90_min = DFA.unpack(
        u"\1\5\26\uffff"
        )

    DFA90_max = DFA.unpack(
        u"\1\u0089\26\uffff"
        )

    DFA90_accept = DFA.unpack(
        u"\1\uffff\1\2\23\uffff\1\1\1\uffff"
        )

    DFA90_special = DFA.unpack(
        u"\27\uffff"
        )

    DFA90_transition = [
        DFA.unpack(u"\1\1\22\uffff\5\1\1\uffff\1\1\36\uffff\2\1\1\uffff"
        u"\1\1\4\uffff\1\1\2\uffff\1\1\3\uffff\1\1\3\uffff\1\1\7\uffff\2"
        u"\1\2\uffff\1\1\1\uffff\1\1\5\uffff\1\1\37\uffff\1\1\1\uffff\2\1"
        u"\2\25"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"")
    ]

    # class definition for DFA #90
    # No special states: the table-driven base class suffices.
    class DFA90(DFA):
        pass
    # lookup tables for DFA #96
    # Generated by ANTLR; packed, run-length-encoded tables. Do not edit.

    DFA96_eot = DFA.unpack(
        u"\21\uffff"
        )

    DFA96_eof = DFA.unpack(
        u"\21\uffff"
        )

    DFA96_min = DFA.unpack(
        u"\1\5\20\uffff"
        )

    DFA96_max = DFA.unpack(
        u"\1\154\20\uffff"
        )

    DFA96_accept = DFA.unpack(
        u"\1\uffff\1\1\15\uffff\1\2\1\uffff"
        )

    DFA96_special = DFA.unpack(
        u"\21\uffff"
        )

    DFA96_transition = [
        DFA.unpack(u"\1\1\13\uffff\5\1\1\uffff\2\1\2\uffff\2\1\1\uffff\1"
        u"\1\25\uffff\1\1\10\uffff\1\1\1\uffff\1\17\5\uffff\1\1\1\17\11\uffff"
        u"\2\1\6\uffff\1\1\10\uffff\14\1"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"")
    ]

    # class definition for DFA #96
    # No special states: the table-driven base class suffices.
    class DFA96(DFA):
        pass
    # lookup tables for DFA #97
    # Generated by ANTLR; packed, run-length-encoded tables. Do not edit.

    DFA97_eot = DFA.unpack(
        u"\21\uffff"
        )

    DFA97_eof = DFA.unpack(
        u"\21\uffff"
        )

    DFA97_min = DFA.unpack(
        u"\1\5\20\uffff"
        )

    DFA97_max = DFA.unpack(
        u"\1\154\20\uffff"
        )

    DFA97_accept = DFA.unpack(
        u"\1\uffff\1\1\15\uffff\1\2\1\uffff"
        )

    DFA97_special = DFA.unpack(
        u"\21\uffff"
        )

    DFA97_transition = [
        DFA.unpack(u"\1\1\13\uffff\5\1\1\uffff\2\1\2\uffff\2\1\1\uffff\1"
        u"\1\25\uffff\1\1\10\uffff\1\1\1\uffff\1\17\5\uffff\1\1\1\17\11\uffff"
        u"\2\1\6\uffff\1\1\10\uffff\14\1"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"")
    ]

    # class definition for DFA #97
    # No special states: the table-driven base class suffices.
    class DFA97(DFA):
        pass
    # lookup tables for DFA #100
    # Generated by ANTLR; packed, run-length-encoded tables. Do not edit.

    DFA100_eot = DFA.unpack(
        u"\20\uffff"
        )

    DFA100_eof = DFA.unpack(
        u"\20\uffff"
        )

    DFA100_min = DFA.unpack(
        u"\1\5\17\uffff"
        )

    DFA100_max = DFA.unpack(
        u"\1\154\17\uffff"
        )

    DFA100_accept = DFA.unpack(
        u"\1\uffff\1\1\15\uffff\1\2"
        )

    DFA100_special = DFA.unpack(
        u"\20\uffff"
        )

    DFA100_transition = [
        DFA.unpack(u"\1\1\13\uffff\5\1\1\uffff\2\1\2\uffff\2\1\1\uffff\1"
        u"\1\25\uffff\1\1\10\uffff\1\1\7\uffff\1\1\12\uffff\2\1\1\17\5\uffff"
        u"\1\1\10\uffff\14\1"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"")
    ]

    # class definition for DFA #100
    # No special states: the table-driven base class suffices.
    class DFA100(DFA):
        pass
    # lookup tables for DFA #102
    # Generated by ANTLR; packed, run-length-encoded tables. Do not edit.

    DFA102_eot = DFA.unpack(
        u"\53\uffff"
        )

    DFA102_eof = DFA.unpack(
        u"\1\2\52\uffff"
        )

    DFA102_min = DFA.unpack(
        u"\1\5\52\uffff"
        )

    DFA102_max = DFA.unpack(
        u"\1\u0087\52\uffff"
        )

    DFA102_accept = DFA.unpack(
        u"\1\uffff\1\1\1\2\50\uffff"
        )

    DFA102_special = DFA.unpack(
        u"\53\uffff"
        )

    DFA102_transition = [
        DFA.unpack(u"\1\2\22\uffff\5\2\1\1\1\2\23\uffff\2\2\1\uffff\5\2"
        u"\2\uffff\5\2\4\uffff\2\2\1\uffff\1\2\3\uffff\1\2\3\uffff\4\2\4"
        u"\uffff\2\2\2\uffff\1\2\1\uffff\1\2\5\uffff\1\2\1\uffff\4\2\3\uffff"
        u"\30\2\1\uffff\2\2"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"")
    ]

    # class definition for DFA #102
    # No special states: the table-driven base class suffices.
    class DFA102(DFA):
        pass
    # lookup tables for DFA #113
    # Generated by ANTLR; packed, run-length-encoded tables. Do not edit.

    DFA113_eot = DFA.unpack(
        u"\20\uffff"
        )

    DFA113_eof = DFA.unpack(
        u"\20\uffff"
        )

    DFA113_min = DFA.unpack(
        u"\1\5\17\uffff"
        )

    DFA113_max = DFA.unpack(
        u"\1\154\17\uffff"
        )

    DFA113_accept = DFA.unpack(
        u"\1\uffff\1\1\15\uffff\1\2"
        )

    DFA113_special = DFA.unpack(
        u"\20\uffff"
        )

    DFA113_transition = [
        DFA.unpack(u"\1\1\13\uffff\5\1\1\uffff\2\1\2\uffff\2\1\1\uffff\1"
        u"\1\25\uffff\1\1\10\uffff\1\1\7\uffff\1\1\12\uffff\2\1\1\17\5\uffff"
        u"\1\1\10\uffff\14\1"),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u""),
        DFA.unpack(u"")
    ]

    # class definition for DFA #113
    # No special states: the table-driven base class suffices.
    class DFA113(DFA):
        pass
FOLLOW_pragmaDirective_in_sourceUnit52 = frozenset([49, 59, 65, 66, 67])
FOLLOW_importDirective_in_sourceUnit56 = frozenset([49, 59, 65, 66, 67])
FOLLOW_contractDefinition_in_sourceUnit60 = frozenset([49, 59, 65, 66, 67])
FOLLOW_EOF_in_sourceUnit64 = frozenset([1])
FOLLOW_49_in_pragmaDirective75 = frozenset([30, 61, 88])
FOLLOW_pragmaName_in_pragmaDirective77 = frozenset([4, 5, 17, 18, 19, 20, 21, 23, 24, 27, 28, 30, 51, 52, 53, 54, 55, 56, 57, 61, 69, 80, 81, 88, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108])
FOLLOW_pragmaValue_in_pragmaDirective79 = frozenset([50])
FOLLOW_50_in_pragmaDirective81 = frozenset([1])
FOLLOW_identifier_in_pragmaName92 = frozenset([1])
FOLLOW_version_in_pragmaValue103 = frozenset([1])
FOLLOW_expression_in_pragmaValue107 = frozenset([1])
FOLLOW_versionConstraint_in_version118 = frozenset([1, 4, 51, 52, 53, 54, 55, 56, 57])
FOLLOW_versionConstraint_in_version120 = frozenset([1])
FOLLOW_set_in_versionOperator0 = frozenset([1])
FOLLOW_versionOperator_in_versionConstraint167 = frozenset([4])
FOLLOW_VersionLiteral_in_versionConstraint170 = frozenset([1])
FOLLOW_identifier_in_importDeclaration181 = frozenset([1, 58])
FOLLOW_58_in_importDeclaration184 = frozenset([30, 61, 88])
FOLLOW_identifier_in_importDeclaration186 = frozenset([1])
FOLLOW_59_in_importDirective199 = frozenset([5])
FOLLOW_StringLiteral_in_importDirective201 = frozenset([50, 58])
FOLLOW_58_in_importDirective204 = frozenset([30, 61, 88])
FOLLOW_identifier_in_importDirective206 = frozenset([50])
FOLLOW_50_in_importDirective210 = frozenset([1])
FOLLOW_59_in_importDirective216 = frozenset([30, 60, 61, 88])
FOLLOW_60_in_importDirective219 = frozenset([58, 61])
FOLLOW_identifier_in_importDirective223 = frozenset([58, 61])
FOLLOW_58_in_importDirective227 = frozenset([30, 61, 88])
FOLLOW_identifier_in_importDirective229 = frozenset([61])
FOLLOW_61_in_importDirective233 = frozenset([5])
FOLLOW_StringLiteral_in_importDirective235 = frozenset([50])
FOLLOW_50_in_importDirective237 = frozenset([1])
FOLLOW_59_in_importDirective243 = frozenset([62])
FOLLOW_62_in_importDirective245 = frozenset([30, 61, 88])
FOLLOW_importDeclaration_in_importDirective247 = frozenset([63, 64])
FOLLOW_63_in_importDirective251 = frozenset([30, 61, 88])
FOLLOW_importDeclaration_in_importDirective253 = frozenset([63, 64])
FOLLOW_64_in_importDirective258 = frozenset([61])
FOLLOW_61_in_importDirective260 = frozenset([5])
FOLLOW_StringLiteral_in_importDirective262 = frozenset([50])
FOLLOW_50_in_importDirective264 = frozenset([1])
FOLLOW_set_in_contractDefinition275 = frozenset([30, 61, 88])
FOLLOW_identifier_in_contractDefinition289 = frozenset([62, 68])
FOLLOW_68_in_contractDefinition297 = frozenset([30, 61, 88])
FOLLOW_inheritanceSpecifier_in_contractDefinition299 = frozenset([62, 63])
FOLLOW_63_in_contractDefinition302 = frozenset([30, 61, 88])
FOLLOW_inheritanceSpecifier_in_contractDefinition304 = frozenset([62, 63])
FOLLOW_62_in_contractDefinition316 = frozenset([4, 5, 17, 18, 19, | |
"""
/********************************************************************************/
/* */
/* Copyright (c) 2020 Analog Devices, Inc. All Rights Reserved. */
/* This software is proprietary to Analog Devices, Inc. and its licensors. */
/* */
/********************************************************************************/
"""
#################################################################################
# #
# TOF sdk tests #
# #
# test_fsf.py - Tests specific to SDK are kept here #
# #
# API's tested #
# #
# Test cases correspond to the test plan #
#################################################################################
import pytest
import sys
import fsf_python as fsf
from helper import *
import os.path as path
from math import ceil
##############################
#
# FSF parser Create File API test
#
class TestFSFCreateFsfFile:
    """Exercise the FSF 'Create File' API (CreateFsfFile) in WRITE and READ modes.

    Fix applied: target paths are now built with ``path.join`` instead of
    concatenating a hard-coded ``"\\"`` separator, which only produced a
    correct path on Windows.
    """

    @pytest.mark.smoke
    def test_fsf_001_00(self, cmdopt_write_dir):
        """
        Exercise FSF 'Create File' API
        Instantiate FSF and call CreateFsfFile with valid FSF file name
        """
        print("\n===================================================="
              "\nTestFSFCreateFsfFile (test_fsf_001_00) : "
              "Instantiate FSF and call CreateFsfFile with valid FSF file name\n ")
        # Instantiate FSF Write utility
        fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)
        # Build the target FSF path portably
        write_file = path.join(cmdopt_write_dir, "test_fsf_001_00.fsf")
        # Create FSF file for writing
        status = fsf_write.CreateFsfFile(write_file)
        check_status("fsf_write.CreateFsfFile", status)
        # The file must now exist on disk
        status = path.isfile(write_file)
        check_status("path.isfile", status)
        assert status

    def test_fsf_001_01(self, cmdopt_write_dir):
        """
        Exercise FSF 'Create File' API
        Instantiate FSF using write utility and call CreateFsfFile twice
        """
        print("\n===================================================="
              "\nTestFSFCreateFsfFile (test_fsf_001_01) : "
              "Instantiate FSF using write utility and call CreateFsfFile twice\n ")
        # Instantiate FSF Write utility
        fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)
        # Build the target FSF path portably
        write_file = path.join(cmdopt_write_dir, "test_fsf_001_01.fsf")
        # Create FSF file for writing
        status = fsf_write.CreateFsfFile(write_file)
        check_status("fsf_write.CreateFsfFile", status)
        # Create again on the same handle; must be accepted as well
        status = fsf_write.CreateFsfFile(write_file)
        check_status("fsf_write.CreateFsfFile", status)
        # The file must now exist on disk
        status = path.isfile(write_file)
        check_status("path.isfile", status)
        assert status

    def test_fsf_001_02(self):
        """
        Exercise FSF 'Create File' API
        Instantiate FSF using write utility and call CreateFsfFile with NULL as argument
        """
        print("\n===================================================="
              "\nTestFSFCreateFsfFile (test_fsf_001_02) : "
              "Instantiate FSF using write utility and call CreateFsfFile with NULL as argument\n ")
        # Instantiate FSF Write utility
        fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)
        # Set FSF filename to NULL (empty string)
        write_file = ""
        # Create FSF file for writing; must not create anything
        status = fsf_write.CreateFsfFile(write_file)
        check_status("fsf_write.CreateFsfFile", status)
        # No file may exist for the empty name
        status = path.isfile(write_file)
        check_status("path.isfile", status)
        assert (status is False)

    def test_fsf_001_03(self, cmdopt_write_dir):
        """
        Exercise FSF 'Create File' API
        Instantiate FSF using read utility and call CreateFsfFile with valid FSF file name
        """
        print("\n===================================================="
              "\nTestFSFCreateFsfFile (test_fsf_001_03) : "
              "Instantiate FSF using read utility and call CreateFsfFile with valid FSF file name\n ")
        # Instantiate FSF Read utility
        fsf_read = fsf.FSF_Common(fsf.Mode.READ)
        # Build the target FSF path portably
        read_file = path.join(cmdopt_write_dir, "test_fsf_001_03.fsf")
        # Creating through a READ-mode instance must be rejected
        status = fsf_read.CreateFsfFile(read_file)
        check_status("fsf_read.CreateFsfFile", status)
        assert (status == fsf.Status.INVALID_OPERATION)
        # No file may have been created
        status = path.isfile(read_file)
        check_status("path.isfile", status)
        assert (status is False)

    def test_fsf_001_04(self, cmdopt_write_dir):
        """
        Exercise FSF 'Create File' API
        Instantiate FSF using read utility and call CreateFsfFile twice
        """
        print("\n===================================================="
              "\nTestFSFCreateFsfFile (test_fsf_001_04) : "
              "Instantiate FSF using read utility and call CreateFsfFile twice\n ")
        # Instantiate FSF Read utility
        fsf_read = fsf.FSF_Common(fsf.Mode.READ)
        # Build the target FSF path portably
        read_file = path.join(cmdopt_write_dir, "test_fsf_001_04.fsf")
        # Creating through a READ-mode instance must be rejected
        status = fsf_read.CreateFsfFile(read_file)
        check_status("fsf_read.CreateFsfFile", status)
        assert (status == fsf.Status.INVALID_OPERATION)
        # Second attempt must be rejected the same way
        status = fsf_read.CreateFsfFile(read_file)
        check_status("fsf_read.CreateFsfFile", status)
        assert (status == fsf.Status.INVALID_OPERATION)
        # No file may have been created
        status = path.isfile(read_file)
        check_status("path.isfile", status)
        assert (status is False)

    def test_fsf_001_05(self, cmdopt_write_dir):
        """
        Exercise FSF 'Create File' API
        Instantiate FSF using read utility and call CreateFsfFile with NULL as argument
        """
        # NOTE(review): the cmdopt_write_dir fixture is unused here; kept so
        # the test signature stays stable.
        print("\n===================================================="
              "\nTestFSFCreateFsfFile (test_fsf_001_05) : "
              "Instantiate FSF using read utility and call CreateFsfFile with NULL as argument\n ")
        # Instantiate FSF Read utility
        fsf_read = fsf.FSF_Common(fsf.Mode.READ)
        # Set FSF filename to NULL (empty string)
        read_file = ""
        # Creating through a READ-mode instance must be rejected
        status = fsf_read.CreateFsfFile(read_file)
        check_status("fsf_read.CreateFsfFile", status)
        assert (status == fsf.Status.INVALID_OPERATION)
        # No file may have been created
        status = path.isfile(read_file)
        check_status("path.isfile", status)
        assert (status is False)
##############################
#
# FSF parser Open File API test
#
class TestFSFOpenFile:
    """Exercise the FSF 'Open File' API (OpenFile) in READ and WRITE modes.

    Fix applied: the log headers previously named the wrong test class
    ("TestFSFCreateFsfFile"); they now report "TestFSFOpenFile".
    """

    @pytest.mark.smoke
    def test_fsf_002_00(self, cmdopt_read_dir):
        """
        Exercise FSF 'Open File' API
        Instantiate FSF using read utility and call OpenFile with an existing FSF file
        """
        print("\n===================================================="
              "\nTestFSFOpenFile (test_fsf_002_00) : "
              "Instantiate FSF using read utility and call OpenFile\n ")
        # Instantiate FSF Read utility
        fsf_read = fsf.FSF_Common(fsf.Mode.READ)
        # Existing data file (readDataFileName comes from helper)
        read_file = cmdopt_read_dir + readDataFileName
        # Open FSF file for reading
        status = fsf_read.OpenFile(read_file)
        check_status("fsf_read.OpenFile", status)
        assert (status == fsf.Status.SUCCESS)

    def test_fsf_002_01(self, cmdopt_read_dir):
        """
        Exercise FSF 'Open File' API
        Instantiate FSF using read utility and call OpenFile with a non-existing file
        """
        print("\n===================================================="
              "\nTestFSFOpenFile (test_fsf_002_01) : "
              "Instantiate FSF using read utility and call OpenFile\n ")
        # Instantiate FSF Read utility
        fsf_read = fsf.FSF_Common(fsf.Mode.READ)
        # Deliberately non-existing file name
        read_file = cmdopt_read_dir + readDataFileName + "non existent"
        # Open must fail with FILE_DOESNOT_EXIST
        status = fsf_read.OpenFile(read_file)
        check_status("fsf_read.OpenFile", status)
        assert (status == fsf.Status.FILE_DOESNOT_EXIST)

    def test_fsf_002_02(self, cmdopt_read_dir):
        """
        Exercise FSF 'Open File' API
        Instantiate FSF using read utility and call OpenFile with NULL as argument
        """
        # NOTE(review): the cmdopt_read_dir fixture is unused here; kept so
        # the test signature stays stable.
        print("\n===================================================="
              "\nTestFSFOpenFile (test_fsf_002_02) : "
              "Instantiate FSF using read utility and call OpenFile with NULL as argument\n ")
        # Instantiate FSF Read utility
        fsf_read = fsf.FSF_Common(fsf.Mode.READ)
        # Set FSF filename to NULL (empty string)
        read_file = ""
        # Open must fail with FILE_DOESNOT_EXIST
        status = fsf_read.OpenFile(read_file)
        check_status("fsf_read.OpenFile", status)
        assert (status == fsf.Status.FILE_DOESNOT_EXIST)

    def test_fsf_002_03(self, cmdopt_read_dir):
        """
        Exercise FSF 'Open File' API
        Instantiate FSF using write utility and call OpenFile with valid FSF file name
        """
        print("\n===================================================="
              "\nTestFSFOpenFile (test_fsf_002_03) : "
              "Instantiate FSF using write utility and call OpenFile with valid FSF file name\n ")
        # Instantiate FSF Write utility
        fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)
        # Existing data file
        write_file = cmdopt_read_dir + readDataFileName
        # Opening through a WRITE-mode instance must be rejected
        status = fsf_write.OpenFile(write_file)
        check_status("fsf_write.OpenFile", status)
        assert (status == fsf.Status.INVALID_OPERATION)

    def test_fsf_002_04(self, cmdopt_read_dir):
        """
        Exercise FSF 'Open File' API
        Instantiate FSF using write utility and call OpenFile with non-existing FSF file name
        """
        print("\n===================================================="
              "\nTestFSFOpenFile (test_fsf_002_04) : "
              "Instantiate FSF using write utility and call OpenFile with non-existing FSF file name\n ")
        # Instantiate FSF Write utility
        fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)
        # Deliberately non-existing file name
        write_file = cmdopt_read_dir + readDataFileName + "non existent"
        # Opening through a WRITE-mode instance must be rejected
        status = fsf_write.OpenFile(write_file)
        check_status("fsf_write.OpenFile", status)
        assert (status == fsf.Status.INVALID_OPERATION)

    def test_fsf_002_05(self):
        """
        Exercise FSF 'Open File' API
        Instantiate FSF using write utility and call OpenFile with NULL as argument
        """
        print("\n===================================================="
              "\nTestFSFOpenFile (test_fsf_002_05) : "
              "Instantiate FSF using write utility and call OpenFile with NULL as argument\n ")
        # Instantiate FSF Write utility
        fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)
        # Set FSF filename to NULL (empty string)
        write_file = ""
        # Opening through a WRITE-mode instance must be rejected
        status = fsf_write.OpenFile(write_file)
        check_status("fsf_write.OpenFile", status)
        assert (status == fsf.Status.INVALID_OPERATION)
##############################
#
# FSF parser Close File API test
#
class TestFSFCloseFile:
@pytest.mark.smoke
def test_fsf_003_00(self, cmdopt_write_dir):
"""
Exercise FSF 'Close File' API
Instantiate FSF using write utility and call CreateFsfFile with existing FSF file name then CloseFile
"""
print("\n===================================================="
"\nTestFSFCloseFile (test_fsf_003_00) : "
"Instantiate FSF using write utility and call CreateFsfFile with existing FSF file name then CloseFile\n ")
# Instantiate FSF Write utility
fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)
# Set FSF filename
file_name = "\\test_fsf_003_00.fsf"
write_file = cmdopt_write_dir + file_name
# Create FSF file for writing
status = fsf_write.CreateFsfFile(write_file)
check_status("fsf_write.CreateFsfFile", status)
# CloseFile
status = fsf_write.CloseFile()
check_status("fsf_write.CloseFile", status)
assert (status == fsf.Status.SUCCESS)
# Check if file exists
status = path.isfile(write_file)
check_status("path.isfile", status)
assert status is True
def test_fsf_003_01(self, cmdopt_read_dir):
"""
Exercise FSF 'Close File' API
Instantiate FSF using read utility and call OpenFile with existing FSF file name then CloseFile
"""
print("\n===================================================="
"\nTestFSFCloseFile (test_fsf_003_00) : "
"Instantiate FSF using read utility and call OpenFile with existing FSF file name then CloseFile\n ")
# Instantiate FSF Read utility
fsf_read = fsf.FSF_Common(fsf.Mode.READ)
# Set FSF filename
read_file = cmdopt_read_dir + readDataFileName
# Open FSF file for reading
status = fsf_read.OpenFile(read_file)
check_status("fsf_read.OpenFile", status)
# Close FSF file
status = fsf_read.CloseFile()
assert (status == fsf.Status.SUCCESS)
def test_fsf_003_02(self):
"""
Exercise FSF 'Close File' API
Instantiate FSF using write utility, then CloseFile
"""
print("\n===================================================="
"\nTestFSFCloseFile (test_fsf_003_02) : "
"Instantiate FSF using write utility, then CloseFile\n ")
# Instantiate FSF Write utility
fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)
# CloseFile
# TODO: This should | |
False),
_MetaInfoClassMember('flow-control-start-character', ATTRIBUTE, 'int' , None, None,
[('-128', '127')], [],
''' Software flow control start char
''',
'flow_control_start_character',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('flow-control-stop-character', ATTRIBUTE, 'int' , None, None,
[('-128', '127')], [],
''' Software flow control stop char
''',
'flow_control_stop_character',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('idle-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' TTY idle time
''',
'idle_time',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('motd-banner-enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' MOTD banner enabled
''',
'motd_banner_enabled',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('private-flag', ATTRIBUTE, 'bool' , None, None,
[], [],
''' TTY private flag
''',
'private_flag',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('terminal-length', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Terminal length
''',
'terminal_length',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('terminal-type', ATTRIBUTE, 'str' , None, None,
[], [],
''' Terminal type
''',
'terminal_type',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('terminal-width', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Line width
''',
'terminal_width',
'Cisco-IOS-XR-tty-server-oper', False),
],
'Cisco-IOS-XR-tty-server-oper',
'general-statistics',
_yang_ns._namespaces['Cisco-IOS-XR-tty-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.VtyLines.VtyLine.VtyStatistics.Exec_' : {
'meta_info' : _MetaInfoClass('Tty.VtyLines.VtyLine.VtyStatistics.Exec_',
False,
[
_MetaInfoClassMember('time-stamp-enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Specifies whether timestamp is enabled or not
''',
'time_stamp_enabled',
'Cisco-IOS-XR-tty-server-oper', False),
],
'Cisco-IOS-XR-tty-server-oper',
'exec',
_yang_ns._namespaces['Cisco-IOS-XR-tty-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.VtyLines.VtyLine.VtyStatistics.Aaa' : {
'meta_info' : _MetaInfoClass('Tty.VtyLines.VtyLine.VtyStatistics.Aaa',
False,
[
_MetaInfoClassMember('user-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' The authenticated username
''',
'user_name',
'Cisco-IOS-XR-tty-server-oper', False),
],
'Cisco-IOS-XR-tty-server-oper',
'aaa',
_yang_ns._namespaces['Cisco-IOS-XR-tty-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.VtyLines.VtyLine.VtyStatistics' : {
'meta_info' : _MetaInfoClass('Tty.VtyLines.VtyLine.VtyStatistics',
False,
[
_MetaInfoClassMember('aaa', REFERENCE_CLASS, 'Aaa' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'Tty.VtyLines.VtyLine.VtyStatistics.Aaa',
[], [],
''' AAA related statistics
''',
'aaa',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('connection', REFERENCE_CLASS, 'Connection' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'Tty.VtyLines.VtyLine.VtyStatistics.Connection',
[], [],
''' Connection related statistics
''',
'connection',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('exec', REFERENCE_CLASS, 'Exec_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'Tty.VtyLines.VtyLine.VtyStatistics.Exec_',
[], [],
''' Exec related statistics
''',
'exec_',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('general-statistics', REFERENCE_CLASS, 'GeneralStatistics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'Tty.VtyLines.VtyLine.VtyStatistics.GeneralStatistics',
[], [],
''' General statistics of line
''',
'general_statistics',
'Cisco-IOS-XR-tty-server-oper', False),
],
'Cisco-IOS-XR-tty-server-oper',
'vty-statistics',
_yang_ns._namespaces['Cisco-IOS-XR-tty-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.VtyLines.VtyLine.State.Template' : {
'meta_info' : _MetaInfoClass('Tty.VtyLines.VtyLine.State.Template',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of the template
''',
'name',
'Cisco-IOS-XR-tty-server-oper', False),
],
'Cisco-IOS-XR-tty-server-oper',
'template',
_yang_ns._namespaces['Cisco-IOS-XR-tty-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.VtyLines.VtyLine.State.General' : {
'meta_info' : _MetaInfoClass('Tty.VtyLines.VtyLine.State.General',
False,
[
_MetaInfoClassMember('general-state', REFERENCE_ENUM_CLASS, 'LineStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'LineStateEnum',
[], [],
''' State of the line
''',
'general_state',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('operation', REFERENCE_ENUM_CLASS, 'SessionOperationEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'SessionOperationEnum',
[], [],
''' application running of on the tty line
''',
'operation',
'Cisco-IOS-XR-tty-server-oper', False),
],
'Cisco-IOS-XR-tty-server-oper',
'general',
_yang_ns._namespaces['Cisco-IOS-XR-tty-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.VtyLines.VtyLine.State' : {
'meta_info' : _MetaInfoClass('Tty.VtyLines.VtyLine.State',
False,
[
_MetaInfoClassMember('general', REFERENCE_CLASS, 'General' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'Tty.VtyLines.VtyLine.State.General',
[], [],
''' General information
''',
'general',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('template', REFERENCE_CLASS, 'Template' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'Tty.VtyLines.VtyLine.State.Template',
[], [],
''' Information related to template applied to the
line
''',
'template',
'Cisco-IOS-XR-tty-server-oper', False),
],
'Cisco-IOS-XR-tty-server-oper',
'state',
_yang_ns._namespaces['Cisco-IOS-XR-tty-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.VtyLines.VtyLine.Configuration.ConnectionConfiguration.TransportInput' : {
'meta_info' : _MetaInfoClass('Tty.VtyLines.VtyLine.Configuration.ConnectionConfiguration.TransportInput',
False,
[
_MetaInfoClassMember('none', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Not used
''',
'none',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('protocol1', REFERENCE_ENUM_CLASS, 'TtyTransportProtocolEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_management_datatypes', 'TtyTransportProtocolEnum',
[], [],
''' Transport protocol1
''',
'protocol1',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('protocol2', REFERENCE_ENUM_CLASS, 'TtyTransportProtocolEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_management_datatypes', 'TtyTransportProtocolEnum',
[], [],
''' Transport protocol2
''',
'protocol2',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('select', REFERENCE_ENUM_CLASS, 'TtyTransportProtocolSelectEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_management_datatypes', 'TtyTransportProtocolSelectEnum',
[], [],
''' Choose transport protocols
''',
'select',
'Cisco-IOS-XR-tty-server-oper', False),
],
'Cisco-IOS-XR-tty-server-oper',
'transport-input',
_yang_ns._namespaces['Cisco-IOS-XR-tty-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.VtyLines.VtyLine.Configuration.ConnectionConfiguration' : {
'meta_info' : _MetaInfoClass('Tty.VtyLines.VtyLine.Configuration.ConnectionConfiguration',
False,
[
_MetaInfoClassMember('acl-in', ATTRIBUTE, 'str' , None, None,
[], [],
''' ACL for inbound traffic
''',
'acl_in',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('acl-out', ATTRIBUTE, 'str' , None, None,
[], [],
''' ACL for outbound traffic
''',
'acl_out',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('transport-input', REFERENCE_CLASS, 'TransportInput' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'Tty.VtyLines.VtyLine.Configuration.ConnectionConfiguration.TransportInput',
[], [],
''' Protocols to use when connecting to the
terminal server
''',
'transport_input',
'Cisco-IOS-XR-tty-server-oper', False),
],
'Cisco-IOS-XR-tty-server-oper',
'connection-configuration',
_yang_ns._namespaces['Cisco-IOS-XR-tty-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.VtyLines.VtyLine.Configuration' : {
'meta_info' : _MetaInfoClass('Tty.VtyLines.VtyLine.Configuration',
False,
[
_MetaInfoClassMember('connection-configuration', REFERENCE_CLASS, 'ConnectionConfiguration' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'Tty.VtyLines.VtyLine.Configuration.ConnectionConfiguration',
[], [],
''' Conection configuration information
''',
'connection_configuration',
'Cisco-IOS-XR-tty-server-oper', False),
],
'Cisco-IOS-XR-tty-server-oper',
'configuration',
_yang_ns._namespaces['Cisco-IOS-XR-tty-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.VtyLines.VtyLine.Sessions.OutgoingConnection.HostAddress' : {
'meta_info' : _MetaInfoClass('Tty.VtyLines.VtyLine.Sessions.OutgoingConnection.HostAddress',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_IDENTITY_CLASS, 'HostAfIdBaseIdentity' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_management_oper', 'HostAfIdBaseIdentity',
[], [],
''' AFName
''',
'af_name',
'Cisco-IOS-XR-tty-management-oper', False),
_MetaInfoClassMember('ipv4-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address
''',
'ipv4_address',
'Cisco-IOS-XR-tty-management-oper', False),
_MetaInfoClassMember('ipv6-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'ipv6_address',
'Cisco-IOS-XR-tty-management-oper', False),
],
'Cisco-IOS-XR-tty-management-oper',
'host-address',
_yang_ns._namespaces['Cisco-IOS-XR-tty-management-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.VtyLines.VtyLine.Sessions.OutgoingConnection' : {
'meta_info' : _MetaInfoClass('Tty.VtyLines.VtyLine.Sessions.OutgoingConnection',
False,
[
_MetaInfoClassMember('connection-id', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Connection ID [1-20]
''',
'connection_id',
'Cisco-IOS-XR-tty-management-oper', False),
_MetaInfoClassMember('host-address', REFERENCE_CLASS, 'HostAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'Tty.VtyLines.VtyLine.Sessions.OutgoingConnection.HostAddress',
[], [],
''' Host address
''',
'host_address',
'Cisco-IOS-XR-tty-management-oper', False),
_MetaInfoClassMember('host-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Host name
''',
'host_name',
'Cisco-IOS-XR-tty-management-oper', False),
_MetaInfoClassMember('idle-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Elapsed time since session was suspended (in
seconds)
''',
'idle_time',
'Cisco-IOS-XR-tty-management-oper', False),
_MetaInfoClassMember('is-last-active-session', ATTRIBUTE, 'bool' , None, None,
[], [],
''' True indicates last active session
''',
'is_last_active_session',
'Cisco-IOS-XR-tty-management-oper', False),
_MetaInfoClassMember('transport-protocol', REFERENCE_ENUM_CLASS, 'TransportServiceEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_management_oper', 'TransportServiceEnum',
[], [],
''' Session transport protocol
''',
'transport_protocol',
'Cisco-IOS-XR-tty-management-oper', False),
],
'Cisco-IOS-XR-tty-management-oper',
'outgoing-connection',
_yang_ns._namespaces['Cisco-IOS-XR-tty-management-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.VtyLines.VtyLine.Sessions' : {
'meta_info' : _MetaInfoClass('Tty.VtyLines.VtyLine.Sessions',
False,
[
_MetaInfoClassMember('outgoing-connection', REFERENCE_LIST, 'OutgoingConnection' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'Tty.VtyLines.VtyLine.Sessions.OutgoingConnection',
[], [],
''' List of outgoing sessions
''',
'outgoing_connection',
'Cisco-IOS-XR-tty-management-oper', False),
],
'Cisco-IOS-XR-tty-management-oper',
'sessions',
_yang_ns._namespaces['Cisco-IOS-XR-tty-management-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.VtyLines.VtyLine' : {
'meta_info' : _MetaInfoClass('Tty.VtyLines.VtyLine',
False,
[
_MetaInfoClassMember('line-number', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' VTY Line number
''',
'line_number',
'Cisco-IOS-XR-tty-server-oper', True),
_MetaInfoClassMember('configuration', REFERENCE_CLASS, 'Configuration' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'Tty.VtyLines.VtyLine.Configuration',
[], [],
''' Configuration information of the line
''',
'configuration',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('sessions', REFERENCE_CLASS, 'Sessions' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'Tty.VtyLines.VtyLine.Sessions',
[], [],
''' Outgoing sessions
''',
'sessions',
'Cisco-IOS-XR-tty-management-oper', False),
_MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'Tty.VtyLines.VtyLine.State',
[], [],
''' Line state information
''',
'state',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('vty-statistics', REFERENCE_CLASS, 'VtyStatistics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'Tty.VtyLines.VtyLine.VtyStatistics',
[], [],
''' Statistics of the VTY line
''',
'vty_statistics',
'Cisco-IOS-XR-tty-server-oper', False),
],
'Cisco-IOS-XR-tty-server-oper',
'vty-line',
_yang_ns._namespaces['Cisco-IOS-XR-tty-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.VtyLines' : {
'meta_info' : _MetaInfoClass('Tty.VtyLines',
False,
[
_MetaInfoClassMember('vty-line', REFERENCE_LIST, 'VtyLine' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper', 'Tty.VtyLines.VtyLine',
[], [],
''' VTY Line
''',
'vty_line',
'Cisco-IOS-XR-tty-server-oper', False),
],
'Cisco-IOS-XR-tty-server-oper',
'vty-lines',
_yang_ns._namespaces['Cisco-IOS-XR-tty-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.AuxiliaryNodes.AuxiliaryNode.AuxiliaryLine.AuxiliaryStatistics.Rs232' : {
'meta_info' : _MetaInfoClass('Tty.AuxiliaryNodes.AuxiliaryNode.AuxiliaryLine.AuxiliaryStatistics.Rs232',
False,
[
_MetaInfoClassMember('baud-rate', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Inbound/Outbound baud rate in bps
''',
'baud_rate',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('data-bits', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of databits
''',
'data_bits',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('exec-disabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Exec disabled on TTY
''',
'exec_disabled',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('framing-error-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Framing error count
''',
'framing_error_count',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('hardware-flow-control-status', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Hardware flow control status
''',
'hardware_flow_control_status',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('overrun-error-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Overrun error count
''',
'overrun_error_count',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('parity-error-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Parity error count
''',
'parity_error_count',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('parity-status', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Parity status
''',
'parity_status',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('stop-bits', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of stopbits
''',
'stop_bits',
'Cisco-IOS-XR-tty-server-oper', False),
],
'Cisco-IOS-XR-tty-server-oper',
'rs232',
_yang_ns._namespaces['Cisco-IOS-XR-tty-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_tty_server_oper'
),
},
'Tty.AuxiliaryNodes.AuxiliaryNode.AuxiliaryLine.AuxiliaryStatistics.GeneralStatistics' : {
'meta_info' : _MetaInfoClass('Tty.AuxiliaryNodes.AuxiliaryNode.AuxiliaryLine.AuxiliaryStatistics.GeneralStatistics',
False,
[
_MetaInfoClassMember('absolute-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Absolute timeout period
''',
'absolute_timeout',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('async-interface', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Usable as async interface
''',
'async_interface',
'Cisco-IOS-XR-tty-server-oper', False),
_MetaInfoClassMember('domain-lookup-enabled', ATTRIBUTE, 'bool' , | |
# Compare the non-conformity score to the validation set non-conformity scores
quantiles = self.val_scores.view(1, 1, -1)
cdf = (scores.unsqueeze(-1) > quantiles).type(value.dtype).sum(dim=-1) # Compute the ranking of the value among all validation values. This value should be between [0, len(val_score)]
# If cdf is 0, then set it to 0
# If cdf is 1, then set it to U[0, 1]
# ...
# If cdf is N, then set it to U[N-1, N]
# If cdf is N+2, set it to N+1
# Note len(val_scores) = N+2
cdf = ((cdf - 1) + self.rand_cdf).clamp(min=0, max=len(self.val_scores)-1)
cdf = cdf / (len(self.val_scores) - 1)
return cdf.view(out_shape).to(out_device)
    def icdf(self, value):
        """ Get the inverse CDF. This function is NOT differentiable
        Args:
            value (tensor): an array of shape [n_evaluations, batch_shape] or shape [batch_shape], each entry should take values in [0, 1]
                Supports automatic shape induction, e.g. if cdf has shape [n_evaluations, 1] it will automatically be converted to shape [n_evaluations, batch_shape]
        Returns:
            tensor: the value of inverse CDF function at the queried cdf values
        """
        cdf, out_shape = self.shape_inference(value) # Convert cdf to have shape [n_evaluations, batch_shape]
        # self.to(cdf.device) # Move all assets in this class to the same device as value to avoid device mismatch error
        # Move cdf to the device of test_predictions to avoid device mismatch error
        out_device = cdf.device
        cdf = cdf.to(self.device)
        # Scale the CDF values from [0, 1] to a (fractional) rank among the validation scores
        quantiles = cdf * (len(self.val_scores) - 1)
        # The following is carefully crafted to exactly invert the cdf function. This code must be exactly as it is:
        # the same rand_cdf randomization added in cdf() is subtracted back out before flooring,
        # so icdf(cdf(v)) recovers the index of the validation score that produced v.
        quantiles = torch.floor(quantiles + 1 - self.rand_cdf).type(torch.long).clamp(min=0, max=len(self.val_scores)-1)
        # Look up the non-conformity score at the selected rank, then invert the score
        # function to map the score back to a label value for each test prediction.
        target_score = self.val_scores[quantiles]
        value = self.iscore(self.test_predictions, target_score)
        return value.view(out_shape).to(out_device) # Output the original device
class DistributionConformalNAF(DistributionConformal):
    """ Using NAF interpolation for conformal calibration. This function behaves like torch Distribution.

    A pair of NAF (neural autoregressive flow) networks is trained: the forward flow maps
    non-conformity scores to CDF values spread uniformly in [0, 1], and the inverse flow maps
    CDF values back to scores. Training a separate inverse flow avoids the numerical
    instability of inverting the forward flow at query time.
    """
    def __init__(self, val_predictions, val_labels, test_predictions, score_func, iscore_func, verbose=True):
        """
        Args:
            val_predictions: predictions on the validation (calibration) set
            val_labels (tensor): labels of the validation set; also determines the training device
            test_predictions: predictions on the test set
            score_func: non-conformity score function (see DistributionConformal)
            iscore_func: inverse of score_func (see DistributionConformal)
            verbose (bool): if True, print training progress every 1000 iterations
        """
        super(DistributionConformalNAF, self).__init__(val_predictions, val_labels, test_predictions, score_func, iscore_func)
        # Train both a flow and an inverse flow to avoid the numerical instability of inverting a flow
        self.flow = NafFlow(feature_size=200).to(val_labels.device)
        self.iflow = NafFlow(feature_size=200).to(val_labels.device)
        target_cdf = torch.linspace(0, 1, len(self.val_scores), device=val_labels.device) # The goal of the flow is to map non-conformity scores to CDF values uniformly in [0, 1]
        # Both flows share one optimizer; each term of the loss trains one direction
        flow_optim = optim.Adam(list(self.flow.parameters()) + list(self.iflow.parameters()), lr=1e-3)
        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(flow_optim, mode='min', patience=2, threshold=1e-2, threshold_mode='rel', factor=0.5)
        for iteration in range(50000):
            flow_optim.zero_grad()
            # Forward flow: validation scores -> CDF grid; inverse flow: CDF grid -> validation scores
            cdfs, _ = self.flow(self.val_scores.view(-1, 1).type(torch.float32))
            scores, _ = self.iflow(target_cdf.view(-1, 1).type(torch.float32))
            # MSE in both directions so the two flows stay (approximately) mutual inverses
            loss = (cdfs.flatten() - target_cdf).pow(2).mean() + (scores.flatten() - self.val_scores).pow(2).mean()
            loss.backward()
            flow_optim.step()
            if iteration % 100 == 0:
                lr_scheduler.step(loss) # Reduce the learning rate
                if flow_optim.param_groups[0]['lr'] < 1e-5 or loss < 1e-5: # Hitchhike the lr scheduler to terminate if no progress, or the loss is extremely small
                    break
            if verbose and iteration % 1000 == 0:
                print("Iteration %d, loss=%.5f, lr=%.5f" % (iteration, loss, flow_optim.param_groups[0]['lr']))
    def cdf(self, value):
        """ The CDF at value. This function is differentiable
        Args:
            value (tensor): an array of shape [n_evaluations, batch_shape] or shape [batch_size].
        Returns:
            tensor: the value of CDF at the queried values.
        """
        # First perform automatic shape induction and convert value into an array of shape [n_evaluations, batch_shape]
        value, out_shape = self.shape_inference(value)
        # self.to(value.device) # Move all assets in this class to the same device as value to avoid device mismatch error
        # Move value to the device of test_predictions to avoid device mismatch error
        out_device = value.device
        value = value.to(self.device)
        # Map queried values to non-conformity scores, then through the trained flow to CDF values
        score = self.score(self.test_predictions, value)
        cdf, _ = self.flow(score.view(-1, 1))
        # Clamp away from exactly 0/1 to keep downstream computations numerically stable
        return cdf.clamp(min=1e-6, max=1-1e-6).view(out_shape).to(out_device)
    def icdf(self, value):
        """ Get the inverse CDF. This function is differentiable.
        Args:
            value (tensor): an array of shape [n_evaluations, batch_shape] or shape [batch_shape], each entry should take values in [0, 1]
                Supports automatic shape induction, e.g. if cdf has shape [n_evaluations, 1] it will automatically be converted to shape [n_evaluations, batch_shape]
        Returns:
            tensor: the value of inverse CDF function at the queried cdf values
        """
        cdf, out_shape = self.shape_inference(value) # Convert cdf to have shape [n_evaluations, batch_shape]
        # self.to(cdf.device) # Move all assets in this class to the same device as value to avoid device mismatch error
        # Move cdf to the device of test_predictions to avoid device mismatch error
        out_device = cdf.device
        cdf = cdf.to(self.device)
        # Map CDF values through the trained inverse flow to scores, then invert the score function
        adjusted = self.iflow(cdf.view(-1, 1))[0].view(cdf.shape)
        value = self.iscore(self.test_predictions, adjusted)
        return value.view(out_shape).to(out_device)
def _conformal_score_point(predictions, values):
""" Compute the non-conformity score of a set of values under some baseline predictor
Args:
predictions: array [batch_shape], a batch of point predictions
values: array [n_evaluations, batch_shape], note that for values batch_shape is the last dimension while for predictions batch_shape is the first dimension
Returns:
score: array [n_evaluations, batch_shape], where score[i, j] is the non-conformity score of values[i, j] under the prediction[j]
"""
score = values - predictions.view(1, -1)
return score
def _conformal_iscore_point(predictions, score):
""" Compute the inverse of the non-conformity score defined in conformal_score_quantile.
The goal is that conformal_iscore_quantile(predictions, conformal_score_quantile(predictions, labels))) = labels
Args:
predictions: array [batch_size, n_quantiles] or [batch_size, n_quantiles, 2], a batch of quantile predictions
score: array [n_evaluations, batch_shape]
Returns:
value: array [n_evaluations, batch_shape], where value[i, j] is the inverse non-conformity score of score[i, j] under prediction[j]
"""
return predictions.view(1, -1) + score
def _conformal_score_interval(predictions, values):
""" Compute the non-conformity score of a set of values under some baseline predictor
Args:
predictions: array [batch_shape, 2], a batch of interval predictions
values: array [n_evaluations, batch_shape], note that for values batch_shape is the last dimension while for predictions batch_shape is the first dimension
Returns:
score: array [n_evaluations, batch_shape], where score[i, j] is the non-conformity score of values[i, j] under the prediction[j]
"""
score = (values - predictions.min(dim=1, keepdims=True)[0].permute(1, 0)) / (predictions[:, 1:2] - predictions[:, 0:1]).abs().permute(1, 0) - 0.5
return score
def _conformal_iscore_interval(predictions, score):
""" Compute the inverse of the non-conformity score defined in conformal_score_quantile.
The goal is that conformal_iscore_quantile(predictions, conformal_score_quantile(predictions, labels))) = labels
Args:
predictions: array [batch_size, 2], a batch of interval predictions
score: array [n_evaluations, batch_shape]
Returns:
value: array [n_evaluations, batch_shape], where value[i, j] is the inverse non-conformity score of score[i, j] under prediction[j]
"""
return predictions.min(dim=1, keepdims=True)[0].permute(1, 0) + (score + 0.5) * (predictions[:, 1:2] - predictions[:, 0:1]).abs().permute(1, 0)
def _conformal_score_interval1(predictions, values, max_interval=1e+3):
""" Compute the alternative non-conformity score of a set of values under interval predictions
"""
diff = values - predictions.mean(dim=1).view(1, -1)
return torch.sign(diff) * max_interval + diff
def _conformal_iscore_interval1(predictions, values, max_interval=1e+3):
""" Compute the alternative inverse non-conformity score of a set of values under interval predictions
"""
return values - torch.sign(values) * max_interval + predictions.mean(dim=1).view(1, -1)
def _conformal_score_quantile(predictions, values):
""" Compute the non-conformity score of a set of values under some baseline predictor
Args:
predictions: array [batch_shape, n_quantiles] or [batch_shape, n_quantiles, 2], a batch of quantile predictions
values: array [n_evaluations, batch_shape], note that for values batch_shape is the last dimension while for predictions batch_shape is the first dimension
Returns:
score: array [n_evaluations, batch_shape], where score[i, j] is the non-conformity score of values[i, j] under the prediction[j]
"""
if len(predictions.shape) == 2:
# sorted_quantile = torch.linspace(0, 1, predictions.shape[1]+2, device=predictions.device)[1:-1].view(-1, 1)
sorted_quantile = _implicit_quantiles(predictions.shape[1]).to(predictions.device).view(1, -1)
sorted_pred, _ = torch.sort(predictions, dim=1)
else:
sorted_quantile, _ = torch.sort(predictions[:, :, 1], dim=1) # [batch_shape, num_quantiles]
sorted_pred, _ = torch.sort(predictions[:, :, 0], dim=1)
sorted_quantile = sorted_quantile.permute(1, 0) # [num_quantiles, batch_shape] This is needed because torch Distribution has different convention from torchuq
sorted_pred = sorted_pred.permute(1, 0).unsqueeze(1) # [num_quantiles, 1, batch_shape]
quantile_gap = (sorted_quantile[1:] - sorted_quantile[:-1]).unsqueeze(1) # [num_quantiles-1, 1, batch_shape]
# The score is equal to how many quantiles the value exceeds
score = (values.unsqueeze(0) - sorted_pred[:-1]) / (sorted_pred[1:] - sorted_pred[:-1]) # [num_quantiles-1, n_evaluations, batch_shape]
score = sorted_quantile[:1] + (score.clamp(min=0.0, max=1.0) * quantile_gap).sum(dim=0) # If value exceeds all samples, its score so far is 1, [n_evaluations, batch_shape]
# Also consider values that are below the smallest sample or greater than | |
p.replace('{INITRAMFS_OUTPUT}', initramfs_output)
return p
# Execute initramfs build_command
execute_command(args, 'initramfs.build_command', config.initramfs.build_command, _replace_vars)
if config.initramfs.build_output:
cmd_output_file = _replace_vars(args, config.initramfs.build_output.value)
try:
# Move the output file as stated in the configuration to the kernel tree
shutil.move(cmd_output_file, initramfs_output)
except IOError as e:
log.die("Could not copy initramfs from '{}' to '{}': {}".format(cmd_output_file, initramfs_output, str(e)))
def install_modules(args, prefix="/"):
    """
    Installs the kernel modules to the given prefix.

    Args:
        args: parsed command line arguments; args.kernel_dir is used as the make directory
        prefix: installation prefix passed to make as INSTALL_MOD_PATH (defaults to "/")
    """
    # Use correct 022 umask when installing modules
    saved_umask = os.umask(0o022)
    try:
        subprocess.run(['make', 'modules_install', 'INSTALL_MOD_PATH=' + prefix], cwd=args.kernel_dir, check=True, stdout=None)
    except subprocess.CalledProcessError as e:
        log.die("'make modules_install INSTALL_MOD_PATH={}' failed in {} with code {}".format(prefix, args.kernel_dir, e.returncode))
    finally:
        # Restore the caller's umask even if the install fails or log.die raises (fix:
        # previously the umask was only restored on the success path)
        os.umask(saved_umask)
def main_build(args, config=None):
    """
    Main function for the 'build' command.

    Applies the autokernel configuration to the kernel's Kconfig, builds the kernel,
    and optionally builds an initramfs. When the initramfs should be packed into the
    kernel image, the kernel is built twice: once without an initramfs (so modules are
    available to the initramfs generator), then again with the generated archive.

    Args:
        args: parsed command line arguments (kernel_dir, clean, autokernel_config, ...)
        config: optional pre-loaded autokernel configuration; loaded from
            args.autokernel_config when not given
    """
    if not config:
        # Load configuration file
        config = autokernel.config.load_config(args.autokernel_config)
    # Set umask for build
    saved_umask = os.umask(config.build.umask.value)
    # Execute pre hook
    execute_command(args, 'build.hooks.pre', config.build.hooks.pre, replace_common_vars)
    # Clean the kernel dir, if the user wants that
    if args.clean:
        log.info("Cleaning kernel directory")
        clean_kernel_dir(args)
    kernel_version = autokernel.kconfig.get_kernel_version(args.kernel_dir)
    # Config output is "{KERNEL_DIR}/.config.autokernel"
    config_output = os.path.join(args.kernel_dir, '.config.autokernel')
    # Initramfs basename "initramfs-{KERNEL_VERSION}.cpio"
    # The .cpio suffix is crucial, as the kernel makefile requires it to detect initramfs archives
    initramfs_basename = 'initramfs-{}.cpio'.format(kernel_version)
    # Initramfs output is "{KERNEL_DIR}/initramfs-{KERNEL_VERSION}.cpio"
    initramfs_output = os.path.join(args.kernel_dir, initramfs_basename)
    # Load symbols from Kconfig
    kconfig = autokernel.kconfig.load_kconfig(args.kernel_dir)
    sym_cmdline_bool = kconfig.syms['CMDLINE_BOOL']
    sym_cmdline = kconfig.syms['CMDLINE']
    sym_initramfs_source = kconfig.syms['INITRAMFS_SOURCE']
    sym_modules = kconfig.syms['MODULES']
    # Set some defaults
    sym_cmdline_bool.set_value('y')
    sym_cmdline.set_value('')
    sym_initramfs_source.set_value('{INITRAMFS}')
    # Apply autokernel configuration
    kernel_cmdline = apply_autokernel_config(args, kconfig, config)
    def _build_kernel():
        # Write configuration to file
        kconfig.write_config(
            filename=config_output,
            header=generated_by_autokernel_header(),
            save_old=False)
        # Copy file to .config, which may get changed by the makefiles
        shutil.copyfile(config_output, os.path.join(args.kernel_dir, '.config'))
        # Build the kernel
        build_kernel(args)
    def set_cmdline():
        # Sets CMDLINE_BOOL and CMDLINE from the gathered add_cmdline options while
        # respecting explicit user overrides of either symbol.
        kernel_cmdline_str = ' '.join(kernel_cmdline)
        has_user_cmdline_bool = sym_cmdline_bool in autokernel.symbol_tracking.symbol_changes
        has_user_cmdline = sym_cmdline in autokernel.symbol_tracking.symbol_changes
        if has_user_cmdline_bool and sym_cmdline_bool.str_value == 'n':
            # The user has explicitly disabled the builtin commandline,
            # so there is no need to set it.
            pass
        else:
            sym_cmdline_bool.set_value('y')
            # Issue a warning, if a custom cmdline does not contain "{CMDLINE}", and we have gathered add_cmdline options.
            # Fix: str has no .contains() method; the previous
            # `sym_cmdline.str_value.contains('{CMDLINE}')` raised AttributeError
            # whenever this branch was reached.
            if has_user_cmdline and '{CMDLINE}' not in sym_cmdline.str_value and len(kernel_cmdline) > 0:
                log.warn("CMDLINE was set manually and doesn't contain a '{CMDLINE}' token, although add_cmdline has also been used.")
            if has_user_cmdline:
                sym_cmdline.set_value(sym_cmdline.str_value.replace('{CMDLINE}', kernel_cmdline_str))
            else:
                sym_cmdline.set_value(kernel_cmdline_str)
    def check_initramfs_source(sym_initramfs_source):
        has_user_initramfs_source = sym_initramfs_source in autokernel.symbol_tracking.symbol_changes
        # It is an error to explicitly set INITRAMFS_SOURCE, if our initramfs is set to builtin.
        if has_user_initramfs_source \
                and config.initramfs.enabled \
                and config.initramfs.builtin \
                and autokernel.symbol_tracking.symbol_changes[sym_initramfs_source].reason == 'explicitly set':
            log.die("INITRAMFS_SOURCE was set manually, although a custom initramfs should be built and integrated into the kernel.")
    # Set CMDLINE_BOOL and CMDLINE
    set_cmdline()
    # Preprocess INITRAMFS_SOURCE
    check_initramfs_source(sym_initramfs_source)
    # Kernel build pass #1
    log.info("Building kernel")
    # On the first pass, disable all initramfs sources
    sym_initramfs_source.set_value('')
    # Start the build process
    _build_kernel()
    # Build the initramfs, if enabled
    if config.initramfs.enabled:
        with tempfile.TemporaryDirectory() as tmppath:
            if sym_modules.str_value != 'n':
                # Temporarily install modules so the initramfs generator has access to them
                log.info("Copying modules into temporary directory")
                tmp_modules_prefix = os.path.join(tmppath, 'modules')
                install_modules(args, prefix=tmp_modules_prefix)
            else:
                tmp_modules_prefix = None
            # Build the initramfs
            build_initramfs(args, config, tmp_modules_prefix, initramfs_output)
        # Pack the initramfs into the kernel if desired
        if config.initramfs.builtin:
            log.info("Rebuilding kernel to pack external resources")
            # On the second pass, we enable the initramfs cpio archive, which is now in the kernel_dir
            sym_initramfs_source.set_value(initramfs_basename)
            # Rebuild the kernel to pack the new images
            _build_kernel()
    # Execute post hook
    execute_command(args, 'build.hooks.post', config.build.hooks.post, replace_common_vars)
    os.umask(saved_umask)
def main_install(args, config=None):
    """
    Main function for the 'install' command.

    Mounts configured mountpoints, runs the pre-install hook, installs the
    kernel image, config, modules and (optionally) the initramfs into the
    configured target directory, purges old versioned artifacts, runs the
    post-install hook and finally unmounts what it mounted itself.

    :param args: Parsed command line arguments (uses args.kernel_dir and
        args.autokernel_config).
    :param config: Optional pre-parsed autokernel configuration; loaded from
        args.autokernel_config when not given.
    """
    if not config:
        # Load configuration file
        config = autokernel.config.load_config(args.autokernel_config)
    # Use correct umask when installing
    saved_umask = os.umask(config.install.umask.value)
    # Mount everything that is configured but not yet mounted, remembering
    # what we mounted so it can be unmounted again at the end.
    new_mounts = []
    for i in config.install.mount:
        if not os.access(i, os.R_OK):
            log.die("Permission denied on accessing '{}'. Aborting.".format(i))
        if not os.path.ismount(i):
            log.info("Mounting {}".format(i))
            new_mounts.append(i)
            try:
                subprocess.run(['mount', '--', i], check=True)
            except subprocess.CalledProcessError as e:
                log.die("Could not mount '{}', mount returned code {}. Aborting.".format(i, e.returncode))
    # Check that everything we need (configured mounts plus externally
    # asserted mounts) is actually mounted now.
    for i in config.install.mount + config.install.assert_mounted:
        if not os.access(i, os.R_OK):
            log.die("Permission denied on accessing '{}'. Aborting.".format(i))
        if not os.path.ismount(i):
            log.die("'{}' is not mounted. Aborting.".format(i))
    # Execute pre hook
    execute_command(args, 'install.hooks.pre', config.install.hooks.pre, replace_common_vars)
    kernel_version = autokernel.kconfig.get_kernel_version(args.kernel_dir)
    target_dir = replace_common_vars(args, config.install.target_dir)
    # Config output is "{KERNEL_DIR}/.config.autokernel"
    config_output = os.path.join(args.kernel_dir, '.config.autokernel')
    # Initramfs basename "initramfs-{KERNEL_VERSION}.cpio"
    # The .cpio suffix is crucial, as the kernel makefile requires it to detect initramfs archives
    initramfs_basename = 'initramfs-{}.cpio'.format(kernel_version)
    # Initramfs output is "{KERNEL_DIR}/initramfs-{KERNEL_VERSION}.cpio"
    initramfs_output = os.path.join(args.kernel_dir, initramfs_basename)
    # bzImage output
    bzimage_output = os.path.join(args.kernel_dir, 'arch', autokernel.kconfig.get_uname_arch(), 'boot/bzImage')
    def _purge_old(path):
        """Remove old versioned artifacts matching *path*.

        Keeps the config.install.keep_old most recent entries (most recent
        first by semver, then by creation time). *path* must contain exactly
        one '{KERNEL_VERSION}' token; otherwise purging is skipped.
        """
        keep_old = config.install.keep_old.value
        # Disable purging on negative count
        if keep_old < 0:
            return
        # Disable purging for non versioned paths
        if not '{KERNEL_VERSION}' in path:
            return
        tokens = path.split('{KERNEL_VERSION}')
        if len(tokens) > 2:
            log.warn("Cannot purge path with more than one {{KERNEL_VERSION}} token: '{}'".format(path))
            return
        # Matches a leading semver-like prefix, e.g. "5.4.12" in "5.4.12-gentoo"
        re_semver = re.compile(r'^[\d\.]+\d')
        def _version_sorter(i):
            """Sort key (semver as int, creation time) for a candidate path.

            Candidates were pre-filtered by re_match_valid_paths, so the path
            component replacing {KERNEL_VERSION} is guaranteed to start with
            a parsable semver.
            """
            suffix = i[len(tokens[0]):]
            basename = suffix.split('/')[0]
            st = os.stat(i)
            try:
                # st_birthtime only exists on some platforms (e.g. BSD/macOS);
                # fall back to the modification time elsewhere.
                time_create = st.st_birthtime
            except AttributeError:
                time_create = st.st_mtime
            semver = re_semver.match(basename).group()
            val = autokernel.config.semver_to_int(semver)
            return val, time_create
        escaped_kv = re.escape('{KERNEL_VERSION}')
        # matches from {KERNEL_VERSION} until first / exclusive in an regex escaped path
        match_basename = re.compile(re.escape(escaped_kv) + r"(.+?(?=\\\/|$)).*$")
        # derive regex to check if a valid semver is contained and prefix and suffix are given
        re_match_valid_paths = re.compile('^' + match_basename.sub(lambda m: r'[0-9]+(\.[0-9]+(\.[0-9]+)?)?(-[^\/]*)?' + m.group(1) + r'.*$', re.escape(path)))
        # matches from {KERNEL_VERSION} until first / exclusive in a normal path
        re_replace_wildcard = re.compile(escaped_kv + r"[^\/]*")
        # replace {KERNEL_VERSION}-* component with *
        wildcard_path = re_replace_wildcard.sub('*', glob.escape(path))
        # sort out paths that don't contain valid semvers
        valid_globbed = [i for i in glob.glob(wildcard_path) if re_match_valid_paths.match(i)]
        # Remove everything except the newest keep_old + 1 entries.
        for i in sorted(valid_globbed, key=_version_sorter)[:-(keep_old + 1)]:
            # For security, we will not call rmtree on a path that doesn't end with a slash,
            # or if the realpath has less than two slash characters in it.
            # Otherwise we only call unlink
            if i[-1] == '/' and os.path.realpath(i).count('/') >= 2:
                try:
                    shutil.rmtree(i)
                except OSError as e:
                    log.warn("Could not remove {}: {}".format(i, str(e)))
            else:
                try:
                    os.unlink(i)
                except IOError as e:
                    log.warn("Could not remove {}: {}".format(i, str(e)))
    def _move_to_old(path):
        """Rename *path* to '*.old', or '*.old.N' with the next free N."""
        re_old_suffix = re.compile(r'^.*\.old(\.\d+)?\/*$')
        dst = path + '.old'
        highest_num = -1
        # Find the highest '.old[.N]' suffix already in use for this path.
        for i in glob.glob(glob.escape(dst) + '*'):
            m = re_old_suffix.match(i)
            old_num = int((m.group(1) or '.0')[1:]) if m else 0
            if highest_num < old_num:
                highest_num = old_num
        if highest_num >= 0:
            dst += ".{:d}".format(highest_num + 1)
        shutil.move(path, dst)
    def _install(name, src, target_var):
        """Install *src* to the destination described by *target_var*.

        Moves an existing destination aside first and purges old versioned
        copies afterwards. A falsy *target_var* disables this target.
        """
        # If the target is disabled, return.
        if not target_var:
            return
        # Figure out destination, and move existing files if necessary
        dst = os.path.join(target_dir, replace_common_vars(args, target_var))
        if os.path.exists(dst):
            _move_to_old(dst)
        # Create directory if it doesn't exist
        Path(os.path.dirname(dst)).mkdir(parents=True, exist_ok=True)
        log.info("Installing {:<11s} {}".format(name + ':', dst))
        # Install target file
        shutil.copyfile(src, dst)
        # Purge old files; use the raw (unsubstituted) target so _purge_old
        # still sees the {KERNEL_VERSION} token.
        _purge_old(os.path.join(target_dir, str(target_var)))
    # Move target_dir, if it is dynamic
    if '{KERNEL_VERSION}' in str(config.install.target_dir) and os.path.exists(target_dir):
        _move_to_old(os.path.realpath(target_dir))
    # Load symbols from Kconfig
    kconfig = autokernel.kconfig.load_kconfig(args.kernel_dir)
    sym_modules = kconfig.syms['MODULES']
    # Install modules, unless module support is disabled in the kernel config
    if config.install.modules_prefix and sym_modules.str_value != 'n':
        modules_prefix = str(config.install.modules_prefix)
        modules_prefix_with_lib = os.path.join(modules_prefix, "lib/modules")
        modules_dir = os.path.join(modules_prefix_with_lib, kernel_version)
        if os.path.exists(modules_dir):
            _move_to_old(os.path.realpath(modules_dir))
        log.info("Installing modules: {}".format(modules_prefix_with_lib))
        install_modules(args, prefix=modules_prefix)
        _purge_old(modules_prefix_with_lib + "/{KERNEL_VERSION}/")
    # Install targets
    _install('bzimage', bzimage_output, config.install.target_kernel)
    _install('config', config_output, config.install.target_config)
    if config.initramfs.enabled:
        _install('initramfs', initramfs_output, config.install.target_initramfs)
    # Purge old target_dirs (will only be done if it is dynamic)
    _purge_old(str(config.install.target_dir) + '/')
    # Execute post hook
    execute_command(args, 'install.hooks.post', config.install.hooks.post, replace_common_vars)
    # Undo what we have mounted
    for i in reversed(new_mounts):
        log.info("Unmounting {}".format(i))
        try:
            subprocess.run(['umount', '--', i], check=True)
        except subprocess.CalledProcessError as e:
            log.warn("Could not umount '{}' (returned {})".format(i, e.returncode))
    # Restore old umask
    os.umask(saved_umask)
def main_build_all(args):
    """
    Main function for the 'all' command.

    Runs the 'build' step followed by the 'install' step, sharing one
    parsed configuration between them.
    """
    log.info("Started full build")
    # Parse the configuration once and hand it to both steps.
    cfg = autokernel.config.load_config(args.autokernel_config)
    for step in (main_build, main_install):
        step(args, cfg)
class Module():
    """
    A module consisting of dependencies (other modules), option assignments,
    assertions, and reverse dependencies.
    """
    # Collection attributes every module starts out with.
    _LIST_ATTRS = ('deps', 'assignments', 'assertions', 'rev_deps')

    def __init__(self, name):
        self.name = name
        # Give each instance its own independent empty list per attribute.
        for attr in self._LIST_ATTRS:
            setattr(self, attr, [])
def check_config_against_detected_modules(kconfig, modules, differences_only):
log.info("Here are the detected options with both current and desired value.")
log.info("The output format is: [current] OPTION_NAME = desired")
log.info("HINT: Options are ordered by dependencies, i.e. applying")
log.info(" them from | |
# fmt: off
import h5py
import os
import shutil
import copy
import h5py_cache
import pickle as pkl
import numpy as np
import pandas as pd
import ipywidgets as ipyw
from nd2reader import ND2Reader
from tifffile import imsave, imread
from .utils import pandas_hdf5_handler,writedir
from parse import compile
class hdf5_fov_extractor:
    """Extracts fields of view from an .nd2 file into per-file hdf5 archives.

    Experiment- and per-FOV metadata are written to a pandas hdf5 store, and
    image data is split across hdf5 files holding at most `tpts_per_file`
    timepoints each.

    Attributes:
        nd2filename (str): path to the source .nd2 file
        headpath (str): base directory for the analysis output
        metapath (str): path of the metadata hdf5 store
        hdf5path (str): directory receiving the extracted hdf5 files
        tpts_per_file (int): number of timepoints per output hdf5 file
        ignore_fovmetadata (bool): if True, skip per-FOV positional metadata
        nd2reader_override (dict): nd2 metadata entries to override manually
    """
    def __init__(self,nd2filename,headpath,tpts_per_file=100,ignore_fovmetadata=False,nd2reader_override=None): #note this chunk size has a large role in downstream steps...make sure is less than 1 MB
        self.nd2filename = nd2filename
        self.headpath = headpath
        self.metapath = self.headpath + "/metadata.hdf5"
        self.hdf5path = self.headpath + "/hdf5"
        self.tpts_per_file = tpts_per_file
        self.ignore_fovmetadata = ignore_fovmetadata
        # BUGFIX: the default used to be a mutable dict shared across all
        # instances; create a fresh dict per instance instead.
        self.nd2reader_override = {} if nd2reader_override is None else nd2reader_override
        self.organism = ''
        self.microscope = ''
        self.notes = ''
    def writemetadata(self,t_range=None,fov_list=None):
        """Parse nd2 metadata, apply t/fov filtering, and write it to the store.

        Args:
            t_range (tuple, optional): inclusive (first, last) frame range to keep
            fov_list (iterable, optional): fields of view to keep
        """
        ndmeta_handle = nd_metadata_handler(self.nd2filename,ignore_fovmetadata=self.ignore_fovmetadata,nd2reader_override=self.nd2reader_override)
        if self.ignore_fovmetadata:
            exp_metadata = ndmeta_handle.get_metadata()
        else:
            exp_metadata,fov_metadata = ndmeta_handle.get_metadata()
        # NOTE(review): t_range/fov_list filtering assumes fov_metadata exists,
        # i.e. ignore_fovmetadata is False — confirm callers never combine them.
        if t_range is not None:
            exp_metadata["frames"] = exp_metadata["frames"][t_range[0]:t_range[1]+1]
            exp_metadata["num_frames"] = len(exp_metadata["frames"])
            fov_metadata = fov_metadata.loc[pd.IndexSlice[:,slice(t_range[0],t_range[1])],:] #4 -> 70
        if fov_list is not None:
            fov_metadata = fov_metadata.loc[list(fov_list)]
            exp_metadata["fields_of_view"] = list(fov_list)
        # One chunk is a single full image plane; cache two chunks
        # (uint16 -> 2 bytes per pixel).
        self.chunk_shape = (1,exp_metadata["height"],exp_metadata["width"])
        chunk_bytes = (2*np.multiply.accumulate(np.array(self.chunk_shape))[-1])
        self.chunk_cache_mem_size = 2*chunk_bytes
        exp_metadata["chunk_shape"],exp_metadata["chunk_cache_mem_size"] = (self.chunk_shape,self.chunk_cache_mem_size)
        exp_metadata["Organism"],exp_metadata["Microscope"],exp_metadata["Notes"] = (self.organism,self.microscope,self.notes)
        self.meta_handle = pandas_hdf5_handler(self.metapath)
        if self.ignore_fovmetadata:
            assignment_metadata = self.assignidx(exp_metadata,metadf=None)
            # BUGFIX: DataFrame.astype returns a new frame; the result was
            # previously discarded, leaving the cast a silent no-op.
            assignment_metadata = assignment_metadata.astype({"File Index":int,"Image Index":int})
        else:
            assignment_metadata = self.assignidx(exp_metadata,metadf=fov_metadata)
            assignment_metadata = assignment_metadata.astype({"t":float,"x": float,"y":float,"z":float,"File Index":int,"Image Index":int})
        self.meta_handle.write_df("global",assignment_metadata,metadata=exp_metadata)
    def assignidx(self,expmeta,metadf=None):
        """Assign an hdf5 file index and an in-file image index to every frame.

        Args:
            expmeta (dict): experiment metadata ('fields_of_view', 'frames')
            metadf (pandas.DataFrame, optional): per-FOV metadata indexed by
                (fov, timepoints); when given, indices are appended to a copy

        Returns:
            pandas.DataFrame: metadata with "File Index" and "Image Index" columns
        """
        if metadf is None:
            numfovs = len(expmeta["fields_of_view"])
            timepoints_per_fov = len(expmeta["frames"])
        else:
            numfovs = len(metadf.index.get_level_values(0).unique().tolist())
            timepoints_per_fov = len(metadf.index.get_level_values(1).unique().tolist())
        files_per_fov = (timepoints_per_fov//self.tpts_per_file) + 1
        remainder = timepoints_per_fov%self.tpts_per_file
        ttlfiles = numfovs*files_per_fov
        # Trim the last (partial) file's surplus entries.
        # NOTE(review): when timepoints_per_fov divides tpts_per_file exactly
        # (remainder == 0), a whole file's worth is trimmed; lengths still line
        # up, but per-FOV file indices become non-contiguous — confirm
        # downstream consumers iterate the actual indices rather than ranges.
        fov_file_idx = np.repeat(list(range(files_per_fov)), self.tpts_per_file)[:-(self.tpts_per_file-remainder)]
        file_idx = np.concatenate([fov_file_idx+(fov_idx*files_per_fov) for fov_idx in range(numfovs)])
        fov_img_idx = np.repeat(np.array(list(range(self.tpts_per_file)))[np.newaxis,:],files_per_fov,axis=0)
        fov_img_idx = fov_img_idx.flatten()[:-(self.tpts_per_file-remainder)]
        img_idx = np.concatenate([fov_img_idx for fov_idx in range(numfovs)])
        if metadf is None:
            fov_idx = np.repeat(list(range(numfovs)), timepoints_per_fov)
            timepoint_idx = np.repeat(np.array(list(range(timepoints_per_fov)))[np.newaxis,:],numfovs,axis=0).flatten()
            data = {"fov" : fov_idx,"timepoints" : timepoint_idx,"File Index" : file_idx, "Image Index" : img_idx}
            outdf = pd.DataFrame(data)
            outdf = outdf.set_index(["fov","timepoints"], drop=True, append=False, inplace=False)
        else:
            outdf = copy.deepcopy(metadf)
            outdf["File Index"] = file_idx
            outdf["Image Index"] = img_idx
        return outdf
    def read_metadata(self):
        """(Re)create the hdf5 output dir, write metadata, and load it sorted by file/image index."""
        writedir(self.hdf5path,overwrite=True)
        self.writemetadata()
        metadf = self.meta_handle.read_df("global",read_metadata=True)
        self.metadata = metadf.metadata
        metadf = metadf.reset_index(inplace=False)
        metadf = metadf.set_index(["File Index","Image Index"], drop=True, append=False, inplace=False)
        self.metadf = metadf.sort_index()
    def set_params(self,fov_list,t_range,organism,microscope,notes):
        """Record the extraction parameters chosen by the user."""
        self.fov_list = fov_list
        self.t_range = t_range
        self.organism = organism
        self.microscope = microscope
        self.notes = notes
    def inter_set_params(self):
        """Choose extraction parameters interactively using ipywidgets."""
        self.read_metadata()
        t0,tf = (self.metadata['frames'][0],self.metadata['frames'][-1])
        available_fov_list = self.metadf["fov"].unique().tolist()
        # {"manual": True} adds a "Run Interact" button instead of live updates.
        selection = ipyw.interactive(self.set_params, {"manual":True}, fov_list=ipyw.SelectMultiple(options=available_fov_list),\
                t_range=ipyw.IntRangeSlider(value=[t0, tf],\
                min=t0,max=tf,step=1,description='Time Range:',disabled=False), organism=ipyw.Textarea(value='',\
                placeholder='Organism imaged in this experiment.',description='Organism:',disabled=False),\
                microscope=ipyw.Textarea(value='',placeholder='Microscope used in this experiment.',\
                description='Microscope:',disabled=False),notes=ipyw.Textarea(value='',\
                placeholder='General experiment notes.',description='Notes:',disabled=False),)
        display(selection)
    def extract(self,dask_controller):
        """Extract all selected FOVs/timepoints to hdf5 files via dask workers.

        Submits one job per output hdf5 file, then drops FOVs whose jobs
        failed from the metadata store.

        Args:
            dask_controller: wrapper exposing a .daskclient distributed client
        """
        dask_controller.futures = {}
        self.writemetadata(t_range=self.t_range,fov_list=self.fov_list)
        metadf = self.meta_handle.read_df("global",read_metadata=True)
        self.metadata = metadf.metadata
        metadf = metadf.reset_index(inplace=False)
        metadf = metadf.set_index(["File Index","Image Index"], drop=True, append=False, inplace=False)
        self.metadf = metadf.sort_index()
        def writehdf5(fovnum,num_entries,timepoint_list,file_idx,num_fovs):
            """Worker: write one hdf5 file with all channels for the given timepoints."""
            with ND2Reader(self.nd2filename) as nd2file:
                for key,item in self.nd2reader_override.items():
                    nd2file.metadata[key] = item
                y_dim = self.metadata['height']
                x_dim = self.metadata['width']
                with h5py_cache.File(self.hdf5path + "/hdf5_" + str(file_idx) + ".hdf5","w",chunk_cache_mem_size=self.chunk_cache_mem_size) as h5pyfile:
                    for i,channel in enumerate(self.metadata["channels"]):
                        hdf5_dataset = h5pyfile.create_dataset(str(channel),\
                                        (num_entries,y_dim,x_dim), chunks=self.chunk_shape, dtype='uint16')
                        for j in range(len(timepoint_list)):
                            frame = timepoint_list[j]
                            nd2_image = nd2file.get_frame_2D(c=i, t=frame, v=fovnum)
                            hdf5_dataset[j,:,:] = nd2_image
            return "Done."
        file_list = self.metadf.index.get_level_values("File Index").unique().values
        num_jobs = len(file_list)
        # Randomize priorities so workers don't all contend for the same FOV.
        random_priorities = np.random.uniform(size=(num_jobs,))
        for k,file_idx in enumerate(file_list):
            priority = random_priorities[k]
            filedf = self.metadf.loc[file_idx]
            fovnum = filedf[0:1]["fov"].values[0]
            num_entries = len(filedf.index.get_level_values("Image Index").values)
            timepoint_list = filedf["timepoints"].tolist()
            future = dask_controller.daskclient.submit(writehdf5,fovnum,num_entries,timepoint_list,file_idx,self.metadata["num_fovs"],retries=1,priority=priority)
            dask_controller.futures["extract file: " + str(file_idx)] = future
        extracted_futures = [dask_controller.futures["extract file: " + str(file_idx)] for file_idx in file_list]
        pause_for_extract = dask_controller.daskclient.gather(extracted_futures,errors='skip')
        futures_name_list = ["extract file: " + str(file_idx) for file_idx in file_list]
        # BUGFIX: 'is not' compared string identity, which is unreliable (and a
        # SyntaxWarning on modern Python); compare by value instead.
        failed_files = [futures_name_list[k] for k,item in enumerate(extracted_futures) if item.status != "finished"]
        failed_file_idx = [int(item.split(":")[1]) for item in failed_files]
        outdf = self.meta_handle.read_df("global",read_metadata=False)
        tempmeta = outdf.reset_index(inplace=False)
        tempmeta = tempmeta.set_index(["File Index","Image Index"], drop=True, append=False, inplace=False)
        failed_fovs = tempmeta.loc[failed_file_idx]["fov"].unique().tolist()
        # Drop any FOV with a failed file so the store only describes complete data.
        outdf = outdf.drop(failed_fovs)
        if self.t_range is not None:
            # Re-base timepoints to start at zero after range selection.
            outdf = outdf.reset_index(inplace=False)
            outdf["timepoints"] = outdf["timepoints"] - self.t_range[0]
            outdf = outdf.set_index(["fov","timepoints"], drop=True, append=False, inplace=False)
        self.meta_handle.write_df("global",outdf,metadata=self.metadata)
class nd_metadata_handler:
    """Extracts experiment-level and per-FOV metadata from an .nd2 file.

    Relies on nd2reader's raw parser structures (byte-keyed dicts), so it is
    tightly coupled to the ND2 container layout produced by NIS Elements.
    """
    def __init__(self,nd2filename,ignore_fovmetadata=False,nd2reader_override={}):
        # Path of the .nd2 file to read.
        self.nd2filename = nd2filename
        # If True, get_metadata() skips building the per-FOV dataframe.
        self.ignore_fovmetadata = ignore_fovmetadata
        # Entries applied on top of the parsed nd2 metadata (useful for
        # broken files). NOTE(review): mutable default argument — safe only
        # as long as callers never mutate the passed/shared dict.
        self.nd2reader_override = nd2reader_override
    def decode_unidict(self,unidict):
        """Return a copy of *unidict* with bytes keys/values decoded as utf8 str."""
        outdict = {}
        for key, val in unidict.items():
            if type(key) == bytes:
                key = key.decode('utf8')
            if type(val) == bytes:
                val = val.decode('utf8')
            outdict[key] = val
        return outdict
    def read_specsettings(self,SpecSettings):
        """Parse a raw 'sSpecSettings' bytes blob into a dict.

        Lines look like "Key Name: Some Value"; spaces are replaced with
        underscores in both keys and values. Lines without ": " are skipped.
        """
        spec_list = SpecSettings.decode('utf-8').split('\r\n')[1:]
        spec_list = [item for item in spec_list if ":" in item]
        spec_dict = {item.split(": ")[0].replace(" ", "_"):item.split(": ")[1].replace(" ", "_") for item in spec_list}
        return spec_dict
    def get_imaging_settings(self,nd2file):
        """Collect per-channel camera/objective/spec settings from the raw metadata.

        Returns:
            dict: channel name -> settings dict
        """
        # Private nd2reader parser internals; keys are raw bytes.
        raw_metadata = nd2file.parser._raw_metadata
        imaging_settings = {}
        for key,meta in raw_metadata.image_metadata_sequence[b'SLxPictureMetadata'][b'sPicturePlanes'][b'sSampleSetting'].items():
            camera_settings = meta[b'pCameraSetting']
            camera_name = camera_settings[b'CameraUserName'].decode('utf-8')
            channel_name = camera_settings[b'Metadata'][b'Channels'][b'Channel_0'][b'Name'].decode('utf-8')
            obj_settings = self.decode_unidict(meta[b'pObjectiveSetting'])
            spec_settings = self.read_specsettings(meta[b'sSpecSettings'])
            imaging_settings[channel_name] = {'camera_name':camera_name,'obj_settings':obj_settings,**spec_settings}
        return imaging_settings
    def make_fov_df(self,nd2file, exp_metadata): #only records values for single timepoints, does not separate between channels....
        """Build a (fov, timepoints)-indexed dataframe of acquisition time and x/y/z stage position.

        Falls back to the experiment's stored point list when per-image
        position arrays are missing from the raw metadata.
        """
        img_metadata = nd2file.parser._raw_metadata
        num_fovs = exp_metadata['num_fovs']
        num_frames = exp_metadata['num_frames']
        num_images_expected = num_fovs*num_frames
        if img_metadata.x_data is not None:
            # Per-image position arrays: reshape to (fov, timepoint).
            x = np.reshape(img_metadata.x_data,(-1,num_fovs)).T
            y = np.reshape(img_metadata.y_data,(-1,num_fovs)).T
            z = np.reshape(img_metadata.z_data,(-1,num_fovs)).T
        else:
            # Fall back to the static point list; positions are then constant
            # across timepoints for each FOV.
            positions = img_metadata.image_metadata[b'SLxExperiment'][b'ppNextLevelEx'][b''][b'uLoopPars'][b'Points'][b'']
            x = []
            y = []
            z = []
            for position in positions:
                x.append([position[b'dPosX']]*num_frames)
                y.append([position[b'dPosY']]*num_frames)
                z.append([position[b'dPosZ']]*num_frames)
            x = np.array(x)
            y = np.array(y)
            z = np.array(z)
        time_points = x.shape[1]
        acq_times = np.reshape(np.array(list(img_metadata.acquisition_times)[:num_images_expected]),(-1,num_fovs)).T
        pos_label = np.repeat(np.expand_dims(np.add.accumulate(np.ones(num_fovs,dtype=int))-1,1),time_points,1) ##???
        time_point_labels = np.repeat(np.expand_dims(np.add.accumulate(np.ones(time_points,dtype=int))-1,1),num_fovs,1).T
        output = pd.DataFrame({'fov':pos_label.flatten(),'timepoints':time_point_labels.flatten(),'t':acq_times.flatten(),'x':x.flatten(),'y':y.flatten(),'z':z.flatten()})
        output = output.astype({'fov': int, 'timepoints':int, 't': float, 'x': float,'y': float,'z': float})
        output = output[~((output['x'] == 0.)&(output['y'] == 0.)&(output['z'] == 0.))].reset_index(drop=True) ##bootstrapped to fix issue when only some FOVs are selected (return if it causes problems in the future)
        output = output.set_index(["fov","timepoints"], drop=True, append=False, inplace=False)
        return output
    def get_metadata(self):
        """Read the nd2 file and return its metadata.

        Returns:
            dict: experiment metadata, or (dict, pandas.DataFrame) with
            per-FOV metadata when ignore_fovmetadata is False.
        """
        # Manual numbers are for broken .nd2 files (from when Elements crashes)
        nd2file = ND2Reader(self.nd2filename)
        for key,item in self.nd2reader_override.items():
            nd2file.metadata[key] = item
        exp_metadata = copy.copy(nd2file.metadata)
        # Keep only the keys downstream code needs (and that serialize cleanly).
        wanted_keys = ['height', 'width', 'date', 'fields_of_view', 'frames', 'z_levels', 'z_coordinates', 'total_images_per_channel', 'channels', 'pixel_microns', 'num_frames', 'experiment']
        exp_metadata = dict([(k, exp_metadata[k]) for k in wanted_keys if k in exp_metadata])
        exp_metadata["num_fovs"] = len(exp_metadata['fields_of_view'])
        exp_metadata["settings"] = self.get_imaging_settings(nd2file)
        if not self.ignore_fovmetadata:
            fov_metadata = self.make_fov_df(nd2file, exp_metadata)
            nd2file.close()
            return exp_metadata,fov_metadata
        else:
            nd2file.close()
            return exp_metadata
class tiff_to_hdf5_extractor:
"""Utility to convert individual tiff files to hdf5 archives.
Attributes:
headpath (str): base directory for data analysis
tiffpath (str): directory where tiff files are located
metapath (str): metadata path
hdf5path (str): where to store hdf5 data
tpts_per_file (int): number of timepoints to put in each hdf5 file
format_string (str): format of filenames from which to extract metadata (using parse library)
"""
def __init__(self, headpath, tiffpath, format_string, tpts_per_file=100, manual_metadata_params={}):
self.tiffpath = tiffpath
self.headpath = headpath
self.metapath = self.headpath + "/metadata.hdf5"
self.hdf5path = self.headpath + "/hdf5"
self.tpts_per_file = tpts_per_file
self.format_string = format_string
self.manual_metadata_params = manual_metadata_params
def get_notes(self,organism,microscope,notes):
"""Get note metadata.
Inputs:
organism (str): organism
microscope (str): microscope
notes (str): notes
"""
self.organism = organism
self.microscope = microscope
self.notes = notes
    def inter_get_notes(self):
        """Get notes interactively using ipywidgets.

        Builds three Textarea widgets wired to get_notes; {"manual": True}
        adds a "Run Interact" button instead of updating on every keystroke.
        """
        selection = ipyw.interactive(self.get_notes, {"manual":True}, organism=ipyw.Textarea(value='',\
        placeholder='Organism imaged in this experiment.',description='Organism:',disabled=False),\
        microscope=ipyw.Textarea(value='',placeholder='Microscope used in this experiment.',\
        description='Microscope:',disabled=False),notes=ipyw.Textarea(value='',\
        placeholder='General experiment notes.',description='Notes:',disabled=False),)
        # display() is provided by the IPython/Jupyter environment.
        display(selection)
    def assignidx(self,metadf):
        """Get indices for each image in each file (for metadata)

        Each (fov, timepoint) contributes one row per channel; rows are mapped
        to an output hdf5 file ("File Index") and a position within that file
        ("Image Index").

        Args:
            metadf (pandas.DataFrame): metadata without file indices
        Returns:
            outdf (pandas.DataFrame): metadata with file indices
        """
        outdf = copy.deepcopy(metadf)
        # get number of each dimension of the data
        numchannels = len(pd.unique(metadf["channel"]))
        numfovs = len(metadf.index.get_level_values("fov").unique())
        timepoints_per_fov = len(metadf.index.get_level_values("timepoints").unique())
        # Calculate number of files required for the number of timepoints
        files_per_fov = (timepoints_per_fov//self.tpts_per_file) + 1
        remainder = timepoints_per_fov%self.tpts_per_file
        # Assign file indices to each individual image in a field of view
        # NOTE(review): when timepoints_per_fov is an exact multiple of
        # tpts_per_file (remainder == 0), the trailing slice trims a whole
        # file's worth of entries; lengths still line up, but per-FOV file
        # indices become non-contiguous — confirm downstream code iterates the
        # actual index values rather than assuming a contiguous range.
        fov_file_idx = np.repeat(list(range(files_per_fov)), self.tpts_per_file*numchannels)[:-(self.tpts_per_file-remainder)*numchannels]
        file_idx = np.concatenate([fov_file_idx+(fov_idx*files_per_fov) for fov_idx in range(numfovs)])
        # Assign image indices within a file
        fov_img_idx = np.repeat(np.repeat(np.array(list(range(self.tpts_per_file))), numchannels)[np.newaxis,:],files_per_fov,axis=0)
        fov_img_idx = fov_img_idx.flatten()[:-(self.tpts_per_file-remainder)*numchannels]
        img_idx = np.concatenate([fov_img_idx for fov_idx in range(numfovs)])
        outdf["File Index"] = file_idx
        outdf["Image Index"] = img_idx
        return outdf
def writemetadata(self, parser, tiff_files, manual_metadata_params={}):
"""Write metadata.
Args:
parser (parser): compiled parser to find metadata
tiff_files (list, str): list of full paths to each tiff file
Returns:
channel_paths_by_file_index (list, tuple): Group files that represent multiple channels
for a single field of view
"""
fov_metadata = {}
exp_metadata = {}
assignment_metadata = {}
first_successful_file= True
for f in tiff_files:
match = parser.search(f)
# ignore any files that don't match the regex
if match is not None:
if first_successful_file:
# Build metadata
first_img = imread(f)
# get dimensions by loading file
exp_metadata["height"] = first_img.shape[0]
exp_metadata["width"] = first_img.shape[1]
exp_metadata["Organism"] = self.organism
exp_metadata["Microscope"] = self.microscope
exp_metadata["Notes"] = self.notes
self.chunk_shape = (1,exp_metadata["height"],exp_metadata["width"])
chunk_bytes = (2*np.multiply.accumulate(np.array(self.chunk_shape))[-1])
self.chunk_cache_mem_size = 2*chunk_bytes
exp_metadata["chunk_shape"],exp_metadata["chunk_cache_mem_size"] = (self.chunk_shape,self.chunk_cache_mem_size)
# get metadata from the file name
fov_metadata = dict([(key, [value]) for key, value in match.named.items()])
fov_metadata["Image Path"] = [f]
first_successful_file = False
else:
# Add to dictionary
fov_frame_dict = match.named
for key, value in | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 23 16:18:37 2018
New version of the JointStory script, rewritten for the Harvard - Dartmouth
connection. The earlier versions had a NAT traversal-related bug.
For argument options, type
$ python JointStory.py --help
----------------------------------------
LOGIC OF PROGRAM:
(1) System clocks of the control computers need to be synched before
experiment, either by using the same NTP servers or by synching to
GPS time signal. THIS PROGRAM DOES NOT DO THAT FOR YOU !
(2) Creates audio link between distant sites. First, it uses crude hole
    punching for NAT traversal, then transmits audio with dual UDP streams.
UDP packets are uncompressed audio data appended with timestamps
(3) Synchronizes task start on both control computers using timestamp
exchanges and a common start time derived from those timestamps
(4) Uses Psychopy for visual stimuli (task instructions + turn-taking
control) and keyboard events during the task.
---------------------------------------
IMPLEMENTATION NOTES:
- For audio, the script relies on pyAudio, so - in principle - it runs on
multiple OS (tested only on Ubuntu 16.04 and OS X 10.11 though)
- Current version handles TTLs in two different ways. First, it simply
records and timestamps keyboard inputs that equal a preset value -
this method supports scanner sites like Princeton and Harvard. Second,
it reads in triggers coming through a serial port, as at Dartmouth.
Edit the magic numbers section to adapt the settings.
The command line option --LOGTTL controls only the second option, as
that is the more complex, using a separate subprocess, while the other
only consists of catching key presses
- Audio transmission is via UDP streams. Prepare for a few lost
packages. To account for fluctuations in travel time / losses, the
program uses a simple two-ended continuous audio buffer, with only
occasional health reports. Nothing adaptive about it. The audio chunk
and buffer size can be controlled with command line options --CHUNK and
--BUFFER
- Synching start across computers is done using a third UDP socket-pair.
- Capable of simple NAT traversal (UDP hole punching), works with one,
but not with two firewalls, so a static IP is required at least on one
end if NATs are present
- Includes a wrapper around stunclient for exploring current NAT properties,
controlled with input option --STUN
- Uses PsychoPy for controlling visuals, as a result it works on python
2.7 atm. Damn PsychoPy is still only talking about migrating.
- Hardcoded variables are all in the magicNumbers function. If you need
to set some parameter, that's where you look.
- Has a built-in re-tell part at the end of the trial. That is,
after the channel is closed, participants are prompted to retell the
story/stories.
- A bunch of command line options are for selecting the type of trial.
--TRIALTYPE sets joint/individual condition for the story (trial)
--WHICHSEED controls the story prompt subjects receive
--ROLE sets who starts the story (trial)
---------------------------------------
TO IMPROVE:
- Correct file handling, based on input arguments of pair and condition number.
Right now it simply saves everything into the working directory and file names
do not contain any identifier (e.g. date, pair number, etc)
- Saving a separate settings file with all variables used for that run?
- Re-tell is very hacky. We keep sending the packets even after audio output is
shut off at both ends. It works fine, but is painfully stupid. Instead, we
should probably close the audio input stream at the end of the story and open
it again for re-tell with a simpler callback function.
- Use psychopy's core.Clock with getLastResetTime to capture its start,
then use event.getKeys with timeStamped argument set to the core.Clock,
this ensures very precise timing info both on visual events and key presses /
ttls
@author: adamb
"""
#%% Imports
# imports providing backward compatibility to 2.7
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from builtins import range
from builtins import map
from builtins import bytes
from io import open
# imports we would have anyways
import argparse
import socket
import pyaudio
import sys
import time
import struct
import subprocess
import re
import wave
from operator import sub
import csv
import multiprocessing
import serial
#%% Python 2.7 / 3 compatibility parts
# get python 3 flush arg behavior on 2.7 for print function
if sys.version_info[:2] < (3, 3):
    # print() only gained the 'flush' keyword in Python 3.3; wrap the builtin
    # so that 'flush=True' also works on older interpreters.
    old_print = print
    def print(*args, **kwargs):
        """Drop-in print() that emulates the Python 3.3+ 'flush' keyword."""
        flush = kwargs.pop('flush', False)
        old_print(*args, **kwargs)
        # 'flush' was popped above, so kwargs may still hold a caller-supplied
        # 'file'; default to stdout just like the builtin does.
        file = kwargs.get('file', sys.stdout)
        if flush and file is not None:
            file.flush()
# get python 3 user input behavior
if hasattr(__builtins__, 'raw_input'):
    input = raw_input
#%% MAGIC NUMBERS aka hardcoded variables, default values
def magicNumbers(ROLE, TRIALTYPE, WHICHSEED):
# UDP hole punch timeout (time for handshake), in seconds
punchTimeout = 30
# default audio settings: mono, 16kHz sampling, 16bit encoding
CHANNELS = 1
RATE = 16000
FORMAT = pyaudio.paInt16
# default filenames (for saving audio)
savefileOut = 'RecordedAudio'
savefileIn1 = 'ReceivedAudio'
savefileIn2 = 'PlayedAudio'
savefileLog = 'TimingsLog.txt'
savefileTTL = 'TTLtimestamps.txt'
# default port numbers
# local
portIn = 30002
portOut = 30001
# remote
PortIn = 30002
PortOut = 30001
PortComm = 30003
# serial settings dictionary
serialSettings = {
'mount': '/dev/ttyUSB0', # on ubuntu this works well
'baud': 115200,
'timeout': 0 # non-blocking; returns upon receiving 8 bits
}
# Settings for the visual and storytelling/retell part, incl text
# time for each countdown (turn)
turntime = 30
# start counter for countdown
timeDiff = 0
# number of turns
turnN = 30
# start counter for turns
turnCurrent = 1
# letter size
letterH = 0.06
    # time for showing instructions at the beginning
instTime = 30
# time variable for start synch in seconds
startLag = 10
# time for re-tell in seconds
retellLength = 300
# instructions to be displayed in the beginning, depending on trial type
if TRIALTYPE == 0:
instText1 = ('Your task is to build a story together with '
'the other participant.\n\n'
'You will take turns in doing so. Each turn lasts ' +
str(turntime) + ' seconds and there will be ' +
str(turnN) + ' turns, so you will have ' +
'{0:.0f}'.format(turntime*turnN/60) + ' minutes for the' +
' whole story.\n'
'There will be a timer on the display showing the time '
'left from the current turn. In the lower part you will '
'also see the current turn number. \n'
'You may take the story into any direction you want, but '
'build upon the contributions of the other participant. '
'Ideally, the story should sound as one story-line, '
'your contributions should be hard to tell apart from '
'those coming from your partner. \n')
elif TRIALTYPE == 1:
instText1 = ('Your task is to build a story alone, '
'independent from the other participant.\n\n'
'You will take turns in creating your own stories. '
'Each turn lasts ' + str(turntime) + ' seconds and ' +
'there will be ' + str(turnN) + ' turns, so you will ' +
'have ' + '{0:.0f}'.format(turntime*turnN/60) +
' minutes for the whole story.\n'
'There will be a timer on the display showing the time '
'left from the current turn. In the lower part you will '
'also see the current turn number. \n'
'Remember, you build your own story now, '
'you do not have to take anything from the other '
'story into account. '
'Ideally, at the end we will have two separate stories '
'built around the same topic. \n'
'Nevertheless, pay attention to the other story as well, '
'make sure to follow it.')
# story topic intructions, depending on which seed is set
if WHICHSEED == 0:
instText2 = ('For this trial, create a story about a group of '
'students having first contact with aliens.')
elif WHICHSEED == 1:
instText2 = ('For this trial, create a story about a child, '
'who somehow becomes the President of the USA.')
elif WHICHSEED == 2:
instText2 = ('For this trial, create a story about a family, '
'where the child gets into trouble.')
elif WHICHSEED == 3:
instText2 = ('For this trial, create a story about someone '
'finding a genie lamp')
elif WHICHSEED == 4:
instText2 = ('For this trial, create a story about a grad student '
'having a very unusual day')
# text | |
an internal description instead
of the normal description, if available. For example, the inside
of a vehicle should have a different description than the outside.
"""
if from_inside and self.internal_description:
return self.internal_description
return self.description
def get_appearance_name(self, invoker, force_admin_view=False):
"""
Returns the 'pretty' form of the name for the object's appearance.
:param invoker: The object asking for the appearance. If None is
provided, provide the non-admin view.
:type invoker: BaseObject or None
:param bool force_admin_view: If this is True, force the adin view,
even if the invoker is not an admin (or no invoker is given).
:rtype: str
:returns: The object's 'pretty' name.
"""
if (invoker and invoker.is_admin()) or force_admin_view:
# Used to show a single-character type identifier next to object id.
if self.base_type == 'room':
type_str = 'R'
elif self.base_type == 'thing':
type_str = 'T'
elif self.base_type == 'exit':
type_str = 'E'
elif self.base_type == 'player':
type_str = 'P'
else:
# Wtf dis?
type_str = 'U'
extra_info = '(#%s%s)' % (
self.id,
type_str,
)
else:
extra_info = ''
return "%s%s%s%s" % (ANSI_HILITE, self.name, ANSI_NORMAL, extra_info)
#noinspection PyUnusedLocal
def get_appearance_contents_and_exits(self, invoker, from_inside=False):
"""
Returns the contents and exits display for the object.
:param BaseObject invoker: The object asking for the appearance.
:param bool from_inside: Show the contents/exits as if the invoker
was inside this object.
:rtype: str
:returns: The contents/exits display.
"""
exits_str = ''
things_str = ''
contents = self.get_contents()
for obj in contents:
if obj.id == invoker.id:
# This is the invoker, don't show yourself.
continue
if obj.base_type == 'exit':
# Exits show the exit's primary alias.
obj_alias = obj.aliases[0] if obj.aliases else '_'
exits_str += '<%s> %s\n' % (
obj_alias,
obj.get_appearance_name(invoker),
)
else:
# Everything else just shows the name.
things_str += '%s\n' % obj.get_appearance_name(invoker)
retval = ''
if things_str:
retval += '\nContents:\n'
retval += things_str
if exits_str:
retval += '\nExits:\n'
retval += exits_str
return retval
def get_appearance(self, invoker):
"""
Shows the full appearance for an object. Includes description, contents,
exits, and everything else.
:param BaseObject invoker: The object asking for the appearance.
:rtype: str
:returns: The object's appearance, from the outside or inside.
"""
#noinspection PyUnresolvedReferences
is_inside = invoker.location.id == self.id
desc = self.get_description(invoker, from_inside=is_inside)
name = self.get_appearance_name(invoker)
contents = self.get_appearance_contents_and_exits(
invoker,
from_inside=is_inside
)
return "%s\n%s\n%s" % (name, desc, contents)
def _find_name_or_alias_match(self, objects, query):
"""
Performs name and alias matches on a list of objects. Returns the
best match, or ``None`` if nothing was found.
:param iterable objects: A list of ``BaseObject`` sub-class instances
to attempt to match to.
:param str query: The string to match against.
:rtype: BaseObject
:returns: The best match object for the given query.
"""
if not objects:
return None
for obj in objects:
# Start by checking all objects for an alias match.
aliases = [alias.lower() for alias in obj.aliases]
if query.lower() in aliases:
# If a match is found, return immediately on said match.
return obj
processor = lambda x: fuzz_utils.full_process(x)
for choice in objects:
processed = processor(choice.name)
if query in processed:
return choice
return None
def _find_object_id_match(self, desc):
"""
Given an object ID string (ie: '#9'), determine whether this object
can find said object.
:param str desc: A string with which to perform a search
:rtype: :class:'BaseObject' or ``None``
:returns: An object that best matches the string provided. If no
suitable match was found, returns ``None``.
"""
mud_service = self.mud_service
try:
# Object IDs are int primary keys in the object store.
obj_id = int(desc[1:])
except (ValueError, TypeError):
# This isn't an object ID.
return None
# Absolute object identifier: lookup the id
try:
obj = mud_service.object_store.get_object(obj_id)
except NoSuchObject:
return None
if not self.is_admin():
# Non-admins can only find objects in their current location.
if self.location and obj.location:
# Both invoker and the target have a location. See if they
# are in the same place.
#noinspection PyUnresolvedReferences
location_match = self.location.id == obj.location.id or \
self.location.id == obj.id
if location_match:
# Locations match. Good to go.
return obj
elif obj.base_type == 'room':
#noinspection PyUnresolvedReferences
if self.location and self.location.id == obj.id:
# Non-admin is looking at their current location, which
# is a room.
return obj
else:
# Non-specified or differing locations. Either way, there
# is no usable match.
return None
else:
# Invoker is an admin, and can find object id matches globally.
return obj
def contextual_object_search(self, desc):
"""
Searches for objects using the current object as a frame of
reference
:param str desc: A string with which to perform a search
:rtype: :class:'BaseObject' or ``None``
:returns: An object that best matches the string provided. If no
suitable match was found, returns ``None``.
"""
desc = desc.strip()
if not desc:
# Probably an empty string, which we can't do much with.
return None
if desc[0] == '#':
oid_match = self._find_object_id_match(desc)
if oid_match:
return oid_match
if desc.lower() == 'me':
# Object is referring to itself
return self
if desc.lower() == 'here':
# Object is referring to it's location
return self.location
# Not a keyword, begin fuzzy search
# First search the objects in the room
if self.location:
#noinspection PyUnresolvedReferences
neighboring_match = self._find_name_or_alias_match(
self.location.get_contents(),
desc
)
if neighboring_match:
return neighboring_match
# Next search the objects inside the invoker
inventory_match = self._find_name_or_alias_match(
self.get_contents(),
desc
)
if inventory_match:
return inventory_match
# Unable to find anything
return None
def can_object_enter(self, obj):
"""
Determine whether another object can enter this object.
:param BaseObject obj: The object to check enter permissions for.
:rtype: tuple
:returns: A tuple in the format of ``(can_enter, message)``, where
``can_enter`` is a bool, and ``message`` is a string or ``None``,
used to provide a reason for the object not being able to enter.
"""
if obj.is_admin():
# Admin can enter anything.
return True, None
return False, "You can't enter that."
def determine_enter_destination(self, obj):
"""
Given an object that is going to enter this one, determine where said
object will be moved to. This defaults to this object's inventory,
but in the case of something like a ship, they should enter to the
bridge.
:param BaseObject obj: The other object that is entering this one.
:rtype: BaseObject
:returns: The target location for the object to be moved to upon
entering this object.
"""
return self
def can_object_leave(self, obj):
"""
Determine whether another object can leave this object.
:param BaseObject obj: The object to check enter permissions for.
:rtype: tuple
:returns: A tuple in the format of ``(can_leave, message)``, where
``can_leave`` is a bool, and ``message`` is a string or ``None``,
used to provide a reason for the object not being able to leave.
"""
if not obj.location:
return False, "You can't find a way out."
# All is well
return True, None
def determine_leave_destination(self, obj):
"""
Given an object that is going to leave this one, determine where said
object will be moved to. This defaults to this object's location,
but in the case of leaving a ship's bridge, they should end up outside
the ship, rather than inside the ship object.
:param BaseObject obj: The other object that is entering this one.
:rtype: BaseObject
:returns: The target location for the object to be moved to upon
leaving this object.
"""
return self.location
#
## Begin events
#
def after_session_connect_event(self):
"""
This is called when the proxy authenticates and logs in a Session that
controls this object. This event is only triggered when the first
Session controlling this object is logged in. For example, logging in
a second time with another client would not trigger this again.
This is currently only meaningful for PlayerObject instances. We don't
want players to see connects/disconnects when admins are controlling
NPCs.
"""
pass
def after_session_disconnect_event(self):
"""
This is called when the last Sesssion that controls this object is
disconnected. If you have two clients open that are authenticated and
controlling the same object, this will not | |
all higher pixels within rng
back_my += my # running sum of my between pixel p and all higher pixels within rng
if index < max_index:
rng_ders2_[index] = (_p, _dx, fdy, _mx, fmy)
elif y > min_coord:
ders2_.append((_p, _dx, fdy, _mx, fmy)) # completed bilateral tuple is transferred from rng_ders2_ to ders2_
index += 1
rng_ders2_.appendleft((p, dx, back_dy, mx, back_my)) # new ders2 displaces completed one in vertical ders2_ via maxlen
new_rng_ders2__.append(rng_ders2_) # 2D array of vertically-incomplete 2D tuples, converted to rng_ders2__, for next line
x += 1
return ders2_, new_rng_ders2__
# ---------- vertical_comp() end ------------------------------------------------------------------------------------
def temporal_comp(ders2_, rng_ders3___, _xP_, _yP_, _tP_, frame, _frame):
    """
    Temporal comparison: compare each pixel of the input line against the
    t_rng temporally consecutive pixels buffered from prior frames, forming
    ders3 (3D tuple of derivatives per pixel), then accumulate completed
    ders3 into 1D patterns along x, y and t via form_P().

    ders2_: input line of complete 2D ders (p, dx, dy, mx, my).
    rng_ders3___: prior frame of incomplete 3D tuple buffers, sliced into lines.
    _xP_, _yP_, _tP_: higher-line pattern buffers; index 0 holds mPs, 1 holds dPs.
    frame, _frame: current- and prior-frame accumulators.

    Returns the updated (rng_ders3___, xP_, yP_, tP_, frame, _frame).

    NOTE(review): relies on module globals not visible here: rng, t_rng, t,
    t_min_coord, X, dim, ave, pattern, form_P, form_blob, term_segment_.
    """
    # each of the following contains 2 types, per core variables m and d:
    xP = [pattern('mxP', (rng, -1)), pattern('dxP', (rng, -1))] # initialize with min_x = rng, max_x = -1
    yP = [pattern('myP', (rng, -1)), pattern('dyP', (rng, -1))]
    tP = [pattern('mtP', (rng, -1)), pattern('dtP', (rng, -1))]
    xP_ = [deque(), deque()]
    yP_ = [deque(), deque()] # line y - rng
    tP_ = [deque(), deque()]
    xbuff_ = [deque(), deque()]
    ybuff_ = [deque(), deque()] # line y - rng - 1: _Ps buffered by previous run of scan_P_
    tbuff_ = [deque(), deque()]
    rng_ders3__ = rng_ders3___.pop(0)
    new_rng_ders3__ = deque() # 2D array: line of rng_ders3_s buffered for next-frame comp
    max_index = t_rng - 1 # max rng_ders3_ index
    x = rng # lateral coordinate of pixel
    for (p, dx, dy, mx, my), rng_ders3_ in zip(ders2_, rng_ders3__): # pixel comp to rng _pixels in rng_ders3_, summing dt and mt
        index = 0
        back_dt, back_mt = 0, 0
        for (_p, _dx, _dy, fdt, _mx, _my, fmt) in rng_ders3_: # temporal derivatives are incomplete; prefix '_' denotes previous-frame variable
            dt = p - _p
            mt = ave - abs(dt)
            fdt += dt # running sum of differences between pixel _p and all previous and subsequent pixels within t_rng
            fmt += mt # running sum of matches between pixel _p and all previous and subsequent pixels within t_rng
            back_dt += dt # running sum of dt between pixel p and all previous pixels within t_rng
            back_mt += mt # running sum of mt between pixel p and all previous pixels within t_rng
            if index < max_index:
                rng_ders3_[index] = (_p, _dx, _dy, fdt, _mx, _my, fmt)
            elif t > t_min_coord:
                # ders3 is temporally complete: feed it to 1D pattern
                # formation for each of the three m-types (typ 0..2).
                ders = _p, _dx, _dy, fdt, _mx, _my, fmt
                xP, xP_, xbuff_, _xP_, frame, _frame = form_P(ders, x, X - rng - 1, xP, xP_, xbuff_, _xP_, frame, _frame, 0) # mxP: typ = 0
                yP, yP_, ybuff_, _yP_, frame, _frame = form_P(ders, x, X - rng - 1, yP, yP_, ybuff_, _yP_, frame, _frame, 1) # myP: typ = 1
                tP, tP_, tbuff_, _tP_, frame, _frame = form_P(ders, x, X - rng - 1, tP, tP_, tbuff_, _tP_, frame, _frame, 2) # mtP: typ = 2
            index += 1
        rng_ders3_.appendleft((p, dx, dy, back_dt, mx, my, back_mt)) # new ders3 displaces completed one in temporal rng_ders3_ via maxlen
        new_rng_ders3__.append(rng_ders3_) # rng_ders3__: line of incomplete ders3 buffers, to be added to next-frame rng_ders3___
        x += 1
    # terminate last higher line dP (typ = 3 -> 5) within neg mPs
    for typ in range(dim, dim * 2):
        if typ == 3: buff_ = xbuff_[1]; hP_ = _xP_[1]
        if typ == 4: buff_ = ybuff_[1]; hP_ = _yP_[1]
        if typ == 5: buff_ = tbuff_[1]; hP_ = _tP_[1]
        while buff_:
            hP = buff_.popleft()
            if hP.roots != 1: # no roots
                frame, _frame = form_blob(hP, frame, _frame, typ)
        hP_, frame, _frame = term_segment_(hP_, frame, _frame, typ)
    rng_ders3___.append(new_rng_ders3__) # rng_ders3___ for next frame
    return rng_ders3___, xP_, yP_, tP_, frame, _frame
    # ---------- temporal_comp() end ------------------------------------------------------------------------------------
def form_P(ders, x, term_x, P, P_, buff_, hP_, frame, _frame, typ, is_dP=0):
    """
    Initializes and accumulates a 1D pattern from one ders tuple.

    Selects the core variable and its five alternates for the given typ,
    terminates the current pattern on sign change (or at term_x), then
    accumulates the input ders into P[is_dP].
    is_dP = bool(typ // dim), passed directly for speed and clarity.
    """
    # ders layout: (p, dx, dy, dt, mx, my, mt) -> indices 0..6.
    # Per typ, the (core, alt0..alt4) indices into ders; row 5 also serves
    # as the catch-all for any typ beyond 5, like the original else-branch.
    _SELECT = (
        (4, 1, 5, 6, 2, 3),  # typ 0: core mx
        (5, 2, 4, 6, 1, 3),  # typ 1: core my
        (6, 3, 4, 5, 1, 2),  # typ 2: core mt
        (1, 4, 2, 3, 5, 6),  # typ 3: core dx
        (2, 5, 1, 3, 4, 6),  # typ 4: core dy
        (3, 6, 1, 2, 4, 5),  # typ 5: core dt
    )
    p, dx, dy, dt, mx, my, mt = ders
    core_i, a0, a1, a2, a3, a4 = _SELECT[min(typ, 5)]
    s = 1 if ders[core_i] > 0 else 0
    if not (s == P[is_dP].sign or x == P[is_dP].min_x):
        # Sign changed: terminate the current pattern. P[0] is mP, P[1] is dP.
        P, P_, buff_, hP_, frame, _frame = term_P(s, x, P, P_, buff_, hP_, frame, _frame, typ, is_dP)
    # Continued or initialized input and derivatives are accumulated:
    P[is_dP].accum_params([1, p, dx, dy, dt, mx, my, mt,
                           abs(ders[a0]), abs(ders[a1]), abs(ders[a2]),
                           abs(ders[a3]), abs(ders[a4])])
    # params = [L, I, Dx, Dy, Dt, Mx, My, Mt, Alt0, Alt1, Alt2, Alt3, Alt4]
    P[is_dP].e_.append(ders)
    if P[is_dP].sign == -1: P[is_dP].sign = s
    if x == term_x:
        # End of line also terminates the pattern.
        P, P_, buff_, hP_, frame, _frame = term_P(s, x + 1, P, P_, buff_, hP_, frame, _frame, typ, is_dP)
    return P, P_, buff_, hP_, frame, _frame # accumulated within line, P_ is a buffer for conversion to _P_
    # ---------- form_P() end -------------------------------------------------------------------------------------------
def term_P(s, x, P, P_, buff_, hP_, frame, _frame, typ, is_dP):
    """
    Terminates a 1D pattern on sign change or at the end of P_.

    A terminated negative mP (sign == 0, is_dP == 0) first has its dPs
    formed by re-running form_P over its ders with typ + dim. The
    terminated P is then either appended to P_ (first processed line) or
    scanned against the higher line via scan_P_, and a fresh P is
    initialized at x.

    NOTE(review): relies on module globals y, rng, dim, typ_str, pattern,
    form_P, scan_P_ -- not visible here.
    """
    if not is_dP and P[is_dP].sign == 0:
        x0, L, ders_ = P[0].min_x, P[0].L, P[0].e_
        P[1] = pattern(typ_str[typ + dim] + 'P', (x0, -1)) # dPs (P[1]) formed inside of negative mP (P[0])
        for i in range(L):
            # NOTE(review): form_P's returned hP_ is bound to `_P_` here and
            # then discarded (hP_ itself is passed in unchanged) -- looks
            # unintentional; confirm whether it should rebind hP_.
            P, P_, buff_, _P_, frame, _frame = form_P(ders_[i], x0 + i, x - 1, P, P_, buff_, hP_, frame, _frame, typ + dim, True) # is_dP = 1
        P[is_dP].max_x = x - 1
        P[is_dP].terminated = True
    if y == rng * 2: # 1st line P_ is converted to init hP_; scan_P_(), form_segment(), form_blob() use one type of Ps, hPs, buffs
        P_[is_dP].append((P[is_dP], [])) # P, _fork_, no root yet
    else:
        P_[is_dP], buff_[is_dP], hP_[is_dP], frame, _frame \
            = scan_P_(x - 1, P[is_dP], P_[is_dP], buff_[is_dP], hP_[is_dP], frame, _frame, typ) # P scans hP_
    P[is_dP] = pattern(typ_str[typ] + 'P', (x, -1), sign=s) # new P initialization at x0 = x
    return P, P_, buff_, hP_, frame, _frame
    # ---------- term_P() end -------------------------------------------------------------------------------------------
def scan_P_(x, P, P_, _buff_, hP_, frame, _frame, typ):
# P scans shared-x-coordinate hPs in higher P_, combines overlapping Ps into blobs
buff_ = deque() # new buffer for displaced hPs (higher-line P tuples), for scan_P_(next P)
fork_ = [] # refs to hPs connected to input P
_x0 = 0 # to start while loop
x0 = P.min_x
while _x0 <= x: # while x values overlap between P and _P
if _buff_:
hP = _buff_.popleft() # hP was extended to segment and buffered in prior scan_P_
elif hP_:
hP, frame, _frame = form_segment(hP_.popleft(), frame, _frame, typ)
else:
break # higher line ends, all hPs are converted to segments
roots = hP.roots
_x0 = hP.e_[-1].min_x # hP.e_[-1] is _P
_x = hP.e_[-1].max_x
if P.sign == hP.sign and not _x < x0 and not x < _x0: # P comb -> blob if s == _s, _last_x >= first_x and | |
import os
import numpy as np
from discretize.utils import mkvc
from discretize.utils.code_utils import deprecate_method
import warnings
try:
from discretize.mixins.vtk_mod import InterfaceTensorread_vtk
except ImportError:
InterfaceTensorread_vtk = object
class TensorMeshIO(InterfaceTensorread_vtk):
"""Class for managing the input/output of tensor meshes and models.
The ``TensorMeshIO`` class contains a set of class methods specifically
for the :class:`~discretize.TensorMesh` class. These include:
- Read/write tensor meshes to file
- Read/write models defined on tensor meshes
"""
@classmethod
def _readUBC_3DMesh(cls, file_name):
"""Read 3D tensor mesh from UBC-GIF formatted file.
Parameters
----------
file_name : str or file name
full path to the UBC-GIF formatted mesh file
Returns
-------
discretize.TensorMesh
The tensor mesh
"""
# Interal function to read cell size lines for the UBC mesh files.
def readCellLine(line):
line_list = []
for seg in line.split():
if "*" in seg:
sp = seg.split("*")
seg_arr = np.ones((int(sp[0]),)) * float(sp[1])
else:
seg_arr = np.array([float(seg)], float)
line_list.append(seg_arr)
return np.concatenate(line_list)
# Read the file as line strings, remove lines with comment = !
msh = np.genfromtxt(file_name, delimiter="\n", dtype=np.str, comments="!")
# Fist line is the size of the model
sizeM = np.array(msh[0].split(), dtype=float)
# Second line is the South-West-Top corner coordinates.
origin = np.array(msh[1].split(), dtype=float)
# Read the cell sizes
h1 = readCellLine(msh[2])
h2 = readCellLine(msh[3])
h3temp = readCellLine(msh[4])
# Invert the indexing of the vector to start from the bottom.
h3 = h3temp[::-1]
# Adjust the reference point to the bottom south west corner
origin[2] = origin[2] - np.sum(h3)
# Make the mesh
tensMsh = cls([h1, h2, h3], origin=origin)
return tensMsh
@classmethod
def _readUBC_2DMesh(cls, file_name):
"""Read 2D tensor mesh from UBC-GIF formatted file.
Parameters
----------
file_name : str or file name
full path to the UBC-GIF formatted mesh file
Returns
-------
discretize.TensorMesh
The tensor mesh
"""
fopen = open(file_name, "r")
# Read down the file and unpack dx vector
def unpackdx(fid, nrows):
for ii in range(nrows):
line = fid.readline()
var = np.array(line.split(), dtype=float)
if ii == 0:
x0 = var[0]
xvec = np.ones(int(var[2])) * (var[1] - var[0]) / int(var[2])
xend = var[1]
else:
xvec = np.hstack(
(xvec, np.ones(int(var[1])) * (var[0] - xend) / int(var[1]))
)
xend = var[0]
return x0, xvec
# Start with dx block
# First line specifies the number of rows for x-cells
line = fopen.readline()
# Strip comments lines
while line.startswith("!"):
line = fopen.readline()
nl = np.array(line.split(), dtype=int)
[x0, dx] = unpackdx(fopen, nl[0])
# Move down the file until reaching the z-block
line = fopen.readline()
if not line:
line = fopen.readline()
# End with dz block
# First line specifies the number of rows for z-cells
line = fopen.readline()
nl = np.array(line.split(), dtype=int)
[z0, dz] = unpackdx(fopen, nl[0])
# Flip z0 to be the bottom of the mesh for SimPEG
z0 = -(z0 + sum(dz))
dz = dz[::-1]
# Make the mesh
tensMsh = cls([dx, dz], origin=(x0, z0))
fopen.close()
return tensMsh
@classmethod
def read_UBC(cls, file_name, directory=""):
"""Read 2D or 3D tensor mesh from UBC-GIF formatted file.
Parameters
----------
file_name : str or file name
full path to the UBC-GIF formatted mesh file or just its name if directory is specified
directory : str, optional
directory where the UBC-GIF file lives
Returns
-------
discretize.TensorMesh
The tensor mesh
"""
# Check the expected mesh dimensions
fname = os.path.join(directory, file_name)
# Read the file as line strings, remove lines with comment = !
msh = np.genfromtxt(
fname, delimiter="\n", dtype=np.str, comments="!", max_rows=1
)
# Fist line is the size of the model
sizeM = np.array(msh.ravel()[0].split(), dtype=float)
# Check if the mesh is a UBC 2D mesh
if sizeM.shape[0] == 1:
Tnsmsh = cls._readUBC_2DMesh(fname)
# Check if the mesh is a UBC 3D mesh
elif sizeM.shape[0] == 3:
Tnsmsh = cls._readUBC_3DMesh(fname)
else:
raise Exception("File format not recognized")
return Tnsmsh
def _readModelUBC_2D(mesh, file_name):
"""Read UBC-GIF formatted model file for 2D tensor mesh.
Parameters
----------
file_name : str or file name
full path to the UBC-GIF formatted model file
Returns
-------
(n_cells) numpy.ndarray
The model defined on the 2D tensor mesh
"""
# Open fileand skip header... assume that we know the mesh already
obsfile = np.genfromtxt(file_name, delimiter=" \n", dtype=np.str, comments="!")
dim = tuple(np.array(obsfile[0].split(), dtype=int))
if mesh.shape_cells != dim:
raise Exception("Dimension of the model and mesh mismatch")
model = []
for line in obsfile[1:]:
model.extend([float(val) for val in line.split()])
model = np.asarray(model)
if not len(model) == mesh.nC:
raise Exception(
"""Something is not right, expected size is {:d}
but unwrap vector is size {:d}""".format(
mesh.nC, len(model)
)
)
return model.reshape(mesh.vnC, order="F")[:, ::-1].reshape(-1, order="F")
def _readModelUBC_3D(mesh, file_name):
"""Read UBC-GIF formatted model file for 3D tensor mesh.
Parameters
----------
file_name : str or file name
full path to the UBC-GIF formatted model file
Returns
-------
(n_cells) numpy.ndarray
The model defined on the 3D tensor mesh
"""
f = open(file_name, "r")
model = np.array(list(map(float, f.readlines())))
f.close()
nCx, nCy, nCz = mesh.shape_cells
model = np.reshape(model, (nCz, nCx, nCy), order="F")
model = model[::-1, :, :]
model = np.transpose(model, (1, 2, 0))
model = mkvc(model)
return model
def read_model_UBC(mesh, file_name, directory=""):
"""Read UBC-GIF formatted model file for 2D or 3D tensor mesh.
Parameters
----------
file_name : str or file name
full path to the UBC-GIF formatted model file or just its name if directory is specified
directory : str, optional
directory where the UBC-GIF file lives
Returns
-------
(n_cells) numpy.ndarray
The model defined on the mesh
"""
fname = os.path.join(directory, file_name)
if mesh.dim == 3:
model = mesh._readModelUBC_3D(fname)
elif mesh.dim == 2:
model = mesh._readModelUBC_2D(fname)
else:
raise Exception("mesh must be a Tensor Mesh 2D or 3D")
return model
def write_model_UBC(mesh, file_name, model, directory=""):
"""Write 2D or 3D tensor model to UBC-GIF formatted file.
Parameters
----------
file_name : str or file name
full path for the output mesh file or just its name if directory is specified
model : (n_cells) numpy.ndarray
directory : str, optional
output directory
"""
fname = os.path.join(directory, file_name)
if mesh.dim == 3:
# Reshape model to a matrix
modelMat = mesh.reshape(model, "CC", "CC", "M")
# Transpose the axes
modelMatT = modelMat.transpose((2, 0, 1))
# Flip z to positive down
modelMatTR = mkvc(modelMatT[::-1, :, :])
np.savetxt(fname, modelMatTR.ravel())
elif mesh.dim == 2:
modelMat = mesh.reshape(model, "CC", "CC", "M").T[::-1]
f = open(fname, "w")
f.write("{:d} {:d}\n".format(*mesh.shape_cells))
f.close()
f = open(fname, "ab")
np.savetxt(f, modelMat)
f.close()
else:
raise Exception("mesh must be a Tensor Mesh 2D or 3D")
def _writeUBC_3DMesh(mesh, file_name, comment_lines=""):
"""Write 3D tensor mesh to UBC-GIF formatted file.
Parameters
----------
file_name : str or file name
full path for the output mesh file
comment_lines : str, optional
comment lines preceded are preceeded with '!'
"""
if not mesh.dim == 3:
raise Exception("Mesh must be 3D")
s = comment_lines
s += "{0:d} {1:d} {2:d}\n".format(*tuple(mesh.vnC))
# Have to it in the same operation or use mesh.origin.copy(),
# otherwise the mesh.origin is updated.
origin = mesh.origin + np.array([0, 0, mesh.h[2].sum()])
nCx, nCy, nCz = mesh.shape_cells
s += "{0:.6f} {1:.6f} {2:.6f}\n".format(*tuple(origin))
s += ("%.6f " * nCx + "\n") % tuple(mesh.h[0])
s += ("%.6f " * nCy + "\n") % tuple(mesh.h[1])
s += ("%.6f " * nCz + "\n") % tuple(mesh.h[2][::-1])
f = open(file_name, "w")
f.write(s)
f.close()
def _writeUBC_2DMesh(mesh, file_name, comment_lines=""):
"""Write 2D tensor mesh to UBC-GIF formatted file.
Parameters
----------
file_name : str or file name
full path for the output mesh file
comment_lines : str, optional
comment lines preceded are preceeded with '!'
"""
if not mesh.dim == 2:
raise Exception("Mesh must be 2D")
def writeF(fx, outStr=""):
# Init
i = 0
origin = True
x0 = fx[i]
f = fx[i]
number_segment = 0
auxStr = ""
while True:
i = i + 1
if i >= fx.size:
break
dx = -f + fx[i]
f = fx[i]
n = 1
for j in range(i + 1, fx.size):
if -f + fx[j] == dx:
n += 1
i += 1
f = fx[j]
else:
break
number_segment += 1
if origin:
auxStr += "{:.10f} {:.10f} {:d} \n".format(x0, f, n)
origin = False
else:
auxStr += "{:.10f} {:d} \n".format(f, | |
None:
"""Deletes the entry for the given user in the account validity table, removing
their expiration date and renewal token.
Args:
user_id: ID of the user to remove from the account validity table.
"""
await self.db_pool.simple_delete_one(
table="account_validity",
keyvalues={"user_id": user_id},
desc="delete_account_validity_for_user",
)
async def is_server_admin(self, user: UserID) -> bool:
"""Determines if a user is an admin of this homeserver.
Args:
user: user ID of the user to test
Returns:
true iff the user is a server admin, false otherwise.
"""
res = await self.db_pool.simple_select_one_onecol(
table="users",
keyvalues={"name": user.to_string()},
retcol="admin",
allow_none=True,
desc="is_server_admin",
)
return bool(res) if res else False
async def set_server_admin(self, user: UserID, admin: bool) -> None:
"""Sets whether a user is an admin of this homeserver.
Args:
user: user ID of the user to test
admin: true iff the user is to be a server admin, false otherwise.
"""
def set_server_admin_txn(txn):
self.db_pool.simple_update_one_txn(
txn, "users", {"name": user.to_string()}, {"admin": 1 if admin else 0}
)
self._invalidate_cache_and_stream(
txn, self.get_user_by_id, (user.to_string(),)
)
await self.db_pool.runInteraction("set_server_admin", set_server_admin_txn)
async def set_shadow_banned(self, user: UserID, shadow_banned: bool) -> None:
"""Sets whether a user shadow-banned.
Args:
user: user ID of the user to test
shadow_banned: true iff the user is to be shadow-banned, false otherwise.
"""
def set_shadow_banned_txn(txn):
user_id = user.to_string()
self.db_pool.simple_update_one_txn(
txn,
table="users",
keyvalues={"name": user_id},
updatevalues={"shadow_banned": shadow_banned},
)
# In order for this to apply immediately, clear the cache for this user.
tokens = self.db_pool.simple_select_onecol_txn(
txn,
table="access_tokens",
keyvalues={"user_id": user_id},
retcol="token",
)
for token in tokens:
self._invalidate_cache_and_stream(
txn, self.get_user_by_access_token, (token,)
)
self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
await self.db_pool.runInteraction("set_shadow_banned", set_shadow_banned_txn)
    def _query_for_auth(self, txn, token: str) -> Optional[TokenLookupResult]:
        """Look up an access token and return its metadata, or None.

        The COALESCE in the join resolves puppeted tokens to the puppeted
        user (puppets_user_id) while token_owner keeps the token's real
        owner.
        """
        sql = """
            SELECT users.name as user_id,
                users.is_guest,
                users.shadow_banned,
                access_tokens.id as token_id,
                access_tokens.device_id,
                access_tokens.valid_until_ms,
                access_tokens.user_id as token_owner,
                access_tokens.used as token_used
            FROM users
            INNER JOIN access_tokens on users.name = COALESCE(puppets_user_id, access_tokens.user_id)
            WHERE token = ?
        """
        txn.execute(sql, (token,))
        rows = self.db_pool.cursor_to_dict(txn)
        if rows:
            row = rows[0]
            # This field is nullable, ensure it comes out as a boolean
            if row["token_used"] is None:
                row["token_used"] = False
            return TokenLookupResult(**row)
        return None
    @cached()
    async def is_real_user(self, user_id: str) -> bool:
        """Determines if the user is a real user, ie does not have a 'user_type'.

        Args:
            user_id: user id to test
        Returns:
            True if user 'user_type' is null or empty string
        """
        # Cached wrapper; the actual lookup lives in is_real_user_txn.
        return await self.db_pool.runInteraction(
            "is_real_user", self.is_real_user_txn, user_id
        )
    @cached()
    async def is_support_user(self, user_id: str) -> bool:
        """Determines if the user is of type UserTypes.SUPPORT

        Args:
            user_id: user id to test
        Returns:
            True if user is of type UserTypes.SUPPORT
        """
        # Cached wrapper; the actual lookup lives in is_support_user_txn.
        return await self.db_pool.runInteraction(
            "is_support_user", self.is_support_user_txn, user_id
        )
def is_real_user_txn(self, txn, user_id):
res = self.db_pool.simple_select_one_onecol_txn(
txn=txn,
table="users",
keyvalues={"name": user_id},
retcol="user_type",
allow_none=True,
)
return res is None
def is_support_user_txn(self, txn, user_id):
res = self.db_pool.simple_select_one_onecol_txn(
txn=txn,
table="users",
keyvalues={"name": user_id},
retcol="user_type",
allow_none=True,
)
return True if res == UserTypes.SUPPORT else False
async def get_users_by_id_case_insensitive(self, user_id: str) -> Dict[str, str]:
"""Gets users that match user_id case insensitively.
Returns:
A mapping of user_id -> password_hash.
"""
def f(txn):
sql = "SELECT name, password_hash FROM users WHERE lower(name) = lower(?)"
txn.execute(sql, (user_id,))
return dict(txn)
return await self.db_pool.runInteraction("get_users_by_id_case_insensitive", f)
async def record_user_external_id(
self, auth_provider: str, external_id: str, user_id: str
) -> None:
"""Record a mapping from an external user id to a mxid
Args:
auth_provider: identifier for the remote auth provider
external_id: id on that system
user_id: complete mxid that it is mapped to
"""
await self.db_pool.simple_insert(
table="user_external_ids",
values={
"auth_provider": auth_provider,
"external_id": external_id,
"user_id": user_id,
},
desc="record_user_external_id",
)
async def get_user_by_external_id(
self, auth_provider: str, external_id: str
) -> Optional[str]:
"""Look up a user by their external auth id
Args:
auth_provider: identifier for the remote auth provider
external_id: id on that system
Returns:
the mxid of the user, or None if they are not known
"""
return await self.db_pool.simple_select_one_onecol(
table="user_external_ids",
keyvalues={"auth_provider": auth_provider, "external_id": external_id},
retcol="user_id",
allow_none=True,
desc="get_user_by_external_id",
)
async def get_external_ids_by_user(self, mxid: str) -> List[Tuple[str, str]]:
"""Look up external ids for the given user
Args:
mxid: the MXID to be looked up
Returns:
Tuples of (auth_provider, external_id)
"""
res = await self.db_pool.simple_select_list(
table="user_external_ids",
keyvalues={"user_id": mxid},
retcols=("auth_provider", "external_id"),
desc="get_external_ids_by_user",
)
return [(r["auth_provider"], r["external_id"]) for r in res]
async def count_all_users(self):
"""Counts all users registered on the homeserver."""
def _count_users(txn):
txn.execute("SELECT COUNT(*) AS users FROM users")
rows = self.db_pool.cursor_to_dict(txn)
if rows:
return rows[0]["users"]
return 0
return await self.db_pool.runInteraction("count_users", _count_users)
async def count_daily_user_type(self) -> Dict[str, int]:
"""
Counts 1) native non guest users
2) native guests users
3) bridged users
who registered on the homeserver in the past 24 hours
"""
def _count_daily_user_type(txn):
yesterday = int(self._clock.time()) - (60 * 60 * 24)
sql = """
SELECT user_type, COALESCE(count(*), 0) AS count FROM (
SELECT
CASE
WHEN is_guest=0 AND appservice_id IS NULL THEN 'native'
WHEN is_guest=1 AND appservice_id IS NULL THEN 'guest'
WHEN is_guest=0 AND appservice_id IS NOT NULL THEN 'bridged'
END AS user_type
FROM users
WHERE creation_ts > ?
) AS t GROUP BY user_type
"""
results = {"native": 0, "guest": 0, "bridged": 0}
txn.execute(sql, (yesterday,))
for row in txn:
results[row[0]] = row[1]
return results
return await self.db_pool.runInteraction(
"count_daily_user_type", _count_daily_user_type
)
async def count_nonbridged_users(self):
def _count_users(txn):
txn.execute(
"""
SELECT COALESCE(COUNT(*), 0) FROM users
WHERE appservice_id IS NULL
"""
)
(count,) = txn.fetchone()
return count
return await self.db_pool.runInteraction("count_users", _count_users)
async def count_real_users(self):
    """Counts all users without a special user_type registered on the homeserver.

    Returns:
        Number of rows in `users` whose user_type column is NULL.
    """

    def _count_users(txn):
        # COUNT(*) always produces exactly one row, so read it directly
        # instead of round-tripping through cursor_to_dict (the old
        # `return 0` fallback was unreachable).
        txn.execute("SELECT COUNT(*) FROM users where user_type is null")
        (count,) = txn.fetchone()
        return count

    return await self.db_pool.runInteraction("count_real_users", _count_users)
async def generate_user_id(self) -> str:
    """Generate a suitable localpart for a guest user

    Returns: a (hopefully) free localpart
    """
    return str(
        await self.db_pool.runInteraction(
            "generate_user_id", self._user_id_seq.get_next_id_txn
        )
    )
async def get_user_id_by_threepid(self, medium: str, address: str) -> Optional[str]:
    """Returns user id from threepid

    Args:
        medium: threepid medium e.g. email
        address: threepid address e.g. <EMAIL>

    Returns:
        The user ID or None if no user id/threepid mapping exists
    """
    return await self.db_pool.runInteraction(
        "get_user_id_by_threepid", self.get_user_id_by_threepid_txn, medium, address
    )
def get_user_id_by_threepid_txn(self, txn, medium, address):
    """Returns user id from threepid

    Args:
        txn (cursor):
        medium (str): threepid medium e.g. email
        address (str): threepid address e.g. <EMAIL>

    Returns:
        str|None: user id or None if no user id/threepid mapping exists
    """
    # allow_none=True: a missing mapping yields None rather than raising.
    row = self.db_pool.simple_select_one_txn(
        txn,
        "user_threepids",
        {"medium": medium, "address": address},
        ["user_id"],
        True,
    )
    return row["user_id"] if row else None
async def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
    """Insert or update a (medium, address) -> user binding in user_threepids."""
    keyvalues = {"medium": medium, "address": address}
    values = {
        "user_id": user_id,
        "validated_at": validated_at,
        "added_at": added_at,
    }
    await self.db_pool.simple_upsert("user_threepids", keyvalues, values)
async def user_get_threepids(self, user_id):
    """Return all threepid rows bound to the given user."""
    columns = ["medium", "address", "validated_at", "added_at"]
    return await self.db_pool.simple_select_list(
        "user_threepids",
        {"user_id": user_id},
        columns,
        "user_get_threepids",
    )
async def user_delete_threepid(self, user_id, medium, address) -> None:
    """Delete a single threepid binding for the given user."""
    keyvalues = {"user_id": user_id, "medium": medium, "address": address}
    await self.db_pool.simple_delete(
        "user_threepids",
        keyvalues=keyvalues,
        desc="user_delete_threepid",
    )
async def user_delete_threepids(self, user_id: str) -> None:
    """Delete all threepid this user has bound

    Args:
        user_id: The user id to delete all threepids of
    """
    keyvalues = {"user_id": user_id}
    await self.db_pool.simple_delete(
        "user_threepids",
        keyvalues=keyvalues,
        desc="user_delete_threepids",
    )
async def add_user_bound_threepid(
    self, user_id: str, medium: str, address: str, id_server: str
):
    """The server proxied a bind request to the given identity server on
    behalf of the given user. We need to remember this in case the user
    asks us to unbind the threepid.

    Args:
        user_id
        medium
        address
        id_server
    """
    # Upsert rather than insert, in case the user had already bound this
    # threepid to the same identity server.
    binding = {
        "user_id": user_id,
        "medium": medium,
        "address": address,
        "id_server": id_server,
    }
    await self.db_pool.simple_upsert(
        table="user_threepid_id_server",
        keyvalues=binding,
        values={},
        insertion_values={},
        desc="add_user_bound_threepid",
    )
async def user_get_bound_threepids(self, user_id: str) -> List[Dict[str, Any]]:
    """Get the threepids that a user has bound to an identity server through the homeserver

    The homeserver remembers where binds to an identity server occurred. Using this
    method can retrieve those threepids.

    Args:
        user_id: The ID of the user to retrieve threepids for

    Returns:
        List of dictionaries containing the following keys:
            medium (str): The medium of the threepid (e.g "email")
            address (str): The address of the threepid (e.g "<EMAIL>")
    """
    columns = ["medium", "address"]
    return await self.db_pool.simple_select_list(
        table="user_threepid_id_server",
        keyvalues={"user_id": user_id},
        retcols=columns,
        desc="user_get_bound_threepids",
    )
async def remove_user_bound_threepid(
self, user_id: str, medium: str, address: str, id_server: str
| |
# MINLP written by GAMS Convert at 04/21/18 13:52:22
#
# Equation counts
# Total E G L N X C B
# 37 37 0 0 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 109 1 108 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 217 109 108 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
# 108 binary decision variables b1..b108. The GAMS converter originally
# emitted one explicit Var statement per variable; generate them in a loop
# instead (identical attributes: Binary domain, bounds (0, 1), initial 0).
for _i in range(1, 109):
    setattr(m, 'b%d' % _i, Var(within=Binary, bounds=(0, 1), initialize=0))
# Quadratic (QAP-style) objective: a sum of pairwise interaction costs
# between the binary assignment variables, minimized. Coefficients are
# machine-generated by GAMS Convert — do not edit by hand.
m.obj = Objective(expr=150789*m.b1*m.b4 + 96635*m.b1*m.b7 + 82016*m.b1*m.b10 + 71188*m.b1*m.b19 + 29652*m.b1*m.b28 +
                  27563*m.b1*m.b82 + 150789*m.b2*m.b5 + 96635*m.b2*m.b8 + 82016*m.b2*m.b11 + 71188*m.b2*m.b20 +
                  29652*m.b2*m.b29 + 27563*m.b2*m.b83 + 150789*m.b3*m.b6 + 96635*m.b3*m.b9 + 82016*m.b3*m.b12 +
                  71188*m.b3*m.b21 + 29652*m.b3*m.b30 + 27563*m.b3*m.b84 - 138432*m.b4*m.b7 + 135804*m.b4*m.b13 -
                  33425*m.b4*m.b22 + 47873*m.b4*m.b31 + 37799*m.b4*m.b85 - 138432*m.b5*m.b8 + 135804*m.b5*m.b14 -
                  33425*m.b5*m.b23 + 47873*m.b5*m.b32 + 37799*m.b5*m.b86 - 138432*m.b6*m.b9 + 135804*m.b6*m.b15 -
                  33425*m.b6*m.b24 + 47873*m.b6*m.b33 + 37799*m.b6*m.b87 + 89572*m.b7*m.b16 - 123622*m.b7*m.b25 +
                  36597*m.b7*m.b34 - 88889*m.b7*m.b88 + 89572*m.b8*m.b17 - 123622*m.b8*m.b26 + 36597*m.b8*m.b35 -
                  88889*m.b8*m.b89 + 89572*m.b9*m.b18 - 123622*m.b9*m.b27 + 36597*m.b9*m.b36 - 88889*m.b9*m.b90 -
                  28003*m.b10*m.b13 + 64386*m.b10*m.b16 + 15848*m.b10*m.b19 - 68711*m.b10*m.b37 + 20433*m.b10*m.b91
                  - 28003*m.b11*m.b14 + 64386*m.b11*m.b17 + 15848*m.b11*m.b20 - 68711*m.b11*m.b38 + 20433*m.b11*
                  m.b92 - 28003*m.b12*m.b15 + 64386*m.b12*m.b18 + 15848*m.b12*m.b21 - 68711*m.b12*m.b39 + 20433*
                  m.b12*m.b93 + 37906*m.b13*m.b16 - 100230*m.b13*m.b22 - 12359*m.b13*m.b40 + 81013*m.b13*m.b94 +
                  37906*m.b14*m.b17 - 100230*m.b14*m.b23 - 12359*m.b14*m.b41 + 81013*m.b14*m.b95 + 37906*m.b15*
                  m.b18 - 100230*m.b15*m.b24 - 12359*m.b15*m.b42 + 81013*m.b15*m.b96 - 137368*m.b16*m.b25 + 23213*
                  m.b16*m.b43 + 23379*m.b16*m.b97 - 137368*m.b17*m.b26 + 23213*m.b17*m.b44 + 23379*m.b17*m.b98 -
                  137368*m.b18*m.b27 + 23213*m.b18*m.b45 + 23379*m.b18*m.b99 - 98974*m.b19*m.b22 + 231831*m.b19*
                  m.b25 - 216126*m.b19*m.b46 - 217144*m.b19*m.b100 - 98974*m.b20*m.b23 + 231831*m.b20*m.b26 -
                  216126*m.b20*m.b47 - 217144*m.b20*m.b101 - 98974*m.b21*m.b24 + 231831*m.b21*m.b27 - 216126*m.b21*
                  m.b48 - 217144*m.b21*m.b102 + 35848*m.b22*m.b25 - 56735*m.b22*m.b49 - 129635*m.b22*m.b103 + 35848
                  *m.b23*m.b26 - 56735*m.b23*m.b50 - 129635*m.b23*m.b104 + 35848*m.b24*m.b27 - 56735*m.b24*m.b51 -
                  129635*m.b24*m.b105 + 110264*m.b25*m.b52 + 64614*m.b25*m.b106 + 110264*m.b26*m.b53 + 64614*m.b26*
                  m.b107 + 110264*m.b27*m.b54 + 64614*m.b27*m.b108 - 57506*m.b28*m.b31 - 109539*m.b28*m.b34 -
                  153027*m.b28*m.b37 + 74221*m.b28*m.b46 - 128728*m.b28*m.b55 - 57506*m.b29*m.b32 - 109539*m.b29*
                  m.b35 - 153027*m.b29*m.b38 + 74221*m.b29*m.b47 - 128728*m.b29*m.b56 - 57506*m.b30*m.b33 - 109539*
                  m.b30*m.b36 - 153027*m.b30*m.b39 + 74221*m.b30*m.b48 - 128728*m.b30*m.b57 - 61441*m.b31*m.b34 -
                  38352*m.b31*m.b40 + 65016*m.b31*m.b49 - 87621*m.b31*m.b58 - 61441*m.b32*m.b35 - 38352*m.b32*m.b41
                  + 65016*m.b32*m.b50 - 87621*m.b32*m.b59 - 61441*m.b33*m.b36 - 38352*m.b33*m.b42 + 65016*m.b33*
                  m.b51 - 87621*m.b33*m.b60 + 89808*m.b34*m.b43 + 202917*m.b34*m.b52 - 130041*m.b34*m.b61 + 89808*
                  m.b35*m.b44 + 202917*m.b35*m.b53 - 130041*m.b35*m.b62 + 89808*m.b36*m.b45 + 202917*m.b36*m.b54 -
                  130041*m.b36*m.b63 + 33035*m.b37*m.b40 + 71965*m.b37*m.b43 - 55696*m.b37*m.b46 - 183316*m.b37*
                  m.b64 + 33035*m.b38*m.b41 + 71965*m.b38*m.b44 - 55696*m.b38*m.b47 - 183316*m.b38*m.b65 + 33035*
                  m.b39*m.b42 + 71965*m.b39*m.b45 - 55696*m.b39*m.b48 - 183316*m.b39*m.b66 + 77370*m.b40*m.b43 +
                  105654*m.b40*m.b49 + 32479*m.b40*m.b67 + 77370*m.b41*m.b44 + 105654*m.b41*m.b50 + 32479*m.b41*
                  m.b68 + 77370*m.b42*m.b45 + 105654*m.b42*m.b51 + 32479*m.b42*m.b69 - 54817*m.b43*m.b52 + 23875*
                  m.b43*m.b70 - 54817*m.b44*m.b53 + 23875*m.b44*m.b71 - 54817*m.b45*m.b54 + 23875*m.b45*m.b72 +
                  156987*m.b46*m.b49 - 97706*m.b46*m.b52 + 66291*m.b46*m.b73 + 156987*m.b47*m.b50 - 97706*m.b47*
                  m.b53 + 66291*m.b47*m.b74 + 156987*m.b48*m.b51 - 97706*m.b48*m.b54 + 66291*m.b48*m.b75 - 170907*
                  m.b49*m.b52 - 4284*m.b49*m.b76 - 170907*m.b50*m.b53 - 4284*m.b50*m.b77 - 170907*m.b51*m.b54 -
                  4284*m.b51*m.b78 - 52892*m.b52*m.b79 - 52892*m.b53*m.b80 - 52892*m.b54*m.b81 + 140020*m.b55*m.b58
                  + 172819*m.b55*m.b61 - 68559*m.b55*m.b64 + 127058*m.b55*m.b73 - 96654*m.b55*m.b82 + 140020*m.b56
                  *m.b59 + 172819*m.b56*m.b62 - 68559*m.b56*m.b65 + 127058*m.b56*m.b74 - 96654*m.b56*m.b83 + 140020
                  *m.b57*m.b60 + 172819*m.b57*m.b63 - 68559*m.b57*m.b66 + 127058*m.b57*m.b75 - 96654*m.b57*m.b84 +
                  53214*m.b58*m.b61 + 113790*m.b58*m.b67 + 70369*m.b58*m.b76 + 40736*m.b58*m.b85 + 53214*m.b59*
                  m.b62 + 113790*m.b59*m.b68 + 70369*m.b59*m.b77 + 40736*m.b59*m.b86 + 53214*m.b60*m.b63 + 113790*
                  m.b60*m.b69 + 70369*m.b60*m.b78 + 40736*m.b60*m.b87 - 53179*m.b61*m.b70 - 40328*m.b61*m.b79 -
                  76183*m.b61*m.b88 - 53179*m.b62*m.b71 - 40328*m.b62*m.b80 - 76183*m.b62*m.b89 - 53179*m.b63*m.b72
                  - 40328*m.b63*m.b81 - 76183*m.b63*m.b90 + 128807*m.b64*m.b67 + 9873*m.b64*m.b70 - 163252*m.b64*
                  m.b73 + 118598*m.b64*m.b91 + 128807*m.b65*m.b68 + 9873*m.b65*m.b71 - 163252*m.b65*m.b74 + 118598*
                  m.b65*m.b92 + 128807*m.b66*m.b69 + 9873*m.b66*m.b72 - 163252*m.b66*m.b75 + 118598*m.b66*m.b93 +
                  26118*m.b67*m.b70 - 17710*m.b67*m.b76 - 47780*m.b67*m.b94 + 26118*m.b68*m.b71 - 17710*m.b68*m.b77
                  - 47780*m.b68*m.b95 + 26118*m.b69*m.b72 - 17710*m.b69*m.b78 - 47780*m.b69*m.b96 - 194573*m.b70*
                  m.b79 + 79568*m.b70*m.b97 - 194573*m.b71*m.b80 + 79568*m.b71*m.b98 - 194573*m.b72*m.b81 + 79568*
                  m.b72*m.b99 + 134721*m.b73*m.b76 - 43693*m.b73*m.b79 - 35040*m.b73*m.b100 + 134721*m.b74*m.b77 -
                  43693*m.b74*m.b80 - 35040*m.b74*m.b101 + 134721*m.b75*m.b78 - 43693*m.b75*m.b81 - 35040*m.b75*
                  m.b102 - 154491*m.b76*m.b79 + 126672*m.b76*m.b103 - 154491*m.b77*m.b80 + 126672*m.b77*m.b104 -
                  154491*m.b78*m.b81 + 126672*m.b78*m.b105 + 134687*m.b79*m.b106 + 134687*m.b80*m.b107 + 134687*
                  m.b81*m.b108 - 20223*m.b82*m.b85 + 16042*m.b82*m.b88 - 71597*m.b82*m.b91 + 105213*m.b82*m.b100 -
                  20223*m.b83*m.b86 + 16042*m.b83*m.b89 - 71597*m.b83*m.b92 + 105213*m.b83*m.b101 - 20223*m.b84*
                  m.b87 + 16042*m.b84*m.b90 - 71597*m.b84*m.b93 + 105213*m.b84*m.b102 - 23477*m.b85*m.b88 + 131588*
                  m.b85*m.b94 + 77329*m.b85*m.b103 - 23477*m.b86*m.b89 + 131588*m.b86*m.b95 + 77329*m.b86*m.b104 -
                  23477*m.b87*m.b90 + 131588*m.b87*m.b96 + 77329*m.b87*m.b105 + 243127*m.b88*m.b97 + 106932*m.b88*
                  m.b106 + 243127*m.b89*m.b98 + 106932*m.b89*m.b107 + 243127*m.b90*m.b99 + 106932*m.b90*m.b108 +
                  173520*m.b91*m.b94 - 14664*m.b91*m.b97 + 37621*m.b91*m.b100 + 173520*m.b92*m.b95 - 14664*m.b92*
                  m.b98 + 37621*m.b92*m.b101 + 173520*m.b93*m.b96 - 14664*m.b93*m.b99 + 37621*m.b93*m.b102 - 95030*
                  m.b94*m.b97 + 10313*m.b94*m.b103 - 95030*m.b95*m.b98 + 10313*m.b95*m.b104 - 95030*m.b96*m.b99 +
                  10313*m.b96*m.b105 + 102942*m.b97*m.b106 + 102942*m.b98*m.b107 + 102942*m.b99*m.b108 - 244497*
                  m.b100*m.b103 - 85233*m.b100*m.b106 - 244497*m.b101*m.b104 - 85233*m.b101*m.b107 - 244497*m.b102*
                  m.b105 - 85233*m.b102*m.b108 - 96225*m.b103*m.b106 - 96225*m.b104*m.b107 - 96225*m.b105*m.b108
                  , sense=minimize)
# Partition constraints c1..c29: each consecutive triple of binaries
# (b1,b2,b3), (b4,b5,b6), ..., (b85,b86,b87) must sum to exactly 1
# (one-of-three choice per entity). Originally emitted as 29 explicit
# Constraint statements by GAMS Convert.
for _k in range(1, 30):
    _lo = 3 * _k - 2
    setattr(
        m,
        'c%d' % _k,
        Constraint(
            expr=getattr(m, 'b%d' % _lo)
            + getattr(m, 'b%d' % (_lo + 1))
            + getattr(m, 'b%d' % (_lo + 2)) == 1
        ),
    )
m.c30 | |
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
UTILS
Utility functions.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2021 <NAME>. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
__all__ = [
# Methods
'assert_alignment',
'assert_color',
'assert_cursor',
'assert_list_vector',
'assert_orientation',
'assert_position',
'assert_position_vector',
'assert_vector',
'check_key_pressed_valid',
'fill_gradient',
'format_color',
'get_cursor',
'get_finger_pos',
'is_callable',
'load_pygame_image_file',
'make_surface',
'mouse_motion_current_mouse_position',
'parse_padding',
'print_menu_widget_structure',
'set_pygame_cursor',
'uuid4',
'warn',
'widget_terminal_title',
# Constants
'PYGAME_V2',
# Classes
'TerminalColors'
]
import functools
# import inspect
import sys
import traceback
import types
import uuid
import warnings
import pygame
import pygame_menu
from pygame_menu.locals import ALIGN_CENTER, ALIGN_LEFT, ALIGN_RIGHT, POSITION_CENTER, \
POSITION_NORTH, POSITION_SOUTH, POSITION_SOUTHEAST, POSITION_NORTHWEST, \
POSITION_WEST, POSITION_EAST, POSITION_NORTHEAST, POSITION_SOUTHWEST, \
ORIENTATION_HORIZONTAL, ORIENTATION_VERTICAL, FINGERDOWN, FINGERUP, FINGERMOTION
from pygame_menu._types import ColorType, ColorInputType, Union, List, Vector2NumberType, \
NumberType, Any, Optional, Tuple, NumberInstance, VectorInstance, PaddingInstance, \
PaddingType, Tuple4IntType, ColorInputInstance, VectorType, EventType, \
CursorInputInstance, CursorInputType, Tuple2IntType, Dict
PYGAME_V2 = pygame.version.vernum[0] >= 2
WARNINGS_LAST_MESSAGES: Dict[int, bool] = {}
def assert_alignment(align: str) -> None:
    """
    Assert that a certain alignment is valid.

    :param align: Align value
    :return: None
    """
    assert isinstance(align, str), f'alignment "{align}" must be a string'
    valid_alignments = (ALIGN_LEFT, ALIGN_CENTER, ALIGN_RIGHT)
    assert align in valid_alignments, f'incorrect alignment value "{align}"'
def assert_color(
        color: Union[ColorInputType, List[int]],
        warn_if_invalid: bool = True
) -> ColorType:
    """
    Assert that a certain color is valid.

    :param color: Object color
    :param warn_if_invalid: If ``True`` warns if the color is invalid
    :return: Formatted color if valid, else, throws an ``AssertionError`` exception
    """
    color = format_color(color, warn_if_invalid=warn_if_invalid)
    assert isinstance(color, VectorInstance), \
        f'color must be a tuple or list, not type "{type(color)}"'
    assert 3 <= len(color) <= 4, \
        'color must be a tuple or list of 3 or 4 numbers'
    # Validate the three RGB channels
    for channel in color[:3]:
        assert isinstance(channel, int), \
            f'"{channel}" in element color {color} must be an integer, not type "{type(color)}"'
        assert 0 <= channel <= 255, \
            f'"{channel}" in element color {color} must be an integer between 0 and 255'
    # Validate the optional alpha channel
    if len(color) == 4:
        alpha = color[3]
        assert isinstance(alpha, int), \
            f'alpha channel must be an integer between 0 and 255, not type "{type(color)}"'
        assert 0 <= alpha <= 255, \
            f'opacity of color {color} must be an integer between 0 and 255; ' \
            f'where 0 is fully-transparent and 255 is fully-opaque'
    return color
def assert_cursor(cursor: CursorInputType) -> None:
    """
    Assert a given cursor is valid.

    :param cursor: Cursor object
    :return: None
    """
    msg = 'cursor instance invalid, it can be None, an integer, ' \
          'or pygame.cursors.Cursor'
    assert isinstance(cursor, CursorInputInstance), msg
def assert_list_vector(list_vector: Union[List[Vector2NumberType], Tuple[Vector2NumberType, ...]],
                       length: int) -> None:
    """
    Assert that a list fixed length vector is numeric.

    :param list_vector: Numeric list vector
    :param length: Length of the required vector. If ``0`` don't check the length
    :return: None
    """
    assert isinstance(list_vector, VectorInstance), \
        f'list_vector "{list_vector}" must be a tuple or list'
    # Each element must itself be a valid numeric vector
    for item in list_vector:
        assert_vector(item, length)
def assert_orientation(orientation: str) -> None:
    """
    Assert that a certain widget orientation is valid.

    :param orientation: Object orientation
    :return: None
    """
    assert isinstance(orientation, str), \
        f'orientation "{orientation}" must be a string'
    valid_orientations = (ORIENTATION_HORIZONTAL, ORIENTATION_VERTICAL)
    assert orientation in valid_orientations, \
        f'invalid orientation value "{orientation}"'
def assert_position(position: str) -> None:
    """
    Assert that a certain position is valid.

    :param position: Object position
    :return: None
    """
    assert isinstance(position, str), \
        f'position "{position}" must be a string'
    valid_positions = (POSITION_WEST, POSITION_SOUTHWEST, POSITION_SOUTH,
                       POSITION_SOUTHEAST, POSITION_EAST, POSITION_NORTH,
                       POSITION_NORTHWEST, POSITION_NORTHEAST, POSITION_CENTER)
    assert position in valid_positions, \
        f'invalid position value "{position}"'
def assert_position_vector(position: Union[str, List[str], Tuple[str, ...]]) -> None:
    """
    Assert that a position vector is valid.

    :param position: Object position
    :return: None
    """
    if isinstance(position, str):
        assert_position(position)
        return
    assert isinstance(position, VectorInstance)
    # Validate each entry and track distinct values to reject duplicates
    seen = []
    for pos in position:
        assert_position(pos)
        if pos not in seen:
            seen.append(pos)
    assert len(seen) == len(position), 'there cannot be repeated positions'
def assert_vector(
        num_vector: VectorType,
        length: int,
        instance: type = NumberInstance
) -> None:
    """
    Assert that a fixed length vector is numeric.

    :param num_vector: Numeric vector
    :param length: Length of the required vector. If ``0`` don't check the length
    :param instance: Instance of each item of the vector
    :return: None
    """
    assert isinstance(num_vector, VectorInstance), \
        f'vector "{num_vector}" must be a list or tuple of {length} items if type {instance}'
    if length != 0:
        assert len(num_vector) == length, \
            f'vector "{num_vector}" must contain {length} numbers only, ' \
            f'but {num_vector} were given'
    for num in num_vector:
        # Accept integral floats (e.g. 2.0) when integers are required
        if instance == int and isinstance(num, float) and int(num) == num:
            num = int(num)
        assert isinstance(num, instance), \
            f'item {num} of vector must be {instance}, not type "{type(num)}"'
def check_key_pressed_valid(event: EventType) -> bool:
    """
    Checks if the pressed key is valid.

    :param event: Key press event
    :return: ``True`` if a key is pressed
    """
    # Some key is actually held down: the event is genuine
    if True in pygame.key.get_pressed():
        return True
    # Test hook: events flagged with 'test' are always accepted
    if event.dict.get('test'):
        return True
    # The system reported a key event while no key is held down:
    # re-post a KEYUP for that key and flag the event as invalid
    pygame.event.post(pygame.event.Event(pygame.KEYUP, {'key': event.key}))
    return False
def fill_gradient(
        surface: 'pygame.Surface',
        color: ColorInputType,
        gradient: ColorInputType,
        rect: Optional['pygame.Rect'] = None,
        vertical: bool = True,
        forward: bool = True
) -> None:
    """
    Fill a surface with a gradient pattern.

    :param surface: Surface to fill
    :param color: Starting color
    :param gradient: Final color
    :param rect: Area to fill; default is surface's rect
    :param vertical: True=vertical; False=horizontal
    :param forward: True=forward; False=reverse
    :return: None
    """
    if rect is None:
        rect = surface.get_rect()
    x1, x2 = rect.left, rect.right
    y1, y2 = rect.top, rect.bottom

    start = assert_color(color)
    end = assert_color(gradient)
    if not forward:
        start, end = end, start

    # Per-line color increment along the gradient axis
    span = (y2 - y1) if vertical else (x2 - x1)
    rate = tuple(float(end[c] - start[c]) / span for c in range(3))

    def _shade(offset):
        # Interpolated RGB at the given offset, clamped to [0, 255]
        return tuple(min(max(start[c] + rate[c] * offset, 0), 255) for c in range(3))

    draw_line = pygame.draw.line
    if vertical:
        for line in range(y1, y2):
            draw_line(surface, _shade(line - y1), (x1, line), (x2, line))
    else:
        for col in range(x1, x2):
            draw_line(surface, _shade(col - x1), (col, y1), (col, y2))
def format_color(
color: Union[ColorInputType, Any],
warn_if_invalid: bool = True
) -> Union[ColorType, Any]:
"""
Format color from string, int, or tuple to tuple type.
Available formats:
- Color name str: name of the color to use, e.g. ``"red"`` (all the supported name strings can be found in the colordict module, see https://github.com/pygame/pygame/blob/main/src_py/colordict.py)
- HTML color format str: ``"#rrggbbaa"`` or ``"#rrggbb"``, where rr, gg, bb, and aa are 2-digit hex numbers in the range of ``0`` to ``0xFF`` inclusive, the aa (alpha) value defaults to ``0xFF`` if not provided
- Hex number str: ``"0xrrggbbaa"`` or ``"0xrrggbb"``, where rr, gg, bb, and aa are 2-digit hex numbers in the range of ``0x00`` to ``0xFF`` inclusive, the aa (alpha) value defaults to ``0xFF`` if not provided
- int: int value of the color to use, using hex numbers can make this parameter more readable, e.g. ``0xrrggbbaa``, where rr, gg, bb, and aa are 2-digit hex numbers in the range of ``0x00`` to ``0xFF`` inclusive, note that the | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
pinner
- get git info from need packages
- create new version of git repository where script was run
- create tag the same as version
- set new revision in config file
revision is appropriate to git HEAD in specific package
- create or update change-log
- commit change-log
- commit tag
- push changes
"""
from __future__ import print_function
from time import gmtime, strftime
import getpass
import os
import re
import sys
from copy import deepcopy
import logging
from ConfigParser import ConfigParser
from StringIO import StringIO
from git import Repo, GitCommandError
import yaml
from yaml.parser import ParserError
CHANGE_LOG_PATH = 'profiles/sandbox_changelog.yaml'
SANDBOX_PATH = 'profiles/sandbox.cfg'
EA_VERSION = 'ea2-'
LOGGER = logging.getLogger(__name__)
LOG_HANDLER = logging.StreamHandler()
LOG_FORMATTER = logging.Formatter('%(levelname)s - %(message)s')
LOG_HANDLER.setFormatter(LOG_FORMATTER)
LOGGER.addHandler(LOG_HANDLER)
LOGGER.setLevel(logging.INFO)
ORIGIN_PWD = os.getcwd()
PATTERN_COMMIT_MESSAGE_REGEX = re.compile(r'[^a-zA-Z0-9/\n _]')
PATTERN_EA_VERSION = re.compile(r'({})'.format(EA_VERSION))
PATTERN_EA_VERSION_VALUE = re.compile(r'{}(.*)'.format(EA_VERSION))
PATTERN_REVISION_REGEX = re.compile(r'(rev=\w+)')
def get_current_rev(current_rev_str):
    """
    Get revision from string

    :Example:

    >>> get_current_rev('doc rev=12345')
    '12345'

    :param current_rev_str: whole string of git url info
    :type current_rev_str: str
    :rparam rev: found revision
    :rtype rev: str
    """
    match = PATTERN_REVISION_REGEX.search(current_rev_str)
    # match.group() is 'rev=<sha>'; keep only the part after '='
    _, _, rev = match.group().partition('=')
    return rev
def format_commits(commits):
    """
    Format commit representation

    :param commits: iterable of commit objects
    :type commits: ABC Iterable[Commit]
    :rparam commits: formatted list of commit dicts, or None when empty
    :rtype commits: list or None
    """
    formatted_commits = []
    for commit in commits:
        try:
            # Strip characters outside the allowed commit-message alphabet
            message = PATTERN_COMMIT_MESSAGE_REGEX.sub('', commit.message.encode())
        except UnicodeEncodeError:
            message = ''
        # Keep only the non-empty lines of the sanitized message
        message_lines = [x.strip() for x in message.split('\n') if x]
        # Build a fresh dict per commit (the previous implementation reused
        # one dict and deepcopy'd it on every iteration).
        formatted_commits.append({
            'commit': commit.hexsha.encode(),
            'author': commit.author.name.encode(),
            'date': commit.authored_datetime.strftime("%Y-%m-%d %H:%M:%S").encode(),
            # BUGFIX: key was previously misspelled 'massage'; older
            # changelog entries may still carry the old key.
            'message': message_lines,
        })
    return formatted_commits if formatted_commits else None
def get_git_info(package):
    """
    Change directory where package located and get info

    :param package: package info (name, git url string with rev=...)
    :type package: tuple
    :rparam package: git info
    :rtype package: tuple
    """
    package_name = package[0]
    context = {}
    pending_commits = []
    diff_count = 0
    os.chdir(os.path.join(ORIGIN_PWD, 'src', package_name))
    repo = Repo('')
    head_sha = repo.head.object.hexsha
    pinned_rev = get_current_rev(package[1])
    # Collect commits newer than the pinned revision
    for diff_count, commit in enumerate(repo.iter_commits()):
        if commit.hexsha == pinned_rev:
            break
        pending_commits.append(commit)
    context['changes'] = format_commits(pending_commits)
    LOGGER.info("In package '%s' is %d commits differents", package_name, diff_count)
    return (context, head_sha)
def inspect_packages(sandbox):
    """
    Create mapping package name to changes(commits) in this packages
    and create mapping package name to git HEAD

    :param sandbox: it is config object from loading file config (*.cfg)
    :type sandbox: ConfigParser
    :rparam : mapping package name to changes in this packages
    :rtype git_info: ABC MutableMapping
    :rparam : mapping package name to git HEAD
    :rtype new_heads: ABC MutableMapping
    """
    git_info = {}
    new_heads = {}
    for package in sandbox.items('sources'):
        name = package[0]
        try:
            # Packages without a checkout under src/ raise OSError on chdir
            git_info[name], new_heads[name] = get_git_info(package)
        except OSError:
            continue
    os.chdir(ORIGIN_PWD)
    return git_info, new_heads
def get_config(filename):
    """
    Get config object from file

    :param filename: config file name (without extension), looked up under profiles/
    :type filename: str
    :rparam config: it is config object from loading file config (*.cfg)
    :rtype config: ConfigParser
    """
    config = ConfigParser()
    # read() silently ignores missing files, leaving an empty config
    config.read('profiles/%s.cfg' % filename)
    return config
def get_content(sandbox):
    """
    Render a config object to text and normalize its formatting.

    Rejoins '+ =' back to '+=' and strips trailing spaces before
    newlines that serialization introduces.

    :param sandbox: config object loaded from a *.cfg file
    :type sandbox: ConfigParser
    :rparam: corrected textual content of the config
    :rtype: str
    """
    buff = StringIO()
    sandbox.write(buff)
    raw = buff.getvalue()
    buff.close()
    return raw.replace('+ =', '+=').replace(' \n', '\n')
def ver_incrementer(ver_chain, pos=-1):
    """
    Increment a dotted version, carrying when a component reaches 100.

    The list is modified in place and the stringified chain is returned
    from the top-level call.

    :Example:
    >>> ver_incrementer(['1', '2', '3'])
    ['1', '2', '4']
    >>> ver_incrementer(['1', '2', '99'])
    ['1', '3', '0']

    :param ver_chain: mutable list of version components (str or int);
                      a plain string is NOT accepted (item assignment)
    :type ver_chain: list
    :param pos: index (from the right) of the component to increment;
                used internally for carry recursion
    :type pos: int
    :rparam: version components as strings (None for internal recursive
             frames whose result the caller discards)
    :rtype: list or None
    """
    if abs(pos) > len(ver_chain):
        # Carry past the most significant component: grow the version,
        # e.g. ['99'] -> ['1', '0'].  list.insert returns None, so the
        # stringified chain must be returned explicitly (the old code
        # returned the bare insert() result, i.e. None).
        ver_chain.insert(0, 1)
        return [str(x) for x in ver_chain]
    ver_chain[pos] = int(ver_chain[pos]) + 1
    if len(str(ver_chain[pos])) < 3:
        # Component stayed below 100: no carry needed.
        return [str(x) for x in ver_chain]
    # Component reached 100: reset it and carry into the next position.
    ver_chain[pos] = 0
    ver_incrementer(ver_chain, pos - 1)
    if pos == -1:
        # Top-level frame: the chain was fully updated in place.
        return [str(x) for x in ver_chain]
    return None
def sort_func(version):
    """
    Sort key for dotted version strings: compare numerically per component.

    Returns a real list (not a lazy map) so it works as a list.sort key on
    Python 3 as well — a map object cannot be compared and would also defer
    the ValueError for non-numeric components.

    :Example:
    >>> sort_func('1.2.3')
    [1, 2, 3]
    >>> sort_func('1.2.q')
    Traceback (most recent call last):
        ...
    ValueError: invalid literal for int() with base 10: 'q'

    :param version: version of git repository
    :type version: str
    :rparam: version components converted to int
    :rtype: list
    """
    return [int(part) for part in version.split('.')]
def get_last_version(tags):
    """
    Return the newest valid version found among the repository tags.

    Tags not matching PATTERN_EA_VERSION are dropped first.  Remaining
    versions are validated as purely numeric dotted style (e.g. 0.0.7);
    on an invalid one a warning naming the offending tag is logged and
    the process exits.

    :param tags: list of git tag names
    :type tags: list
    :rparam: highest version string, or None when no valid tag exists
    :rtype: str or None
    """
    tags = [x for x in tags if PATTERN_EA_VERSION.search(x)]
    if not tags:
        return None
    versions = [PATTERN_EA_VERSION_VALUE.search(x).groups()[0] for x in tags]
    if not versions:
        return None
    try:
        for i, version in enumerate(versions):
            # list() forces evaluation so a bad component raises HERE;
            # a bare map() is lazy on Python 3 and would silently skip
            # this validation step.
            list(map(int, version.split('.')))
        versions.sort(key=sort_func)
    except ValueError:
        LOGGER.warning("Invalid version style in versions, correct example"
                       " 0.0.7 please fix tag: %s",
                       versions[i])
        sys.exit(0)
    return versions[-1]
def get_new_version(repo):
    """
    Build the next version string for the repository the script runs in.

    Falls back to the start version when no valid tag exists yet,
    otherwise increments the newest tagged version.

    :param repo: instance of repository where the script runs
    :type repo: git.Repo
    :rparam: new version string (EA_VERSION prefix + dotted version)
    :rtype: str
    """
    start_version = '2.5.1'
    last_version = ''
    if repo.tags:
        last_version = get_last_version([tag.name for tag in repo.tags])
    if last_version:
        bumped = ver_incrementer(last_version.split('.'))
        return '{}{}'.format(EA_VERSION, '.'.join(bumped))
    return '{}{}'.format(EA_VERSION, start_version)
def get_change_log(file_handler):
    """
    Load the existing change log from an open file.

    The file position is rewound to the start afterwards so the caller
    can overwrite the file in place.

    :param file_handler: change log file, opened for reading
    :type file_handler: file
    :rparam: parsed change log (empty dict when the file is empty)
    :rtype: ABC MutableMapping
    """
    # safe_load refuses arbitrary YAML tags: yaml.load without an explicit
    # Loader can construct arbitrary Python objects and is deprecated.
    change_log = yaml.safe_load(file_handler)
    file_handler.seek(0)
    if not change_log:
        change_log = {}
    return change_log
def get_change_log_header():
    """
    Build the autogenerated-file header line for the change log.

    :rparam: header line naming the generation time (UTC) and the user
    :rtype: str
    """
    stamp = strftime("%Y-%m-%d %H:%M:%S", gmtime())
    author = getpass.getuser()
    header = "# Autogenerated at ({time}) by {user}\n"
    return header.format(time=stamp, user=author)
def make_change_log_greate_again(change_log, git_info, new_version):
    """
    Record the packages that actually changed under the new version key.

    Packages whose 'changes' entry is empty are dropped.  Returns the
    (possibly updated) change log and a flag telling whether anything
    was added.

    :param change_log: existing change log mapping
    :param git_info: mapping of package name to its change info
    :param new_version: version key under which changes are stored
    :rparam: (change_log, True/False) pair
    :rtype: tuple
    """
    changes = {}
    for package, package_changes in git_info.items():
        if package_changes['changes']:
            changes[package] = package_changes
    if not changes:
        return (change_log, False)
    change_log['{}'.format(new_version)] = changes
    return (change_log, True)
def save_git_info(git_info, new_version):
    """
    Save git info (changes) to the change log file.

    Exits with status 0 when the version is already recorded or nothing
    changed, and with status 1 on a YAML parse error.

    :param git_info: mapping of package names to changes (list of commits)
    :type git_info: ABC MutableMapping
    :param new_version: new version from which the tag will be created
    :type new_version: str
    """
    mode = 'r+' if os.path.exists(CHANGE_LOG_PATH) else 'w+'
    try:
        with open(CHANGE_LOG_PATH, mode) as file_handler:
            change_log = get_change_log(file_handler)
            if new_version in change_log:
                LOGGER.warning("The version '%s' already exist in change log"
                               " file '%s'", new_version, CHANGE_LOG_PATH)
                sys.exit(0)
            change_log, change = make_change_log_greate_again(change_log,
                                                              git_info,
                                                              new_version)
            if not change:
                LOGGER.info("There were no changes in packages")
                sys.exit(0)
            # get_change_log() rewound the handle to offset 0; truncate so
            # a dump shorter than the previous content cannot leave stale
            # trailing bytes behind (which would corrupt the YAML file).
            file_handler.truncate()
            file_handler.write(get_change_log_header())
            yaml.dump(change_log, file_handler, default_flow_style=False)
    except ParserError:
        LOGGER.warning("Error when parsing yaml file '%s'", CHANGE_LOG_PATH)
        sys.exit(1)
def undo_git_changes(repo, new_version, forse=None):
    """
    Undo git changes: reset the created commit, delete the created tag
    and optionally force-push the removal to the remote.

    Best-effort: a failing git command aborts the remaining steps but is
    logged instead of being silently swallowed.

    :param repo: instance of repository where the script runs
    :type repo: git.Repo
    :param new_version: version whose tag was created
    :type new_version: str
    :param forse: when truthy, force-push to remove the commit remotely
    :type forse: bool
    """
    try:
        LOGGER.info("Reset created commit")
        repo.head.reset('HEAD~1', index=True, working_tree=True)
        LOGGER.info("Commit: was reseted")
        LOGGER.info("Delete: created tag '%s'", new_version)
        repo.delete_tag(new_version)
        LOGGER.info("Tag: was deleted")
        if forse:
            LOGGER.info("Forse pushing for removing created "
                        "commit in remote repo")
            repo.git.push(force=True)
            LOGGER.info("Removed created commit in remote repo")
    except GitCommandError as err:
        # Keep the best-effort semantics but surface the failure instead
        # of hiding it behind a bare `pass`.
        LOGGER.warning("Undo of git changes failed: %s", err)
def git_push_obj(verbose, remote, obj=None):
    """
    Push the given object (or everything) to a git remote.

    :param verbose: label used in the log messages
    :type verbose: str
    :param remote: git remote to push to
    :param obj: object implementing the git push interface; when falsy
                the remote is pushed without an explicit object
    :rparam: True on success, None when the push failed
    """
    try:
        LOGGER.info("%s: pushing to remote - %s", verbose, remote.name)
        if not obj:
            remote.push()
        else:
            remote.push(obj)
    except GitCommandError as err:
        LOGGER.warning("%s: was not pushed because %s", verbose,
                       err.stderr.split(':')[-1])
        return None
    LOGGER.info("%s: successfully pushed", verbose)
    return True
def ask_permission(question):
    """
    Prompt the user until a recognizable answer is given.

    'y'/'yes' confirms (True), 'n'/'not' rejects (False); any other
    input re-asks the question.

    :param question: text shown before the (y or n) prompt
    :rparam: True when confirmed, False when rejected
    :rtype: bool
    """
    prompt = "{} (y or n): ".format(question)
    while True:
        answer = raw_input(prompt)
        if answer in ("yes", "y"):
            return True
        if answer in ("not", "n"):
            return False
        print("Please choose (y or n)")
def pin_indeed(repo, new_version):
    """
    Pin version
    Stage the change log and sandbox files, then interactively create a
    commit and a tag for the new version (each step asks permission).

    :param repo: instance of repository where was script run
    :type repo: git.Repo
    :param new_version: new version from which will be created tag
    :type new_version: str
    :rparam: True on success, None when a git command failed
    """
    repo.index.add([CHANGE_LOG_PATH])
    repo.index.add([SANDBOX_PATH])
    try:
        if ask_permission("Do you want to create commit?"):
            commit = repo.index.commit(message='Pin new version: {}'.format(new_version))
            LOGGER.info("Commit: was successfully created")
            # The tag prompt is nested under the commit branch, so
            # `commit` is always bound when the tag is created.
            if ask_permission("Do you want to create tag '{}'?".format(new_version)):
                repo.create_tag(new_version, ref=commit, message="sandbox versions update")
                LOGGER.info("Tag: '%s' was successfully created", new_version)
    except GitCommandError as err:
        LOGGER.warning("%s", err.stderr.strip())
        return None
    return True
def save_new_rew(sandbox):
"""
Save new revision to file
:param sandox: it is config from loading file config (*.cfg)
:type sandox: ConfigParser
| |
import argparse
import numpy
import numpy.random
import mir3.data.linear_decomposition as ld
import mir3.data.metadata as md
import mir3.data.spectrogram as spectrogram
import mir3.module
# TODO: maybe split this into 2 modules to compute activation and
# basis+activation
class BetaNMF(mir3.module.Module):
def get_help(self):
return """use beta nmf algorithm to compute the activations"""
    def build_arguments(self, parser):
        """Register this module's command-line arguments on the parser.

        Options: beta value, iteration/convergence controls, an optional
        basis file, an optional decomposition size triple, the input
        spectrogram files and the output decomposition file.
        """
        parser.add_argument('-b','--beta', type=float, default=2., help="""beta
                            value to be used by the algorithm (default:
                            %(default)s)""")
        parser.add_argument('-i','--max-iterations', type=int, default=100,
                            help="""maximum number of iterations""")
        parser.add_argument('-d','--min-delta', type=float, default=0.,
                            help="""minimum difference between iterations to
                            consider convergence""")
        parser.add_argument('-B','--basis', type=argparse.FileType('rb'),
                            help="""basis file to be used""")
        parser.add_argument('-s','--size', nargs=3, metavar=('SIZE',
                            'INSTRUMENT', 'NOTE'), help="""size of the
                            decomposition and instrument and note names to be
                            used for the basis. 'INSTRUMENT' or 'NOTE' can be
                            set to 'None' or 'null' to ignore that parameter""")
        parser.add_argument('piece', nargs='+', help="""piece spectrogram
                            file""")
        parser.add_argument('outfile', type=argparse.FileType('wb'),
                            help="""linear decomposition file""")
def run(self, args):
# Loads basis if present
if args.basis is not None:
b = ld.LinearDecomposition().load(args.basis)
else:
b = None
if args.basis is not None and b.data.right != {}:
print "Basis doesn't have empty right side. Ignoring it."
# Size of the decomposition (used when finding a basis too)
if args.size is None:
args.size = [None, None, None] # Simulate 3 values
for i in range(len(args.size)):
if args.size[i] == 'None' or args.size[i] == 'null':
args.size[i] = None
# Gather input spectrograms
s_list = []
s_meta = []
for filename in args.piece:
with open(filename, 'rb') as handler:
s_list.append(spectrogram.Spectrogram().load(handler))
s_meta.append(md.FileMetadata(handler))
# Converts arguments
size = int(args.size[0]) if args.size[0] is not None else None
instrument = args.size[1] if args.size[1] is not None else ''
note = args.size[2] if args.size[2] is not None else ''
# Decompose
d = self.compute(s_list,
size,
instrument,
note,
b,
args.beta,
args.min_delta,
args.max_iterations,
False)
# Associates an activation metadata with its given spectrogram's
# metadata
for k, data, metadata in d.right():
metadata.spectrogram_input = s_meta[k[-1]]
# Checks if basis was provided
if b is not None:
# If provided, adds it as basis metadata for each activation
meta = md.FileMetadata(args.basis)
for k, data, metadata in d.right():
metadata.basis_input = meta
else:
# Otherwise, the basis was computed right now, so we set its
# metadata with the list of all spectrograms' metadata
d.metadata.left[(args.size[1], args.size[2])].spectrogram_input = \
s_meta
d.save(args.outfile)
def compute(self, spectrograms, size=None, instrument=None, note=None,
basis=None, beta=2., min_delta=0., max_iterations=100,
save_metadata=True):
"""Computes the activation matrix from a basis matrix and a spectrogram.
Uses the beta divergence to compute the activations.
If min_delta is zero, the code may run faster because no beta divergence
is actually computed. Otherwise, the code stops computing if two
iterations of the algorithm don't improve the result by more than
min_delta.
Only one of 'basis' and 'size' arguments may be set, as they specify
different things. With 'size', the user extracts both a basis and an
activation from the spectrogram, while with 'basis' only an activation
is computed.
Each activation computed has the same key as the corresponding basis
plus the spectrogram's index in the list provided.
If a basis is being created, it's name is a tuple of (instrument, note),
even if they are None.
Args:
spectrograms: list of Spectrograms to be merged and used to compute
the activations.
size: Number of basis to extract from the spectrogram. Must be None
if the 'basis' argument is defined.
instrument: Name of the instrument. This is used only if size is
set. If None, it's ignored. Default: None.
note: Name of the note. This is used only if size is set. If None,
it's ignored. Default: None.
basis: LinearDecomposition object describing the basis to be used.
Must be none if the 'size' argument is defined.
beta: value for the beta used in divergence. Default: 2.
min_delta: threshold for early stop. Default: 0.
max_iterations: maximum number of iterations to use. Default: 100.
save_metadata: flag indicating whether the metadata should be
computed. Default: True.
Returns:
LinearDecomposition object with basis and activations for the
spectrograms.
Raises:
ValueError: matrices have incompatible sizes.
"""
# Check arguments compatibility
if size is None and basis is None:
raise ValueError("One of 'size' or 'basis' must not be None.")
if basis is not None and size is not None:
raise ValueError("Only one of 'size' or 'basis' must not be None.")
# Saves metadata
if save_metadata:
s_meta = [md.ObjectMetadata(s) for s in spectrograms]
else:
s_meta = [None for s in spectrograms]
# Marks the limits of each spectrogram
X_start = [0]
for s in spectrograms:
X_start.append(X_start[-1]+s.data.shape[1])
# Merges spectrograms
X = numpy.hstack([s.data for s in spectrograms])
# If we have a basis, we only need to compute the activations
if basis is not None:
# Merges basis but keep track where each one starts so that it can
# be used to characterize the activations
B = []
B_start = [0]
for k, data, metadata in basis.left():
B.append(data)
B_start.append(B_start[-1]+data.shape[1])
B = numpy.hstack(B)
# Saves metadata
if save_metadata:
b_meta = md.ObjectMetadata(B)
else:
b_meta = None
# Initilizes activations
A = numpy.ones((B.shape[1], X.shape[1]))
# Computes the activation
self.compute_activation(X, B, A, beta, min_delta, max_iterations)
# Starts creating the decomposition object
d = ld.LinearDecomposition()
# Copy the left stuff from the basis, since they came from there
d.data.left = basis.data.left
d.metadata.left = basis.metadata.left
# Cuts the activation. For each combination of basis and
# spectrograms, we get an activation
i = 0
for k, data, metadata in basis.left():
for j in range(len(spectrograms)):
# Since spectrograms don't have name, we call it by its
# sequence number
s_name = (j,)
# Cuts the activation
A_cut = A[B_start[i]:B_start[i+1], X_start[j]:X_start[j+1]]
# Merges the basis key with the spectrogram name to create a
# key for the activation. Then stores a lot of metadata
# about what was used to compute it.
d.add(k+s_name,
right=A_cut,
right_metadata=md.Metadata(
method="beta_nmf",
beta=beta,
min_delta=min_delta,
max_iterations=max_iterations,
spectrogram_input=s_meta[j],
spectrogram=s.metadata,
basis_input=b_meta,
basis=metadata))
# Increase basis iterator
i += 1
else:
# Everyone gets the same matrices to work with every time, so we
# avoid consistency problems. However, we can't have the same values
# filling the matrices or the algorithm can't separate the basis and
# activations (everyone keeps getting the same value).
numpy.random.seed(0)
B = numpy.random.rand(X.shape[0], size)
A = numpy.random.rand(size, X.shape[1])
# Computes both basis and activations
self.compute_both(X, B, A, beta, min_delta, max_iterations)
# Key for the basis created
key = (instrument, note)
# Starts creating the decomposition object
d = ld.LinearDecomposition()
# Adds basis
d.add(key,
left=B,
left_metadata=md.Metadata(
method="beta_nmf",
beta=beta,
min_delta=min_delta,
max_iterations=max_iterations,
spectrogram_input=s_meta,
spectrogram=[s.metadata for s in spectrograms]))
# Adds the activations cutted to match the spectrograms
for j in range(len(spectrograms)):
# Since spectrograms don't have name, we call it by its sequence
# number
s = spectrograms[j]
s_name = (j,)
# Cuts the activation
A_cut = A[:, X_start[j]:X_start[j+1]]
# Merges the basis key with the spectrogram name to create a key
# for the activation. Then stores a lot of metadata about what
# was used to compute it.
d.add(key+s_name,
right=A_cut,
right_metadata=md.Metadata(
method="beta_nmf",
beta=beta,
min_delta=min_delta,
max_iterations=max_iterations,
spectrogram_input=s_meta[j],
spectrogram=s.metadata))
return d
def compute_both(self, X, B, A, beta=2., min_delta=0., max_iterations=100):
"""Computes both the basis and activation.
Args:
X: matrix to be approximated.
B: initial guess for B.
A: initial guess for A.
beta: value of beta to be used. Default: 2.
min_delta: minimum improvement necessary for the algorithm to
continue. Default: 0.
max_iterations: maximum number of iterations. Default: 100;
Raises:
ValueError: matrices have incompatible sizes.
"""
# Checks shapes match
if X.shape[0] != B.shape[0] or X.shape[1] != A.shape[1]:
raise ValueError("Incompatible matrix sizes: %r = %r * %r." %
(X.shape, B.shape, A.shape))
# Makes decomposition
self.beta_nmf(1e-6+X, # Avoids near-zero values
B,
A,
beta=beta,
update_B=True,
update_A=True,
min_delta=min_delta,
max_iterations=max_iterations)
def compute_activation(self, X, B, A, beta=2., min_delta=0.,
max_iterations=100):
"""Computes both the activation for a given basis.
Args:
X: matrix to be approximated.
B: basis to be used.
A: initial guess for A.
beta: value of beta to be used. Default: 2.
min_delta: minimum improvement necessary for the algorithm to
continue. Default: 0.
max_iterations: maximum number of iterations. Default: | |
= []
for node in nodes_list:
inputs_list.append(ProActiveKernel.__extract_task_inputs_from_graph_data__(node, edges_list))
return inputs_list
def __import_dot__(self, input_data):
if os.path.isfile(input_data['path']):
Gtmp = pgv.AGraph(input_data['path'])
nodes = Gtmp.nodes()
edges = Gtmp.edges()
inputs_data = ProActiveKernel.__extract_tasks_inputs_from_graph__(nodes, edges)
for temp_input_data in inputs_data:
self.__create_task__(temp_input_data)
else:
raise ConfigError(input_data['path'] + ': No such file.\n')
return 0
def __create_export_xml__(self, input_data):
self.__kernel_print_ok_message__('Exporting the job workflow (xml format) ...\n')
title = self.__get_saving_file_name__(input_data)
filename = './' + title + '.xml'
self.gateway.saveJob2XML(self.proactive_job, filename, debug=False)
self.__kernel_print_ok_message__('\'' + title + '.xml\' file created.\n')
return 0
    def __connect__(self, input_data):
        """Connect the kernel to a ProActive server.

        Server location is resolved with the following precedence:
        1. 'path' -- a config file holding server address and credentials
        2. 'host' (+ optional 'port', defaulting to 8080)
        3. 'port' alone (host defaults to try.activeeon.com)
        4. 'url'  -- a complete base URL
        5. nothing -- default server try.activeeon.com:8080
        Credentials come from the config file or are prompted interactively.

        :param input_data: parsed pragma arguments
        :type input_data: dict
        :raises ConfigError: on a bad config file or missing server entry
        :raises AssertionError: when the gateway reports it is not connected
        :return: 0 on success
        """
        # Tear down any previous session before reconnecting.
        if self.proactive_connected:
            self.__kernel_print_ok_message__('WARNING: Proactive is already connected.\n')
            self.__kernel_print_ok_message__('Disconnecting from server: ' + self.gateway.base_url + ' ...\n')
            self.gateway.disconnect()
            self.gateway.terminate()
            self.proactive_connected = False
        if 'path' in input_data:
            exists = os.path.isfile(input_data['path'])
            if exists:
                try:
                    # raise Exception(self.config)
                    self.proactive_config = cp.ConfigParser()
                    self.proactive_config.read(input_data['path'])
                    if 'host' in self.proactive_config['proactive_server']:
                        proactive_host = self.proactive_config['proactive_server']['host']
                        proactive_port = self.proactive_config['proactive_server']['port']
                        proactive_url = "http://" + proactive_host + ":" + proactive_port
                        self.proactive_config['proactive_server']['url'] = proactive_url
                    elif 'url' in self.proactive_config['proactive_server']:
                        proactive_url = self.proactive_config['proactive_server']['url']
                    else:
                        raise ConfigError('Activeeon server host and url not found in the config file.')
                    self.gateway = proactive.ProActiveGateway(proactive_url)
                    # NOTE(review): credentials are read from a plain-text
                    # config file here -- confirm that is acceptable.
                    self.gateway.connect(username=self.proactive_config['user']['login'],
                                         password=self.proactive_config['user']['password'])
                    self.__kernel_print_ok_message__('Connecting to server ...\n')
                    assert self.gateway.isConnected() is True
                    self.__kernel_print_ok_message__('Connected as \'' + self.proactive_config['user']['login']
                                                     + '\'!\n')
                    self.proactive_connected = True
                    self.proactive_default_connection = False
                    return 0
                except AssertionError as ae:
                    # Re-raised as-is so connection failures keep their type.
                    raise AssertionError(ae)
                except Exception as e:
                    # Config parsing / gateway errors are surfaced uniformly.
                    raise ConfigError(str(e))
            else:
                raise ConfigError(input_data['path'] + ': No such file.\n')
        # No config file given: build the URL from the individual arguments.
        if 'host' in input_data:
            self.proactive_config['proactive_server']['host'] = input_data['host']
            if 'port' in input_data:
                self.proactive_config['proactive_server']['port'] = input_data['port']
            else:
                self.proactive_config['proactive_server']['port'] = '8080'
            proactive_url = "http://" + self.proactive_config['proactive_server']['host'] + ":" + \
                            self.proactive_config['proactive_server']['port']
            self.proactive_default_connection = False
        elif 'port' in input_data:
            self.proactive_config['proactive_server']['port'] = input_data['port']
            self.proactive_config['proactive_server']['host'] = 'try.activeeon.com'
            proactive_url = "http://" + self.proactive_config['proactive_server']['host'] + ":" + \
                            self.proactive_config['proactive_server']['port']
            self.proactive_default_connection = False
        elif 'url' in input_data:
            proactive_url = input_data['url']
            self.proactive_default_connection = False
        else:
            self.proactive_config['proactive_server']['host'] = 'try.activeeon.com'
            self.proactive_config['proactive_server']['port'] = '8080'
            proactive_url = "http://" + self.proactive_config['proactive_server']['host'] + ":" + \
                            self.proactive_config['proactive_server']['port']
            self.proactive_default_connection = True
        self.proactive_config['proactive_server']['url'] = proactive_url
        self.gateway = proactive.ProActiveGateway(proactive_url)
        # Prompt only for the credentials that were not supplied.
        if 'login' not in input_data:
            input_data['login'] = self.raw_input("Login: ")
        if 'password' not in input_data:
            input_data['password'] = self.getpass("Password: ")
        self.__kernel_print_ok_message__('Connecting to server ...\n')
        self.gateway.connect(username=input_data['login'], password=input_data['password'])
        assert self.gateway.isConnected() is True
        self.__kernel_print_ok_message__('Connected as \'' + input_data['login'] + '\'!\n')
        self.proactive_connected = True
        return 0
    def __print_usage_from_pragma__(self, pragma):
        """Print the usage text of the pragma named in a raw pragma line.

        Strips the leading '#%' marker and anything from the first '(' to
        recover the trigger name, then prints its usage as an error.
        """
        trigger = pragma.strip(" #%)").split('(', 1)[0].strip(" ")
        # NOTE(review): sibling methods call self.__kernel_print_ok_message__
        # with trailing double underscores (exempt from name mangling); this
        # attribute lacks them and is therefore mangled to
        # _ProActiveKernel__kernel_print_error_message -- confirm the target
        # method's actual name, otherwise this raises AttributeError.
        self.__kernel_print_error_message({'ename': 'Usages', 'evalue': '\n' + get_usage(trigger)})
    def __help__(self, input_data):
        """Print kernel help.

        With a 'pragma' argument, prints that pragma's detailed help;
        otherwise prints the summary list of all supported pragmas.
        """
        if 'pragma' in input_data:
            self.__kernel_print_ok_message__(get_help(input_data['pragma']))
        else:
            # TODO: automatize the help output and relate it more to pragma.py
            self.__kernel_print_ok_message__('\n#%connect(): connects to an ActiveEon server\n'
                                             + '#%import(): import specified libraries to all tasks of a same script language\n'
                                             + '#%configure(): configures the ProActive kernel\'s behavior\n'
                                             + '#%task(): creates/modifies a task\n'
                                             + '#%delete_task(): removes a task from the workflow\n'
                                             + "#%pre_script(): sets the pre-script of a task\n"
                                             + "#%post_script(): sets the post-script of a task\n"
                                             + '#%selection_script(): sets the selection script of a task\n'
                                             + '#%job_selection_script(): sets the default selection script of a job\n'
                                             + '#%fork_env(): sets the fork environment script\n'
                                             + '#%job_fork_env(): sets the default fork environment of a job\n'
                                             + '#%split(): creates/modifies a splitting task of a replicate control\n'
                                             + '#%runs(): creates/modifies the configuration script of a replicate control\n'
                                             + '#%process(): creates/modifies the script of a replicated processing task\n'
                                             + '#%merge(): creates/modifies a merging task of a replicate control\n'
                                             + '#%start(): creates/modifies a start task of a loop control\n'
                                             + '#%loop(): creates/modifies a loop task of a loop control\n'
                                             + '#%condition(): creates/modifies the condition script of a branch/loop control\n'
                                             + '#%branch(): creates/modifies a branch task of a branching control\n'
                                             + '#%if(): creates/modifies an if task of a branching control\n'
                                             + '#%else(): creates/modifies an else task of a branching control\n'
                                             + '#%continuation(): creates/modifies a continuation task of a branching control\n'
                                             + '#%job(): creates/renames the job\n'
                                             + '#%draw_job(): plots the workflow\n'
                                             + '#%write_dot(): writes the workflow in .dot format\n'
                                             + '#%import_dot(): imports the workflow from a .dot file\n'
                                             + '#%submit_job(): submits the job to the scheduler\n'
                                             + '#%get_job_result(): gets and prints the job results\n'
                                             + '#%get_task_result(): gets and prints the results of a given task\n'
                                             + '#%print_job_output(): gets and prints the job outputs\n'
                                             + '#%print_task_output(): gets and prints the outputs of a given task\n'
                                             + '#%list_submitted_jobs(): gets and prints the ids and names of the submitted jobs\n'
                                             + '#%export_xml(): exports the workflow in .xml format\n'
                                             + '#%show_resource_manager(): opens the ActiveEon resource manager portal\n'
                                             + '#%show_scheduling_portal(): opens the ActiveEon scheduling portal\n'
                                             + '#%show_workflow_automation(): opens the ActiveEon workflow automation portal\n\n'
                                             + 'To know the usage of a pragma use: #%help(pragma=PRAGMA_NAME)\n\n'
                                             + 'For more information, please check: https://github.com/ow2-proactive/'
                                             'proactive-jupyter-kernel/blob/master/README.md\n')
def __import__(self, input_data):
# TODO: should we update old tasks to add new added imports?
if 'language' in input_data:
if input_data['language'] in self.proactive_script_languages:
self.__kernel_print_ok_message__('Saving \'' + input_data['language'] + '\' imports ...\n')
self.imports[input_data['language']] = input_data['code']
else:
raise ParameterError('Language \'' + input_data['language'] +
'\' not supported!\n Supported Languages:\n' + self.script_languages)
else:
self.__kernel_print_ok_message__('Saving \'Python\' imports ...\n')
self.imports['Python'] = input_data['code']
self.__kernel_print_ok_message__('Saved.\n')
def __create_pre_script_from_task__(self, input_data):
if input_data['language'] in self.proactive_script_languages:
pre_script = self.gateway.createPreScript(self.proactive_script_languages[input_data['language']])
else:
raise ParameterError('Language \'' + input_data['language'] +
'\' not supported!\n Supported Languages:\n' + self.script_languages)
if 'path' in input_data:
exists = os.path.isfile(input_data['path'])
if exists:
pre_script.setImplementationFromFile(input_data['path'])
if input_data['code'] != '':
self.__kernel_print_ok_message__('WARNING: The written code is ignored.\n')
else:
raise Exception('The file \'' + input_data['path'] + '\' does not exist')
else:
pre_script.setImplementation(input_data['code'])
input_data['task'].setPreScript(pre_script)
def __create_pre_script_from_name__(self, input_data):
for value in self.proactive_tasks:
if value.getTaskName() == input_data['name']:
self.__kernel_print_ok_message__('Adding a pre-script to the proactive task ...\n')
input_data['task'] = value
self.__create_pre_script_from_task__(input_data)
self.__kernel_print_ok_message__('Pre-script added to \'' + input_data['name'] + '\'.\n')
self.job_up_to_date = False
return 0
raise Exception('The task named \'' + input_data['name'] + '\' does not exist.')
def __create_post_script_from_task__(self, input_data):
if input_data['language'] in self.proactive_script_languages:
post_script = self.gateway.createPostScript(self.proactive_script_languages[input_data['language']])
else:
raise ParameterError('Language \'' + input_data['language'] +
'\' not supported!\n Supported Languages:\n' + self.script_languages)
if 'path' in input_data:
exists = os.path.isfile(input_data['path'])
if exists:
post_script.setImplementationFromFile(input_data['path'])
if input_data['code'] != '':
self.__kernel_print_ok_message__('WARNING: The written code is ignored.\n')
else:
raise Exception('The file \'' + input_data['path'] + '\' does not exist')
else:
post_script.setImplementation(input_data['code'])
input_data['task'].setPostScript(post_script)
def __create_post_script_from_name__(self, input_data):
for value in self.proactive_tasks:
if value.getTaskName() == input_data['name']:
self.__kernel_print_ok_message__('Adding a post-script to the proactive task ...\n')
input_data['task'] = value
self.__create_post_script_from_task__(input_data)
self.__kernel_print_ok_message__('Post-script added to \'' + input_data['name'] + '\'.\n')
self.job_up_to_date = False
return 0
raise Exception('The task named \'' + input_data['name'] + '\' does not exist.')
def __create_selection_script_from_task__(self, input_data):
# TODO: add different script language handling
proactive_selection_script = self.gateway.createDefaultSelectionScript()
if 'path' in input_data:
exists = os.path.isfile(input_data['path'])
if exists:
proactive_selection_script.setImplementationFromFile(input_data['path'])
if input_data['code'] != '':
self.__kernel_print_ok_message__('WARNING: The written code is ignored.\n')
else:
raise Exception('The file \'' + input_data['path'] + '\' does not exist')
else:
proactive_selection_script.setImplementation(input_data['code'])
input_data['task'].setSelectionScript(proactive_selection_script)
def __create_selection_script_from_name__(self, input_data):
for task in self.proactive_tasks:
if task.getTaskName() == input_data['name']:
self.__kernel_print_ok_message__('Adding a selection script to the proactive task ...\n')
input_data['task'] = task
self.__create_selection_script_from_task__(input_data)
self.__kernel_print_ok_message__('Selection script added to \'' + input_data['name'] + '\'.\n')
self.job_up_to_date = False
return 0
raise Exception('The task named \'' + input_data['name'] + '\' does not exist.')
def __create_job_selection_script__(self, input_data):
# TODO: add different script language handling
proactive_selection_script = self.gateway.createDefaultSelectionScript()
if 'path' in input_data:
exists = os.path.isfile(input_data['path'])
if exists:
proactive_selection_script.setImplementationFromFile(input_data['path'])
if input_data['code'] != '':
self.__kernel_print_ok_message__('WARNING: The written code is ignored.\n')
else:
raise Exception('The file \'' + input_data['path'] + '\' does not exist')
else:
proactive_selection_script.setImplementation(input_data['code'])
self.__kernel_print_ok_message__('Saving selection script ...\n')
self.default_selection_script = proactive_selection_script
if 'force' in input_data and input_data['force'] == 'on':
self.__kernel_print_ok_message__('Updating created tasks ...\n')
for task in self.proactive_tasks:
self.__kernel_print_ok_message__('Setting the selection script of the task \'' + task.getTaskName()
+ '\' ...\n')
task.setSelectionScript(self.default_selection_script)
self.job_up_to_date = False
self.__kernel_print_ok_message__('Done.\n')
def __create_fork_environment_from_task__(self, input_data):
# TODO: add different script language handling
proactive_fork_env = self.gateway.createDefaultForkEnvironment()
if 'path' in input_data:
exists = os.path.isfile(input_data['path'])
if exists:
proactive_fork_env.setImplementationFromFile(input_data['path'])
if input_data['code'] != '':
self.__kernel_print_ok_message__('WARNING: The written code is ignored.\n')
else:
raise Exception('The file \'' + input_data['path'] + '\' does not exist')
else:
proactive_fork_env.setImplementation(input_data['code'])
input_data['task'].setForkEnvironment(proactive_fork_env)
def __create_fork_environment_from_name__(self, input_data):
for task in self.proactive_tasks:
if task.getTaskName() == input_data['name']:
self.__kernel_print_ok_message__('Adding a fork environment to the proactive task ...\n')
input_data['task'] = task
self.__create_fork_environment_from_task__(input_data)
self.__kernel_print_ok_message__('Fork environment added to \'' + input_data['name'] + '\'.\n')
self.job_up_to_date = False
return 0
raise Exception('The task named \'' + input_data['name'] + '\' does not exist.')
def __create_job_fork_environment__(self, input_data):
# TODO: add different script language handling
proactive_fork_env = self.gateway.createDefaultForkEnvironment()
if 'path' in input_data:
| |
probabilities," or just "posteriors," which are the probabilities we are looking to compute using the "priors".
#
# Let us implement the Bayes Theorem from scratch using a simple example. Let's say we are trying to find the odds of an individual having diabetes, given that he or she was tested for it and got a positive result.
# In the medical field, such probabilities play a very important role as they often deal with life and death situations.
#
# We assume the following:
#
# `P(D)` is the probability of a person having Diabetes. Its value is `0.01`, or in other words, 1% of the general population has diabetes (disclaimer: these values are assumptions and are not reflective of any actual medical study).
#
# `P(Pos)` is the probability of getting a positive test result.
#
# `P(Neg)` is the probability of getting a negative test result.
#
# `P(Pos|D)` is the probability of getting a positive result on a test done for detecting diabetes, given that you have diabetes. This has a value `0.9`. In other words the test is correct 90% of the time. This is also called the Sensitivity or True Positive Rate.
#
# `P(Neg|~D)` is the probability of getting a negative result on a test done for detecting diabetes, given that you do not have diabetes. This also has a value of `0.9` and is therefore correct, 90% of the time. This is also called the Specificity or True Negative Rate.
#
# The Bayes formula is as follows:
#
# <img src="images/bayes_formula.png" height="242" width="242">
#
# * `P(A)` is the prior probability of A occurring independently. In our example this is `P(D)`. This value is given to us.
#
# * `P(B)` is the prior probability of B occurring independently. In our example this is `P(Pos)`.
#
# * `P(A|B)` is the posterior probability that A occurs given B. In our example this is `P(D|Pos)`. That is, **the probability of an individual having diabetes, given that this individual got a positive test result. This is the value that we are looking to calculate.**
#
# * `P(B|A)` is the prior probability of B occurring, given A. In our example this is `P(Pos|D)`. This value is given to us.
# Putting our values into the formula for Bayes theorem we get:
#
# `P(D|Pos) = P(D) * P(Pos|D) / P(Pos)`
#
# The probability of getting a positive test result `P(Pos)` can be calculated using the Sensitivity and Specificity as follows:
#
# `P(Pos) = [P(D) * Sensitivity] + [P(~D) * (1 - Specificity)]`
# In[17]:
'''
Instructions:
Calculate probability of getting a positive test result, P(Pos)
'''
# In[18]:
'''
Solution (skeleton code will be provided)
'''
# P(D): prior probability that a person has diabetes.
p_diabetes = 0.01
# P(~D) = 1 - P(D)
p_no_diabetes = 0.99
# Sensitivity (true positive rate): P(Pos|D)
p_pos_diabetes = 0.9
# Specificity (true negative rate): P(Neg|~D)
p_neg_no_diabetes = 0.9
# Law of total probability:
# P(Pos) = P(D) * Sensitivity + P(~D) * (1 - Specificity)
p_pos = (p_diabetes*p_pos_diabetes)+(p_no_diabetes*(1-p_neg_no_diabetes))
# BUGFIX: the original passed format(p_pos) as a second print argument, so the
# '{}' placeholder was printed literally; str.format fills it in correctly.
print('The probability of getting a positive test result P(Pos) is: {}'.format(p_pos))
# **Using all of this information we can calculate our posteriors as follows:**
#
# The probability of an individual having diabetes, given that, that individual got a positive test result:
#
# `P(D|Pos) = (P(D) * Sensitivity) / P(Pos)`
#
# The probability of an individual not having diabetes, given that, that individual got a positive test result:
#
# `P(~D|Pos) = (P(~D) * (1 - Specificity)) / P(Pos)`
#
# The sum of our posteriors will always equal `1`.
# In[19]:
'''
Instructions:
Compute the probability of an individual having diabetes, given that, that individual got a positive test result.
In other words, compute P(D|Pos).
The formula is: P(D|Pos) = (P(D) * P(Pos|D) / P(Pos)
'''
# In[20]:
'''
Solution
'''
# P(D|Pos): Bayes' rule with the quantities computed above.
p_diabetes_pos = ((p_diabetes*p_pos_diabetes))/p_pos
print('Probability of an individual having diabetes, given that that individual got a positive test result is:',format(p_diabetes_pos))
# In[21]:
'''
Instructions:
Compute the probability of an individual not having diabetes, given that, that individual got a positive test result.
In other words, compute P(~D|Pos).
The formula is: P(~D|Pos) = P(~D) * P(Pos|~D) / P(Pos)
Note that P(Pos|~D) can be computed as 1 - P(Neg|~D).
Therefore:
P(Pos|~D) = p_pos_no_diabetes = 1 - 0.9 = 0.1
'''
# In[22]:
'''
Solution
'''
# P(Pos|~D) = 1 - specificity
p_pos_no_diabetes = 0.1
# P(~D|Pos): complement posterior; together with p_diabetes_pos it sums to 1.
p_no_diabetes_pos = (p_no_diabetes*p_pos_no_diabetes)/p_pos
print('Probability of an individual not having diabetes, given that that individual got a positive test result is:',p_no_diabetes_pos)
# Congratulations! You have implemented Bayes Theorem from scratch. Your analysis shows that even if you get a positive test result, there is only an 8.3% chance that you actually have diabetes and a 91.67% chance that you do not have diabetes. This is of course assuming that only 1% of the entire population has diabetes which is only an assumption.
# **What does the term 'Naive' in 'Naive Bayes' mean ?**
#
# The term 'Naive' in Naive Bayes comes from the fact that the algorithm considers the features that it is using to make the predictions to be independent of each other, which may not always be the case. So in our Diabetes example, we are considering only one feature, that is the test result. Say we added another feature, 'exercise'. Let's say this feature has a binary value of `0` and `1`, where the former signifies that the individual exercises less than or equal to 2 days a week and the latter signifies that the individual exercises greater than or equal to 3 days a week. If we had to use both of these features, namely the test result and the value of the 'exercise' feature, to compute our final probabilities, Bayes' theorem would fail. Naive Bayes' is an extension of Bayes' theorem that assumes that all the features are independent of each other.
# ### Step 4.2: Naive Bayes implementation from scratch ###
#
#
# Now that you have understood the ins and outs of Bayes Theorem, we will extend it to consider cases where we have more than one feature.
#
# Let's say that we have two political parties' candidates, '<NAME>' of the Green Party and '<NAME>' of the Libertarian Party and we have the probabilities of each of these candidates saying the words 'freedom', 'immigration' and 'environment' when they give a speech:
#
# * Probability that <NAME> says 'freedom': 0.1 ---------> `P(F|J)`
# * Probability that Jill Stein says 'immigration': 0.1 -----> `P(I|J)`
# * Probability that Jill Stein says 'environment': 0.8 -----> `P(E|J)`
#
#
# * Probability that <NAME> says 'freedom': 0.7 -------> `P(F|G)`
# * Probability that <NAME> says 'immigration': 0.2 ---> `P(I|G)`
# * Probability that Gary Johnson says 'environment': 0.1 ---> `P(E|G)`
#
#
# And let us also assume that the probability of Jill Stein giving a speech, `P(J)` is `0.5` and the same for Gary Johnson, `P(G) = 0.5`.
#
#
# Given this, what if we had to find the probabilities of Jill Stein saying the words 'freedom' and 'immigration'? This is where the Naive Bayes' theorem comes into play as we are considering two features, 'freedom' and 'immigration'.
#
# Now we are at a place where we can define the formula for the Naive Bayes' theorem:
#
# <img src="images/naivebayes.png" height="342" width="342">
#
# Here, `y` is the class variable (in our case the name of the candidate) and `x1` through `xn` are the feature vectors (in our case the individual words). The theorem makes the assumption that each of the feature vectors or words (`xi`) are independent of each other.
# To break this down, we have to compute the following posterior probabilities:
#
# * `P(J|F,I)`: Given the words 'freedom' and 'immigration' were said, what's the probability they were said by Jill?
#
# Using the formula and our knowledge of Bayes' theorem, we can compute this as follows: `P(J|F,I)` = `(P(J) * P(F|J) * P(I|J)) / P(F,I)`. Here `P(F,I)` is the probability of the words 'freedom' and 'immigration' being said in a speech.
#
#
# * `P(G|F,I)`: Given the words 'freedom' and 'immigration' were said, what's the probability they were said by Gary?
#
# Using the formula, we can compute this as follows: `P(G|F,I)` = `(P(G) * P(F|G) * P(I|G)) / P(F,I)`
# In[23]:
'''
Instructions: Compute the probability of | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 3 21:37:40 2021
@author: <NAME>
"""
# Problem 7 - Regularized Linear Regression
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.style as style
style.use('bmh')
# Load the training set once with pandas (named columns) and once with
# np.loadtxt (the raw matrix actually used by the experiments below).
# r'\s+' avoids the invalid-escape-sequence warning that '\s+' triggers
# (the string value itself is unchanged).
train = pd.read_csv("D:\\AI\\lfd\\final\\features.train.txt", names=['digit', 'intensity', 'symmetry'], sep=r'\s+', dtype=np.float64)
TRAIN_DATA = np.loadtxt("D:\\AI\\lfd\\final\\features.train.txt")
# training data ignoring 0-th column which contains the digit labels
X_TRAIN = TRAIN_DATA[:, 1:]
# 0-th column contains digit labels
DIGIT_LABELS_TRAIN = TRAIN_DATA[:, 0]
# number of training points
N_TRAIN = X_TRAIN.shape[0]
def get_labels(x, digit_labels):
    '''
    Build the +1/-1 label vector for a one-vs-all classifier.

    - Takes integer 'x' (digit 0-9) that gets the positive label
    - Takes 'digit_labels', an array-like of per-sample digit labels
    - Returns a numpy array with 1 where digit_labels == x, and -1 otherwise
    '''
    # Vectorized replacement for the original Python loop: a single C-level
    # pass instead of one comparison + append per sample.
    return np.where(np.asarray(digit_labels) == x, 1, -1)
# We will use regularization with weight decay (see slide 11-13 of lecture 12).
def linear_regression_reg(Z, y, lambda_param):
    '''
    Regularized (weight-decay / ridge) linear regression.

    - Takes feature matrix Z with rows of the form (1, z1, z2, ..., zd)
    - Takes labels y
    - Takes lambda parameter lambda_param (>= 0)
    - returns weight vector w_tilde_reg solving
      (Z^T Z + lambda * I) w = Z^T y   (lecture 12, slide 11)
    '''
    num_columns_Z = Z.shape[1]
    # Solve the regularized normal equations directly instead of forming an
    # explicit inverse: np.linalg.solve is faster and numerically more stable
    # than np.linalg.inv followed by a matrix product.
    A = np.dot(Z.T, Z) + lambda_param * np.identity(num_columns_Z)
    b = np.dot(Z.T, y)
    w_tilde_reg = np.linalg.solve(A, b)
    return w_tilde_reg
# Compute the in-sample error E_in
def problem7():
    """In-sample error of the x-vs-all linear classifier (lambda = 1) for
    digits 5-9; prints and plots the results."""
    Z_TRAIN = np.c_[np.ones(N_TRAIN), X_TRAIN]
    lambda_value = 1
    digit_list = [5, 6, 7, 8, 9]
    E_in_list = []
    for digit in digit_list:
        y = get_labels(digit, DIGIT_LABELS_TRAIN)
        w = linear_regression_reg(Z_TRAIN, y, lambda_value)
        predictions = np.sign(np.dot(Z_TRAIN, w))
        E_in_list.append(sum(predictions != y) / N_TRAIN)
    # np.argmin returns the first minimum, matching a strict '<' scan.
    best = int(np.argmin(E_in_list))
    min_E_in = E_in_list[best]
    min_digit = digit_list[best]
    print("\nQ7\nThe lowest in-sample error: \nE_in = {:0.5f}, \nis achieved for the {}-vs-all classifier".format(min_E_in, min_digit))
    plt.plot(digit_list, E_in_list, 'ro-')
    plt.ylabel("in-sample error $E_{in}$")
    plt.xlabel("digit")
    plt.title("HW 9 - Problem 7")
    plt.show()
problem7()
# ANSWER = 7[d]
# Q8
# Reading in data with np.loadtxt instead of Pandas
# NOTE(review): columns appear to mirror the training file,
# (digit, intensity, symmetry), per the names used in pd.read_csv above.
TEST_DATA = np.loadtxt('D:\\AI\\lfd\\final\\features.test.txt')
# test data ignoring 0-th column which contains the digit labels
X_TEST = TEST_DATA[:, 1:]
# 0-th column contains digit labels
DIGIT_LABELS_TEST = TEST_DATA[:, 0]
# number of test points
N_TEST = X_TEST.shape[0]
# 1. We first have to apply the transform
# z = (1, x_1, x_2, x_1 x_2, x_1^2, x_2^2) for our feature matrix Z.
# 2. This time we consider the digits in [0,1,2,3,4].
# 3. We compute the out-of sample error E_{out}.
# Compute the out-of-sample error E_out
def problem8():
    """Out-of-sample error of the x-vs-all classifier (lambda = 1) for digits
    0-4, using the quadratic feature transform; prints and plots the results."""
    def quad_features(X, n):
        # z = (1, x1, x2, x1*x2, x1^2, x2^2)
        x1, x2 = X[:, 0], X[:, 1]
        return np.c_[np.ones(n), x1, x2, x1 * x2, x1 * x1, x2 * x2]
    Z_TRAIN = quad_features(X_TRAIN, N_TRAIN)
    Z_TEST = quad_features(X_TEST, N_TEST)
    lambda_value = 1
    digit_list_2 = [0, 1, 2, 3, 4]
    E_out_list = []
    for digit in digit_list_2:
        # train with training data!
        y_train = get_labels(digit, DIGIT_LABELS_TRAIN)
        w = linear_regression_reg(Z_TRAIN, y_train, lambda_value)
        # compute E_out by using test data!
        y_test = get_labels(digit, DIGIT_LABELS_TEST)
        predictions = np.sign(np.dot(Z_TEST, w))
        E_out_list.append(sum(predictions != y_test) / N_TEST)
    best = int(np.argmin(E_out_list))
    print("\nQ8\nThe lowest out-of-sample error: \nE_out = {:0.5f}, \nis achieved for the {}-vs-all classifier".format(E_out_list[best], digit_list_2[best]))
    plt.plot(digit_list_2, E_out_list, 'bo-')
    plt.ylabel("out-of-sample error $E_{out}$")
    plt.xlabel("digit")
    plt.title("HW 9 - Problem 8")
    plt.show()
problem8()
# ANSWER = 8[b]
# Q9
# We first compare $E_{out}$ without transform versus E_{out} with transform.
# Compute the in-sample error E_out
def problem9_E_out_without_transform():
    """E_in and E_out for every one-vs-all classifier (digits 0-9) using the
    plain linear features z = (1, x1, x2); returns both error lists."""
    Z_TRAIN = np.c_[np.ones(N_TRAIN), X_TRAIN[:, 0], X_TRAIN[:, 1]]
    Z_TEST = np.c_[np.ones(N_TEST), X_TEST[:, 0], X_TEST[:, 1]]
    digit_list_all = list(range(10))
    lambda_value = 1
    E_in_values = []
    E_out_values = []
    for digit in digit_list_all:
        y_train = get_labels(digit, DIGIT_LABELS_TRAIN)
        y_test = get_labels(digit, DIGIT_LABELS_TEST)
        # train with training data!
        w = linear_regression_reg(Z_TRAIN, y_train, lambda_value)
        # compute E_in
        E_in_values.append(sum(np.sign(np.dot(Z_TRAIN, w)) != y_train) / N_TRAIN)
        # compute E_out by using test data!
        E_out_values.append(sum(np.sign(np.dot(Z_TEST, w)) != y_test) / N_TEST)
    best = int(np.argmin(E_out_values))
    print("\nQ9\nThe lowest out-of-sample error: \nE_out = {:0.5f}, \nis achieved for the {}-vs-all classifier".format(E_out_values[best], digit_list_all[best]))
    plt.plot(digit_list_all, E_in_values, 'ro-', label='E_in without transform')
    plt.plot(digit_list_all, E_out_values, 'bo-', label='E_out without transform')
    plt.ylabel("out-of-sample error $E_{out}$")
    plt.xlabel("digit")
    plt.title("HW 9 - Problem 9 \n$E_{out}$ without transform ")
    plt.legend()
    plt.show()
    return E_in_values, E_out_values
E_in_list_all_without_transform, E_out_list_all_without_transform = problem9_E_out_without_transform()
# Compute the in-sample error E_out
def problem9_E_out_with_transform():
    """E_in and E_out for every one-vs-all classifier (digits 0-9) using the
    quadratic transform z = (1, x1, x2, x1*x2, x1^2, x2^2); returns both lists."""
    def quad_features(X, n):
        x1, x2 = X[:, 0], X[:, 1]
        return np.c_[np.ones(n), x1, x2, x1 * x2, x1 * x1, x2 * x2]
    Z_TRAIN = quad_features(X_TRAIN, N_TRAIN)
    Z_TEST = quad_features(X_TEST, N_TEST)
    digit_list_all = list(range(10))
    lambda_value = 1
    E_in_values = []
    E_out_values = []
    for digit in digit_list_all:
        y_train = get_labels(digit, DIGIT_LABELS_TRAIN)
        y_test = get_labels(digit, DIGIT_LABELS_TEST)
        # train with training data!
        w = linear_regression_reg(Z_TRAIN, y_train, lambda_value)
        # compute E_in
        E_in_values.append(sum(np.sign(np.dot(Z_TRAIN, w)) != y_train) / N_TRAIN)
        # compute E_out by using test data!
        E_out_values.append(sum(np.sign(np.dot(Z_TEST, w)) != y_test) / N_TEST)
    best = int(np.argmin(E_out_values))
    print("\nQ9\nThe lowest out-of-sample error: \nE_out = {:0.5f}, \nis achieved for the {}-vs-all classifier".format(E_out_values[best], digit_list_all[best]))
    plt.plot(digit_list_all, E_in_values, 'ro-', label='E_in with transform')
    plt.plot(digit_list_all, E_out_values, 'bo-', label='E_out with transform')
    plt.ylabel("out-of-sample error $E_{out}$")
    plt.xlabel("digit")
    plt.title("HW 9 - Problem 9 \n$E_{out}$ with transform ")
    plt.legend()
    plt.show()
    return E_in_values, E_out_values
E_in_list_all_with_transform, E_out_list_all_with_transform = problem9_E_out_with_transform()
def problem9_comparison_E_out_with_and_without_transform():
    """Plot E_out with vs. without the quadratic transform, then plot and
    print their per-digit difference."""
    digit_list_all = list(range(10))
    plt.plot(digit_list_all, E_out_list_all_without_transform, 'go-', label='E_out without transform')
    plt.plot(digit_list_all, E_out_list_all_with_transform, 'mo-', label='E_out with transform')
    plt.ylabel("out-of-sample error $E_{out}$")
    plt.xlabel("digit")
    plt.title("HW 9 - Problem 9 \nComparing $E_{out}$ with and without transform ")
    plt.legend()
    plt.show()
    print("Let's plot the difference between the E_out values without and with transform:")
    # Positive difference => the transform helped for that digit.
    diffs = np.array(E_out_list_all_without_transform) - np.array(E_out_list_all_with_transform)
    plt.plot(digit_list_all, diffs, '-ro')
    plt.xlabel("digit")
    plt.ylabel("$E_{out}$(without) - $E_{out}$(with)")
    plt.title("difference $E_{out}$ without and with transform")
    plt.show()
    print("difference: E_out without - E_out with transform:")
    for digit, delta in zip(digit_list_all, diffs):
        print("digit = {} => difference = {}".format(digit, delta))
problem9_comparison_E_out_with_and_without_transform()
"""
Exploring option 9[b], 9[c] and 9[d]
Conclusion: The out-of-sample error $E_{out}$ is smaller with a transform for digits 0, 1, 5. For the other digits the performance with and without transform is equal. We can exclude options 9[b], 9[c] and 9[d].
Exploring option 9[e]
For digit 5 there is an improvement of the out-of-sample error when using the transform. Let's check if the transform improves the out-of-sample performance by at least $5\%$ for 5-vs-all
"""
# Option 9[e] check: does the transform improve E_out for the 5-vs-all
# classifier by at least 5% (i.e. E_out_with <= 0.95 * E_out_without)?
print("ratio E_out_with / E_out_without = ", E_out_list_all_with_transform[5] / E_out_list_all_without_transform[5])
print("\nE_out_with <= 0.95 E_out_without ?", E_out_list_all_with_transform[5] <= 0.95 * E_out_list_all_without_transform[5])
"""
Exploring option 9[a]
Let's examine if overfitting occurs. First, let's plot how $E_{in}$ behaves with and without transform. Let's think about what we expect.
$E_{in}$ should go down if we use a more complex model, i.e. if we use the feature transform $\mathbf{z} = (1, x_1, x_2, x_1 x_2, x_1^2, x_2^2)$ which is more complex than the feature transform $\mathbf{z} = (1, x_1, x_2)$, then $E_{in}$ should go down.
The question is what happens with $E_{out}$ when we go from the less complex to the more complex model. For overfitting to occur $E_{out}$ should then go up.
"""
"""
Conclusion: We cannot observe overfitting, i.e. we cannot observe that when $E_{in}$ goes down, then $E_{out}$ goes up.
"""
# Q10
"""
Problem 10
We train the 1-vs-5 classifier with $\mathbf{z} = (1, x_1, x_2, x_1 x_2, x_1^2, x_2^2)$ for $\lambda = 0.01$ and $\lambda = 1$.
"""
# 1-vs-5 training data. r'\s+' avoids the invalid-escape-sequence warning
# that '\s+' triggers (the string value itself is unchanged).
df_train = pd.read_csv('D:\\AI\\lfd\\final\\features.train.txt', names=['digit', 'intensity', 'symmetry'], sep=r'\s+', dtype=np.float64)
# Consider 1 vs 5 classifier: keep only rows whose digit is 1 or 5.
# (The original evaluated the two filters as bare no-op expressions; removed.)
# Append a column y with labels y=+1 for digit==1
# https://chrisalbon.com/python/pandas_assign_new_column_dataframe.html
ones = df_train[df_train['digit'] == 1].assign(y=np.ones(df_train[df_train['digit'] == 1].shape[0]))
# Append a column y with labels y=-1 for digit==5
fives = df_train[df_train['digit'] == 5].assign(y=-np.ones(df_train[df_train['digit'] == 5].shape[0]))
# Glue together the dataframes for ones and fives.
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported replacement and produces the same frame here.
df_1_vs_5 = pd.concat([ones, fives], ignore_index=True)
# Training data
X_train_1_vs_5 = np.c_[df_1_vs_5['intensity'], df_1_vs_5['symmetry']]
# labels
y_train_1_vs_5 = np.array(df_1_vs_5['y'])
N_train_1_vs_5 = X_train_1_vs_5.shape[0]
# Let's first read in the test set
df_test = pd.read_csv('D:\\AI\\lfd\\final\\features.test.txt', names = ['digit', 'intensity', 'symmetry'], sep='\s+', dtype=np.float64)
# Consider test set for 1-vs-5 classifier
# Append a column y with labels y=+1 for digit==1
ones_test = df_test[df_test['digit'] == 1].assign(y = | |
from __future__ import annotations
import logging
import os
import uuid
import pytest
from dask.sizeof import sizeof
from distributed.compatibility import WINDOWS
from distributed.protocol import serialize_bytelist
from distributed.spill import SpillBuffer, has_zict_210, has_zict_220
from distributed.utils_test import captured_logger
# Skip markers for tests that depend on zict features introduced in
# 2.1.0 / 2.2.0 (the has_zict_* flags come from distributed.spill).
requires_zict_210 = pytest.mark.skipif(
    not has_zict_210,
    reason="requires zict version >= 2.1.0",
)
requires_zict_220 = pytest.mark.skipif(
    not has_zict_220,
    reason="requires zict version >= 2.2.0",
)
def psize(*objs) -> tuple[int, int]:
    """Return (total in-memory sizeof, total serialized byte length) of *objs."""
    memory_size = sum(sizeof(obj) for obj in objs)
    serialized_size = sum(
        len(frame) for obj in objs for frame in serialize_bytelist(obj)
    )
    return memory_size, serialized_size
def assert_buf(buf: SpillBuffer, expect_fast: dict, expect_slow: dict) -> None:
    """Assert that the buffer's fast (memory) and slow (disk) layers hold
    exactly the expected mappings, with consistent weight bookkeeping."""
    # Fast layer: contents, per-key weights, total weight, and lookups.
    assert dict(buf.fast) == expect_fast
    assert buf.fast.weights == {k: sizeof(v) for k, v in expect_fast.items()}
    assert buf.fast.total_weight == sum(sizeof(v) for v in expect_fast.values())
    for key, value in buf.fast.items():
        assert buf[key] is value
    # Slow layer: keys and (memory, serialized) weight pairs.
    assert set(buf.slow) == expect_slow.keys()
    # zict >= 2.2 wraps the slow mapping; unwrap to reach the weight metadata.
    slow = buf.slow.data if has_zict_220 else buf.slow  # type: ignore
    assert slow.weight_by_key == {k: psize(v) for k, v in expect_slow.items()}
    expected_total = psize(*expect_slow.values())
    assert slow.total_weight == expected_total
    assert buf.spilled_total == expected_total
def test_spillbuffer(tmpdir):
    """Core LRU behaviour with only a ``target`` threshold: keys spill from
    fast (memory) to slow (disk) when the target is exceeded, reads promote
    keys back, and deletes/updates keep the weight metadata consistent.
    """
    buf = SpillBuffer(str(tmpdir), target=300)
    # Convenience aliases
    assert buf.memory is buf.fast
    assert buf.disk is buf.slow
    assert_buf(buf, {}, {})
    a, b, c, d = "a" * 100, "b" * 99, "c" * 98, "d" * 97
    # Test assumption made by this test, mostly for non CPython implementations
    assert 100 < sizeof(a) < 200
    assert psize(a)[0] != psize(a)[1]
    buf["a"] = a
    assert_buf(buf, {"a": a}, {})
    assert buf["a"] == a
    buf["b"] = b
    assert_buf(buf, {"a": a, "b": b}, {})
    # Inserting c pushes the total over target; the LRU key "a" is spilled
    buf["c"] = c
    assert_buf(buf, {"b": b, "c": c}, {"a": a})
    # Reading "a" moves it back to fast, which in turn spills the new LRU "b"
    assert buf["a"] == a
    assert_buf(buf, {"a": a, "c": c}, {"b": b})
    buf["d"] = d
    assert_buf(buf, {"a": a, "d": d}, {"b": b, "c": c})
    # Deleting an in-memory key does not automatically move spilled keys back to memory
    del buf["a"]
    assert_buf(buf, {"d": d}, {"b": b, "c": c})
    with pytest.raises(KeyError):
        buf["a"]
    # Deleting a spilled key updates the metadata
    del buf["b"]
    assert_buf(buf, {"d": d}, {"c": c})
    with pytest.raises(KeyError):
        buf["b"]
    # Updating a spilled key moves it to the top of the LRU and to memory
    c2 = c * 2
    buf["c"] = c2
    assert_buf(buf, {"c": c2}, {"d": d})
    # Single key is larger than target and goes directly into slow
    e = "e" * 500
    buf["e"] = e
    assert_buf(buf, {"c": c2}, {"d": d, "e": e})
    # Updating a spilled key with another larger than target updates slow directly
    d = "d" * 500
    buf["d"] = d
    assert_buf(buf, {"c": c2}, {"d": d, "e": e})
@requires_zict_210
def test_spillbuffer_maxlim(tmpdir):
    """With ``max_spill`` set, keys that would not fit on disk stay in memory
    even if that overflows ``target``; each refusal logs 'disk reached
    capacity'.
    """
    buf = SpillBuffer(str(tmpdir), target=200, max_spill=600, min_log_interval=0)
    a, b, c, d, e = "a" * 200, "b" * 100, "c" * 99, "d" * 199, "e" * 98
    # size of a is bigger than target and is smaller than max_spill;
    # key should be in slow
    buf["a"] = a
    assert_buf(buf, {}, {"a": a})
    assert buf["a"] == a
    # size of b is smaller than target key should be in fast
    buf["b"] = b
    assert_buf(buf, {"b": b}, {"a": a})
    # size of c is smaller than target but b+c > target, c should stay in fast and b
    # move to slow since the max_spill limit has not been reached yet
    buf["c"] = c
    assert_buf(buf, {"c": c}, {"a": a, "b": b})
    # size of e < target but e+c > target, this will trigger movement of c to slow
    # but the max spill limit prevents it. Resulting in e remaining in fast
    with captured_logger(logging.getLogger("distributed.spill")) as logs_e:
        buf["e"] = e
    assert "disk reached capacity" in logs_e.getvalue()
    assert_buf(buf, {"c": c, "e": e}, {"a": a, "b": b})
    # size of d > target, d should go to slow but slow reached the max_spill limit then
    # d will end up on fast with c (which can't be move to slow because it won't fit
    # either)
    with captured_logger(logging.getLogger("distributed.spill")) as logs_d:
        buf["d"] = d
    assert "disk reached capacity" in logs_d.getvalue()
    assert_buf(buf, {"c": c, "d": d, "e": e}, {"a": a, "b": b})
    # Overwrite a key that was in slow, but the size of the new key is larger than
    # max_spill
    a_large = "a" * 500
    assert psize(a_large)[1] > 600  # size of max_spill
    with captured_logger(logging.getLogger("distributed.spill")) as logs_alarge:
        buf["a"] = a_large
    assert "disk reached capacity" in logs_alarge.getvalue()
    assert_buf(buf, {"a": a_large, "d": d, "e": e}, {"b": b, "c": c})
    # Overwrite a key that was in fast, but the size of the new key is larger than
    # max_spill
    d_large = "d" * 501
    with captured_logger(logging.getLogger("distributed.spill")) as logs_dlarge:
        buf["d"] = d_large
    assert "disk reached capacity" in logs_dlarge.getvalue()
    assert_buf(buf, {"a": a_large, "d": d_large, "e": e}, {"b": b, "c": c})
class MyError(Exception):
    """Raised by :class:`Bad` to simulate a pickling failure."""
    pass
class Bad:
    """Test helper with a configurable ``sizeof`` that always fails to pickle
    (``__getstate__`` raises :class:`MyError`)."""
    def __init__(self, size):
        # Reported via __sizeof__ so tests can steer spill decisions precisely.
        self.size = size
    def __getstate__(self):
        # Any attempt to pickle (i.e. spill) this object fails.
        raise MyError()
    def __sizeof__(self):
        return self.size
@requires_zict_210
def test_spillbuffer_fail_to_serialize(tmpdir):
    """Unpicklable values are never lost: a too-large unpicklable key raises
    on insertion (logged by the worker, not spill.py), while a small one
    silently stays in fast even when eviction is attempted.
    """
    buf = SpillBuffer(str(tmpdir), target=200, max_spill=600, min_log_interval=0)
    # bad data individually larger than spill threshold target 200
    a = Bad(size=201)
    # Exception caught in the worker
    with pytest.raises(TypeError, match="Could not serialize"):
        with captured_logger(logging.getLogger("distributed.spill")) as logs_bad_key:
            buf["a"] = a
    # spill.py must remain silent because we're already logging in worker.py
    assert not logs_bad_key.getvalue()
    assert_buf(buf, {}, {})
    b = Bad(size=100)  # this is small enough to fit in memory/fast
    buf["b"] = b
    assert_buf(buf, {"b": b}, {})
    c = "c" * 100
    with captured_logger(logging.getLogger("distributed.spill")) as logs_bad_key_mem:
        # This will go to fast and try to kick b out,
        # but keep b in fast since it's not pickable
        buf["c"] = c
    # worker.py won't intercept the exception here, so spill.py must dump the traceback
    logs_value = logs_bad_key_mem.getvalue()
    assert "Failed to pickle" in logs_value  # from distributed.spill
    assert "Traceback" in logs_value  # from distributed.spill
    assert_buf(buf, {"b": b, "c": c}, {})
@requires_zict_210
@pytest.mark.skipif(WINDOWS, reason="Needs chmod")
def test_spillbuffer_oserror(tmpdir):
    """Simulate a full / read-only disk: spill attempts fail with OSError,
    the failure is logged, and the affected keys remain in fast.
    """
    buf = SpillBuffer(str(tmpdir), target=200, max_spill=800, min_log_interval=0)
    a, b, c, d = (
        "a" * 200,
        "b" * 100,
        "c" * 201,
        "d" * 101,
    )
    # let's have something in fast and something in slow
    buf["a"] = a
    buf["b"] = b
    assert_buf(buf, {"b": b}, {"a": a})
    # modify permissions of disk to be read only.
    # This causes writes to raise OSError, just like in case of disk full.
    os.chmod(tmpdir, 0o555)
    # Add key > than target
    with captured_logger(logging.getLogger("distributed.spill")) as logs_oserror_slow:
        buf["c"] = c
    assert "Spill to disk failed" in logs_oserror_slow.getvalue()
    assert_buf(buf, {"b": b, "c": c}, {"a": a})
    del buf["c"]
    assert_buf(buf, {"b": b}, {"a": a})
    # add key to fast which is smaller than target but when added it triggers spill,
    # which triggers OSError
    with captured_logger(logging.getLogger("distributed.spill")) as logs_oserror_evict:
        buf["d"] = d
    assert "Spill to disk failed" in logs_oserror_evict.getvalue()
    assert_buf(buf, {"b": b, "d": d}, {"a": a})
@requires_zict_210
def test_spillbuffer_evict(tmpdir):
    """``SpillBuffer.evict`` returns the evicted weight on success and -1
    (leaving the key in fast) when the value cannot be pickled.
    """
    buf = SpillBuffer(str(tmpdir), target=300, min_log_interval=0)
    bad = Bad(size=100)
    a = "a" * 100
    buf["a"] = a
    assert_buf(buf, {"a": a}, {})
    # successful eviction
    weight = buf.evict()
    assert weight == sizeof(a)
    assert_buf(buf, {}, {"a": a})
    buf["bad"] = bad
    assert_buf(buf, {"bad": bad}, {"a": a})
    # unsuccessful eviction
    with captured_logger(logging.getLogger("distributed.spill")) as logs_evict_key:
        weight = buf.evict()
    assert weight == -1
    assert "Failed to pickle" in logs_evict_key.getvalue()
    # bad keys stays in fast
    assert_buf(buf, {"bad": bad}, {"a": a})
class NoWeakRef:
    """A class which
    1. reports an arbitrary managed memory usage
    2. does not support being targeted by weakref.ref()
    3. has a property `id` which changes every time it is unpickled
    """
    # __slots__ without '__weakref__' makes instances non-weakly-referenceable.
    __slots__ = ("size", "id")
    def __init__(self, size):
        self.size = size
        self.id = uuid.uuid4()
    def __sizeof__(self):
        return self.size
    def __reduce__(self):
        # Unpickling calls type(self)(self.size), which runs __init__ and
        # therefore generates a fresh uuid — so `id` differs after a
        # spill/unspill round-trip.
        return (type(self), (self.size,))
class SupportsWeakRef(NoWeakRef):
    """Same as :class:`NoWeakRef`, but adding ``__weakref__`` to the slots
    makes instances targetable by ``weakref.ref()``."""
    __slots__ = ("__weakref__",)
@pytest.mark.parametrize(
"cls,expect_cached",
[
(SupportsWeakRef, has_zict_220),
(NoWeakRef, False),
],
)
@pytest.mark.parametrize("size", [60, 110])
def test_weakref_cache(tmpdir, cls, expect_cached, size):
buf = SpillBuffer(str(tmpdir), target=100)
# Run this test twice:
# - x is smaller than target and is evicted by y;
# - x is individually larger than target and it never touches fast
x = cls(size)
buf["x"] = x
if size < 100:
buf["y"] = cls(60) # spill x
assert "x" in buf.slow
# Test that we update the weakref cache on setitem
assert (buf["x"] is x) == expect_cached
# Do not use id_x = id(x), as in CPython id's are C memory addresses and are reused
# by PyMalloc when you descope | |
<gh_stars>0
#!/usr/bin/env python
"""
Usage:
chem_tensorflow_sparse.py [options]
Options:
-h --help Show this screen.
--config-file FILE Hyperparameter configuration file path (in JSON format).
--config CONFIG Hyperparameter configuration dictionary (in JSON format).
--log_dir DIR Log dir name.
--data_dir DIR Data dir name.
--restore FILE File to restore weights from.
--freeze-graph-model Freeze weights of graph model components.
--evaluate example evaluation mode using a restored model
"""
from typing import List, Tuple, Dict, Sequence, Any
from docopt import docopt
from collections import defaultdict, namedtuple
import numpy as np
import tensorflow as tf
import sys, traceback
import pdb
import json
import math
from chem_tensorflow import ChemModel
from utils import glorot_init, SMALL_NUMBER
from sklearn.metrics import confusion_matrix
from sklearn.metrics import average_precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support
# Per-layer trainable components of the gated graph neural network.
GGNNWeights = namedtuple(
    'GGNNWeights',
    ['edge_weights', 'edge_biases', 'edge_type_attention_weights', 'rnn_cells'],
)
class SparseGGNNChemModel(ChemModel):
    def __init__(self, args):
        """Initialize the sparse GGNN model; all setup is delegated to
        the ChemModel base class, driven by the parsed CLI args."""
        super().__init__(args)
    @classmethod
    def default_params(cls):
        """Return the default hyperparameters: the ChemModel base defaults
        extended/overridden with sparse-GGNN-specific settings."""
        params = dict(super().default_params())
        params.update({
            'batch_size': 100000,
            'use_edge_bias': False,
            'use_propagation_attention': False,
            'use_edge_msg_avg_aggregation': True,
            #'residual_connections': { # For layer i, specify list of layers whose output is added as an input
            # "2": [0],
            # "4": [0, 2]
            # },
            'residual_connections': {},
            #'layer_timesteps': [2, 2, 1, 2, 1], # number of layers & propagation steps per layer
            'layer_timesteps': [5],
            'graph_rnn_cell': 'GRU', # GRU, CudnnCompatibleGRUCell, or RNN
            'graph_rnn_activation': 'tanh', # tanh, ReLU
            'graph_state_dropout_keep_prob': 1.,
            'task_sample_ratios': {},
            'edge_weight_dropout_keep_prob': .8
        })
        return params
    def prepare_specific_graph_model(self) -> None:
        """Create the TF1 placeholders and the per-layer trainable weights
        (edge transforms, optional per-edge-type biases/attention, and the
        per-layer RNN cell) used by the sparse GGNN propagation model."""
        h_dim = self.params['hidden_size']
        # Node features: one h_dim-sized row per node in the batch.
        self.placeholders['initial_node_representation'] = tf.placeholder(tf.float32, [None, h_dim],
                                                                          name='node_features')
        # One [num_edges, 2] index-pair list per edge type; column 1 is the
        # message target (see compute_final_node_representations).
        self.placeholders['adjacency_lists'] = [tf.placeholder(tf.int32, [None, 2], name='adjacency_e%s' % e)
                                                for e in range(self.num_edge_types)]
        self.placeholders['num_incoming_edges_per_type'] = tf.placeholder(tf.float32, [None, self.num_edge_types],
                                                                          name='num_incoming_edges_per_type')
        self.placeholders['graph_nodes_list'] = tf.placeholder(tf.int32, [None], name='graph_nodes_list')
        # Scalar dropout keep-probabilities, fed at train/eval time.
        self.placeholders['graph_state_keep_prob'] = tf.placeholder(tf.float32, None, name='graph_state_keep_prob')
        self.placeholders['edge_weight_dropout_keep_prob'] = tf.placeholder(tf.float32, None, name='edge_weight_dropout_keep_prob')
        activation_name = self.params['graph_rnn_activation'].lower()
        if activation_name == 'tanh':
            activation_fun = tf.nn.tanh
        elif activation_name == 'relu':
            activation_fun = tf.nn.relu
        else:
            raise Exception("Unknown activation function type '%s'." % activation_name)
        # Generate per-layer values for edge weights, biases and gated units:
        self.weights = {} # Used by super-class to place generic things
        self.gnn_weights = GGNNWeights([], [], [], [])
        for layer_idx in range(len(self.params['layer_timesteps'])):
            with tf.variable_scope('gnn_layer_%i' % layer_idx):
                # One [h_dim, h_dim] transform per edge type: Glorot-initialized
                # as a single matrix, reshaped, with dropout on the weights.
                edge_weights = tf.Variable(glorot_init([self.num_edge_types * h_dim, h_dim]),
                                           name='gnn_edge_weights_%i' % layer_idx)
                edge_weights = tf.reshape(edge_weights, [self.num_edge_types, h_dim, h_dim])
                edge_weights = tf.nn.dropout(edge_weights, keep_prob=self.placeholders['edge_weight_dropout_keep_prob'])
                self.gnn_weights.edge_weights.append(edge_weights)
                if self.params['use_propagation_attention']:
                    self.gnn_weights.edge_type_attention_weights.append(tf.Variable(np.ones([self.num_edge_types], dtype=np.float32),
                                                                                    name='edge_type_attention_weights_%i' % layer_idx))
                if self.params['use_edge_bias']:
                    self.gnn_weights.edge_biases.append(tf.Variable(np.zeros([self.num_edge_types, h_dim], dtype=np.float32),
                                                                    name='gnn_edge_biases_%i' % layer_idx))
                cell_type = self.params['graph_rnn_cell'].lower()
                if cell_type == 'gru':
                    cell = tf.nn.rnn_cell.GRUCell(h_dim, activation=activation_fun)
                elif cell_type == 'cudnncompatiblegrucell':
                    # CudnnCompatibleGRUCell hard-codes tanh internally.
                    assert(activation_name == 'tanh')
                    import tensorflow.contrib.cudnn_rnn as cudnn_rnn
                    cell = cudnn_rnn.CudnnCompatibleGRUCell(h_dim)
                elif cell_type == 'rnn':
                    cell = tf.nn.rnn_cell.BasicRNNCell(h_dim, activation=activation_fun)
                else:
                    raise Exception("Unknown RNN cell type '%s'." % cell_type)
                cell = tf.nn.rnn_cell.DropoutWrapper(cell,
                                                     state_keep_prob=self.placeholders['graph_state_keep_prob'])
                self.gnn_weights.rnn_cells.append(cell)
    def compute_final_node_representations(self) -> tf.Tensor:
        """Build the GGNN message-passing graph and return final node states.

        For each layer (and each timestep within a layer), per-edge-type
        messages are computed from source-node states, optionally re-weighted
        by a per-node attention softmax, aggregated per target node, and fed
        through the layer's RNN cell to produce updated node states.

        Returns:
            Tensor of shape [V, D]: the last layer's node states, where V is
            the total number of nodes in the batch and D the hidden dimension.
        """
        node_states_per_layer = []  # one entry per layer (final state of that layer), shape: number of nodes in batch v x D
        node_states_per_layer.append(self.placeholders['initial_node_representation'])
        # Dynamic node count of the batch; used as num_segments for the segment ops below.
        num_nodes = tf.shape(self.placeholders['initial_node_representation'], out_type=tf.int32)[0]
        # Flatten all edge types into one message list so aggregation can be a
        # single segment op instead of one op per edge type.
        message_targets = []  # list of tensors of message targets of shape [E]
        message_edge_types = []  # list of tensors of edge type of shape [E]
        for edge_type_idx, adjacency_list_for_edge_type in enumerate(self.placeholders['adjacency_lists']):
            # Column 1 of each adjacency list holds the edge's target node id
            # (column 0, read below, holds the source).
            edge_targets = adjacency_list_for_edge_type[:, 1]
            message_targets.append(edge_targets)
            # Tag every edge of this type with its type index so per-type
            # attention factors can be gathered after concatenation.
            message_edge_types.append(tf.ones_like(edge_targets, dtype=tf.int32) * edge_type_idx)
        message_targets = tf.concat(message_targets, axis=0)  # Shape [M]
        message_edge_types = tf.concat(message_edge_types, axis=0)  # Shape [M]
        for (layer_idx, num_timesteps) in enumerate(self.params['layer_timesteps']):
            with tf.variable_scope('gnn_layer_%i' % layer_idx):
                # Used shape abbreviations:
                #   V ~ number of nodes
                #   D ~ state dimension
                #   E ~ number of edges of current type
                #   M ~ number of messages (sum of all E)
                # Extract residual messages, if any (keyed by stringified layer index):
                layer_residual_connections = self.params['residual_connections'].get(str(layer_idx))
                if layer_residual_connections is None:
                    layer_residual_states = []
                else:
                    layer_residual_states = [node_states_per_layer[residual_layer_idx]
                                             for residual_layer_idx in layer_residual_connections]
                if self.params['use_propagation_attention']:
                    # Per-message scalar factor looked up from the layer's per-edge-type weights.
                    message_edge_type_factors = tf.nn.embedding_lookup(params=self.gnn_weights.edge_type_attention_weights[layer_idx],
                                                                       ids=message_edge_types)  # Shape [M]
                # Record new states for this layer. Initialised to last state, but will be updated below:
                node_states_per_layer.append(node_states_per_layer[-1])
                for step in range(num_timesteps):
                    with tf.variable_scope('timestep_%i' % step):
                        messages = []  # list of tensors of messages of shape [E, D]
                        message_source_states = []  # list of tensors of edge source states of shape [E, D]
                        # Collect incoming messages per edge type
                        for edge_type_idx, adjacency_list_for_edge_type in enumerate(self.placeholders['adjacency_lists']):
                            edge_sources = adjacency_list_for_edge_type[:, 0]
                            edge_source_states = tf.nn.embedding_lookup(params=node_states_per_layer[-1],
                                                                        ids=edge_sources)  # Shape [E, D]
                            # Message = source state transformed by this layer's weight for this edge type.
                            all_messages_for_edge_type = tf.matmul(edge_source_states,
                                                                   self.gnn_weights.edge_weights[layer_idx][edge_type_idx])  # Shape [E, D]
                            messages.append(all_messages_for_edge_type)
                            message_source_states.append(edge_source_states)
                        messages = tf.concat(messages, axis=0)  # Shape [M, D]
                        if self.params['use_propagation_attention']:
                            message_source_states = tf.concat(message_source_states, axis=0)  # Shape [M, D]
                            message_target_states = tf.nn.embedding_lookup(params=node_states_per_layer[-1],
                                                                           ids=message_targets)  # Shape [M, D]
                            # Raw score = dot product of source and target state, scaled per edge type.
                            message_attention_scores = tf.einsum('mi,mi->m', message_source_states, message_target_states)  # Shape [M]
                            message_attention_scores = message_attention_scores * message_edge_type_factors
                            # The following is softmax-ing over the incoming messages per node.
                            # As the number of incoming varies, we can't just use tf.softmax. Reimplement with logsumexp trick:
                            # Step (1): Obtain shift constant as max of messages going into a node
                            message_attention_score_max_per_target = tf.unsorted_segment_max(data=message_attention_scores,
                                                                                             segment_ids=message_targets,
                                                                                             num_segments=num_nodes)  # Shape [V]
                            # Step (2): Distribute max out to the corresponding messages again, and shift scores:
                            message_attention_score_max_per_message = tf.gather(params=message_attention_score_max_per_target,
                                                                                indices=message_targets)  # Shape [M]
                            message_attention_scores -= message_attention_score_max_per_message
                            # Step (3): Exp, sum up per target, compute exp(score) / exp(sum) as attention prob:
                            message_attention_scores_exped = tf.exp(message_attention_scores)  # Shape [M]
                            message_attention_score_sum_per_target = tf.unsorted_segment_sum(data=message_attention_scores_exped,
                                                                                             segment_ids=message_targets,
                                                                                             num_segments=num_nodes)  # Shape [V]
                            message_attention_normalisation_sum_per_message = tf.gather(params=message_attention_score_sum_per_target,
                                                                                        indices=message_targets)  # Shape [M]
                            # SMALL_NUMBER guards against division by zero for nodes with no incoming edges.
                            message_attention = message_attention_scores_exped / (message_attention_normalisation_sum_per_message + SMALL_NUMBER)  # Shape [M]
                            # Step (4): Weigh messages using the attention prob:
                            messages = messages * tf.expand_dims(message_attention, -1)
                        incoming_messages = tf.unsorted_segment_sum(data=messages,
                                                                    segment_ids=message_targets,
                                                                    num_segments=num_nodes)  # Shape [V, D]
                        if self.params['use_edge_bias']:
                            # Per-type incoming-edge counts matmul per-type biases gives a per-node bias.
                            incoming_messages += tf.matmul(self.placeholders['num_incoming_edges_per_type'],
                                                           self.gnn_weights.edge_biases[layer_idx])  # Shape [V, D]
                        if self.params['use_edge_msg_avg_aggregation']:
                            # Turn the sum into a mean over each node's incoming edges.
                            num_incoming_edges = tf.reduce_sum(self.placeholders['num_incoming_edges_per_type'],
                                                               keep_dims=True, axis=-1)  # Shape [V, 1]
                            incoming_messages /= num_incoming_edges + SMALL_NUMBER
                        incoming_information = tf.concat(layer_residual_states + [incoming_messages],
                                                         axis=-1)  # Shape [V, D*(1 + num of residual connections)]
                        # pass updated vertex features into RNN cell; index [1] selects the
                        # new state from the (output, state) pair the cell returns.
                        node_states_per_layer[-1] = self.gnn_weights.rnn_cells[layer_idx](incoming_information,
                                                                                          node_states_per_layer[-1])[1]  # Shape [V, D]
        return node_states_per_layer[-1]
def gated_regression(self, last_h, regression_gate, regression_transform):
# last_h: [v x h]
gate_input = tf.concat([last_h, self.placeholders['initial_node_representation']], axis=-1) # [v x 2h]
gated_outputs = tf.nn.sigmoid(regression_gate(gate_input)) * regression_transform(last_h) # [v x 1]
# Sum up all nodes per-graph
graph_representations = tf.unsorted_segment_sum(data=gated_outputs,
segment_ids=self.placeholders['graph_nodes_list'],
num_segments=self.placeholders['num_graphs']) # [g x 1]
output = tf.squeeze(graph_representations) # [g]
self.output = output
return output
def classification_task(self, last_h, classification_gate, classification_transform):
# last_h: [v x h]
gate_input = tf.concat([last_h, self.placeholders['initial_node_representation']], axis=-1) # [v x 2h]
gated_outputs = tf.nn.sigmoid(classification_gate(gate_input)) * classification_transform(last_h) # [v x 2]
# Sum up all nodes per-graph
graph_representations = tf.unsorted_segment_sum(data=gated_outputs,
#graph_representations = tf.unsorted_segment_mean(data=_output,
segment_ids=self.placeholders['graph_nodes_list'],
num_segments=self.placeholders['num_graphs']) # [g x 2]
output = graph_representations
self.output = output
self.node_outputs = gated_outputs
return output
def classification_task_graphb4classify(self, last_h, classification_layer):
_input = last_h # [v x h]
graph_representations = tf.unsorted_segment_mean(data=_input,
segment_ids=self.placeholders['graph_nodes_list'],
num_segments=self.placeholders['num_graphs']) # [g x h]
output = classification_layer(graph_representations) # [g x 2]
self.output = output
return output
def classification_task_org(self, last_h, classification_layer):
_input = last_h # [v x h]
_output = classification_layer(_input) # [v x 2]
# Sum up all nodes per-graph
#graph_representations = tf.unsorted_segment_sum(data=_output,
graph_representations = tf.unsorted_segment_mean(data=_output,
segment_ids=self.placeholders['graph_nodes_list'],
num_segments=self.placeholders['num_graphs']) # [g x 2]
output = graph_representations
self.output = output
return output
def classification_task_1(self, last_h):
# convert last hidden layer of size [v x h] to [v, 2]
# last_h: [v x h]
num_input_units = last_h.shape[0] * last_h.shape[1]
num_output_units = 2
num_hidden_units = 500
last_h = tf.reshape(last_h, [-1, num_input_units])
_input = last_h
weights = {
'hidden': tf.Variable(tf.random_normal([input_num_units, hidden_num_units], seed=0)),
'output': tf.Variable(tf.random_normal([hidden_num_units, output_num_units], seed=0))
}
biases = {
'hidden': tf.Variable(tf.random_normal([hidden_num_units], seed=0)),
'output': tf.Variable(tf.random_normal([output_num_units], seed=0))
}
hidden_layer = tf.add(tf.matmul(_input, weights['hidden']), biases['hidden'])
hidden_layer = tf.nn.relu(hidden_layer)
output_layer = tf.add(tf.matmul(hidden_layer, weights['output']), biases['output'])
#_input = tf.concat([last_h, self.placeholders['initial_node_representation']], axis=-1) # [v x 2h]
self.output = output
return output
# ----- Data preprocessing and chunking into minibatches:
#def process_raw_graphs(self, raw_data: Sequence[Any], is_training_data: bool) -> Any:
def process_raw_graphs(self, raw_data, is_training_data):
for d in raw_data:
(adjacency_lists, num_incoming_edge_per_type) = self.__graph_to_adjacency_lists(d['graph'])
processed_graph = ({"adjacency_lists": adjacency_lists,
"num_incoming_edge_per_type": num_incoming_edge_per_type,
"init": d["node_features"],
"labels": [d["targets"][task_id][0] for task_id in self.params['task_ids']]})
yield processed_graph
#if is_training_data:
# np.random.shuffle(processed_graphs)
# for task_id in self.params['task_ids']:
# task_sample_ratio = self.params['task_sample_ratios'].get(str(task_id))
# if task_sample_ratio is not None:
# ex_to_sample = int(len(processed_graphs) * task_sample_ratio)
# for ex_id in range(ex_to_sample, len(processed_graphs)):
# processed_graphs[ex_id]['labels'][task_id] = None
def __graph_to_adjacency_lists(self, graph) -> Tuple[Dict[int, np.ndarray], Dict[int, Dict[int, int]]]:
adj_lists = defaultdict(list)
num_incoming_edges_dicts_per_type = defaultdict(lambda: defaultdict(lambda: 0))
for src, e, dest in graph:
fwd_edge_type = e - 1 # Make edges start from 0
adj_lists[fwd_edge_type].append((src, dest))
num_incoming_edges_dicts_per_type[fwd_edge_type][dest] += 1
if self.params['tie_fwd_bkwd']:
adj_lists[fwd_edge_type].append((dest, src))
num_incoming_edges_dicts_per_type[fwd_edge_type][src] += 1
final_adj_lists = {e: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.