input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
for sum_ambiguous_key in sum_ambiguous_keys: # for different comparison types
sum_ambiguous = results[sum_ambiguous_key]
for sum_ambiguous_type, val in sum_ambiguous.items(): # for different features
logger_utils.log_metric("{}".format(sum_ambiguous_type), val)
# 3) summaries
# ------------------------------------------------------------
if self._summarize:
# ---- Test VC Summaries
summary = tf.Summary()
if self.test_vc_mode():
summary = self._add_test_vc_summaries(self._vc_overlap, batch)
self._writer.add_summary(summary, batch)
# If also test Interest Filter, see vc core olap (the SCAE before Interest Filter and pooling, norming etc)
if self.test_if_mode():
summary = self._add_test_vc_summaries(self._vc_core_overlap, batch, fig_scope="core")
self._writer.add_summary(summary, batch)
# ---- Test DG Summaries
if self.test_dg_mode():
summary = self._add_test_dg_summaries(self._dg_overlap, batch)
self._writer.add_summary(summary, batch)
# ---- Lake test results
# even if not running oneshot or instance modes, workflow records what it can, so just print what's available
self._add_completion_summary(summary, batch) # big table of characters and images at every stage in AMTL
# images of comparison metrics (few shot and instance tests)
if match_mse_key in results:
matching_matrices = results.pop(match_mse_key)
# remove but ignore the olap one
if match_olap_key in results:
results.pop(match_olap_key)
logging.debug(matching_matrices.keys())
accuracy_metrics = ['truth', 'dg_hidden', 'dg_recon', 'vc_recon']
matrices = []
present_features = []
for feature, matrix in matching_matrices.items():
if feature not in accuracy_metrics:
continue
matrices.append(matrix)
present_features.append(feature)
matrices_4d = np.expand_dims(np.array(matrices), axis=3)
image_utils.arbitrary_image_summary(summary, matrices_4d,
name=self._component.name + '/few-shot-summaries/' + 'matching_matrices',
image_names=present_features)
# all the metrics added to losses[] (called results here) at different stages
for metric, metric_value in results.items():
if metric in skip_summaries:
continue
summary.value.add(tag=self._component.name + '/few-shot-summaries/' + metric, simple_value=metric_value)
self._writer.add_summary(summary, batch)
self._writer.flush()
def _create_data_subset(self):
    """Write a filtered subset of the train/test batch to a TFRecords file.

    A sample is kept when its intra-class overlap with the batch dominates
    its inter-class overlap (see the keep_fn actually passed below).
    """
    # if the overlap main modes are: mu_intra, and mu_inter, with mu_intra > mu_inter
    # and a good point in between is T, then T is threshold to separate good and bad samples Si, from the batch, B
    # THEN: only keep if: min(overlap_intra(Si, B)) > T and max(overlap_inter(Si, B)) < T
    thresh = 60
    margin = 0

    def overlap_test_thresh(idx, vals, labels):
        # NOTE(review): currently unused — write_subset below gets keep_fn=overlap_test_error.
        # Also uses np.max(overlap_intra) while the comment above specifies min(); confirm intent.
        # Relies on the closure over vals_test/labels_test defined later in this method.
        overlap_intra, overlap_inter = overlap_sample_batch(idx, vals, labels, vals_test, labels_test)
        keep = (np.max(overlap_intra) > thresh + margin) and (np.max(overlap_inter) < thresh - margin)
        return keep

    def overlap_test_error(idx, vals, labels, vals_test, labels_test):
        # Keep a sample when its best intra-class overlap beats its best inter-class overlap.
        overlap_intra, overlap_inter = overlap_sample_batch(idx, vals, labels, vals_test, labels_test)
        return np.max(overlap_intra) > np.max(overlap_inter)

    vals_train, labels_train = self._training_features['vc'], self._training_features['labels']
    vals_test, labels_test = self._testing_features['vc'], self._testing_features['labels']
    filename = './data/mnist_recordset/mnist_subset.tfrecords'
    dataset_shape = self._dataset.shape  # [-1, 28, 28, 1]
    data_utils.write_subset(filename, dataset_shape,
                            vals_train, labels_train,
                            vals_test, labels_test,
                            keep_fn=overlap_test_error)
def _prep_for_summaries_core(self, input_images, batch_type):
    """Stash the main EpisodicComponent signals for later use by summaries."""
    _, input_shape = self._component.get_signal('vc_input')
    self._summary_images['vc_input_' + batch_type] = ('vc_input', input_images, input_shape)
    # vc = self._component.get_vc().get_encoding()
    # Pooled and normed encoding (unpooled when VC direct pooling is in use).
    encoding = self._component.get_vc_encoding()
    _, encoding_shape = self._component.get_signal('vc')
    self._summary_images['vc_' + batch_type] = ('vc', encoding, encoding_shape)
    if self._component.get_pc() is not None:
        # 'training' carries the target from DG (-1, 1); 'encoding' carries the PR output (0, 1).
        pc_direct = self._component.get_pc().get_input(batch_type)
        _, pc_in_shape = self._component.get_signal('pc_input')
        self._summary_images['pc_in_' + batch_type] = ('pc_input', pc_direct, pc_in_shape)
def _prep_for_summaries_after_training(self, input_images):
    """Reset collected summary images, then record the training-phase signals.

    Captures: training input, training VC output, and training PC input
    (when a PC is present).
    """
    self._summary_images.clear()
    self._prep_for_summaries_core(input_images, 'training')
def _prep_for_summaries_after_completion(self, input_images, with_comparison_images=True):
    """Record encoding-phase signals (and optional comparison images) for summaries.

    Captures: encoding input, encoding VC output, and (when a PC is present)
    encoding PC input/output plus the raw EC output when pattern-mapping raw is on.
    """
    self._prep_for_summaries_core(input_images, 'encoding')
    batch_size = self._component.batch_size()
    test_labels = self._testing_features['labels']
    train_labels = self._training_features['labels']
    if with_comparison_images:
        # Overlap-based (not MSE) comparison of training vs encoding VC signals.
        create_and_add_comparison_image(self._summary_images, batch_size, name='vc_ovlap',
                                        train_key='vc_training', test_key='vc_encoding',
                                        use_mse=False, k=-1, train_labels=train_labels, test_labels=test_labels)
    if self._component.get_pc() is not None:
        pc = self._component.get_pc().get_decoding()
        _, pc_shape = self._component.get_signal('pc')
        self._summary_images['pc_encoding'] = ('pc', pc, pc_shape)
        self._summary_images['pc_training'] = ('pc', self._training_features['pc'], pc_shape)
        if with_comparison_images:
            # MSE comparisons of PC input, in both train->test and test->train ('tf') directions.
            create_and_add_comparison_image(self._summary_images, batch_size, name='pc_in_mse',
                                            train_key='pc_in_training', test_key='pc_in_encoding',
                                            use_mse=True, k=-1, train_labels=train_labels, test_labels=test_labels)
            create_and_add_comparison_image(self._summary_images, batch_size, name='pc_in_tf_mse',
                                            train_key='pc_in_encoding', test_key='pc_in_training',
                                            use_mse=True, k=-1, train_labels=train_labels, test_labels=test_labels)
            # create_and_add_comparison_image(self._summary_images, batch_size, name='pc_out_ovlap',
            #                                 train_key='pc_in_training', test_key='pc_encoding',
            #                                 use_mse=True, k=-1, train_labels=train_labels, test_labels=test_labels)
            # create_and_add_comparison_image(self._summary_images, batch_size, name='pc_out_mse',
            #                                 train_key='pc_in_training', test_key='pc_encoding',
            #                                 use_mse=True, k=-1, train_labels=train_labels, test_labels=test_labels)
        # NOTE(review): placed inside the PC branch since get_pc() is dereferenced
        # without a None check — confirm against the original layout.
        if self._component.get_pc().use_pm_raw is True:
            ec_out_raw = self._component.get_pc().get_ec_out_raw()
            _, ec_out_raw_shape = self._component.get_signal('ec_out_raw')
            self._summary_images['ec_out_raw'] = ('ec_out_raw', ec_out_raw, ec_out_raw_shape)
    if 'pc_at_vc' in self._testing_features:
        # PC decoded back down to VC-input space; shape matches the VC input signal.
        pc_at_vc = self._testing_features['pc_at_vc']
        _, pc_at_vc_shape = self._component.get_signal('vc_input')
        self._summary_images['pc_at_vc'] = ('pc_at_vc', pc_at_vc, pc_at_vc_shape)
def _add_completion_summary(self, summary, batch):
    """
    Show all the relevant images put in _summary_images by the _prep methods.
    They are collected during training and testing.
    """
    # Order of this list defines display order; missing entries are skipped below.
    images_names = [
        'vc_input_training',
        # 'vc_training',
        'pc_in_training',
        'vc_input_encoding',
        # 'vc_encoding',  # not very informative
        'pc_in_encoding',
        'pc_encoding',
        'pc_at_vc'  # available when not using interest filter
    ]
    if self._hparams.pc_type != 'hl':
        # Non-hopfield PC: no separate PC-input stages to show.
        images_names = [
            'vc_input_training',
            'pc_training',
            'vc_input_encoding',
            'pc_encoding',
            'pc_at_vc'  # available when not using interest filter
        ]
        # images_names.remove('pc_in_training')
        # images_names.remove('pc_in_encoding')
        # images_names.remove('pc_encoding')
    if self._hparams.use_pm:
        images_names += ['ec_out_raw']
    if self._add_comparison_images:
        # NOTE(review): 'pc_out_mse' is never added to _summary_images in this file
        # (its creation is commented out), so it is always filtered out below — confirm.
        images_names += [
            'vc_ovlap',
            'pc_in_mse',
            'pc_in_tf_mse',
            'pc_out_mse'
        ]
    summary_images = []
    for image_name in images_names:  # this defines the order
        if image_name in self._summary_images:  # only try to add if it is possible
            summary_images.append(self._summary_images[image_name])
    if self._opts['summarize_completion'] != 'none':
        # 'to_file' writes images to disk; anything else goes to the summary writer.
        to_file = (self._opts['summarize_completion'] == 'to_file')
        if self._paper is True:
            add_completion_summary_paper(summary_images, self._summary_dir, batch, to_file)
        else:
            add_completion_summary(summary_images, self._summary_dir, summary, batch, to_file)
def _compute_few_shot_metrics(self, losses, modes, pc_in, pc_completion, pc_at_dg, pc_at_vc):
    """
    After information flows through AMTL up to PC, then we decode back through the dg and vc.
    Find the best matching sample from the train batch, for each of these stages,
    by comparing with the equivalent signal (given by the 'key' in the features dic)
    i.e. compare test pc completion with the train dg encoding

    @:param losses add the metrics to this dict (keyed by the match_* constants)
    @:param modes 'oneshot' or 'instance' (see hyperparams for workflow)
    @:param pc_in, pc_completion, pc_at_dg, pc_at_vc decoded signals; any may be None
    """
    testing_features = {
        'labels': self._testing_features['labels'],
        'vc': self._testing_features['vc']  # hack, to test just using ff vc, with mse (instead of overlap)
    }
    if pc_completion is not None:
        testing_features['pc'] = pc_completion  # comparison with whole PC including cue_nn
        testing_features['pc_in'] = pc_in  # comparison _without_ PC (but with cue_nn)
    if pc_at_dg is not None:
        testing_features['dg_recon'] = pc_at_dg
    if pc_at_vc is not None:
        testing_features['vc_recon'] = pc_at_vc
        testing_features['pc_at_vc'] = pc_at_vc

    def add_matching_to_averages(matching_matrices_, matching_accuracies_, sum_ambiguous_, keys, prefixes):
        # Store raw results in losses under keys[0..2], then accumulate per-feature
        # accuracies/ambiguities (prefixed) for end-of-run averaging.
        losses[keys[0]] = matching_matrices_
        losses[keys[1]] = matching_accuracies_
        losses[keys[2]] = sum_ambiguous_
        # record loss for the metrics - to report averages later on
        for accuracy_type, val in matching_accuracies_.items():
            if accuracy_type == 'labels':
                continue
            self._report_average_metric('{}_{}'.format(prefixes[0], accuracy_type), val)
        for ambig_type, val in sum_ambiguous_.items():
            self._report_average_metric('{}_{}'.format(prefixes[1], ambig_type), val)

    # matching matrices method (mse on a completion at different levels of hierarchy)
    matching_matrices, matching_accuracies, sum_ambiguous = compute_matching(modes, self._training_features,
                                                                             testing_features, 'mse')
    add_matching_to_averages(matching_matrices, matching_accuracies, sum_ambiguous,
                             keys=[match_mse_key, match_acc_mse_key, sum_ambiguous_mse_key],
                             prefixes=['acc_mse', 'amb_mse'])
    # matching matrices method ----> *Test vs Train (inverse of Lake)*
    matching_matrices, matching_accuracies, sum_ambiguous = compute_matching(modes, testing_features,
                                                                             self._training_features, 'mse')
    add_matching_to_averages(matching_matrices, matching_accuracies, sum_ambiguous,
                             keys=[match_mse_tf_key, match_acc_mse_tf_key, sum_ambiguous_mse_tf_key],
                             prefixes=['acc_mse_tf', 'amb_mse_tf'])
    # matching matrices method (olap on a completion at different levels of hierarchy)
    matching_matrices, matching_accuracies, sum_ambiguous = compute_matching(modes, self._training_features,
                                                                             testing_features, 'overlap')
    add_matching_to_averages(matching_matrices, matching_accuracies, sum_ambiguous,
                             keys=[match_olap_key, match_acc_olap_key, sum_ambiguous_olap_key],
                             prefixes=['acc_olap', 'amb_olap'])
    # overlap, test-vs-train direction
    matching_matrices, matching_accuracies, sum_ambiguous = compute_matching(modes, testing_features,
                                                                             self._training_features, 'overlap')
    add_matching_to_averages(matching_matrices, matching_accuracies, sum_ambiguous,
                             keys=[match_olap_tf_key, match_acc_olap_tf_key, sum_ambiguous_olap_tf_key],
                             prefixes=['acc_olap_tf', 'amb_olap_tf'])
def _report_average_metric(self, name, val):
    """
    Report this metric as an average value across batches.
    It is recorded here, and printed at the end of the run.

    :param name: metric identifier, used as key into the totals dict
    :param val: value to accumulate for this batch
    """
    logging.debug('report average: ' + name)
    # setdefault replaces the 'name not in dict.keys()' membership test + insert.
    self._total_losses.setdefault(name, []).append(val)
def _extract_features(self, fetched):
    """
    dic(key=feature, val=[label 1 value, label 2 value, label 3 value, ......])
    feature can also = 'label' and values are the label names
    """
    features = {'labels': fetched['labels']}
    if self._component.is_build_dg():
        # DG is optional; record both its encoding and reconstruction when built.
        features['dg_hidden'] = self._component.get_dg().get_encoding()
        features['dg_recon'] = self._component.get_dg().get_decoding()
    vc_hidden, vc_recon = self.get_vc_encoding_decoding()
    features['vc'] = vc_hidden
    features['vc_recon'] = vc_recon
    vc_core_hidden, vc_core_recon = self.get_vc_core_encoding_decoding()
    features['vc_core_hidden'] = vc_core_hidden
    features['vc_core_recon'] = vc_core_recon
    return features
def _add_test_dg_summaries(self, results, batch):
    """Write a histogram summary of the DG inter-class overlap values and return it."""
    num_bins = 200
    inter_vals = results['inter']
    # Histogram spans the observed value range exactly.
    summary = tf_utils.histogram_summary(tag=self._component.name + '/fs/dg_inter', values=inter_vals,
                                         bins=num_bins, minimum=np.min(inter_vals),
                                         maximum=np.max(inter_vals))
    self._writer.add_summary(summary, batch)
    return summary
def _add_test_vc_summaries(self, results, batch, scope="", fig_scope=""):
bins = 200
num_sd = 4.0 # How much spread visible
if scope != "":
scope = scope + "_"
# overlap comparison
inter = results[scope + 'inter'] # important to pop so that we can add the rest to the summaries easily
intra = results[scope + 'intra']
inter_per_label = results.get(scope + 'inter_per_label', {})
intra_per_label = results.get(scope + 'intra_per_label', {})
dbug = False
if dbug:
# Calc dist stats
min_inter = np.min(inter)
max_inter | |
"""
Tests for Image Management Service (IMS) CLI subcommand (`cray ims`)
and options.
MIT License
(C) Copyright [2020] Hewlett Packard Enterprise Development LP
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# pylint: disable=invalid-name
# pylint: disable=too-many-arguments
# pylint: disable=unused-argument
import json
import os
from ..utils.runner import cli_runner # pylint: disable=unused-import
from ..utils.rest import rest_mock # pylint: disable=unused-import
from ..utils.utils import new_random_string
def compare_output(expected, cli_output):
    """
    Assert that the expected values appear in the CLI output.

    Drops everything up to and including the last line containing ':' (the
    help header), then compares the remaining stripped lines as a set.
    """
    lines = [line.strip() for line in cli_output.splitlines()]
    found = False
    for idx in range(len(lines) - 1, -1, -1):
        if ':' in lines[idx]:
            found = True
            del lines[0:idx + 1]
            break
    assert found
    assert set(expected) == set(lines)
# pylint: disable=redefined-outer-name
def test_cray_ims_base(cli_runner, rest_mock):
    """Verify `cray ims` lists all of its expected subcommand groups."""
    runner, cli, _ = cli_runner
    res = runner.invoke(cli, ['ims'])
    assert res.exit_code == 0
    expected = ["deleted", "public-keys", "recipes", "images", "jobs"]
    compare_output(expected, res.output)
# pylint: disable=redefined-outer-name
def test_cray_ims_deleted_base(cli_runner, rest_mock):
    """ Test cray ims deleted base command """
    runner, cli, _ = cli_runner
    result = runner.invoke(cli, ['ims', 'deleted'])
    assert result.exit_code == 0
    outputs = [
        "public-keys",
        "recipes",
        "images",
    ]
    compare_output(outputs, result.output)
# pylint: disable=redefined-outer-name
def test_cray_ims_public_keys_base(cli_runner, rest_mock):
    """Verify `cray ims public-keys` lists all of its subcommands."""
    runner, cli, _ = cli_runner
    res = runner.invoke(cli, ['ims', 'public-keys'])
    assert res.exit_code == 0
    expected = ["create", "delete", "deleteall", "describe", "list"]
    compare_output(expected, res.output)
# pylint: disable=redefined-outer-name
def test_cray_ims_public_keys_delete(cli_runner, rest_mock):
    """Verify `cray ims public-keys delete` sends DELETE to the v3 endpoint."""
    runner, cli, config = cli_runner
    res = runner.invoke(cli, ['ims', 'public-keys', 'delete', 'foo'])
    assert res.exit_code == 0
    req = json.loads(res.output)
    host = config['default']['hostname']
    assert req['method'] == 'DELETE'
    assert req['url'] == host + '/apis/ims/v3/public-keys/foo'
# pylint: disable=redefined-outer-name
def test_cray_ims_public_keys_delete_all(cli_runner, rest_mock):
    """ Test cray ims public_keys deleteall """
    runner, cli, config = cli_runner
    result = runner.invoke(cli, ['ims', 'public-keys', 'deleteall'])
    assert result.exit_code == 0
    data = json.loads(result.output)
    assert data['method'] == 'DELETE'
    assert data['url'] == '{}/apis/ims/v3/public-keys'.format(
        config['default']['hostname']
    )
# pylint: disable=redefined-outer-name
def test_cray_ims_public_keys_list(cli_runner, rest_mock):
    """Verify `cray ims public-keys list` sends GET to the collection URL."""
    runner, cli, config = cli_runner
    res = runner.invoke(cli, ['ims', 'public-keys', 'list'])
    assert res.exit_code == 0
    req = json.loads(res.output)
    host = config['default']['hostname']
    assert req['method'] == 'GET'
    assert req['url'] == host + '/apis/ims/v3/public-keys'
# pylint: disable=redefined-outer-name
def test_cray_ims_public_keys_describe(cli_runner, rest_mock):
    """Verify `cray ims public-keys describe` sends GET to the item URL."""
    runner, cli, config = cli_runner
    res = runner.invoke(cli, ['ims', 'public-keys', 'describe', 'foo'])
    assert res.exit_code == 0
    req = json.loads(res.output)
    host = config['default']['hostname']
    assert req['method'] == 'GET'
    assert req['url'] == host + '/apis/ims/v3/public-keys/foo'
# pylint: disable=redefined-outer-name
def test_cray_ims_public_keys_create(cli_runner, rest_mock):
    """Verify `cray ims public-keys create` POSTs the name and key file contents."""
    runner, cli, config = cli_runner
    key_file = os.path.join(
        os.path.dirname(__file__),
        '../files/text.txt'
    )
    res = runner.invoke(
        cli,
        ['ims', 'public-keys', 'create', '--name', 'foo',
         '--public-key', key_file]
    )
    # The CLI reads the file itself; read it again here to compare bodies.
    with open(key_file, encoding='utf-8') as key_fh:
        key_text = key_fh.read()
    assert res.exit_code == 0
    req = json.loads(res.output)
    host = config['default']['hostname']
    assert req['method'] == 'POST'
    assert req['url'] == host + '/apis/ims/v3/public-keys'
    assert req['body'] == {
        'name': 'foo',
        'public_key': key_text
    }
# pylint: disable=redefined-outer-name
def test_cray_ims_public_keys_create_missing_required(cli_runner, rest_mock):
    """Verify create exits with code 2 and mentions the missing --public-key."""
    runner, cli, _ = cli_runner
    res = runner.invoke(
        cli,
        ['ims', 'public-keys', 'create', '--name', 'foo']
    )
    assert res.exit_code == 2
    assert '--public-key' in res.output
# pylint: disable=redefined-outer-name
def test_cray_ims_deleted_public_keys_base(cli_runner, rest_mock):
    """ Test cray ims deleted public-keys base command """
    runner, cli, _ = cli_runner
    result = runner.invoke(cli, ['ims', 'deleted', 'public-keys'])
    assert result.exit_code == 0
    outputs = [
        "update",
        "delete",
        "deleteall",
        "describe",
        "list",
    ]
    compare_output(outputs, result.output)
# pylint: disable=redefined-outer-name
def test_cray_ims_deleted_public_keys_delete(cli_runner, rest_mock):
    """ Test cray ims deleted public_keys delete ... """
    runner, cli, config = cli_runner
    result = runner.invoke(cli, ['ims', 'deleted', 'public-keys', 'delete', 'foo'])
    assert result.exit_code == 0
    data = json.loads(result.output)
    assert data['method'] == 'DELETE'
    assert data['url'] == '{}/apis/ims/v3/deleted/public-keys/foo'.format(
        config['default']['hostname']
    )
# pylint: disable=redefined-outer-name
def test_cray_ims_deleted_public_keys_delete_all(cli_runner, rest_mock):
    """ Test cray ims deleted public_keys deleteall """
    runner, cli, config = cli_runner
    result = runner.invoke(cli, ['ims', 'deleted', 'public-keys', 'deleteall'])
    assert result.exit_code == 0
    data = json.loads(result.output)
    assert data['method'] == 'DELETE'
    assert data['url'] == '{}/apis/ims/v3/deleted/public-keys'.format(
        config['default']['hostname']
    )
# pylint: disable=redefined-outer-name
def test_cray_ims_deleted_public_keys_update(cli_runner, rest_mock):
    """Verify deleted public-keys update PATCHes an undelete operation."""
    runner, cli, config = cli_runner
    res = runner.invoke(cli, ['ims', 'deleted', 'public-keys', 'update', 'foo',
                              '--operation', 'undelete'])
    assert res.exit_code == 0
    req = json.loads(res.output)
    host = config['default']['hostname']
    assert req['method'] == 'PATCH'
    assert req['url'] == host + '/apis/ims/v3/deleted/public-keys/foo'
    assert req['body'] == {
        'operation': 'undelete'
    }
# pylint: disable=redefined-outer-name
def test_cray_ims_deleted_public_keys_list(cli_runner, rest_mock):
    """Verify deleted public-keys list sends GET to the deleted collection URL."""
    runner, cli, config = cli_runner
    res = runner.invoke(cli, ['ims', 'deleted', 'public-keys', 'list'])
    assert res.exit_code == 0
    req = json.loads(res.output)
    host = config['default']['hostname']
    assert req['method'] == 'GET'
    assert req['url'] == host + '/apis/ims/v3/deleted/public-keys'
# pylint: disable=redefined-outer-name
def test_cray_ims_deleted_public_keys_describe(cli_runner, rest_mock):
    """Verify deleted public-keys describe sends GET to the deleted item URL."""
    runner, cli, config = cli_runner
    res = runner.invoke(cli, ['ims', 'deleted', 'public-keys', 'describe', 'foo'])
    assert res.exit_code == 0
    req = json.loads(res.output)
    host = config['default']['hostname']
    assert req['method'] == 'GET'
    assert req['url'] == host + '/apis/ims/v3/deleted/public-keys/foo'
# pylint: disable=redefined-outer-name
def test_cray_ims_recipes_base(cli_runner, rest_mock):
    """Verify `cray ims recipes` lists all of its subcommands."""
    runner, cli, _ = cli_runner
    res = runner.invoke(cli, ['ims', 'recipes'])
    assert res.exit_code == 0
    expected = ["create", "delete", "deleteall", "describe", "list", "update"]
    compare_output(expected, res.output)
# pylint: disable=redefined-outer-name
def test_cray_ims_recipes_delete(cli_runner, rest_mock):
    """Verify `cray ims recipes delete` sends DELETE to the item URL."""
    runner, cli, config = cli_runner
    res = runner.invoke(cli, ['ims', 'recipes', 'delete', 'foo'])
    assert res.exit_code == 0
    req = json.loads(res.output)
    host = config['default']['hostname']
    assert req['method'] == 'DELETE'
    assert req['url'] == host + '/apis/ims/v3/recipes/foo'
# pylint: disable=redefined-outer-name
def test_cray_ims_recipes_delete_all(cli_runner, rest_mock):
    """ Test cray ims recipes deleteall """
    runner, cli, config = cli_runner
    result = runner.invoke(cli, ['ims', 'recipes', 'deleteall'])
    assert result.exit_code == 0
    data = json.loads(result.output)
    assert data['method'] == 'DELETE'
    assert data['url'] == '{}/apis/ims/v3/recipes'.format(
        config['default']['hostname']
    )
# pylint: disable=redefined-outer-name
def test_cray_ims_recipes_list(cli_runner, rest_mock):
    """Verify `cray ims recipes list` sends GET to the collection URL."""
    runner, cli, config = cli_runner
    res = runner.invoke(cli, ['ims', 'recipes', 'list'])
    assert res.exit_code == 0
    req = json.loads(res.output)
    host = config['default']['hostname']
    assert req['method'] == 'GET'
    assert req['url'] == host + '/apis/ims/v3/recipes'
# pylint: disable=redefined-outer-name
def test_cray_ims_recipes_describe(cli_runner, rest_mock):
    """Verify `cray ims recipes describe` sends GET to the item URL."""
    runner, cli, config = cli_runner
    res = runner.invoke(cli, ['ims', 'recipes', 'describe', 'foo'])
    assert res.exit_code == 0
    req = json.loads(res.output)
    host = config['default']['hostname']
    assert req['method'] == 'GET'
    assert req['url'] == host + '/apis/ims/v3/recipes/foo'
# pylint: disable=redefined-outer-name
def test_cray_ims_recipes_create(cli_runner, rest_mock):
    """Verify `cray ims recipes create` POSTs every field, including the S3 link."""
    runner, cli, config = cli_runner
    link_path = new_random_string()
    link_etag = new_random_string()
    res = runner.invoke(cli, ['ims', 'recipes', 'create',
                              '--name', 'foo',
                              '--linux-distribution', 'sles15',
                              '--recipe-type', 'kiwi-ng',
                              '--link-type', 's3',
                              '--link-path', link_path,
                              '--link-etag', link_etag])
    assert res.exit_code == 0
    req = json.loads(res.output)
    host = config['default']['hostname']
    assert req['method'] == 'POST'
    assert req['url'] == host + '/apis/ims/v3/recipes'
    body = req['body']
    assert body.get('name', None) == 'foo'
    assert body.get('linux_distribution', None) == 'sles15'
    assert body.get('recipe_type', None) == 'kiwi-ng'
    assert 'link' in body
    assert body['link'].get('type', None) == 's3'
    assert body['link'].get('path', None) == link_path
    assert body['link'].get('etag', None) == link_etag
# pylint: disable=redefined-outer-name
def test_cray_ims_recipes_create_missing_required(cli_runner, rest_mock):
    """Verify recipes create exits with code 2 and mentions --linux-distribution."""
    runner, cli, _ = cli_runner
    res = runner.invoke(
        cli,
        ['ims', 'recipes', 'create',
         '--name', 'foo',
         '--recipe-type', 'kiwi-ng']
    )
    assert res.exit_code == 2
    assert '--linux-distribution' in res.output
# pylint: disable=redefined-outer-name
def test_cray_ims_deleted_recipes_base(cli_runner, rest_mock):
    """ Test cray ims deleted recipes base command """
    runner, cli, _ = cli_runner
    result = runner.invoke(cli, ['ims', 'deleted', 'recipes'])
    assert result.exit_code == 0
    outputs = [
        "update",
        "delete",
        "deleteall",
        "describe",
        "list",
    ]
    compare_output(outputs, result.output)
# pylint: disable=redefined-outer-name
def test_cray_ims_deleted_recipes_delete(cli_runner, rest_mock):
""" Test cray ims recipes delete ... """
runner, cli, config = cli_runner
result = runner.invoke(cli, ['ims', 'deleted', 'recipes', 'delete', 'foo'])
assert result.exit_code == 0
data = json.loads(result.output)
assert data['method'] == 'DELETE'
| |
self.radio_mac,
addr2 = client.mac,
addr3 = str2mac(pkt[:6]),
#SC = self.dot11_seq << 4
)/
Dot11QoS()/
LLC(dsap = 170, ssap = 170, ctrl = 3)/
SNAP()[0])
AP._scapy_cache_static['wlan_wrapping_1'] = p1[:20]
AP._scapy_cache_static['wlan_wrapping_2'] = p1[38:-2]
AP._scapy_cache_static['wlan_wrapping'] = True
p = (
AP._scapy_cache_static['wlan_wrapping_1'] +
self.radio_mac_bytes +
client.mac_bytes +
pkt[:6] +
AP._scapy_cache_static['wlan_wrapping_2']
)
#CAPWAP_DATA(p).dump_offsets_tree()
# need to update following:
# Dot11_swapped.addr1 = self.radio_mac_bytes
# Dot11_swapped.addr2 = client.mac_bytes
# Dot11_swapped.addr3 = pkt.dst
# Dot11_swapped.SC = self.dot11_seq << 4
# SNAP.code = struct.unpack('!H', pkt[12:14]
#p1 = (
# p[:9] + ap.radio_mac_bytes +
# p[15:20] + struct.pack('!B', capwap_seq) +
# p[21:]
# )
#
if verify and os.getenv('VERIFY_SCAPY_CACHE'):
print('verifying wlan_wrapping')
assert p == p1[:-2], '\n%s\n%s\n\n%s\n%s' % (type(p), hexstr(p), type(p1), hexstr(p1))
return self.wrap_capwap_pkt(p + pkt[12:], dst_port = 5247)
def wrap_pkt_by_ap_msg(self, pkt):
    """Wrap a raw Ethernet frame as a CAPWAP-data 802.11 frame sent by the AP itself.

    :param pkt: raw bytes of the original frame (must be at least an Ethernet header)
    :return: bytes of the CAPWAP-encapsulated packet, addressed to UDP port 5247

    All three 802.11 addresses are the AP's own MAC (the AP is the sender).
    """
    assert type(pkt) is bytes, 'wrap_pkt_by_ap_msg() expects bytes, got: %s' % type(pkt)
    assert len(pkt) >= 14, 'Too small buffer to wrap'
    # NOTE(review): rssi/snr/data_rate are fixed, presumably cosmetic values for
    # the controller's wireless-info field — confirm they need no per-AP tuning.
    p1 = bytes(
        CAPWAP_DATA(
            header = CAPWAP_Header(
                wbid = 1,
                flags = 'WT',
                wireless_info_802 = CAPWAP_Wireless_Specific_Information_IEEE802_11(
                    rssi = 216,
                    snr = 31,
                    data_rate = 0,
                )
            )
        ) /
        Dot11_swapped(
            FCfield = 'to-DS',
            subtype = 8,
            type = 'Data',
            ID = 0,
            addr1 = self.mac,
            addr2 = self.mac,
            addr3 = self.mac,
        ) /
        Dot11QoS())
    return self.wrap_capwap_pkt(p1 + pkt, dst_port = 5247)
def patch_stream(self, client, stream):
    """Return a deep copy of a stream, re-wrapped in WLAN/CAPWAP for this client.

    Replaces src/dst MACs from the port config when the stream did not pin
    them, wraps the packet via wrap_pkt_by_wlan(), and shifts every VM
    instruction offset by the added header size.
    """
    assert type(stream) is STLStream, 'patch_stream() expects STLStream, got: %s' % type(stream)
    stream = copy.deepcopy(stream)
    patched_pkt = Ether(stream.pkt)
    if stream.fields['packet']['meta']:
        pkt_meta = '%s\nPatched stream: Added WLAN' % stream.fields['packet']['meta']
    else:
        pkt_meta = 'Patched stream: Added WLAN'
    port_layer = self.trex_port.get_layer_cfg()
    # Bit 0 of 'flags' marks a user-set source MAC; replace src only when unset.
    if stream.fields['flags'] & 1 == 0:
        pkt_meta += ', Changed source'
        patched_pkt.src = port_layer['ether']['src']
    # NOTE(review): mask 0x110 looks like it was meant to be the binary literal
    # 0b110 (compare '|= 0b111' below) — confirm against the STLStream flag bits.
    if stream.fields['flags'] & 0x110 == 0:
        pkt_meta += ', Changed destination'
        patched_pkt.dst = port_layer['ether']['dst']
    stream.pkt = self.wrap_pkt_by_wlan(client, bytes(patched_pkt))
    stream.fields['packet'] = {'binary': base64encode(stream.pkt),
                               'meta': pkt_meta}
    # We force the changed src & dst MAC address to 1 using the stream flags
    stream.fields['flags'] |= 0b111
    for inst in stream.fields['vm']['instructions']:
        if 'pkt_offset' in inst:
            inst['pkt_offset'] += 78  # Size of wrapping layers minus removed Ethernet
        elif 'offset' in inst:
            inst['offset'] += 78
    return stream
def patch_ap_stream(self, stream):
    """Return a deep copy of a stream, re-wrapped as an AP-originated CAPWAP frame.

    Unlike patch_stream(), MACs are not rewritten (the AP message wrapper sets
    its own addresses); VM offsets are shifted by the full wrapper size.
    """
    assert type(stream) is STLStream, 'patch_stream() expects STLStream, got: %s' % type(stream)
    stream = copy.deepcopy(stream)
    patched_pkt = Ether(stream.pkt)
    stream.pkt = self.wrap_pkt_by_ap_msg(bytes(patched_pkt))
    stream.fields['packet'] = {'binary': base64encode(stream.pkt),
                               'meta': ''}
    # We force the changed src & dst MAC address to 1 using the stream flags
    stream.fields['flags'] |= 0b011
    for inst in stream.fields['vm']['instructions']:
        if 'pkt_offset' in inst:
            inst['pkt_offset'] += 84  # Size of wrapping layers
        elif 'offset' in inst:
            inst['offset'] += 84
    return stream
def is_handshake_done_libssl(self):
    """Return True once libssl reports the DTLS handshake as finished."""
    with self.ssl_lock:
        finished = libssl.SSL_is_init_finished(self.ssl)
    return bool(finished)
def is_dtls_closed_libssl(self):
    """Return True if libssl reports the DTLS session as shut down."""
    with self.ssl_lock:
        shutdown_state = libssl.SSL_get_shutdown(self.ssl)
    return bool(shutdown_state)
@property
def is_dtls_established(self):
    # Handshake finished and session not closed. Returns the raw and-expression
    # (a falsy is_handshake_done value passes through unchanged).
    return self.is_handshake_done and not self.is_dtls_closed
def ssl_read(self):
    """Drain pending DTLS output from the out-BIO.

    Returns the bytes read on success, '' when the BIO just needs a retry,
    and falls through to an implicit None after flagging the connection as
    down on a hard error. NOTE(review): mixed str/bytes/None returns — confirm
    callers handle all three.
    """
    with self.ssl_lock:
        ret = libcrypto.BIO_read(self.out_bio, self.openssl_buf, 10000)
        if ret >= 0:
            return self.openssl_buf[:ret]
        ret = libcrypto.BIO_test_flags(self.out_bio, SSL_CONST.BIO_FLAGS_SHOULD_RETRY)
        if ret:
            return ''
        self.is_connected = False
# without lock, careful
def __ssl_write(self, buf):
    """Feed ciphertext into the DTLS in-BIO (caller must hold ssl_lock).

    Returns the number of bytes written, '' when the BIO asks for a retry,
    and an implicit None after flagging the connection as down on hard error.
    """
    if isinstance(buf, ctypes.Array):
        ret = libcrypto.BIO_write(self.in_bio, buf, len(buf))
    else:
        # NOTE(review): len(buf) + 1 also writes c_buffer's trailing NUL — confirm intended.
        ret = libcrypto.BIO_write(self.in_bio, c_buffer(buf), len(buf) + 1)
    if ret >= 0:
        return ret
    # Bugfix: the error path referenced bare 'out_bio' (an undefined name,
    # NameError when reached); the retry flag belongs to the BIO just written.
    ret = libcrypto.BIO_test_flags(self.in_bio, SSL_CONST.BIO_FLAGS_SHOULD_RETRY)
    if ret:
        return ''
    self.is_connected = False
def encrypt(self, buf):
    """DTLS-encrypt a buffer: SSL_write the plaintext, then drain the out-BIO.

    :param buf: plaintext as bytes or a ctypes array; scapy Packets are rejected
    :return: ciphertext via ssl_read() ('' on retry, None on hard error)
    """
    with self.ssl_lock:
        if isinstance(buf, Packet):
            raise TRexError('Consider converting to buffer: %s' % buf.command())
        if isinstance(buf, ctypes.Array):
            ret = libssl.SSL_write(self.ssl, buf, len(buf))
        else:
            ret = libssl.SSL_write(self.ssl, c_buffer(buf), len(buf))
        #err = SSL_CONST.ssl_err.get(libcrypto.ERR_get_error(self.ssl, ret))
        #if err != 'SSL_ERROR_NONE':
        #    self.fatal('Got SSL error: %s (ret %s)' % (err, ret))
        return self.ssl_read()
def decrypt(self, buf):
    """DTLS-decrypt a buffer: feed ciphertext into the in-BIO, then SSL_read.

    NOTE(review): a negative SSL_read return would slice openssl_buf[:-1]
    instead of signalling an error — confirm upstream guarantees ret >= 0.
    """
    with self.ssl_lock:
        self.__ssl_write(buf)
        ret = libssl.SSL_read(self.ssl, self.openssl_buf, 10000)
        #err = SSL_CONST.ssl_err.get(libcrypto.ERR_get_error(self.ssl, ret))
        #if err != 'SSL_ERROR_NONE':
        #    self.fatal('Got SSL error: %s' % (err, ret))
        return self.openssl_buf[:ret]
def get_arp_pkt(self, op, src_mac_bytes, src_ip_bytes):
    """Build a raw Ethernet+ARP frame for the given operation.

    :param op: 'who-has' (request), 'is-at' (reply) or 'garp' (gratuitous ARP)
    :param src_mac_bytes: packed sender MAC (6 bytes)
    :param src_ip_bytes: packed sender IPv4 (4 bytes)
    :return: bytes of the complete frame
    """
    assert len(src_mac_bytes) == 6
    assert len(src_ip_bytes) == 4
    if op == 'who-has':
        arp_dst = b'\xff\xff\xff\xff\xff\xff' + self.wlc_ip_bytes
    elif op == 'is-at':
        arp_dst = self.mac_dst_bytes + self.wlc_ip_bytes
    elif op == 'garp':
        arp_dst = b'\0\0\0\0\0\0' + src_ip_bytes
    else:
        raise TRexError('Bad op of ARP: %s' % op)
    is_request = op in ('who-has', 'garp')
    # Requests and gratuitous ARPs are broadcast; replies go to the known peer.
    eth_dst = b'\xff\xff\xff\xff\xff\xff' if is_request else self.mac_dst_bytes
    opcode = b'\x00\x01' if is_request else b'\x00\x02'
    ethernet_hdr = eth_dst + src_mac_bytes + b'\x08\x06'
    arp_payload = b'\x00\x01\x08\x00\x06\x04' + opcode + src_mac_bytes + src_ip_bytes + arp_dst
    return ethernet_hdr + arp_payload
def get_capwap_seq(self):
    """Return the current CAPWAP sequence number and advance it (wraps after 0xff)."""
    current = self.__capwap_seq
    if current < 0xff:
        self.__capwap_seq = current + 1
    else:
        self.__capwap_seq = 0
    return current
def get_vap_entry(self, slot_id, vap_id):
    """Return the VAP registered under (slot_id, vap_id), or None if absent.

    Bugfix: SSID is keyed by (slot_id, vap_id) tuples (see create_vap), but the
    old call `self.SSID.get(slot_id, vap_id)` looked up the bare slot_id and
    silently returned vap_id as the default.
    """
    return self.SSID.get((slot_id, vap_id))
def get_open_auth_vap(self):
    """Return the first VAP using open authentication (encrypt_policy == 1), else None."""
    return next((vap for vap in self.SSID.values() if vap.encrypt_policy == 1), None)
def create_vap(self, ssid, slot_id, vap_id):
    """
    Create a new VAP, register it under (slot_id, vap_id) and return it.
    """
    new_vap = VAP(ssid=ssid, slot_id=slot_id, vap_id=vap_id)
    self.SSID[(slot_id, vap_id)] = new_vap
    return new_vap
def delete_vap(self, slot_id, vap_id):
del self.SSID[(slot_id, vap_id)]
class APClient:
    """A wireless client attached to a simulated AP.

    MAC and IP may each be given either as a human-readable string or as
    raw bytes; both representations are stored.
    """

    def __init__(self, mac, ip, ap):
        # MAC: 'aa:bb:cc:dd:ee:ff' string or 6 raw bytes
        if ':' in mac:
            self.mac_bytes = mac2str(mac)
            self.mac = mac
        else:
            self.mac_bytes = mac
            self.mac = str2mac(mac)
        # IP: dotted-quad string or 4 raw bytes
        if '.' in ip:
            self.ip_bytes = is_valid_ipv4_ret(ip)
            self.ip = ip
        elif len(ip) == 4:
            self.ip_bytes = ip
            self.ip = str2ip(ip)
        else:
            raise TRexError('Bad IP provided, should be x.x.x.x, got: %s' % ip)
        check_mac_addr(self.mac)
        check_ipv4_addr(self.ip)
        assert isinstance(ap, AP)
        self.ap = ap
        self.reset()

    def reset(self):
        """Forget all association / ARP state."""
        self.is_associated = False
        self.seen_arp_reply = False
        self.got_disconnect = False

    def disconnect(self):
        """Return to clean state and remember that a disconnect happened."""
        self.reset()
        self.got_disconnect = True
class AP_Manager:
    def __init__(self, trex_client = None, server = None, sync_port = 4501, async_port = 4500):
        """Manage simulated APs/clients on top of a TRex connection.

        Exactly one of ``trex_client`` (an existing client) or ``server``
        (address to open a private STLClient to) must be supplied.
        """
        self.ssl_ctx = None
        # exactly one of server / trex_client must be given
        # NOTE(review): raises STLError here while the rest of the class uses
        # TRexError -- confirm this is intentional.
        if not (bool(server) ^ bool(trex_client)):
            raise STLError('Please specify either trex_client or server argument.')
        if trex_client:
            conn_info = trex_client.get_connection_info()
            server = conn_info['server']
            sync_port = conn_info['sync_port']
            async_port = conn_info['async_port']
        # private background client used for service traffic; kept quiet
        self.bg_client = STLClient('AP Manager', server, sync_port, async_port, verbose_level = 'none')
        self.trex_client = trex_client or self.bg_client
        self.aps = []            # all AP objects
        self.clients = []        # all APClient objects
        self.ap_by_name = {}     # lookup tables used by _get_ap_by_id
        self.ap_by_mac = {}
        self.ap_by_ip = {}
        self.client_by_id = {}
        self.bg_lock = threading.RLock()  # guards aps/clients against the bg service
        self.service_ctx = {}             # per-port service state, filled by init()
        # file persisting base address values between runs
        self.base_file_path = '/tmp/trex/console/%s_%s_%s_%s.wlc_base' % (get_current_user(), server, sync_port, async_port)
        base_file_dir = os.path.dirname(self.base_file_path)
        if not os.path.exists(base_file_dir):
            os.makedirs(base_file_dir, mode = 0o777)
        self._init_base_vals()
    def init(self, trex_port_ids):
        """Enable the AP service (bg + fg contexts) on the given TRex port(s).

        All ports are validated first; only then is each one set up. A setup
        failure removes the partially-created context for that port.
        """
        if type(trex_port_ids) is int:
            trex_port_ids = [trex_port_ids]
        if not self.bg_client.is_connected():
            self.bg_client.connect()
        # first pass: validate every requested port before touching any of them
        for port_id in trex_port_ids:
            if port_id in self.service_ctx:
                raise TRexError('AP manager already initialized on port %s. Close it to proceed.' % port_id)
            if port_id >= len(self.trex_client.ports):
                raise TRexError('TRex port %s does not exist!' % port_id)
            trex_port = self.trex_client.ports[port_id]
            if not trex_port.is_acquired():
                raise TRexError('Port %s is not acquired' % port_id)
            if trex_port.get_vlan_cfg():
                raise TRexError('Port %s has VLAN, plugin does not support it. Use trunk with native vlans.' % port_id)
        # second pass: set each port up
        for port_id in trex_port_ids:
            success = False
            try:
                self.service_ctx[port_id] = {}
                if not self.ssl_ctx:
                    self.ssl_ctx = SSL_Context()
                self.trex_client.set_service_mode(port_id, True)
                # promiscuous mode is needed to receive the WLC's traffic
                if not self.trex_client.get_port_attr(port = port_id)['prom'] == 'on':
                    self.trex_client.set_port_attr(ports = port_id, promiscuous = True)
                self.service_ctx[port_id]['synced'] = True
                self.service_ctx[port_id]['bg'] = ServiceApBgMaintenance(self, port_id)
                self.service_ctx[port_id]['fg'] = ServiceBufferedCtx(self.trex_client, port_id)
                self.service_ctx[port_id]['bg'].run()
                success = True
            finally:
                # roll back the context entry if anything above raised
                if not success:
                    del self.service_ctx[port_id]
    def _init_base_vals(self):
        """Initialize the base MAC/IP values used when allocating APs/clients."""
        try:
            self.set_base_values(load = True)
        except Exception as e:
            self.err('Error setting base values (%s), will use default' % e)
        # NOTE(review): these defaults are assigned unconditionally, i.e. also
        # after a successful load above. If set_base_values(load=True) is meant
        # to populate these same attributes, they are overwritten here --
        # confirm the intended order.
        self.next_ap_mac = '94:12:12:12:12:01'
        self.next_ap_ip = '172.16.31.10'
        self.next_client_mac = '94:13:13:13:13:01'
        self.next_client_ip = '172.16.17.32'
        self.wlc_ip = '255.255.255.255'
def _get_ap_by_id(self, ap_id):
if isinstance(ap_id, AP):
return ap_id
if ap_id in self.ap_by_name:
return self.ap_by_name[ap_id]
elif ap_id in self.ap_by_mac:
return self.ap_by_mac[ap_id]
elif ap_id in self.ap_by_ip:
return self.ap_by_ip[ap_id]
else:
raise TRexError('AP with id %s does not exist!' % ap_id)
def _get_client_by_id(self, client_id):
if isinstance(client_id, APClient):
return client_id
elif client_id in self.client_by_id:
return self.client_by_id[client_id]
else:
raise TRexError('Client with id %s does not exist!' % client_id)
def create_ap(self, trex_port_id, mac, ip, wlc_ip = None, verbose_level = AP.VERB_WARN,rsa_ca_priv_file = None, rsa_priv_file = None, rsa_cert_file = None):
if trex_port_id not in self.service_ctx:
raise TRexError('TRex port %s does not exist!' % trex_port_id)
if ':' not in mac:
mac = str2mac(mac)
if mac in self.ap_by_mac:
raise TRexError('AP with such MAC (%s) already exists!' % mac)
if ip in self.ap_by_ip:
raise TRexError('AP with such IP (%s) already exists!' % ip)
ap = AP(self.ssl_ctx, self.trex_client.logger, self.trex_client.ports[trex_port_id], mac, ip, wlc_ip, verbose_level, rsa_ca_priv_file, rsa_priv_file, rsa_cert_file)
self.ap_by_name[ap.name] = ap
self.ap_by_mac[mac] = ap
self.ap_by_ip[ip] = ap
with self.bg_lock:
self.aps.append(ap)
self.service_ctx[trex_port_id]['synced'] = False
    def remove_ap(self, ap_id):
        """Tear down an AP (given by object, name, MAC or IP): shut down its
        DTLS session if established, drop its clients and remove it from all
        lookup tables."""
        ap = self._get_ap_by_id(ap_id)
        if ap.is_dtls_established:
            self.service_ctx[ap.port_id]['fg'].run([ServiceApShutdownDTLS(ap)])
        with self.bg_lock:
            for client in ap.clients:
                # iterate over a snapshot since entries are deleted while scanning
                for key, val in dict(self.client_by_id).items():
                    if val == client:
                        del self.client_by_id[key]
                self.clients.remove(client)
            self.aps.remove(ap)
            self.service_ctx[ap.port_id]['synced'] = False
        del self.ap_by_name[ap.name]
        del self.ap_by_mac[ap.mac]
        del self.ap_by_ip[ap.ip]
def remove_client(self, id):
client = self._get_client_by_id(id)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""jobmanager module
<NAME> 2014
This module provides an easy way to implement distributed computing
based on the python class SyncManager for remote communication
and the python module multiprocessing for local parallelism.
class SIG_handler_Loop
The class Loop provides a mechanism to spawn a process that repeatedly
calls a given function, as well as a StatusBar class for the terminal.
class StatusBar
The class JobManager_Server will provide a server process handling the
following tasks:
- providing a list (queue) of arguments to be processed by client processes
(see put_arg and args_from_list)
- handling the results of the calculations done by the client processes
(see process_new_result)
- when finished (all provided arguments have been processed and returned its result)
process the obtained results (see process_final_result)
The class JobManager_Client
"""
from __future__ import division, print_function
import copy
from datetime import datetime
import inspect
import multiprocessing as mp
from multiprocessing.managers import BaseManager, RemoteError
import subprocess
import os
import pickle
import signal
import socket
import sys
import random
import time
import traceback
import warnings
import binfootprint as bf
import progression as progress
import shelve
import hashlib
import logging
import threading
import ctypes
from shutil import rmtree
# module-level logger; JobManager_Client.__init__ rebinds this name to a
# class-specific child logger
log = logging.getLogger(__name__)
# Public API: names exported on `from jobmanager import *`
# (imported upon initialization during module import, see __init__.py)
__all__ = ["JobManager_Client",
           "JobManager_Local",
           "JobManager_Server",
           "getDateForFileName"
          ]
# Python 2 / Python 3 compatibility shim: queue module, getfullargspec,
# connection-error exception aliases and the input prompt function.
if sys.version_info[0] == 2:
    # Python 2
    import Queue as queue
    class getfullargspec(object):
        "A quick and dirty replacement for getfullargspec for Python 2"
        def __init__(self, f):
            self.args, self.varargs, self.varkw, self.defaults = \
                inspect.getargspec(f)
            self.annotations = getattr(f, '__annotations__', {})
    inspect.getfullargspec = getfullargspec
    # IOError (socket.error) exception handling
    import errno
    class JMConnectionError(Exception):
        pass
    class JMConnectionRefusedError(JMConnectionError):
        pass
    class JMConnectionResetError(JMConnectionError):
        pass
    # NOTE(review): 'input_promt' is a typo for 'input_prompt', but it is a
    # module-level name other code may import -- kept as is.
    input_promt = raw_input
else:
    # Python 3
    import queue
    # map the py2-compat exception aliases onto the builtin hierarchy
    JMConnectionError = ConnectionError
    JMConnectionRefusedError = ConnectionRefusedError
    JMConnectionResetError = ConnectionResetError
    input_promt = input
class JMHostNotReachableError(JMConnectionError):
    pass
# convenience aliases
myQueue = queue.Queue
AuthenticationError = mp.AuthenticationError
def humanize_size(size_in_bytes):
    """Convert a byte count into a human readable string, e.g. ``2.00kB``.

    Divides by 1024 until the value is at most ``thr`` (or units run out)
    and appends the matching binary unit prefix.

    Bug fix: the unit list was shifted by one factor of 1024 -- plain bytes
    were labeled 'kB', kiB were labeled 'MB', and so on (and the docstring
    had been copied from a speed-humanizing helper).
    """
    thr = 99
    scales = [1024, 1024, 1024]
    units = ['', 'k', 'M', 'G']   # index i == number of divisions performed
    i = 0
    while (size_in_bytes > thr) and (i < len(scales)):
        size_in_bytes = size_in_bytes / scales[i]
        i += 1
    return "{:.2f}{}B".format(size_in_bytes, units[i])
def get_user():
    """Return the current user name as reported by ``id -un``.

    Improvement: uses an argument list with the default ``shell=False``
    instead of ``shell=True`` -- no shell features are needed here, and
    skipping the shell avoids an extra process and quoting pitfalls.
    """
    return subprocess.check_output(['id', '-un']).decode().strip()
def get_user_process_limit():
    """Return the per-user process limit reported by ``ulimit -u``."""
    # 'ulimit' is a shell builtin, so this must run through a shell
    raw = subprocess.check_output('ulimit -u', shell=True)
    return int(raw.decode().strip())
def get_user_num_process():
    """Return the number of processes currently owned by this user."""
    # -2 compensates for the 'ps' header line and the pipeline's own
    # process -- presumably; confirm against the target platform's ps.
    raw = subprocess.check_output('ps ut | wc -l', shell=True)
    return int(raw.decode().strip()) - 2
class JobManager_Client(object):
"""
    Calls the function self.func with arguments fetched from the job_q.
You should subclass this class and overwrite func to handle your own
function.
The job_q is provided by the SyncManager who connects to a
SyncManager setup by the JobManager_Server.
Spawns nproc subprocesses (__worker_func) to process arguments.
Each subprocess gets an argument from the job_q, processes it
and puts the result to the result_q.
If the job_q is empty, terminate the subprocess.
In case of any failure detected within the try except clause
the argument, which just failed to process, the error and the
hostname are put to the fail_q so the JobManager_Server can take
care of that.
After that the traceback is written to a file with name
traceback_args_<args>_err_<err>_<YYYY>_<MM>_<DD>_<hh>_<mm>_<ss>_<PID>.trb.
Then the process will terminate.
"""
    def __init__(self,
                 server,
                 authkey,
                 port = 42524,
                 nproc = 0,
                 njobs = 0,
                 nice = 19,
                 no_warnings = False,
                 verbose = None,
                 show_statusbar_for_jobs = True,
                 show_counter_only = False,
                 interval = 0.3,
                 emergency_dump_path = '.',
                 #job_q_get_timeout = 1,
                 #job_q_put_timeout = 10,
                 #result_q_put_timeout = 300,
                 #fail_q_put_timeout = 10,
                 reconnect_wait = 2,
                 reconnect_tries = 3,
                 ping_timeout = 2,
                 ping_retry = 3,
                 hide_progress = False,
                 use_special_SIG_INT_handler = True):
        """
        server [string] - ip address or hostname where the JobManager_Server is running
        authkey [string] - authentication key used by the SyncManager.
        Server and Client must have the same authkey.
        port [int] - network port to use
        nproc [integer] - number of subprocesses to start
            positive integer: number of processes to spawn
            zero: number of spawned processes == number cpu cores
            negative integer: number of spawned processes == number cpu cores - |nproc|
        njobs [integer] - total number of jobs to run per process
            negative integer or zero: run until there are no more jobs
            positive integer: run only njobs number of jobs per nproc
                The total number of jobs this client will
                run is njobs*nproc.
        nice [integer] - niceness of the subprocesses
        no_warnings [bool] - call warnings.filterwarnings("ignore") -> all warnings are ignored
        verbose [int] - deprecated; 0: quiet, 1: status only, 2: debug messages
        DO NOT SIGTERM CLIENT TOO EARLY, MAKE SURE THAT ALL SIGNAL HANDLERS ARE UP (see log at debug level)
        """
        global log
        # rebind the module logger to a class-specific child logger
        log = logging.getLogger(__name__+'.'+self.__class__.__name__)
        self._pid = os.getpid()
        self._sid = os.getsid(self._pid)
        if verbose is not None:
            log.warning("verbose is deprecated, only allowed for compatibility")
            warnings.warn("verbose is deprecated", DeprecationWarning)
        self.hide_progress = hide_progress
        self.use_special_SIG_INT_handler = use_special_SIG_INT_handler
        log.info("init JobManager Client instance (pid %s)", os.getpid())
        self.show_statusbar_for_jobs = show_statusbar_for_jobs
        log.debug("show_statusbar_for_jobs:%s", self.show_statusbar_for_jobs)
        self.show_counter_only = show_counter_only
        log.debug("show_counter_only:%s", self.show_counter_only)
        self.interval = interval
        log.debug("interval:%s", self.interval)
        #self._job_q_get_timeout = job_q_get_timeout
        #log.debug("_job_q_get_timeout:%s", self._job_q_get_timeout)
        #self._job_q_put_timeout = job_q_put_timeout
        #log.debug("_job_q_put_timeout:%s", self._job_q_put_timeout)
        #self._result_q_put_timeout = result_q_put_timeout
        #log.debug("_result_q_put_timeout:%s", self._result_q_put_timeout)
        #self._fail_q_put_timeout = fail_q_put_timeout
        #log.debug("_fail_q_put_timeout:%s", self._fail_q_put_timeout)
        self.reconnect_wait = reconnect_wait
        log.debug("reconnect_wait:%s", self.reconnect_wait)
        self.reconnect_tries = reconnect_tries
        log.debug("reconnect_tries:%s", self.reconnect_tries)
        self.ping_timeout = ping_timeout
        log.debug("ping_timeout:%s", self.ping_timeout)
        self.ping_retry = ping_retry
        log.debug("ping_retry:%s", self.ping_retry)
        if no_warnings:
            warnings.filterwarnings("ignore")
            log.info("ignore all warnings")
        self.server = server
        log.debug("server:%s", self.server)
        if isinstance(authkey, bytearray):
            self.authkey = authkey
        else:
            self.authkey = bytearray(authkey, encoding='utf8')
        log.debug("authkey:%s", self.authkey.decode())
        self.port = port
        log.debug("port:%s", self.port)
        self.nice = nice
        log.debug("nice:%s", self.nice)
        # nproc <= 0 means "cpu cores minus |nproc|"
        if nproc > 0:
            self.nproc = nproc
        else:
            self.nproc = mp.cpu_count() + nproc
            if self.nproc <= 0:
                raise RuntimeError("Invalid Number of Processes\ncan not spawn {} processes (cores found: {}, cores NOT to use: {} = -nproc)".format(self.nproc, mp.cpu_count(), abs(nproc)))
        log.debug("nproc:%s", self.nproc)
        if njobs == 0: # internally, njobs must be negative for infinite jobs
            njobs -= 1
        self.njobs = njobs
        log.debug("njobs:%s", self.njobs)
        self.emergency_dump_path = emergency_dump_path
        log.debug("emergency_dump_path:%s", self.emergency_dump_path)
        self.pbc = None          # progress-bar context, set up later
        self.procs = []          # spawned worker processes
        self.manager_objects = None # will be set via connect()
def connect(self):
if self.manager_objects is None:
try:
self.manager_objects = self.create_manager_objects()
except Exception as e:
log.critical("creating manager objects failed due to {}".format(type(e)))
log.info(traceback.format_exc())
raise
else:
log.info("already connected (at least shared object are available)")
    @property
    def connected(self):
        """True once the shared manager objects have been created via connect()."""
        return self.manager_objects is not None
    def _dump_result_to_local_storage(self, res):
        """Hook for subclasses to persist a result locally; no-op by default.
        (Presumably an emergency fallback path -- confirm with callers.)"""
        pass
def create_manager_objects(self):
"""
connects to the server and get registered shared objects such as
job_q, result_q, fail_q
const_arg will be deep copied from the manager and therefore live
as non shared object in local memory
"""
class ServerQueueManager(BaseManager):
pass
ServerQueueManager.register('get_job_q')
ServerQueueManager.register('get_result_q')
ServerQueueManager.register('get_fail_q')
ServerQueueManager.register('get_const_arg')
manager = ServerQueueManager(address=(self.server, self.port), authkey=self.authkey)
try:
call_connect(connect = manager.connect,
dest = address_authkey_from_manager(manager),
reconnect_wait = self.reconnect_wait,
reconnect_tries = self.reconnect_tries)
except:
log.warning("FAILED to connect to %s", address_authkey_from_manager(manager))
log.info(traceback.format_exc())
return None
job_q = manager.get_job_q()
log.info("found job_q with %s jobs", job_q.qsize())
result_q = manager.get_result_q()
fail_q = manager.get_fail_q()
# deep copy const_arg from manager -> non shared object in local memory
const_arg = copy.deepcopy(manager.get_const_arg())
return job_q, result_q, fail_q, const_arg, manager
@staticmethod
def func(arg, const_arg):
"""
function to be called by the worker processes
arg - provided by the job_q of the JobManager_Server
const_arg - tuple of constant arguments also provided by the JobManager_Server
to give status information to the Client class, use the variables
(c, m) as additional parameters. c and m will be
multiprocessing.sharedctypes.Synchronized objects with an underlying
unsigned int. so set c.value to the current status of the operation
ans m.value to the final status. So at the end of the operation c.value should
be m.value.
NOTE: This is just some dummy implementation to be used for test reasons only!
Subclass and overwrite this function to implement your own function.
"""
pid = os.getpid()
#print("{} sleeps for {}s".format(pid, const_arg))
time.sleep(const_arg)
return pid
@staticmethod
def __worker_func(func,
nice,
loglevel,
i,
job_q_get,
| |
= Constraint(expr= - 8*m.b263 + m.x1544 <= 0)
# Upper-bound linking constraints c4685..c4843. Each constraint k enforces
#     x_{k-3140} <= coef * b_{k-4421}
# i.e. the continuous variable may only be positive when its binary
# indicator is set, capped by the coefficient of its group.
for _coef, _c_lo, _c_hi in ((8, 4685, 4721),
                            (4, 4722, 4741),
                            (7, 4742, 4761),
                            (8, 4762, 4781),
                            (7, 4782, 4821),
                            (9, 4822, 4841),
                            (4, 4842, 4843)):
    for _k in range(_c_lo, _c_hi + 1):
        setattr(m, 'c%d' % _k,
                Constraint(expr= - _coef*getattr(m, 'b%d' % (_k - 4421))
                                 + getattr(m, 'x%d' % (_k - 3140)) <= 0))
m.c4844 | |
<filename>vimms/BOMAS.py
import time
from mass_spec_utils.data_import.mzmine import load_picked_boxes, map_boxes_to_scans
from mass_spec_utils.data_import.mzml import MZMLFile
from vimms.Agent import TopNDEWAgent
from vimms.Box import *
from vimms.Common import *
from vimms.Controller import TopN_SmartRoiController, WeightedDEWController, TopN_RoiController, \
NonOverlapController, IntensityNonOverlapController, TopNBoxRoiController, FlexibleNonOverlapController, \
FixedScansController, RoiBuilder, AgentBasedController, TopNController
from vimms.DsDA import get_schedule, dsda_get_scan_params, create_dsda_schedule
from vimms.Environment import *
from vimms.Evaluation import evaluate_multi_peak_roi_aligner
from vimms.Evaluation import evaluate_multiple_simulated_env
from vimms.GridEstimator import *
from vimms.Roi import FrequentistRoiAligner
def run_coverage_evaluation(box_file, mzml_file, half_isolation_window):
    """Fraction of picked boxes that were hit by at least one scan in the mzML."""
    picked = load_picked_boxes(box_file)
    mz_data = MZMLFile(mzml_file)
    _, boxes2scans = map_boxes_to_scans(mz_data, picked, half_isolation_window=half_isolation_window)
    return len(boxes2scans) / len(picked)
def run_env(mass_spec, controller, min_rt, max_rt, mzml_file):
    """Run a simulated environment, write its mzML output and return the
    fraction of chemicals fragmented at MS level > 1."""
    env = Environment(mass_spec, controller, min_rt, max_rt)
    env.run()
    env.write_mzML(None, mzml_file)
    # distinct chemicals (by repr) that produced a fragmentation event
    fragmented = {event.chem.__repr__()
                  for event in env.mass_spec.fragmentation_events
                  if event.ms_level > 1}
    return len(fragmented) / len(env.mass_spec.chemicals)
########################################################################################################################
# Evaluation methods
########################################################################################################################
def top_n_evaluation(param_dict):
    """Run a TopN controller simulation and return either box coverage or
    chemical coverage, selected by param_dict['coverage_type']."""
    mass_spec = load_obj(param_dict['mass_spec_file'])
    loaded_params = load_obj(param_dict['params_file'])
    controller = TopNController(param_dict['ionisation_mode'], param_dict['N'], param_dict['isolation_width'],
                                param_dict['mz_tol'], param_dict['rt_tol'], param_dict['min_ms1_intensity'],
                                params=loaded_params)
    chemical_coverage = run_env(mass_spec, controller, param_dict['min_rt'], param_dict['max_rt'],
                                param_dict['save_file_name'])
    coverage = run_coverage_evaluation(param_dict['box_file'], param_dict['save_file_name'],
                                       param_dict['half_isolation_window'])
    print('coverage', coverage)
    print('chemical_coverage', chemical_coverage)
    return coverage if param_dict['coverage_type'] == 'coverage' else chemical_coverage
def smart_roi_evaluation(param_dict):
    # NOTE(review): this definition is dead code -- it is immediately shadowed
    # by the second 'def smart_roi_evaluation' below, so it can never be
    # called via the module namespace. The two variants also disagree on
    # param_dict keys ('isolation_window'/'iif'/'dp' here vs
    # 'isolation_width'/'intensity_increase_factor'/'drop_perc' there).
    # Decide which variant to keep, or rename this one.
    mass_spec = load_obj(param_dict['mass_spec_file'])
    params = load_obj(param_dict['params_file'])
    smartroi = TopN_SmartRoiController(param_dict['ionisation_mode'], param_dict['isolation_window'],
                                       param_dict['mz_tol'], param_dict['min_ms1_intensity'],
                                       param_dict['min_roi_intensity'], param_dict['min_roi_length'],
                                       param_dict['N'], param_dict['rt_tol'],
                                       param_dict['min_roi_length_for_fragmentation'],
                                       param_dict['reset_length_seconds'],
                                       param_dict['iif'], length_units="scans", drop_perc=param_dict['dp'] / 100,
                                       ms1_shift=0, params=params)
    chemical_coverage = run_env(mass_spec, smartroi, param_dict['min_rt'], param_dict['max_rt'],
                                param_dict['save_file_name'])
    coverage = run_coverage_evaluation(param_dict['box_file'], param_dict['save_file_name'],
                                       param_dict['half_isolation_window'])
    print('coverage', coverage)
    print('chemical_coverage', chemical_coverage)
    if param_dict['coverage_type'] == 'coverage':
        return coverage
    else:
        return chemical_coverage
def smart_roi_evaluation(param_dict):
    """Run a SmartROI controller simulation and return the box coverage."""
    mass_spec = load_obj(param_dict['mass_spec_file'])
    loaded_params = load_obj(param_dict['params_file'])
    controller = TopN_SmartRoiController(param_dict['ionisation_mode'], param_dict['isolation_width'],
                                         param_dict['mz_tol'], param_dict['min_ms1_intensity'],
                                         param_dict['min_roi_intensity'], param_dict['min_roi_length'],
                                         N=param_dict['N'], rt_tol=param_dict['rt_tol'],
                                         min_roi_length_for_fragmentation=param_dict['min_roi_length_for_fragmentation'],
                                         reset_length_seconds=param_dict['reset_length_seconds'],
                                         intensity_increase_factor=param_dict['intensity_increase_factor'],
                                         drop_perc=param_dict['drop_perc'], ms1_shift=param_dict['ms1_shift'],
                                         params=loaded_params)
    run_env(mass_spec, controller, param_dict['min_rt'], param_dict['max_rt'], param_dict['save_file_name'])
    return run_coverage_evaluation(param_dict['box_file'], param_dict['save_file_name'],
                                   param_dict['half_isolation_window'])
def weighted_dew_evaluation(param_dict):
    """Run a WeightedDEW controller simulation and return the box coverage."""
    mass_spec = load_obj(param_dict['mass_spec_file'])
    loaded_params = load_obj(param_dict['params_file'])
    controller = WeightedDEWController(param_dict['ionisation_mode'], param_dict['N'], param_dict['isolation_width'],
                                       param_dict['mz_tol'], param_dict['rt_tol'], param_dict['min_ms1_intensity'],
                                       exclusion_t_0=param_dict['exclusion_t_0'],
                                       log_intensity=param_dict['log_intensity'], params=loaded_params)
    run_env(mass_spec, controller, param_dict['min_rt'], param_dict['max_rt'], param_dict['save_file_name'])
    return run_coverage_evaluation(param_dict['box_file'], param_dict['save_file_name'],
                                   param_dict['half_isolation_window'])
########################################################################################################################
# Experiment evaluation methods
########################################################################################################################
def top_n_experiment_evaluation(datasets, min_rt, max_rt, N, isolation_window, mz_tol, rt_tol, min_ms1_intensity,
                                base_chemicals=None, mzmine_files=None, rt_tolerance=100, experiment_dir=None,
                                progress_bar=False):
    """Run a TopN controller over each dataset and evaluate the results.

    If base_chemicals is given, evaluation compares against those chemicals;
    otherwise mzML files are written to experiment_dir and aligned against
    the given mzmine_files. Returns (env_list, evaluation), or (None, None)
    when neither base_chemicals nor mzmine_files is supplied.
    """
    if base_chemicals is not None or mzmine_files is not None:
        env_list = []
        mzml_files = []
        source_files = ['sample_' + str(i) for i in range(len(datasets))]
        for i in range(len(datasets)):
            mass_spec = IndependentMassSpectrometer(POSITIVE, datasets[i])
            controller = TopNController(POSITIVE, N, isolation_window, mz_tol, rt_tol, min_ms1_intensity, ms1_shift=0,
                                        initial_exclusion_list=None, force_N=False)
            env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=progress_bar)
            env.run()
            if progress_bar is False:
                print('Processed dataset ' + str(i))
            env_list.append(env)
            # mzML output is only needed for the peak-alignment evaluation path
            if base_chemicals is None:
                file_link = os.path.join(experiment_dir, source_files[i] + '.mzml')
                mzml_files.append(file_link)
                env.write_mzML(experiment_dir, source_files[i] + '.mzml')
        if base_chemicals is not None:
            evaluation = evaluate_multiple_simulated_env(env_list, base_chemicals=base_chemicals)
        else:
            roi_aligner = RoiAligner(rt_tolerance=rt_tolerance)
            for i in range(len(mzml_files)):
                roi_aligner.add_picked_peaks(mzml_files[i], mzmine_files[i], source_files[i], 'mzmine')
            evaluation = evaluate_multi_peak_roi_aligner(roi_aligner, source_files)
        return env_list, evaluation
    else:
        return None, None
def top_n_exclusion_experiment_evaluation(datasets, min_rt, max_rt, N, isolation_window, mz_tol, rt_tol,
                                          min_ms1_intensity,
                                          base_chemicals=None, mzmine_files=None, rt_tolerance=100,
                                          experiment_dir=None, progress_bar=False):
    """Run a Top-N DEW agent whose exclusion list persists across all datasets.

    Unlike :func:`top_n_experiment_evaluation`, a single :class:`TopNDEWAgent`
    is shared by every run, so precursors fragmented in earlier samples remain
    excluded in later ones.  Evaluation is against ``base_chemicals`` when
    given, otherwise against picked peaks in ``mzmine_files`` via mzML files
    written to ``experiment_dir``.

    Returns:
        tuple: ``(env_list, evaluation)``; ``(None, None)`` when neither
        evaluation input is supplied.
    """
    # Guard clause: nothing to evaluate against.
    if base_chemicals is None and mzmine_files is None:
        return None, None
    env_list = []
    mzml_files = []
    source_files = ['sample_' + str(i) for i in range(len(datasets))]
    # One shared agent: its exclusion state carries over between datasets.
    agent = TopNDEWAgent(POSITIVE, N, isolation_window, mz_tol, rt_tol, min_ms1_intensity, remove_exclusion=False)
    for i, dataset in enumerate(datasets):
        mass_spec = IndependentMassSpectrometer(POSITIVE, dataset)
        controller = AgentBasedController(agent)
        env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=progress_bar)
        env.run()
        if not progress_bar:
            print('Processed dataset ' + str(i))
        env_list.append(env)
        if base_chemicals is None:
            # mzML-based evaluation: persist the simulated run to disk.
            mzml_files.append(os.path.join(experiment_dir, source_files[i] + '.mzml'))
            env.write_mzML(experiment_dir, source_files[i] + '.mzml')
    if base_chemicals is not None:
        evaluation = evaluate_multiple_simulated_env(env_list, base_chemicals=base_chemicals)
    else:
        roi_aligner = RoiAligner(rt_tolerance=rt_tolerance)
        for mzml_file, mzmine_file, source_file in zip(mzml_files, mzmine_files, source_files):
            roi_aligner.add_picked_peaks(mzml_file, mzmine_file, source_file, 'mzmine')
        evaluation = evaluate_multi_peak_roi_aligner(roi_aligner, source_files)
    return env_list, evaluation
def top_n_roi_experiment_evaluation(datasets, min_rt, max_rt, N, isolation_window, mz_tol, rt_tol,
                                    min_ms1_intensity, min_roi_intensity, min_roi_length, base_chemicals=None,
                                    mzmine_files=None, rt_tolerance=100, experiment_dir=None, progress_bar=False):
    """Run a ROI-based Top-N controller on each dataset and evaluate coverage.

    Same evaluation protocol as :func:`top_n_experiment_evaluation`, but the
    fragmentation is driven by :class:`TopN_RoiController` using regions of
    interest (``min_roi_intensity`` / ``min_roi_length``).

    Returns:
        tuple: ``(env_list, evaluation)``; ``(None, None)`` when neither
        ``base_chemicals`` nor ``mzmine_files`` is supplied.
    """
    # Guard clause: nothing to evaluate against.
    if base_chemicals is None and mzmine_files is None:
        return None, None
    env_list = []
    mzml_files = []
    source_files = ['sample_' + str(i) for i in range(len(datasets))]
    for i, dataset in enumerate(datasets):
        mass_spec = IndependentMassSpectrometer(POSITIVE, dataset)
        controller = TopN_RoiController(POSITIVE, isolation_window, mz_tol, min_ms1_intensity, min_roi_intensity,
                                        min_roi_length, N=N, rt_tol=rt_tol)
        env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=progress_bar)
        env.run()
        if not progress_bar:
            print('Processed dataset ' + str(i))
        env_list.append(env)
        if base_chemicals is None:
            # mzML-based evaluation: persist the simulated run to disk.
            mzml_files.append(os.path.join(experiment_dir, source_files[i] + '.mzml'))
            env.write_mzML(experiment_dir, source_files[i] + '.mzml')
    if base_chemicals is not None:
        evaluation = evaluate_multiple_simulated_env(env_list, base_chemicals=base_chemicals)
    else:
        roi_aligner = RoiAligner(rt_tolerance=rt_tolerance)
        for mzml_file, mzmine_file, source_file in zip(mzml_files, mzmine_files, source_files):
            roi_aligner.add_picked_peaks(mzml_file, mzmine_file, source_file, 'mzmine')
        evaluation = evaluate_multi_peak_roi_aligner(roi_aligner, source_files)
    return env_list, evaluation
def smart_roi_experiment_evaluation(datasets, min_rt, max_rt, N, isolation_window, mz_tol, rt_tol,
                                    min_ms1_intensity, min_roi_intensity, min_roi_length,
                                    min_roi_length_for_fragmentation, reset_length_seconds, intensity_increase_factor,
                                    drop_perc, ms1_shift, base_chemicals=None, mzmine_files=None,
                                    rt_tolerance=100, experiment_dir=None, progress_bar=False):
    """Run a SmartROI Top-N controller on each dataset and evaluate coverage.

    Same evaluation protocol as :func:`top_n_experiment_evaluation`, using
    :class:`TopN_SmartRoiController` with the SmartROI re-fragmentation rules
    (``reset_length_seconds``, ``intensity_increase_factor``, ``drop_perc``).

    Returns:
        tuple: ``(env_list, evaluation)``; ``(None, None)`` when neither
        ``base_chemicals`` nor ``mzmine_files`` is supplied.
    """
    # Guard clause: nothing to evaluate against.
    if base_chemicals is None and mzmine_files is None:
        return None, None
    env_list = []
    mzml_files = []
    source_files = ['sample_' + str(i) for i in range(len(datasets))]
    for i, dataset in enumerate(datasets):
        mass_spec = IndependentMassSpectrometer(POSITIVE, dataset)
        controller = TopN_SmartRoiController(POSITIVE, isolation_window, mz_tol, min_ms1_intensity,
                                             min_roi_intensity,
                                             min_roi_length, N=N, rt_tol=rt_tol,
                                             min_roi_length_for_fragmentation=min_roi_length_for_fragmentation,
                                             reset_length_seconds=reset_length_seconds,
                                             intensity_increase_factor=intensity_increase_factor,
                                             drop_perc=drop_perc, ms1_shift=ms1_shift)
        env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=progress_bar)
        env.run()
        if not progress_bar:
            print('Processed dataset ' + str(i))
        env_list.append(env)
        if base_chemicals is None:
            # mzML-based evaluation: persist the simulated run to disk.
            mzml_files.append(os.path.join(experiment_dir, source_files[i] + '.mzml'))
            env.write_mzML(experiment_dir, source_files[i] + '.mzml')
    if base_chemicals is not None:
        evaluation = evaluate_multiple_simulated_env(env_list, base_chemicals=base_chemicals)
    else:
        roi_aligner = RoiAligner(rt_tolerance=rt_tolerance)
        for mzml_file, mzmine_file, source_file in zip(mzml_files, mzmine_files, source_files):
            roi_aligner.add_picked_peaks(mzml_file, mzmine_file, source_file, 'mzmine')
        evaluation = evaluate_multi_peak_roi_aligner(roi_aligner, source_files)
    return env_list, evaluation
def weighted_dew_experiment_evaluation(datasets, min_rt, max_rt, N, isolation_window, mz_tol, r, t0,
                                       min_ms1_intensity, base_chemicals=None, mzmine_files=None, rt_tolerance=100,
                                       experiment_dir=None, progress_bar=False):
    """Run a WeightedDEW controller on each dataset and evaluate coverage.

    Same evaluation protocol as :func:`top_n_experiment_evaluation`, using
    :class:`WeightedDEWController` with weighted dynamic exclusion
    (``r`` is the exclusion weight parameter, ``t0`` the initial exclusion
    time; intensities are log-scaled).

    Returns:
        tuple: ``(env_list, evaluation)``; ``(None, None)`` when neither
        ``base_chemicals`` nor ``mzmine_files`` is supplied.
    """
    # Guard clause: nothing to evaluate against.
    if base_chemicals is None and mzmine_files is None:
        return None, None
    env_list = []
    mzml_files = []
    source_files = ['sample_' + str(i) for i in range(len(datasets))]
    for i, dataset in enumerate(datasets):
        mass_spec = IndependentMassSpectrometer(POSITIVE, dataset)
        controller = WeightedDEWController(POSITIVE, N, isolation_window, mz_tol, r, min_ms1_intensity,
                                           exclusion_t_0=t0, log_intensity=True)
        env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=progress_bar)
        env.run()
        if not progress_bar:
            print('Processed dataset ' + str(i))
        env_list.append(env)
        if base_chemicals is None:
            # mzML-based evaluation: persist the simulated run to disk.
            mzml_files.append(os.path.join(experiment_dir, source_files[i] + '.mzml'))
            env.write_mzML(experiment_dir, source_files[i] + '.mzml')
    if base_chemicals is not None:
        evaluation = evaluate_multiple_simulated_env(env_list, base_chemicals=base_chemicals)
    else:
        roi_aligner = RoiAligner(rt_tolerance=rt_tolerance)
        for mzml_file, mzmine_file, source_file in zip(mzml_files, mzmine_files, source_files):
            roi_aligner.add_picked_peaks(mzml_file, mzmine_file, source_file, 'mzmine')
        evaluation = evaluate_multi_peak_roi_aligner(roi_aligner, source_files)
    return env_list, evaluation
def box_controller_experiment_evaluation(datasets, group_list, min_rt, max_rt, N, isolation_window,
                                         mz_tol, rt_tol, min_ms1_intensity, min_roi_intensity, min_roi_length,
                                         boxes_params, base_chemicals=None, mzmine_files=None, rt_tolerance=100,
                                         experiment_dir=None, progress_bar=False):
    """Run a box-aware ROI controller, feeding boxes from run to run.

    After each sample, the ROIs produced by the controller are aligned, and
    the resulting boxes (plus their maximum fragmentation intensities) are
    handed to the controller of the next sample, so fragmentation decisions
    account for everything already fragmented in earlier runs.  Evaluation
    follows the same protocol as :func:`top_n_experiment_evaluation`.

    Args:
        group_list: Group identifier per dataset, forwarded to the aligner.

    Returns:
        tuple: ``(env_list, evaluation)``; ``(None, None)`` when neither
        ``base_chemicals`` nor ``mzmine_files`` is supplied.
    """
    # Guard clause: nothing to evaluate against.
    if base_chemicals is None and mzmine_files is None:
        return None, None
    env_list = []
    mzml_files = []
    source_files = ['sample_' + str(i) for i in range(len(datasets))]
    boxes = []
    boxes_intensity = []
    aligner = RoiAligner()  # accumulates boxes handed to later controllers
    for i, dataset in enumerate(datasets):
        mass_spec = IndependentMassSpectrometer(POSITIVE, dataset)
        controller = TopNBoxRoiController(POSITIVE, isolation_window, mz_tol, min_ms1_intensity, min_roi_intensity,
                                          min_roi_length, boxes_params=boxes_params, boxes=boxes,
                                          boxes_intensity=boxes_intensity, N=N, rt_tol=rt_tol)
        env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=progress_bar)
        env.run()
        if not progress_bar:
            print('Processed dataset ' + str(i))
        env_list.append(env)
        # Align this run's ROIs so the next controller sees boxes (and max
        # fragmentation intensities) from all earlier runs.
        rois = env.controller.live_roi + env.controller.dead_roi
        aligner.add_sample(rois, source_files[i], group_list[i])
        boxes = aligner.get_boxes()
        boxes_intensity = aligner.get_max_frag_intensities()
        if base_chemicals is None:
            # mzML-based evaluation: persist the simulated run to disk.
            mzml_files.append(os.path.join(experiment_dir, source_files[i] + '.mzml'))
            env.write_mzML(experiment_dir, source_files[i] + '.mzml')
    if base_chemicals is not None:
        evaluation = evaluate_multiple_simulated_env(env_list, base_chemicals=base_chemicals)
    else:
        roi_aligner = RoiAligner(rt_tolerance=rt_tolerance)
        for mzml_file, mzmine_file, source_file in zip(mzml_files, mzmine_files, source_files):
            roi_aligner.add_picked_peaks(mzml_file, mzmine_file, source_file, 'mzmine')
        evaluation = evaluate_multi_peak_roi_aligner(roi_aligner, source_files)
    return env_list, evaluation
# change roi_type to ROI_TYPE_SMART to toggle smartroi
# change exclusion_method to ROI_EXCLUSION_WEIGHTED_DEW and specify exclusion_t_0 to toggle weighteddew
def non_overlap_experiment_evaluation(datasets, min_rt, max_rt, N, isolation_window, mz_tol, rt_tol, min_ms1_intensity,
                                      min_roi_intensity, min_roi_length, rt_box_size, mz_box_size,
                                      min_roi_length_for_fragmentation, base_chemicals=None, mzmine_files=None,
                                      rt_tolerance=100, experiment_dir=None,
                                      roi_type=RoiBuilder.ROI_TYPE_NORMAL, reset_length_seconds=1e6,
                                      intensity_increase_factor=10, drop_perc=0.1 / 100,
                                      exclusion_method=ROI_EXCLUSION_DEW, exclusion_t_0=None, progress_bar=False):
    """Run a NonOverlap controller with a grid shared across all datasets.

    The :class:`GridEstimator` persists between runs, so regions fragmented in
    earlier samples are avoided in later ones.  Pass
    ``roi_type=RoiBuilder.ROI_TYPE_SMART`` to enable SmartROI, or
    ``exclusion_method=ROI_EXCLUSION_WEIGHTED_DEW`` with ``exclusion_t_0`` to
    enable WeightedDEW.  Evaluation follows the same protocol as
    :func:`top_n_experiment_evaluation`.

    Returns:
        tuple: ``(env_list, evaluation)``; ``(None, None)`` when neither
        ``base_chemicals`` nor ``mzmine_files`` is supplied.
    """
    # Guard clause: nothing to evaluate against.
    if base_chemicals is None and mzmine_files is None:
        return None, None
    env_list = []
    # Shared grid: m/z range is hard-coded to 0-3000 Da here.
    grid = GridEstimator(LocatorGrid(min_rt, max_rt, rt_box_size, 0, 3000, mz_box_size), IdentityDrift())
    mzml_files = []
    source_files = ['sample_' + str(i) for i in range(len(datasets))]
    for i, dataset in enumerate(datasets):
        mass_spec = IndependentMassSpectrometer(POSITIVE, dataset)
        controller = NonOverlapController(
            POSITIVE, isolation_window, mz_tol, min_ms1_intensity, min_roi_intensity,
            min_roi_length, N, grid, rt_tol=rt_tol,
            min_roi_length_for_fragmentation=min_roi_length_for_fragmentation,
            roi_type=roi_type, reset_length_seconds=reset_length_seconds,
            intensity_increase_factor=intensity_increase_factor, drop_perc=drop_perc,
            exclusion_method=exclusion_method, exclusion_t_0=exclusion_t_0)
        env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=progress_bar)
        env.run()
        if not progress_bar:
            print('Processed dataset ' + str(i))
        env_list.append(env)
        if base_chemicals is None:
            # mzML-based evaluation: persist the simulated run to disk.
            mzml_files.append(os.path.join(experiment_dir, source_files[i] + '.mzml'))
            env.write_mzML(experiment_dir, source_files[i] + '.mzml')
    if base_chemicals is not None:
        evaluation = evaluate_multiple_simulated_env(env_list, base_chemicals=base_chemicals)
    else:
        roi_aligner = RoiAligner(rt_tolerance=rt_tolerance)
        for mzml_file, mzmine_file, source_file in zip(mzml_files, mzmine_files, source_files):
            roi_aligner.add_picked_peaks(mzml_file, mzmine_file, source_file, 'mzmine')
        evaluation = evaluate_multi_peak_roi_aligner(roi_aligner, source_files)
    return env_list, evaluation
# change roi_type to ROI_TYPE_SMART to toggle smartroi
# change exclusion_method to ROI_EXCLUSION_WEIGHTED_DEW and specify exclusion_t_0 to toggle weighteddew
def intensity_non_overlap_experiment_evaluation(datasets, min_rt, max_rt, N, isolation_window, mz_tol,
rt_tol, min_ms1_intensity, min_roi_intensity, min_roi_length,
rt_box_size, mz_box_size, min_roi_length_for_fragmentation,
scoring_params={'theta1': 1}, base_chemicals=None, mzmine_files=None,
rt_tolerance=100, experiment_dir=None,
roi_type=RoiBuilder.ROI_TYPE_NORMAL, reset_length_seconds=1e6,
intensity_increase_factor=10, drop_perc=0.1 / 100,
exclusion_method=ROI_EXCLUSION_DEW, exclusion_t_0=None,
progress_bar=False):
if base_chemicals is not None or mzmine_files is not None:
env_list = []
grid = GridEstimator(AllOverlapGrid(min_rt, max_rt, rt_box_size, 0, 3000, mz_box_size), | |
"""SmartTimer
Classes:
:class:`SmartTimer`
Note:
Internal operations may affect the time measurements by a factor of
milliseconds. In a future release, this noise will be corrected.
"""
__all__ = [
'TimerStat',
'SmartTimer'
]
import os
import re
import numpy
import cProfile
import time as std_time
from .timer import Timer
from functools import wraps, partial
from .clocks import are_clocks_compatible
from collections import namedtuple, defaultdict
# Field names shared by the TimerStat result tuple (see SmartTimer.stats()).
_TimerStat_fields = ('min', 'max', 'total', 'avg',)
# Lightweight record of aggregate timing statistics.
TimerStat = namedtuple('TimerStat', _TimerStat_fields)
class SmartTimer:
"""`Timer`_ container to perform time measurements in code blocks.
Args:
name (str, optional): Name of container. Default is *smarttimer*.
kwargs (dict, optional): Map of options to configure the internal
`Timer`_. Default is `Timer`_ defaults.
A :class:`SmartTimer` allows recording elapsed time in an arbitrary
number of code blocks. Specified points in the code are marked as either
the beginning of a block to measure, :meth:`tic`, or as the end of a
measured block, :meth:`toc`. Times are managed internally and ordered
based on :meth:`tic` calls. Times can be queried, operated on, and
written to file.
The following schemes are supported for timing code blocks
* Consecutive: ``tic('A')``, ``toc()``, ..., ``tic('B')``, ``toc()``
* Cascade: ``tic('A')``, ``toc()``, ``toc()``, ...
* Nested: ``tic('A')``, ``tic('B')``, ..., ``toc()``, ``toc()``
* Label-paired: ``tic('A')``, ``tic('B')``, ..., ``toc('A')``,
``toc('B')``
* Mixed: arbitrary combinations of schemes
.. _`namedtuple`:
https://docs.python.org/3.3/library/collections.html#collections.namedtuple
Attributes:
name (str): Name of container. May be used for filename in
:meth:`write_to_file`.
labels (list, str): Label identifiers of completed timed code blocks.
active_labels (list, str): Label identifiers of active code blocks.
seconds (list, float): Elapsed time for completed code blocks.
minutes (list, float): Elapsed time for completed code blocks.
times (dict): Map of times elapsed for completed blocks. Keys are the
labels used when invoking :meth:`tic`.
walltime (float): Elapsed time between first and last timings.
"""
DEFAULT_CLOCK_NAME = 'process_time'
_LABELS = ('label', 'seconds', 'minutes', 'rel_percent', 'cum_sec',
'cum_min', 'cum_percent')
def __init__(self, name=None, **kwargs):
    """Create a new timer container.

    Args:
        name (str, optional): Name of container; also used as the default
            filename stem by :meth:`dump_times`.
        kwargs (dict, optional): Options forwarded to the internal `Timer`_.
    """
    self.name = name
    self._timer = Timer(label=None, **kwargs)  # internal Timer; also sentinel for "no active tic"
    self._first_tic = None  # timer of the very first tic; used to calculate walltime
    self._last_tic = self._timer  # timer of the most recent tic; supports the cascade scheme
    self._timers = []  # completed time blocks; None entries are placeholders for active blocks
    self._timer_stack = []  # stack of active (tic'd but not toc'd) time blocks
    self._prof = None  # profiling object (cProfile), cleared in clear()
@property
def labels(self):
    """Labels of completed timed blocks (active blocks excluded)."""
    return tuple(t.label for t in self._filter_timers())

@property
def active_labels(self):
    """Labels of blocks that have been tic'd but not yet toc'd."""
    return tuple(t.label for t in self._timer_stack)

@property
def seconds(self):
    """Elapsed seconds for completed blocks, in tic order."""
    return tuple(t.seconds for t in self._filter_timers())

@property
def minutes(self):
    """Elapsed minutes for completed blocks, in tic order."""
    return tuple(t.minutes for t in self._filter_timers())

@property
def relative_percent(self):
    """Each block's share of the total time (-1. until all blocks complete)."""
    return tuple(t.relative_percent for t in self._filter_timers())

@property
def cumulative_seconds(self):
    """Running total of seconds up to each block (-1. until all complete)."""
    return tuple(t.cumulative_seconds for t in self._filter_timers())

@property
def cumulative_minutes(self):
    """Running total of minutes up to each block (-1. until all complete)."""
    return tuple(t.cumulative_minutes for t in self._filter_timers())

@property
def cumulative_percent(self):
    """Cumulative share of total time up to each block (-1. until all complete)."""
    return tuple(t.cumulative_percent for t in self._filter_timers())

@property
def times(self):
    """Map of label -> list of elapsed seconds for completed blocks."""
    times_map = defaultdict(list)
    for t in self._filter_timers():
        times_map[t.label].append(t.seconds)
    return times_map

@property
def clock_name(self):
    """Name of the clock used by the internal Timer."""
    return self._timer.clock_name

@clock_name.setter
def clock_name(self, clock_name):
    # Switching to an incompatible clock would make old and new measurements
    # non-comparable, so drop active blocks and restart walltime tracking.
    if not are_clocks_compatible(self._timer.clock_name, clock_name):
        self._timers = list(self._filter_timers())
        self._timer_stack = []
        self._first_tic = None
        self._last_tic = self._timer
    self._timer.clock_name = clock_name

@property
def info(self):
    """Clock details, delegated to the internal Timer."""
    return self._timer.info

@property
def walltime(self):
    """Elapsed time between the first tic and the latest measurement.

    Returns 0. when no block has completed yet.
    """
    if not any(self._timers):
        return 0.
    # NOTE(review): relies on _timer.time() having been called by toc();
    # _first_tic is set by the first tic().
    return self._timer.seconds - self._first_tic.seconds

def _filter_timers(self):
    # Skip the None placeholders that mark still-active blocks.
    return filter(None, self._timers)
def __repr__(self):
return "{cls}(name={name},"\
" timer={timer})"\
.format(cls=type(self).__qualname__,
name=repr(self.name),
timer=repr(self._timer))
def __str__(self):
    """Return an aligned table of completed timings.

    Columns follow ``_LABELS``; returns the empty string when no block
    has completed yet.
    """
    if not self.labels:
        return ""
    # Label column is as wide as the longest label (at least len('label')).
    lw = max(len('label'), max(map(len, self.labels)))
    fmt_head = "{:>" + str(lw) + "}" + 6 * " {:>12}" + os.linesep
    fmt_data = "{:>" + str(lw) + "}" + 6 * " {:12.4f}" + os.linesep
    data = fmt_head.format(*type(self)._LABELS)
    for t in self._filter_timers():
        data += fmt_data.format(t.label, t.seconds, t.minutes,
                                t.relative_percent, t.cumulative_seconds,
                                t.cumulative_minutes, t.cumulative_percent)
    return data
def __enter__(self):
    """Start an anonymous timed block (``with SmartTimer(...) as t:``)."""
    self.tic()
    return self

def __eq__(self, other):
    # Equality is deliberately undefined; Python falls back to identity.
    return NotImplemented

__hash__ = None  # instances are explicitly unhashable

def __exit__(self, *args):
    # Close the block opened by __enter__; exceptions are not suppressed.
    self.toc()
def __getitem__(self, key):
value = self.times[key]
return value[0] if len(value) == 1 else value
def _update_cumulative_and_percent(self):
    """Fill in cumulative and percentage fields of completed timers.

    Only called from toc() once all placeholders are resolved
    (``all(self._timers)``), so indexing ``self._timers[i - 1]`` below
    lines up with the filtered enumeration.
    """
    total_seconds = sum(self.seconds)
    # NOTE(review): total_seconds == 0 would raise ZeroDivisionError below —
    # possible with very coarse clocks; confirm whether callers guard this.
    for i, t in enumerate(self._filter_timers()):
        # Negative values are the "not yet computed" sentinel set by toc();
        # timers already processed keep their cumulative values and only the
        # percentages are recomputed.
        if t.cumulative_seconds < 0. or t.cumulative_minutes < 0.:
            t.cumulative_seconds = t.seconds
            t.cumulative_minutes = t.minutes
            if i > 0:
                t_prev = self._timers[i - 1]
                t.cumulative_seconds += t_prev.cumulative_seconds
                t.cumulative_minutes += t_prev.cumulative_minutes
        t.relative_percent = t.seconds / total_seconds
        t.cumulative_percent = t.cumulative_seconds / total_seconds
def tic(self, label=None):
    """Start measuring time.

    Measure time at the latest moment possible to minimize noise from
    internal operations.

    Args:
        label (str): Label identifier for current code block.
    """
    # _last_tic -> timer of most recent tic
    self._last_tic = Timer(label=label, clock_name=self._timer.clock_name)
    # _first_tic -> timer of first tic ever; anchors walltime
    if self._first_tic is None:
        self._first_tic = self._last_tic
    # Insert Timer into stack, then record time to minimize noise
    self._timer_stack.append(self._last_tic)
    # Use 'None' as a placeholder in _timers so completed results keep
    # tic ordering; toc() replaces it (see _filter_timers()).
    self._timers.append(None)
    # Measure time last so bookkeeping above is not included
    self._last_tic.time()
def toc(self, label=None):
    """Stop measuring time at end of code block.

    Args:
        label (str): Label identifier for current code block. If given,
            the innermost active timer with that label is closed
            (label-paired scheme); otherwise the most recent tic is used.

    Returns:
        float: Measured time in seconds.

    Raises:
        Exception, KeyError: If there is not a matching :meth:`tic`.
    """
    # Error if no tic pair (e.g., toc() after instance creation):
    # _last_tic still points at the internal _timer sentinel.
    if self._last_tic is self._timer:
        raise Exception("'toc()' has no matching 'tic()'")
    # Measure time at the soonest moment possible to minimize noise from
    # internal operations.
    self._timer.time()
    # Stack is not empty so there is a matching tic
    if self._timer_stack:
        # Default: close the most recent active timer (consecutive/nested)
        stack_idx = -1
        # Label-paired timer.
        # Label can be "", so explicitly check against None.
        if label is not None:
            # Find index of last timer in stack with matching label
            for i, t in enumerate(self._timer_stack[::-1]):
                if label == t.label:
                    stack_idx = len(self._timer_stack) - i - 1
                    break
            else:
                raise KeyError("'{}' has no matching label".format(label))
        # Calculate time elapsed
        t_first = self._timer_stack.pop(stack_idx)
        t_diff = self._timer - t_first
        # Add extra attributes; negative sentinel means "not yet computed"
        # (filled in later by _update_cumulative_and_percent).
        t_diff.relative_percent = -1.
        t_diff.cumulative_seconds = -1.
        t_diff.cumulative_minutes = -1.
        t_diff.cumulative_percent = -1.
        # Replace the None placeholder that tic() left at the matching
        # position, so completed times keep tic ordering.
        idx = [i for i, v in enumerate(self._timers)
               if v is None][stack_idx]
        self._timers[idx] = t_diff
    # Empty stack: cascade scheme, measure again from the most recent tic
    else:
        t_diff = self._timer - self._last_tic
        # Add extra attributes; negative sentinel means "not yet computed"
        t_diff.relative_percent = -1.
        t_diff.cumulative_seconds = -1.
        t_diff.cumulative_minutes = -1.
        t_diff.cumulative_percent = -1.
        # Use label.
        # Label can be "", so explicitly check against None.
        if label is not None:
            t_diff.label = label
        self._timers.append(t_diff)
    # Update cumulative and percent times once no placeholders remain
    if all(self._timers):
        self._update_cumulative_and_percent()
    return t_diff.seconds
def print_info(self):
    """Print clock details (delegates to the internal Timer)."""
    self._timer.print_info()

def remove(self, *keys):
    """Remove time(s) of completed code blocks.

    Active blocks (None placeholders) are never removed.

    Args:
        keys (str): Labels (as passed to :meth:`tic`) whose completed
            timings should be removed.
    """
    for key in keys:
        # Iterate over a copy (self._timers[:]) because we mutate the list;
        # filter(None, ...) skips placeholders of still-active blocks.
        for t in filter(None, self._timers[:]):
            if key == t.label:
                self._timers.remove(t)
def clear(self):
    """Discard all timings (completed and active) and any profiling state."""
    self._timers = []
    self._timer_stack = []
    self._timer.clear()
    self._first_tic = None
    self._last_tic = self._timer  # restore the "no active tic" sentinel
    if self._prof:
        self._prof.clear()
    self._prof = None

def reset(self):
    """Restore the container to its default state: no name, default clock, no timings."""
    self.name = None
    self._timer.reset()
    self._timer.clock_name = type(self).DEFAULT_CLOCK_NAME
    self.clear()
def dump_times(self, filename=None, mode='w'):
    """Write timing results to a CSV file.

    If *filename* is provided, then it will be used as the filename.
    Otherwise :attr:`name` is used if non-empty. The suffix and extension
    *-times.csv* are appended only if the filename does not already have
    an extension. Using *mode* the file can be overwritten or appended
    with timing data.

    Note:
        Fields are not quoted, so labels containing commas would produce
        malformed rows.

    .. _`open`: https://docs.python.org/3/library/functions.html#open

    Args:
        filename (str, optional): Name of file.
        mode (str, optional): Mode flag passed to `open`_. Default is *w*.

    Raises:
        ValueError: If neither *filename* nor :attr:`name` is set.
    """
    if not filename:
        if not self.name:
            raise ValueError("either provide an explicit filename or set"
                             " 'name' attribute")
        filename = self.name
    if not os.path.splitext(filename)[1]:
        filename += '-times.csv'
    with open(filename, mode) as fd:
        # Header row uses the same column order as __str__.
        fd.write(','.join(type(self)._LABELS))
        fd.write('\n')
        for t in self._filter_timers():
            data = (t.label, t.seconds, t.minutes, t.relative_percent,
                    t.cumulative_seconds, t.cumulative_minutes,
                    t.cumulative_percent)
            fd.write(','.join((str(datum) for datum in data)))
            fd.write('\n')
def stats(self, label=None):
"""Compute total, min, max, and average stats for timings.
Note:
* *label* is compared as a word-bounded expression.
Args:
label (str, iterable, None, optional): String used to match timer
labels to select. To use as a regular expression, *label*
has to be a raw string. If None, then all completed timings are
used.
Returns:
TimerStat, None: Stats in seconds and minutes (`namedtuple`_).
"""
timers = list(self._filter_timers())
# Label can | |
of the words of the context/question and the
# number of words in the context/question, and call a Multi-Timescale LSTM
# further reference on Multi-Timescale LSTM can be found in Learned in Translation: Contextualized Word Vectors
# at https://arxiv.org/pdf/1708.00107.pdf
# We feed GloVe(cw_idxs) and GloVe(qw_idxs) to a standard, two-layer, bidirectional, long short-term memory network
# (cf. <NAME>, 2005) that we refer to as an MT-LSTM to indicate that it is this same two-layer BiLSTM
# that we later transfer as a pretrained encoder. The MT-LSTM is used to compute a sequence of hidden states
# MT-LSTM(GloVe(cw_idxs)) and MT-LSTM(GloVe(qw_idxs)).
# used as embeddings
_, c_c = self.cove(cw_idxs, c_len) # with torch.size([batch_size, length of the context in words for the longest context in the batch], 600)
_, c_q = self.cove(qw_idxs, q_len) # with torch.size([batch_size, length of the question in words for the longest question in the batch], 600)
if self.args.drop_prob > 0:
c_c = F.dropout(c_c, p=self.args.drop_prob, training=self.training)
c_q = F.dropout(c_q, p=self.args.drop_prob, training=self.training)
# part-of-speech (POS) embeddings: as described on page 5 of the FusionNet paper (Huang et al.), in the SQuAD task
# we also include 12 dim POS embedding
# See also: https://arxiv.org/pdf/1704.00051.pdf Reading Wikipedia to Answer Open-Domain Questions
c_pos_emb = self.pos_embedding(cw_pos) # with torch.size([batch_size, length of the context in words for the longest context in the batch], 12)
# named entity recognition (NER) embeddings
c_ner_emb = self.ner_embedding(cw_ner) # with torch.size([batch_size, length of the context in words for the longest context in the batch], 8)
# Fully-Aware Multi-level Fusion: Word-level
# Note that the parameter q_mask is equal to 1 if padded and 0 otherwise.
# self.full_attn_word_level calls layers.FullyAwareAttention
# In multi-level fusion, we separately consider fusing word-level and higher levels. Word-level fusion informs C
# about what kind of words are in Q. For this component, we follow the approach of Chen et al. 2017a.
# In the initial test of the multi-level fusion, we used a parameter 1, meaning one level only
# in the the next tests we also used 6 (waiting for result) and also intending to use 3 levels of fusions
g_hat_c = self.full_attn_word_level(g_c, g_q,
g_q, q_mask) #with torch.size([batch_size, length of the context in words for the longest context in the batch], 300)
# Creation of input vectors w_c and w_q (page 5 of fusionnet article).
# ---------------------------------------------------------------------
# Create vector an input vector w_c by concatening g_c, c_c, c_pos_emb, c_ner_emb, cw_freq.unsqueeze(-1)
# g_c, c_c, c_pos_emb, c_ner_emb have the same dimensions at 0 and 1.
# cw_freq has dimension torch.Size([64, 305]), and is converted to dimension torch.Size([64, 305,1])
# using the command cw_freq.unsqueeze(-1)
# https://pytorch.org/docs/stable/torch.html#torch.unsqueeze
# unsqueeze: Returns a new tensor with a dimension of size one inserted at the specified position.
# The returned tensor shares the same underlying data with this tensor.
w_c = torch.cat([g_c, c_c, c_pos_emb, c_ner_emb, cw_freq.unsqueeze(-1)], 2)
# create an input vector w_q by concatening g_q, c_q along the dimension 2
# therefore concatenate two vectors: g_q of size torch.Size([batch size, max nb words question, 300]) and
# torch.Size([batch size, max nb words question,600]) into a vector w_q of size
# torch.Size([batch size, max nb words question, 900])
w_q = torch.cat([g_q, c_q], 2)
# Reading (page 6 of FusionNet article)
# In the reading component, we use a separate bidirectional LSTm (BiLSTM) calling layers.MultiLevelRNNEncoder
# to form low-level and high-level concepts for C and Q. Hence low-level and high-level concepts are created
# for each word.
h_c_l, h_c_h = self.reading_context(torch.cat([w_c, cqw_extra, g_hat_c], 2),
c_mask)
h_q_l, h_q_h = self.reading_question(w_q, q_mask)
# Understanding vector for the question
# Question Understanding: In the Question Understanding component, we apply a new BiLSTM
# layers.MultiLevelRNNEncoder taking in both h_q_l and h_q_h to obtain the final question representation U_q:
U_q = self.final_ques(torch.cat([h_q_l, h_q_h], 2),
q_mask)[0]
# Fully-Aware Multi-level Fusion: Higher-level. Explanation on page 6 of the FusionNet article
# This component fuses all higher-level informationin the question Q to the context C through fully-aware attention
# on history-of-word. Since the proposed attention scoring function for fully-aware attention is constrained
# to be symmetric, we need to identify the common history-of-word for both C and Q. This is achieved by
# concatenating the sequences of tensors in the given dimension.
# All tensors have the same shape (except in the concatenating dimension).
# g_c and g_q are the GloVe embeddings and c_c and c_q are the CoVe embedding
# h_c_l, h_c_h, h_q_l and h_q_h are the low level and high level concepts for C and Q generated by the
# 4 BiLSTM.
HoW_c = torch.cat([g_c, c_c, h_c_l, h_c_h], 2)
HoW_q = torch.cat([g_q, c_q, h_q_l, h_q_h], 2)
# Using the history of words for the context HoW_c and for the question HoW_q, we compute the
# (i) low-level fusion, (ii) high-level fusion and (iii) understanding fusion using Fully Aware Attention.
# The difference in input between the 3 fusions is the use of h_q_l (low-level), h_q_h (high-level) and
# U_q (understanding).
# Low-level fusion (equation C1 in Fusionnet page 6)
# h_hat_c_l = \sum_j alpha^l_{ij} h_q_l_{j} with alpha^l_{ij} proportional to an attention function^l (HoW_c_i, HoW_q_j)
h_hat_c_l = self.full_attn_low_level(HoW_c, HoW_q,
h_q_l, q_mask)
# High-level fusion (equation C1 in Fusionnet page 6)
# h_hat_c_h = \sum_j alpha^h_{ij} h_q_h_{j} with alpha^h_{ij} proportional to an attention function^h (HoW_c_i, HoW_q_j)
h_hat_c_h = self.full_attn_high_level(HoW_c, HoW_q,
h_q_h, q_mask)
# Understanding fusion (equation C1 in Fusionnet page 6)
# h_hat_c = \sum_j alpha^u_{ij} U_q_{j} with alpha^h_{ij} proportional to an attention function^u (HoW_c_i, HoW_q_j)
u_hat_c = self.full_attn_understand(HoW_c, HoW_q,
U_q, q_mask)
# We concatenate the tensors h_c_l, h_c_h, h_hat_c_l, h_hat_c_h, u_hat_c over the dimension 2 and pass it
# as first parameter to MultiLevelRNNEncoder with the second parameter being c_mask
# As a reminder c_mask is equal to 1 if padded and 0 otherwise.
# This new BiLSTM is applied to obtain the representation for C fully fused with information in the question Q.
# (equation C2 in Fusionnet page 6)
v_c = self.fully_focused_context(torch.cat([h_c_l, h_c_h, h_hat_c_l, h_hat_c_h, u_hat_c], 2),
c_mask)[0]
# Fully-Aware Self-Boosted Fusion
# Define the new history of word by concatening the tensors g_c, c_c, h_c_l, h_c_h, h_hat_c_l, h_hat_c_h,
# u_hat_c, v_c along dimension 2:
HoW_c = torch.cat([g_c, c_c, h_c_l, h_c_h, h_hat_c_l, h_hat_c_h, u_hat_c, v_c], 2)
# The fully aware attention v_hat_c is computed through Fully Aware Attention
# (equation C3 of FusionNet article page 6).
v_hat_c = self.full_attn_history_of_word(HoW_c, HoW_c,
v_c, c_mask)
# Understanding vector for the context
# The Final context representation U_c represents the understanding vector for the context C, which are fully
# fused with with the question Q.
# U_c is obtained by applying a BiLSTM (MultiLevelRNNEncoder) on the
# concatenation of the tensors v_c, v_hat_c (equation C4 of the FusionNet article on page 6)
U_c = self.final_context(torch.cat([v_c, v_hat_c], 2), 1)[0]
# The output of FusionNet are the understanding vectors U_c and U_q for both C and Q.
# Computation of the answer span in the context:
# Summarized question understanding vector
# The single summarized question understanding vector u_q is obtained by computing
# \sum_i \beta_i U_q_{i), where \beta_i is proportional to \exp(w^T u_i^Q) and w is a trainable vector
u_q = self.summarized_final_ques(None, U_q, q_mask)
# size: torch.Size([batch_size, max nb of words question, attention_hidden_size])
# The span start P_s is computed using the summarized question understanding vector u_q
# [Need to explain how it is computed by checking the code in layers.py]
# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
P_s = self.span_start(u_q, U_c, c_mask)
# size: torch.Size([batch_size, max nb of words question])
# Computation of v_q:
# To use the information of the span start, when we attend for the span end, we combine the context
# understanding vector for the span start with u_q through a FRU (Cho et al., 2014)
# v_q = GRU (u_q, \sum_i P_S_{i} u_c_{i}), where u_q is taken as teh memory and \sum_i | |
#region Imports
# HTTP requests and url parsing
import requests
import aiohttp
import webbrowser
import http.server
from urllib.parse import urlparse, parse_qs
# Data formats
import json
import yaml
from helpers.ArgHandler import Get_Args
# Logging
from logger.AppLogger import build_logger
# Build the logger object, using the argument for verbosity as the setting for debug log level
logger = build_logger(logger_name="To Do Widget", debug=Get_Args().verbose)
# Scheduling and threading
import atexit
import time
from datetime import datetime
import asyncio
import threading
from kivy.clock import Clock
from functools import partial
# Integration
from integrations.ToDoIntegrations.Task import TaskItem
# MSAL authentication
from msal import PublicClientApplication, SerializableTokenCache
from requests_oauthlib import OAuth2Session
import os
# Kivy
from kivy.properties import ObjectProperty, StringProperty, ListProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.recycleboxlayout import RecycleBoxLayout
from kivy.uix.recycleview import RecycleView
from kivy.uix.recycleview.datamodel import RecycleDataModel
# Settings
from dynaconf_settings import settings
#endregion
# The authorization code returned by Microsoft's OAuth redirect.
# Module-level (global) so that RequestHandler.do_GET can capture it from the
# redirect request and hand it back to Aquire_Auth_Code().
authorization_response = None
# Note that this class needs to be at the top of this file.
class RequestHandler(http.server.SimpleHTTPRequestHandler):
    '''
    Request handler that parses urls during a GET request and strips out the
    authorization code as a string. It sets the global authorization_response
    variable to the authorization code when one is present.
    '''

    def do_GET(self):
        """Handle the OAuth redirect: capture ``?code=...`` then serve the page."""
        global authorization_response
        query_components = parse_qs(urlparse(self.path).query)
        # Only capture the code when it is actually present. Requests without
        # a ?code= parameter (e.g. /favicon.ico, or an error redirect)
        # previously raised KeyError here and returned a 500 to the browser.
        if 'code' in query_components:
            # parse_qs values are lists of strings; take the first value
            # instead of the old str(list)[2:-2] slicing hack.
            authorization_response = query_components['code'][0]
        if self.path == '/':
            self.path = 'index.html'
        logger.debug("Got response from HTTP server.")
        return http.server.SimpleHTTPRequestHandler.do_GET(self)
class ToDoWidget(RecycleView):
    '''
    Handle all transactions for the Microsoft To Do integration.

    A Kivy RecycleView that authenticates with Microsoft via MSAL, pulls
    To Do tasks from the Graph API, and displays them.
    '''
    # Load the authentication_settings.yml file
    # Note: this file is not tracked by github, so it will need to be created before running
    # stream = open('integrations/ToDoIntegrations/microsoft_authentication_settings.yml', 'r')
    # settings = yaml.safe_load(stream)
    # if settings:
    #     logger.debug("Setting successfully loaded ")
    # The instance of the Public Client Application from MSAL. This is assigned in __init__
    app = None
    # This stores all the actual task data in dictionaries
    to_do_tasks = ListProperty()
    # Rows currently exposed to the RecycleView for rendering.
    data = []
    # Per-list Graph API delta links, used to fetch only changed tasks on update.
    delta_links = {}
    # The settings required for msal to properly authenticate the user
    msal = {
        'authority': "https://login.microsoftonline.com/common",
        'authorize_endpoint': "/oauth2/v2.0/authorize",
        'redirect_uri': "http://localhost:1080",
        'token_endpoint': "/oauth2/v2.0/token",
        'scopes': ["user.read", "Tasks.ReadWrite"],
        'headers': "",
        # The access token aquired in Aquire_Access_Token. This is a class variable for the cases
        # where there is an attempt to make a request again in the short time this token is valid for.
        # If that should happen, storing the token like this minimalizes the amount of requests needed
        # to Microsoft's servers
        'access_token': None
    }
    # Text shown on the settings-screen sign-in label.
    sign_in_label_text = StringProperty()
    # Reference to the sign-in button widget (bound from kv).
    sign_in_button = ObjectProperty()
def __init__(self, **kwargs):
super(ToDoWidget, self).__init__(**kwargs)
# This is necessary because Azure does not guarantee
# the return of scopes in the same case and order as requested
os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'
os.environ['OAUTHLIB_IGNORE_SCOPE_CHANGE'] = '1'
cache = self.Deserialize_Cache("integrations/ToDoIntegrations/microsoft_cache.bin")
# Instantiate the Public Client App
self.app = PublicClientApplication(settings.To_Do_Widget.get('app_id', '565467a5-8f81-4e12-8c8d-e6ec0a0c4290'), authority=self.msal['authority'], token_cache=cache)
# If an account exists in cache, get it now. If not, don't do anything and let user sign in on settings screen.
if(self.app.get_accounts()):
setup_thread = threading.Thread(target=self.Setup_Tasks)
setup_thread.start()
Clock.schedule_interval(self.Start_Update_Loop, settings.To_Do_Widget.get('update_interval', 30))
#region MSAL
def Deserialize_Cache(self, cache_path):
'''Create the cache object, deserialize it for use, and register it to be reserialized before the application quits.'''
cache = SerializableTokenCache()
if os.path.exists(cache_path):
cache.deserialize(open(cache_path, "r").read())
logger.info("Reading MSAL token cache")
# Register a function with atexit to make sure the cache is written to just before the application terminates.
atexit.register(lambda:
open(cache_path, "w").write(cache.serialize())
# Hint: The following optional line persists only when state changed
if cache.has_state_changed else None
)
return cache
# Gets access token however it is needed and returns that token
def Aquire_Access_Token(self):
'''
If there is an access token in the cache, get it and obtain an authorization code using it.
Else run the Aquire_Auth_Code method to have the user authenticate.
'''
result = None
accounts = self.app.get_accounts()
if(self.msal['access_token'] == None):
result = self.Pull_From_Token_Cache()
if (result == None):
# Then there was no token in the cache
# Get auth code
authCode = self.Aquire_Auth_Code(self.settings)
# Aquire token from Microsoft with auth code and scopes from above
result = self.app.acquire_token_by_authorization_code(authCode, scopes=self.msal["scopes"], redirect_uri=self.msal['redirect_uri'])
# Strip down the result and convert it to a string to get the final access token
self.msal['access_token'] = str(result['access_token'])
if self.msal['access_token'] != None:
self.sign_in_label_text = "You are signed in to Microsoft"
# self.sign_in_button.visible = False # TODO: Re-enable this
return True
else:
logger.error("Something went wrong and no token was obtained")
return False
def Pull_From_Token_Cache(self):
'''If there is a vaild account in the cache, obtain it and then use it to get and return an access token.'''
accounts = self.app.get_accounts()
if accounts:
# TODO: Will implement better account management later. For now, the first account found is chosen.
return self.app.acquire_token_silent(self.msal["scopes"], account=accounts[0])
else:
logger.info("No accounts were found in the cache. Reauthenticating...")
return None
def Aquire_Auth_Code(self, settings):
'''Aquire MSAL authorization code from Microsoft.'''
# Use the global variable authorization_response instead of a local one
global authorization_response
# Begin localhost web server in a new thread to handle the get request that will come from Microsoft
webServerThread = threading.Thread(target=self.Run_Localhost_Server)
webServerThread.setDaemon(True)
webServerThread.start()
# Builds url from yml settings
authorize_url = '{0}{1}'.format(self.msal['authority'], self.msal['authorize_endpoint'])
# Begins OAuth session with app_id, scopes, and redirect_uri from yml
aadAuth = OAuth2Session(settings['app_id'], scope=self.msal['scopes'], redirect_uri=self.msal['redirect_uri'])
# Obtain final login url from the OAuth session
sign_in_url, state = aadAuth.authorization_url(authorize_url)
# Opens a web browser with the new sign in url
webbrowser.open(sign_in_url, new=2, autoraise=True)
# Waits until the web server thread closes before continuing
# This ensures that an authorization response will be returned.
webServerThread.join()
# This function returns the global authorization_response when it is not equal to None
return authorization_response
#endregion
def refresh_from_data(self, *largs, **kwargs):
# Resort the data after the update
self.to_do_tasks = self.multikeysort(self.to_do_tasks, settings.To_Do_Widget.get('task_sort_order', ['-status', 'title']))
super(ToDoWidget, self).refresh_from_data(largs, kwargs)
def Setup_Tasks(self, *kwargs):
'''
Make sure all the tasks are set up properly during initialization.
Ensure that a valid access token is present, pull all the tasks
from the API, sort them correctly, and display them on screen.
'''
start = time.time()
logger.info("Starting task update")
success = self.Aquire_Access_Token()
if success:
asyncio.run(self.Get_Tasks())
self.to_do_tasks = self.multikeysort(self.to_do_tasks, settings.To_Do_Widget.get('task_sort_order', ['-status', 'title']))
self.last_task_update = time.time()
logger.info("Finished setting up tasks during initialization")
logger.debug(f"This task setup took {time.time() - start} seconds.")
def Start_Update_Loop(self, dt):
# TODO: Consider moving this to the main python file for a unified update loop across integrations.
update_thread = threading.Thread(target=self.Update_All_Tasks)
update_thread.setDaemon(True)
update_thread.start()
def Update_All_Tasks(self):
logger.info("Starting tasks update")
# TODO Look into a more pythonic way to do this with list comprehension
# or something using async functions.
for list_id in self.delta_links:
# TODO Handle the case where the token in self.msal['headers'] may not be valid anymore
response = requests.get(self.delta_links[list_id], headers=self.msal['headers'])
if response.status_code == 200:
json_data = json.loads(response.text)
# Reassign the new delta link provided by the api
self.delta_links[list_id] = json_data['@odata.deltaLink']
if json_data['value']:
self.Update_Given_Tasks(json_data['value'], list_id)
elif response.status_code == 410:
logger.warning(f"The entire dataset for list id '{list_id}' must be redownloaded")
else:
logger.error(f"Something went wrong checking for updated tasks on list id '{list_id}'")
def Update_Given_Tasks(self, tasks_to_update, list_id):
for task in tasks_to_update:
# I can use next here since the task id's are going to be unique coming from Microsoft
# Return the index of an existing task in to_do_tasks, or None if 'task' is not in the list
local_task_index = next((i for i, item in enumerate(self.to_do_tasks) if item['id']==task['id']), None)
if '@removed' in task:
logger.info(f"Removed task titled '{self.to_do_tasks[local_task_index]['title']}'")
removed_task = self.to_do_tasks.pop(local_task_index)
continue
if task['status'] == "completed":
# TODO in-app toggle for this
task['isVisible'] = False
else:
task['isVisible'] = settings.To_Do_Widget.get('incomplete_task_visibility', True)
task['list_id'] = list_id
# TODO There is a small chance here that the local_task_index changes between the time I obtain it and reassign the task back
# Make sure to fix this issue!
if local_task_index != None:
logger.info(f"Updating existing task titled '{task['title']}'")
self.to_do_tasks[local_task_index] = task
else:
logger.info(f"Adding new task titled '{task['title']}'")
self.to_do_tasks.append(task)
self.refresh_from_data()
def Get_Task_Lists(self):
'''
Get To Do task lists from Microsoft's graph API.
NOTE: This is usually only run by the Get_Tasks method, there should
be no need to get task lists without pulling the tasks from them.
'''
| |
# Repository: JosieHong/VOS_with_Seg-Siam
'''
@Author: JosieHong
@Date: 2020-04-26 12:40:11
@LastEditAuthor: JosieHong
LastEditTime: 2021-01-12 22:52:02
'''
import os.path as osp
import warnings
import mmcv
import numpy as np
from imagecorruptions import corrupt
from mmcv.parallel import DataContainer as DC
import torch
from .utils import random_scale, to_tensor
from .registry import DATASETS
from .coco_seg import Coco_Seg_Dataset, INF
@DATASETS.register_module
class TSD_MAX_Seg_Dataset(Coco_Seg_Dataset):
    '''COCO-style segmentation dataset for TSD-MAX traffic-scene videos (SiamPolar).'''
    # Road-section category names annotated in the TSD-MAX dataset.
    CLASSES = ('Section8', 'Section6', 'Section63', 'Section33', 'Section11',
               'Section2', 'Section48', 'Section13', 'Section64', 'Section4',
               'Section75')
    def __init__(self,
                 ann_file,
                 img_prefix,
                 img_scale,
                 img_norm_cfg,
                 refer_scale=(127,127),
                 num_polar=36,
                 multiscale_mode='value',
                 size_divisor=None,
                 proposal_file=None,
                 num_max_proposals=1000,
                 flip_ratio=0,
                 with_mask=True,
                 with_crowd=True,
                 with_label=True,
                 with_semantic_seg=False,
                 seg_prefix=None,
                 seg_scale_factor=1,
                 extra_aug=None,
                 resize_keep_ratio=True,
                 corruption=None,
                 corruption_severity=1,
                 skip_img_without_anno=True,
                 test_mode=False,
                 strides=[8, 16, 32, 64, 128],
                 regress_ranges=[(-1, 64), (64, 128),
                                 (128, 256), (256, 512), (512, 1e8)]):
        """Extend Coco_Seg_Dataset with SiamPolar-specific options.

        Additional parameters (the rest are forwarded positionally to the
        Coco_Seg_Dataset constructor, so their order must not change):
            refer_scale: (w, h) size the reference (first-frame) crop is resized to.
            num_polar: number of polar rays per mask target; only 36 or 72.
            strides: FPN feature-map strides used when generating points.
            regress_ranges: per-level (min, max) regression distance ranges.

        NOTE(review): strides/regress_ranges are mutable default arguments;
        safe only while they are never mutated in place — confirm.
        """
        super(TSD_MAX_Seg_Dataset, self).__init__(ann_file,
                                                  img_prefix,
                                                  img_scale,
                                                  img_norm_cfg,
                                                  multiscale_mode,
                                                  size_divisor,
                                                  proposal_file,
                                                  num_max_proposals,
                                                  flip_ratio,
                                                  with_mask,
                                                  with_crowd,
                                                  with_label,
                                                  with_semantic_seg,
                                                  seg_prefix,
                                                  seg_scale_factor,
                                                  extra_aug,
                                                  resize_keep_ratio,
                                                  corruption,
                                                  corruption_severity,
                                                  skip_img_without_anno,
                                                  test_mode)
        self.refer_scale = refer_scale
        self.strides = strides
        self.regress_ranges = regress_ranges
        assert num_polar in [36, 72]
        self.num_polar = num_polar
    def prepare_train_img(self, idx):
        """Build one training sample: augmented image, reference crop, gt targets,
        and offline polar-ray targets.

        Returns a dict of DataContainers, or None when the sample must be
        skipped (no proposals / no valid gt boxes).
        """
        img_info = self.img_infos[idx]
        # img_prefix[:-11] strips a fixed-length trailing subdirectory;
        # presumably the annotation filenames already include it — TODO confirm.
        img = mmcv.imread(osp.join(self.img_prefix[:-11], img_info['filename']))
        # corruption (optional robustness augmentation)
        if self.corruption is not None:
            img = corrupt(
                img,
                severity=self.corruption_severity,
                corruption_name=self.corruption)
        # load proposals if necessary
        if self.proposals is not None:
            proposals = self.proposals[idx][:self.num_max_proposals]
            # TODO: Handle empty proposals properly. Currently images with
            # no proposals are just ignored, but they can be used for
            # training in concept.
            if len(proposals) == 0:
                return None
            if not (proposals.shape[1] == 4 or proposals.shape[1] == 5):
                raise AssertionError(
                    'proposals should have shapes (n, 4) or (n, 5), '
                    'but found {}'.format(proposals.shape))
            if proposals.shape[1] == 5:
                # Column 4 is a score; split it off and re-append after transform.
                scores = proposals[:, 4, None]
                proposals = proposals[:, :4]
            else:
                scores = None
        ann = self.get_ann_info(idx)
        gt_bboxes = ann['bboxes']
        gt_labels = ann['labels']
        if self.with_crowd:
            gt_bboxes_ignore = ann['bboxes_ignore']
        # skip the image if there is no valid gt bbox
        if len(gt_bboxes) == 0 and self.skip_img_without_anno:
            warnings.warn('Skip the image "%s" that has no valid gt bbox' %
                          osp.join(self.img_prefix, img_info['filename']))
            return None
        # apply transforms
        flip = True if np.random.rand() < self.flip_ratio else False
        # randomly sample a scale
        img_scale = random_scale(self.img_scales, self.multiscale_mode)
        img, img_shape, pad_shape, scale_factor = self.img_transform(img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
        img = img.copy()
        # get img_refer (the tracked template) from the first frame of the video
        first_frame_idx = img_info["first_frame"]
        refer_info = self.img_infos[first_frame_idx]
        refer_ann = self.get_ann_info(first_frame_idx)
        img_refer = mmcv.imread(osp.join(self.img_prefix[:-11], refer_info['filename']))
        # crop the bbox of the reference object
        img_refer = torch.squeeze(torch.Tensor(mmcv.imcrop(img_refer, refer_ann["bboxes"])))
        # resize to refer_scale and move channels first (C, H, W)
        img_refer = torch.Tensor(mmcv.imresize(np.float32(img_refer), self.refer_scale, return_scale=False)).permute(2, 0, 1)
        if self.with_seg:
            gt_seg = mmcv.imread(
                osp.join(self.seg_prefix,
                         img_info['filename'].replace('jpg', 'png')),
                flag='unchanged')
            gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip)
            gt_seg = mmcv.imrescale(
                gt_seg, self.seg_scale_factor, interpolation='nearest')
            gt_seg = gt_seg[None, ...]
        if self.proposals is not None:
            proposals = self.bbox_transform(proposals, img_shape, scale_factor,
                                            flip)
            proposals = np.hstack([proposals, scores
                                   ]) if scores is not None else proposals
        gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor,
                                        flip)
        if self.with_crowd:
            gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,
                                                   scale_factor, flip)
        if self.with_mask:
            gt_masks = self.mask_transform(ann['masks'], pad_shape,
                                           scale_factor, flip)
        ori_shape = (img_info['height'], img_info['width'], 3)
        img_meta = dict(
            ori_shape=ori_shape,
            img_shape=img_shape,
            pad_shape=pad_shape,
            scale_factor=scale_factor,
            flip=flip)
        data = dict(
            img=DC(to_tensor(img), stack=True),
            img_meta=DC(img_meta, cpu_only=True),
            gt_bboxes=DC(to_tensor(gt_bboxes)),
            img_refer=DC(to_tensor(img_refer), stack=True))
        if self.with_label:
            data['gt_labels'] = DC(to_tensor(gt_labels))
        if self.with_crowd:
            data['gt_bboxes_ignore'] = DC(to_tensor(gt_bboxes_ignore))
        if self.with_mask:
            data['gt_masks'] = DC(gt_masks, cpu_only=True)
        #--------------------offline ray label generation-----------------------------
        # NOTE(review): these flags are (re)assigned on self on every call; they
        # look like configuration that belongs in __init__ — confirm.
        self.center_sample = True
        self.use_mask_center = True
        self.radius = 1.5
        featmap_sizes = self.get_featmap_size(pad_shape)
        # featmap_sizes: [[32, 32], [16, 16], [8, 8]]
        num_levels = len(self.strides)
        all_level_points = self.get_points(featmap_sizes)
        # level 0 points: torch.Size([1024, 2])
        # level 1 points: torch.Size([256, 2])
        # level 2 points: torch.Size([64, 2])
        self.num_points_per_level = [i.size()[0] for i in all_level_points]
        # Broadcast each level's regress range over that level's points.
        expanded_regress_ranges = [
            all_level_points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
                all_level_points[i]) for i in range(num_levels)
        ]
        concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
        concat_points = torch.cat(all_level_points, 0)
        gt_masks = gt_masks[:len(gt_bboxes)]
        gt_bboxes = torch.Tensor(gt_bboxes)
        gt_labels = torch.Tensor(gt_labels)
        _labels, _bbox_targets, _mask_targets = self.polar_target_single(
            gt_bboxes,gt_masks,gt_labels,concat_points, concat_regress_ranges, self.num_polar)
        data['_gt_labels'] = DC(_labels)
        data['_gt_bboxes'] = DC(_bbox_targets)
        data['_gt_masks'] = DC(_mask_targets)
        #--------------------offline ray label generation-----------------------------
        return data
def get_featmap_size(self, shape):
h,w = shape[:2]
featmap_sizes = []
for i in self.strides:
featmap_sizes.append([int(h / i)+1, int(w / i)+1])
return featmap_sizes
    def prepare_test_img(self, idx):
        """Prepare an image for testing (multi-scale and flipping).

        Builds one (img, img_meta, img_refer[, proposal]) entry per test
        scale, plus a flipped copy of each when flip_ratio > 0.
        """
        img_info = self.img_infos[idx]
        # img_prefix[:-11] strips a fixed-length trailing subdirectory;
        # presumably the annotation filenames already include it — TODO confirm.
        img = mmcv.imread(osp.join(self.img_prefix[:-11], img_info['filename']))
        # corruption (optional robustness evaluation)
        if self.corruption is not None:
            img = corrupt(
                img,
                severity=self.corruption_severity,
                corruption_name=self.corruption)
        # load proposals if necessary
        if self.proposals is not None:
            proposal = self.proposals[idx][:self.num_max_proposals]
            if not (proposal.shape[1] == 4 or proposal.shape[1] == 5):
                raise AssertionError(
                    'proposals should have shapes (n, 4) or (n, 5), '
                    'but found {}'.format(proposal.shape))
        else:
            proposal = None
        # get img_refer (the tracked template) from the first frame of the video
        first_frame_idx = img_info["first_frame"]
        refer_info = self.img_infos[first_frame_idx]
        refer_ann = self.get_ann_info(first_frame_idx)
        img_refer = mmcv.imread(osp.join(self.img_prefix[:-11], refer_info['filename']))
        # crop the bbox of the reference object
        img_refer = torch.squeeze(torch.Tensor(mmcv.imcrop(img_refer, refer_ann["bboxes"])))
        # resize to refer_scale and move channels first (C, H, W)
        img_refer = torch.Tensor(mmcv.imresize(np.float32(img_refer), self.refer_scale, return_scale=False)).permute(2, 0, 1)
        def prepare_single(img, scale, flip, proposal=None):
            # Transform one image at one scale/flip setting and package its meta.
            _img, img_shape, pad_shape, scale_factor = self.img_transform(
                img, scale, flip, keep_ratio=self.resize_keep_ratio)
            _img = to_tensor(_img)
            _img_meta = dict(
                ori_shape=(img_info['height'], img_info['width'], 3),
                img_shape=img_shape,
                pad_shape=pad_shape,
                scale_factor=scale_factor,
                flip=flip)
            if proposal is not None:
                if proposal.shape[1] == 5:
                    # Column 4 is a score; split off, transform boxes, re-append.
                    score = proposal[:, 4, None]
                    proposal = proposal[:, :4]
                else:
                    score = None
                _proposal = self.bbox_transform(proposal, img_shape,
                                                scale_factor, flip)
                _proposal = np.hstack([_proposal, score
                                       ]) if score is not None else _proposal
                _proposal = to_tensor(_proposal)
            else:
                _proposal = None
            return _img, _img_meta, _proposal
        imgs = []
        img_metas = []
        img_refers = []
        proposals = []
        for scale in self.img_scales:
            _img, _img_meta, _proposal = prepare_single(
                img, scale, False, proposal)
            imgs.append(_img)
            img_metas.append(DC(_img_meta, cpu_only=True))
            img_refers.append(DC(to_tensor(img_refer), stack=True))
            proposals.append(_proposal)
            if self.flip_ratio > 0:
                # Also evaluate the horizontally flipped variant.
                _img, _img_meta, _proposal = prepare_single(
                    img, scale, True, proposal)
                imgs.append(_img)
                img_metas.append(DC(_img_meta, cpu_only=True))
                img_refers.append(DC(to_tensor(img_refer), stack=True))
                proposals.append(_proposal)
        data = dict(img=imgs,
                    img_meta=img_metas,
                    img_refer=img_refers)
        if self.proposals is not None:
            data['proposals'] = proposals
        return data
    # fit different polar numbers
def polar_target_single(self, gt_bboxes, gt_masks, gt_labels, points, regress_ranges, num_polar):
num_points = points.size(0)
num_gts = gt_labels.size(0)
if num_gts == 0:
return gt_labels.new_zeros(num_points), \
gt_bboxes.new_zeros((num_points, 4))
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
#xs ys 分别是points的x y坐标
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
bbox_targets = torch.stack((left, top, right, bottom), -1) #feature map上所有点对于gtbox的上下左右距离 [num_pix, num_gt, 4]
#mask targets 也按照这种写 同时labels 得从bbox中心修改成mask 重心
mask_centers = []
mask_contours = []
#第一步 先算重心 return [num_gt, 2]
for mask in gt_masks:
cnt, contour = self.get_single_centerpoint(mask)
contour = contour[0]
contour = torch.Tensor(contour).float()
y, x = cnt
mask_centers.append([x,y])
mask_contours.append(contour)
mask_centers = torch.Tensor(mask_centers).float()
# 把mask_centers assign到不同的层上,根据regress_range和重心的位置
mask_centers = mask_centers[None].expand(num_points, num_gts, 2)
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
# condition1: inside a gt bbox
# add center sample
if self.center_sample:
if self.use_mask_center:
inside_gt_bbox_mask = self.get_mask_sample_region(gt_bboxes,
mask_centers,
self.strides,
self.num_points_per_level,
xs,
ys,
radius=self.radius)
else:
inside_gt_bbox_mask = self.get_sample_region(gt_bboxes,
self.strides,
self.num_points_per_level,
xs,
ys,
radius=self.radius)
else:
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
max_regress_distance >= regress_ranges[..., 0]) & (
max_regress_distance <= regress_ranges[..., 1])
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
labels = gt_labels[min_area_inds]
labels[min_area == INF] = 0 #[num_gt] 介于0-80
bbox_targets = bbox_targets[range(num_points), min_area_inds]
pos_inds = labels.nonzero().reshape(-1)
mask_targets = torch.zeros(num_points, num_polar).float()
pos_mask_ids = min_area_inds[pos_inds]
for p,id in zip(pos_inds, pos_mask_ids):
x, y = points[p]
pos_mask_contour = mask_contours[id]
# SiamPolar: interpolate
new_contour = []
contour_length = len(pos_mask_contour)
for i in range(contour_length):
new_contour.append(pos_mask_contour[i])
# new_contour.append((3*pos_mask_contour[i]+pos_mask_contour[(i+1)%contour_length])/4)
new_contour.append((pos_mask_contour[i]+pos_mask_contour[(i+1)%contour_length])/2)
# new_contour.append((pos_mask_contour[i]+3*pos_mask_contour[(i+1)%contour_length])/4)
new_pos_mask_contour = torch.cat(new_contour, dim=0).unsqueeze(1)
# print(pos_mask_contour.size())
# print(new_pos_mask_contour.size())
# print(new_pos_mask_contour)
# exit()
dists, coords = self.get_coordinates(x, y, new_pos_mask_contour, num_polar)
mask_targets[p] = dists
return labels, bbox_targets, mask_targets
def get_coordinates(self, c_x, c_y, pos_mask_contour, num_polar):
ct = pos_mask_contour[:, 0, :]
x = ct[:, 0] - c_x
y = ct[:, 1] - c_y
# angle = np.arctan2(x, y)*180/np.pi
angle = torch.atan2(x, y) * 180 / np.pi
angle[angle < 0] += 360
angle = angle.int()
# dist = np.sqrt(x ** 2 + y ** 2)
dist = torch.sqrt(x ** 2 + y ** 2)
angle, idx = torch.sort(angle)
dist = dist[idx]
# generate num_polar angles
new_coordinate = {}
step_size = int(360/num_polar)
for i in range(0, 360, step_size):
| |
import os
import pprint
import re
import click
import yaml
from flask.cli import FlaskGroup
from networkx import draw, has_path
from g2p import make_g2p
from g2p._version import VERSION
from g2p.api import update_docs
from g2p.app import APP, network_to_echart
from g2p.exceptions import MappingMissing
from g2p.log import LOGGER
from g2p.mappings import Mapping
from g2p.mappings.create_fallback_mapping import (
DUMMY_INVENTORY,
align_to_dummy_fallback,
)
from g2p.mappings.create_ipa_mapping import create_mapping, create_multi_mapping
from g2p.mappings.langs import LANGS_NETWORK, MAPPINGS_AVAILABLE, cache_langs
from g2p.mappings.langs.utils import check_ipa_known_segs
from g2p.mappings.utils import is_ipa, is_xsampa, load_mapping_from_path, normalize
from g2p.transducer import Transducer
PRINTER = pprint.PrettyPrinter(indent=4)
def create_app():
    """Return the flask app for g2p (used by FlaskGroup as the app factory)."""
    return APP
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
def parse_from_or_to_lang_spec(lang_spec):
    """Parse a value given to g2p generate-mapping --from or --to.

    See the documentation of generate_mapping() for the syntax of lang_spec.

    Returns list[tuple[Mapping, io (str)]]:
        the mapping(s) lang_spec refers to, and "in" or "out", to indicate if the
        relevant inventory is the mapping's in_lang or out_lang.

    Raises:
        click.BadParameter if lang_spec is not valid
    """
    mapping_spec, _, in_or_out = lang_spec.partition("[")
    # Bug fix: str.rstrip returns a new string; the original discarded the
    # result, leaving a trailing "]" that made every explicit "[in]"/"[out]"
    # qualifier fail the validity check below.
    in_or_out = in_or_out.rstrip("]")
    in_lang, _, out_lang = mapping_spec.partition("_to_")
    if out_lang:
        # Full in-lang_to_out-lang[[in]|[out]] syntax.
        try:
            mapping = Mapping(in_lang=in_lang, out_lang=out_lang)
        except MappingMissing as e:
            raise click.BadParameter(
                f'Cannot find mapping {in_lang}->{out_lang} for --from or --to spec "{lang_spec}": {e}'
            )
        if not in_or_out:
            # No explicit qualifier: default to whichever side is IPA.
            if is_ipa(out_lang):
                in_or_out = "out"
            elif is_ipa(in_lang):
                in_or_out = "in"
            else:
                raise click.BadParameter(
                    f'Cannot guess in/out for IPA lang spec "{lang_spec}" because neither {in_lang} '
                    f'nor {out_lang} is IPA. Specify "[in]" or "[out]" if you are sure it is correct.'
                )
        if in_or_out not in ("in", "out"):
            raise click.BadParameter(
                f'Invalid IPA language specification "{lang_spec}": only "in" or "out" '
                "is allowed in square brackets, to disambiguate between input or output "
                "inventory when necessary."
            )
        return [(mapping, in_or_out)]
    else:
        # Bare 3-letter language code syntax.
        if in_or_out:
            raise click.BadParameter(
                f'Bad IPA lang spec "{lang_spec}": the [in]/[out] qualifier is only '
                "supported with the full in-lang_to_out-lang[[in]|[out]] syntax."
            )
        if in_lang == "eng":
            # Special case: "eng" means the input side of eng-ipa_to_eng-arpabet.
            mapping = Mapping(in_lang="eng-ipa", out_lang="eng-arpabet")
            in_or_out = "in"
            return [(mapping, in_or_out)]
        else:
            out_lang = in_lang + "-ipa"
            # check_ipa_known_segs([out_lang]) # this outputs a lot of spurious noise...
            # Union of all non-IPA mappings whose output inventory is lang-ipa.
            mappings = [
                (Mapping(in_lang=m["in_lang"], out_lang=m["out_lang"]), "out")
                for m in MAPPINGS_AVAILABLE
                if m["out_lang"] == out_lang and not is_ipa(m["in_lang"])
            ]
            if not mappings:
                raise click.BadParameter(f'No IPA mappings found for "{lang_spec}".')
            return mappings
@click.version_option(version=VERSION, prog_name="g2p")
@click.group(cls=FlaskGroup, create_app=create_app, context_settings=CONTEXT_SETTINGS)
def cli():
    """Management script for G2P.

    FlaskGroup adds the standard Flask run/shell commands alongside the g2p
    subcommands registered on this group.
    """
@click.option(
"--out-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help='Output results in DIRECTORY instead of the global "generated" directory.',
)
@click.option(
"--to",
"to_langs",
default=None,
help='Colon- or comma-separated list of "to" languages in from/to mode',
)
@click.option(
"--from",
"from_langs",
default=None,
help='Colon- or comma-separated list of "from" languages in from/to mode',
)
@click.option(
"--list-dummy", default=False, is_flag=True, help="List the dummy phone inventory."
)
@click.option(
"--dummy/--no-dummy",
default=False,
help="Generate dummy fallback mapping to minimalist phone inventory.",
)
@click.option(
"--ipa/--no-ipa", default=False, help="Generate mapping from LANG-ipa to eng-ipa."
)
@click.option(
"--merge/--no-merge",
default=False,
help="Merge multiple mappings together, in which case IN_LANG is a colon-seperated list and OUT_LANG is required.",
)
@click.argument(
"out_lang", required=False, default=None, type=str,
)
@click.argument(
"in_lang", required=False, default=None, type=str,
)
@cli.command(
context_settings=CONTEXT_SETTINGS,
short_help="Generate English IPA or dummy mapping.",
)
def generate_mapping(
in_lang, out_lang, dummy, ipa, list_dummy, out_dir, merge, from_langs, to_langs
):
""" Generate a new mapping from existing mappings in the g2p system.
This command has different modes of operation.
Standard mode:
g2p generate-mapping [--dummy|--ipa] IN_LANG [OUT_LANG]
For specified IN_LANG, generate a mapping from IN_LANG-ipa to eng-ipa,
or from IN_LANG-ipa to a dummy minimalist phone inventory. This assumes
the mapping IN_LANG -> IN_LANG-ipa exists and creates a mapping from its
output inventory.
To generate a mapping from IN_LANG-ipa to eng-ipa from a mapping
following a different patterns, e.g., from crl-equiv -> crl-ipa, specify
both IN_LANG (crl-equiv in this example) and OUT_LANG (crl-ipa in this
example).
\b
Sample usage:
Generate Algonquin IPA to English IPA from alq -> alq-ipa:
g2p generate-mapping --ipa alq
Generate Mohawk IPA to English IPA from moh-equiv -> moh-ipa:
g2p generate-mapping --ipa moh-equiv moh-ipa
Generate Michif IPA to English IPA from the union of crg-dv -> crg-ipa
and crg-tmd -> crg-ipa:
g2p generate-mapping --ipa --merge crg-dv:crg-tmd crg-ipa
List the dummy inventory used by --dummy:
g2p generate-mapping --list-dummy
From/to IPA mode:
\b
g2p generate-mapping --from FROM_L1 --to TO_L1
g2p generate-mapping --from FROM_L1:FROM_L2:... --to TO_L1:TO_L2:...
Generate an IPA mapping from the union of FROM_L1-ipa, FROM-L2-ipa, etc to
the union of TO_L1-ipa, TO-L2-ipa, etc. One or more from/to language
code(s) can be specified in colon- or comma-separated lists.
\b
Sample usage:
Generate a mapping from kwk-ipa to moh-ipa based on all mappings into
kwk-ipa and moh-ipa:
g2p generate-mapping --from kwk --to moh
Generate a mapping from eng-ipa to crg-ipa based only on crg-dv -> crg-ipa:
g2p generate-mapping --from eng --to crg-dv_to_crg-ipa
Generate a mapping from kwk-ipa to moh-ipa+crg-ipa+eng-ipa based on
all mappings into kwk-ipa (from side) and the union of all mappings
into moh-ipa and crg-ipa plus eng-ipa_to_eng-arpabet (to side):
g2p generate-mapping --from kwk --to moh:crg:eng
Full syntax for specifying FROM_Ln and TO_Ln:
\b
lang (i.e., 3-letter code):
- If there is only one mapping into lang-ipa, "lang" refers to the
output of that mapping, e.g., "fra" means "fra_to_fra-ipa[out]".
- If there are several mappings into lang-ipa, "lang" refers to the
union of the outputs of those mappings, e.g., "moh" means the union
of "moh-equiv_to_moh-ipa[out]" and "moh-festival_to_moh-ipa[out]".
- It is an error if there are no mappings into lang-ipa.
- Only mappings from non-IPA to IPA are considered (i.e., IPA-to-IPA
mappings created by this command will not be included: use the
longer syntax below if you want to use them).
- Special case: "eng" refers to "eng-ipa_to_eng-arpabet[in]".
\b
in-lang_to_out-lang[[in]|[out]]:
- This expanded syntax is used to avoid the union when it is not
desired, e.g., "moh-equiv_to_moh-ipa" refers only to
"moh-equiv_to_moh-ipa,out" rather than the union "moh" represents.
- If out-lang is IPA, the output inventory is used; else if in-lang
is IPA, the input inventory is used; it is an error if neither
language is IPA.
- Specify "[in]" or "[out]" to override the above default.
- "_to_" is the joiner used to specify "the mapping from 'in-lang' to
'out-lang'" in the g2p network, regardless of the name of the file
it is stored in.
If you just modified or created the mappings from which the new mapping is
to be generated, don't forget to call "g2p update" first, so that "g2p
generate-mapping" can see the latest version.
Call "g2p update" again after calling "g2p generate-mapping" to compile
the newly generated mapping and make it available.
Note: exactly one of --ipa, --dummy, --from/--to, or --list-dummy is
required.
You can list available mappings with "g2p doctor --list-ipa", or by
visiting http://g2p-studio.herokuapp.com/api/v1/langs .
"""
# Make sure only one mode was specified on the command line
mode_count = (
(1 if ipa else 0)
+ (1 if dummy else 0)
+ (1 if list_dummy else 0)
+ (1 if (from_langs or to_langs) else 0)
)
if mode_count == 0:
raise click.UsageError(
"Nothing to do! Please specify at least one of --ipa, --dummy, "
"--list-dummy, or --from/--to."
)
if mode_count > 1:
raise click.UsageError(
"Multiple modes selected. Choose only one of --ipa, --dummy, "
"--list-dummy, or --from/--to."
)
if list_dummy or from_langs is not None or to_langs is not None:
if in_lang is not None:
raise click.UsageError(
"IN_LANG is not allowed with --list-dummy or --from/--too",
)
if from_langs is not None or to_langs is not None:
if from_langs is None or to_langs is None:
raise click.UsageError("--from and --to must be used together")
if merge:
if not ipa and not dummy:
raise click.UsageError("--merge is only compatible with --ipa and --dummy.")
if out_lang is None:
raise click.UsageError("OUT_LANG is required with --merge.")
if out_dir and not os.path.isdir(out_dir):
raise click.BadParameter(
f'Output directory "{out_dir}" does not exist. Cannot write mapping.',
param_hint="--out-dir",
)
if list_dummy:
# --list-dummy mode
print("Dummy phone inventory: {}".format(DUMMY_INVENTORY))
elif ipa or dummy:
# --ipa and --dummy modes
if in_lang is None:
raise click.UsageError("Missing argument 'IN_LANG'.")
if merge:
in_langs = in_lang.split(":")
else:
in_langs = [in_lang]
in_lang_choices = [
x for x in LANGS_NETWORK.nodes if not is_ipa(x) and not is_xsampa(x)
]
for l in in_langs:
if l not in in_lang_choices:
raise click.UsageError(
f'Invalid value for IN_LANG: "{l}".\n'
"IN_LANG must be a non-IPA language code with an existing IPA mapping, "
f"i.e., one of:\n{', | |
import logging
import math
from django.core.exceptions import PermissionDenied
from django.db import IntegrityError, transaction
from django.test import Client
from django.utils import timezone
from django.conf import settings
from rest_framework.parsers import JSONParser
from io import BytesIO
import json
from ambulance.models import Ambulance, \
AmbulanceStatus, AmbulanceCapability, AmbulanceUpdate
from emstrack.latlon import calculate_orientation
from ambulance.serializers import AmbulanceSerializer, AmbulanceUpdateSerializer, AmbulanceUpdateCompactSerializer
from login.models import Client as loginClient
from emstrack.tests.util import date2iso, point2str, dict2point
from login.tests.setup_data import TestSetup
logger = logging.getLogger(__name__)
class TestAmbulanceGetList(TestSetup):
    """Read-access tests for the ambulance API: serializer output, the
    detail endpoint, and the list endpoint under different users."""

    def _retrieve(self, client, ambulance_id):
        # Helper: GET the detail endpoint for one ambulance id.
        return client.get('/en/api/ambulance/{}/'.format(str(ambulance_id)),
                          follow=True)

    def test_ambulance_serializer(self):
        # Every fixture ambulance must serialize with the full field set
        # and the initial UK ("unknown") status.
        for ambulance in (self.a1, self.a2, self.a3):
            expected = {
                'id': ambulance.id,
                'identifier': ambulance.identifier,
                'comment': ambulance.comment,
                'capability': ambulance.capability,
                'status': AmbulanceStatus.UK.name,
                'orientation': ambulance.orientation,
                'location': point2str(ambulance.location),
                'timestamp': date2iso(ambulance.timestamp),
                'client_id': None,
                'updated_by': ambulance.updated_by.id,
                'updated_on': date2iso(ambulance.updated_on)
            }
            self.assertDictEqual(AmbulanceSerializer(ambulance).data, expected)

    def test_ambulance_get_viewset(self):
        client = Client()

        # The admin user can retrieve every ambulance.
        client.login(username=settings.MQTT['USERNAME'], password=settings.MQTT['PASSWORD'])
        for ambulance in (self.a1, self.a2, self.a3):
            response = self._retrieve(client, ambulance.id)
            self.assertEqual(response.status_code, 200)
            result = JSONParser().parse(BytesIO(response.content))
            answer = AmbulanceSerializer(Ambulance.objects.get(id=ambulance.id)).data
            self.assertDictEqual(result, answer)
        client.logout()

        # testuser2 can retrieve its own ambulance (a3) ...
        client.login(username='testuser2', password='<PASSWORD>')
        response = self._retrieve(client, self.a3.id)
        self.assertEqual(response.status_code, 200)
        result = JSONParser().parse(BytesIO(response.content))
        answer = AmbulanceSerializer(Ambulance.objects.get(id=self.a3.id)).data
        self.assertDictEqual(result, answer)
        # ... but nobody else's (hidden ambulances come back as 404).
        for ambulance_id in (self.a2.id, self.a1.id):
            self.assertEqual(self._retrieve(client, ambulance_id).status_code, 404)
        client.logout()

        # testuser1 has no read access at all.
        client.login(username='testuser1', password='<PASSWORD>')
        for ambulance_id in (self.a1.id, self.a2.id, self.a1.id):
            self.assertEqual(self._retrieve(client, ambulance_id).status_code, 404)
        client.logout()

    def test_ambulance_get_list_viewset(self):
        client = Client()

        # The admin user sees every ambulance in the list endpoint.
        client.login(username=settings.MQTT['USERNAME'], password=settings.MQTT['PASSWORD'])
        response = client.get('/en/api/ambulance/', follow=True)
        self.assertEqual(response.status_code, 200)
        result = JSONParser().parse(BytesIO(response.content))
        expected = [AmbulanceSerializer(ambulance).data
                    for ambulance in (self.a1, self.a2, self.a3)]
        self.assertCountEqual(result, expected)
        client.logout()

        # testuser1 sees an empty list.
        client.login(username='testuser1', password='<PASSWORD>')
        response = client.get('/en/api/ambulance/', follow=True)
        self.assertEqual(response.status_code, 200)
        result = JSONParser().parse(BytesIO(response.content))
        self.assertCountEqual(result, [])
        client.logout()

        # testuser2 sees only its own ambulance (a3); a1 is not readable.
        client.login(username='testuser2', password='<PASSWORD>')
        response = client.get('/en/api/ambulance/', follow=True)
        self.assertEqual(response.status_code, 200)
        result = JSONParser().parse(BytesIO(response.content))
        expected = [AmbulanceSerializer(Ambulance.objects.get(id=self.a3.id)).data]
        self.assertCountEqual(result, expected)
        client.logout()
class TestAmbulanceUpdate(TestSetup):
def test_ambulance_update_serializer(self):
    # End-to-end exercise of AmbulanceSerializer partial updates:
    # status-only updates, location+timestamp updates, the
    # timestamp-without-location/status validation error, and the
    # one-client-per-ambulance pairing rules enforced through loginClient.
    # superuser first
    # Update ambulance status
    a = self.a1
    user = self.u1
    status = AmbulanceStatus.AH.name
    serializer = AmbulanceSerializer(a,
                                     data={
                                         'status': status,
                                     }, partial=True)
    serializer.is_valid()
    serializer.save(updated_by=user)
    # test
    serializer = AmbulanceSerializer(a)
    result = {
        'id': a.id,
        'identifier': a.identifier,
        'comment': a.comment,
        'capability': a.capability,
        'status': status,
        'orientation': a.orientation,
        'location': point2str(a.location),
        'client_id': None,
        'timestamp': date2iso(a.timestamp),
        'updated_by': user.id,
        'updated_on': date2iso(a.updated_on)
    }
    self.assertDictEqual(serializer.data, result)
    # Update ambulance location
    timestamp = timezone.now()
    location = {'latitude': -2., 'longitude': 7.}
    serializer = AmbulanceSerializer(a,
                                     data={
                                         'location': location,
                                         'timestamp': timestamp,
                                     }, partial=True)
    serializer.is_valid()
    serializer.save(updated_by=user)
    # test
    serializer = AmbulanceSerializer(a)
    result = {
        'id': a.id,
        'identifier': a.identifier,
        'comment': a.comment,
        'capability': a.capability,
        'status': a.status,
        'orientation': a.orientation,
        'location': point2str(location),
        'client_id': None,
        'timestamp': date2iso(timestamp),
        'updated_by': user.id,
        'updated_on': date2iso(a.updated_on)
    }
    self.assertDictEqual(serializer.data, result)
    # error update timestamp without location or status
    serializer = AmbulanceSerializer(a,
                                     data={
                                         'timestamp': timestamp,
                                     }, partial=True)
    self.assertEqual(serializer.is_valid(), False)
    # regular authorized user
    # Update ambulance status
    a = self.a3
    user = self.u3
    status = AmbulanceStatus.AH.name
    serializer = AmbulanceSerializer(a,
                                     data={
                                         'status': status,
                                     }, partial=True)
    serializer.is_valid()
    serializer.save(updated_by=user)
    # test
    serializer = AmbulanceSerializer(a)
    result = {
        'id': a.id,
        'identifier': a.identifier,
        'comment': a.comment,
        'capability': a.capability,
        'status': status,
        'orientation': a.orientation,
        'location': point2str(a.location),
        'client_id': None,
        'timestamp': date2iso(a.timestamp),
        'updated_by': user.id,
        'updated_on': date2iso(a.updated_on)
    }
    self.assertDictEqual(serializer.data, result)
    # Update ambulance location
    timestamp = timezone.now()
    location = {'latitude': -2., 'longitude': 7.}
    serializer = AmbulanceSerializer(a,
                                     data={
                                         'location': location,
                                         'timestamp': timestamp
                                     }, partial=True)
    serializer.is_valid()
    serializer.save(updated_by=user)
    # test
    serializer = AmbulanceSerializer(a)
    result = {
        'id': a.id,
        'identifier': a.identifier,
        'comment': a.comment,
        'capability': a.capability,
        'status': a.status,
        'orientation': a.orientation,
        'location': point2str(location),
        'client_id': None,
        'timestamp': date2iso(timestamp),
        'updated_by': user.id,
        'updated_on': date2iso(a.updated_on)
    }
    self.assertDictEqual(serializer.data, result)
    # error update timestamp without location or status
    serializer = AmbulanceSerializer(a,
                                     data={
                                         'timestamp': timestamp,
                                     }, partial=True)
    self.assertEqual(serializer.is_valid(), False)
    # update client
    # client1 is attached to ambulance a; client2 is unattached.
    client1 = loginClient(client_id='client_id_1', user_id=self.u1.id, ambulance=a)
    client1.save()
    client2 = loginClient(client_id='client_id_2', user_id=self.u3.id)
    client2.save()
    self.assertEqual(client1.ambulance, a)
    self.assertEqual(client2.ambulance, None)
    self.assertEqual(a.client, client1)
    # test: serializer now reports client1 as the attached client
    serializer = AmbulanceSerializer(a)
    result = {
        'id': a.id,
        'identifier': a.identifier,
        'comment': a.comment,
        'capability': a.capability,
        'status': a.status,
        'orientation': a.orientation,
        'location': point2str(location),
        'timestamp': date2iso(timestamp),
        'client_id': client1.client_id,
        'updated_by': user.id,
        'updated_on': date2iso(a.updated_on)
    }
    self.assertDictEqual(serializer.data, result)
    # will not change: client2's user has no permission on a2
    client2.ambulance = self.a2
    with self.assertRaises(Exception) as raised:
        with transaction.atomic():
            client2.save()
    self.assertEqual(PermissionDenied, type(raised.exception))
    # will not change: a is already claimed by client1 (unique constraint)
    client2.ambulance = a
    with self.assertRaises(Exception) as raised:
        with transaction.atomic():
            client2.save()
    self.assertEqual(IntegrityError, type(raised.exception))
    # refetch from the DB to discard the failed in-memory assignments
    client2 = loginClient.objects.get(client_id='client_id_2')
    a = Ambulance.objects.get(id=a.id)
    self.assertEqual(client1.ambulance, a)
    self.assertEqual(client2.ambulance, None)
    self.assertEqual(a.client, client1)
    # test
    serializer = AmbulanceSerializer(a)
    result = {
        'id': a.id,
        'identifier': a.identifier,
        'comment': a.comment,
        'capability': a.capability,
        'status': a.status,
        'orientation': a.orientation,
        'location': point2str(location),
        'timestamp': date2iso(timestamp),
        'client_id': client1.client_id,
        'updated_by': user.id,
        'updated_on': date2iso(a.updated_on)
    }
    self.assertDictEqual(serializer.data, result)
    # will reset: detaching client1 frees the ambulance
    client1.ambulance = None
    client1.save()
    a = Ambulance.objects.get(id=a.id)
    self.assertEqual(client1.ambulance, None)
    self.assertEqual(client2.ambulance, None)
    self.assertEqual(hasattr(a, 'client'), False)
    # test
    serializer = AmbulanceSerializer(a)
    result = {
        'id': a.id,
        'identifier': a.identifier,
        'comment': a.comment,
        'capability': a.capability,
        'status': a.status,
        'orientation': a.orientation,
        'location': point2str(location),
        'timestamp': date2iso(timestamp),
        'client_id': None,
        'updated_by': user.id,
        'updated_on': date2iso(a.updated_on)
    }
    self.assertDictEqual(serializer.data, result)
    # will change: a is now free, so client2 may claim it
    client2.ambulance = a
    client2.save()
    self.assertEqual(client1.ambulance, None)
    self.assertEqual(client2.ambulance, a)
    self.assertEqual(a.client, client2)
    # test
    serializer = AmbulanceSerializer(a)
    result = {
        'id': a.id,
        'identifier': a.identifier,
        'comment': a.comment,
        'capability': a.capability,
        'status': a.status,
        'orientation': a.orientation,
        'location': point2str(location),
        'timestamp': date2iso(timestamp),
        'client_id': client2.client_id,
        'updated_by': user.id,
        'updated_on': date2iso(a.updated_on)
    }
    self.assertDictEqual(serializer.data, result)
    # will not change in partial update: client_id survives a status update
    serializer = AmbulanceSerializer(a,
                                     data={
                                         'status': AmbulanceStatus.OS.name
                                     }, partial=True)
    serializer.is_valid()
    serializer.save(updated_by=user)
    # test
    serializer = AmbulanceSerializer(a)
    result = {
        'id': a.id,
        'identifier': a.identifier,
        'comment': a.comment,
        'capability': a.capability,
        'status': AmbulanceStatus.OS.name,
        'orientation': a.orientation,
        'location': point2str(location),
        'timestamp': date2iso(timestamp),
        'client_id': client2.client_id,
        'updated_by': user.id,
        'updated_on': date2iso(a.updated_on)
    }
    self.assertDictEqual(serializer.data, result)
    # will not change in partial update
    serializer = AmbulanceSerializer(a,
                                     data={
                                         'status': AmbulanceStatus.PB.name,
                                     }, partial=True)
    serializer.is_valid()
    serializer.save(updated_by=user)
    # test
    serializer = AmbulanceSerializer(a)
    result = {
        'id': a.id,
        'identifier': a.identifier,
        'comment': a.comment,
        'capability': a.capability,
        'status': AmbulanceStatus.PB.name,
        'orientation': a.orientation,
        'location': point2str(location),
        'timestamp': date2iso(timestamp),
        'client_id': client2.client_id,
        'updated_by': user.id,
        'updated_on': date2iso(a.updated_on)
    }
    self.assertDictEqual(serializer.data, result)
def test_ambulance_patch_viewset(self):
# instantiate client
client = Client()
# login as admin
client.login(username=settings.MQTT['USERNAME'], password=settings.MQTT['PASSWORD'])
# retrieve ambulance
response = client.get('/en/api/ambulance/{}/'.format(str(self.a1.id)),
follow=True)
self.assertEqual(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
answer = AmbulanceSerializer(self.a1).data
self.assertDictEqual(result, answer)
# set status ambulance
status = AmbulanceStatus.OS.name
response = client.patch('/en/api/ambulance/{}/'.format(str(self.a1.id)),
content_type='application/json',
data=json.dumps({
'status': status,
}),
follow=True)
self.assertEqual(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
answer = AmbulanceSerializer(Ambulance.objects.get(id=self.a1.id)).data
self.assertDictEqual(result, answer)
# retrieve new ambulance status
response = client.get('/en/api/ambulance/{}/'.format(str(self.a1.id)),
follow = True)
self.assertEqual(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
self.assertEqual(result['status'], status)
# set status location
timestamp = timezone.now()
location = {'latitude': -2., 'longitude': 7.}
response = client.patch('/en/api/ambulance/{}/'.format(str(self.a1.id)),
content_type='application/json',
data=json.dumps({
'location': point2str(location),
'timestamp': date2iso(timestamp),
}),
follow=True)
self.assertEqual(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
answer = AmbulanceSerializer(Ambulance.objects.get(id=self.a1.id)).data
if math.fabs(answer['orientation'] - result['orientation']) < 1e-4:
answer['orientation'] = result['orientation']
self.assertDictEqual(result, answer)
# retrieve new ambulance location
response = client.get('/en/api/ambulance/{}/'.format(str(self.a1.id)))
self.assertEqual(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
self.assertEqual(result['status'], status)
self.assertEqual(result['location'], point2str(location))
self.assertEqual(result['timestamp'], date2iso(timestamp))
# set wrong attribute
response = client.patch('/en/api/ambulance/{}/'.format(str(self.a1.id)),
content_type='application/json',
data=json.dumps({
'status': 'will fail'
}),
follow=True)
self.assertEqual(response.status_code, 400)
# set wrong ambulance id
response = client.patch('/en/api/ambulance/100/',
data=json.dumps({
'status': status
}),
follow=True)
self.assertEqual(response.status_code, 404)
# logout
client.logout()
# login as testuser2
client.login(username='testuser2', password='<PASSWORD>')
# retrieve ambulance
response = client.get('/en/api/ambulance/{}/'.format(str(self.a3.id)),
follow=True)
self.assertEqual(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
answer = AmbulanceSerializer(self.a3).data
self.assertDictEqual(result, answer)
# set status ambulance
status = AmbulanceStatus.OS.name
response = client.patch('/en/api/ambulance/{}/'.format(str(self.a3.id)),
content_type='application/json',
data=json.dumps({
'status': status,
}),
follow=True)
self.assertEqual(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
answer = AmbulanceSerializer(Ambulance.objects.get(id=self.a3.id)).data
self.assertDictEqual(result, answer)
# retrieve new ambulance status
response = client.get('/en/api/ambulance/{}/'.format(str(self.a3.id)),
follow=True)
self.assertEqual(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
self.assertEqual(result['status'], status)
# set location
timestamp = timezone.now()
location = {'latitude': -2., 'longitude': 7.}
response = client.patch('/en/api/ambulance/{}/'.format(str(self.a3.id)),
content_type='application/json',
data=json.dumps({
'location': point2str(location),
'timestamp': date2iso(timestamp),
}),
follow=True)
self.assertEqual(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
answer = AmbulanceSerializer(Ambulance.objects.get(id=self.a3.id)).data
if math.fabs(answer['orientation'] - result['orientation']) < 1e-4:
answer['orientation'] = result['orientation']
self.assertDictEqual(result, answer)
# retrieve new ambulance location
response = client.get('/en/api/ambulance/{}/'.format(str(self.a3.id)),
follow=True)
self.assertEqual(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
self.assertEqual(result['status'], status)
self.assertEqual(result['location'], point2str(location))
self.assertEqual(result['timestamp'], date2iso(timestamp))
# set status ambulance
status = AmbulanceStatus.OS.name
response = client.patch('/en/api/ambulance/{}/'.format(str(self.a1.id)),
content_type='application/json',
data=json.dumps({
'status': status,
}),
follow=True)
self.assertEqual(response.status_code, 404)
# set status ambulance
status = AmbulanceStatus.OS.name
response = client.patch('/en/api/ambulance/{}/'.format(str(self.a2.id)),
content_type='application/json',
data=json.dumps({
'status': status,
}),
follow=True)
self.assertEqual(response.status_code, 404)
# logout
| |
<reponame>dpeters1/YakFrequency
# -*- coding: utf-8 -*-
"""Class for a user on Yik Yak"""
from yaklient import helper
from yaklient import settings
from yaklient.api import notifyapi, parseapi, yikyakapi
from yaklient.objects.comment import Comment
from yaklient.objects.location import Location
from yaklient.objects.message import Message
from yaklient.objects.notification import Notification, check_notif_error
from yaklient.objects.peeklocation import PeekLocation
from yaklient.objects.yak import Yak
from yaklient.config import locationize_endpoint
from yaklient.helper import ParsingResponseError
class User(object):
"""A user who interacts with Yik Yak"""
def __init__(self, location, user_id=None):
    """Initialize user with location and user_id.

    If user_id is None, a fresh ID is generated and the user is
    registered with the servers; existing IDs are reused as-is.
    NOTE(review): nesting reconstructed from a whitespace-mangled
    paste — registration/logging are taken to apply only to newly
    generated IDs, per the inline comments; confirm against upstream.
    """
    # Basecamp state is unknown until update() fills it in.
    self.basecamp_location = None
    self.basecamp_name = None
    self.basecamp_set = False
    self.location = location
    self.user_id = user_id
    self.yakarma = None
    # Set endpoint to nearest server if required
    if settings.LOCATIONIZE_ENDPOINT:
        locationize_endpoint(self.location)
    # If no user ID specified, register new user
    if self.user_id is None:
        self.user_id = helper.generate_id(dashes=False, upper=True)
        # Do not register with Parse if APPID or CLIENTKEY is missing
        if None in [settings.PARSE_APPID, settings.PARSE_CLIENTKEY]:
            self.register(parse_register=False)
        else:
            self.register()
        # Log user ID in file if required
        if settings.LOG_USERIDS:
            with open("userids", "a") as userid_file:
                userid_file.write(self.user_id + "\n")
    # Update user properties from server
    self.update()
def __str__(self):
    """Readable identification of this user and their location."""
    return "User({0}) at {1}".format(self.user_id, self.location)
def _convert_to_comment_id(self, comment, yak):
    """Return comment_id and message_id from comment and yak.

    comment may be a Comment object or a comment-ID string; yak
    (a Message or ID string) is only needed when comment is a string.
    """
    # Get message_id from yak
    if yak is not None:
        message_id = self._convert_to_message_id(yak)
    # If comment is a Comment, use comment's comment_id and message_id
    if isinstance(comment, Comment):
        comment_id = comment.comment_id
        message_id = comment.message_id
    # If comment is a string, treat comment as the comment_id
    elif isinstance(comment, basestring):
        comment_id = comment
    # Otherwise, TypeError
    else:
        raise TypeError("comment is not Message/string: " + str(comment))
    # message_id is deliberately left unbound when neither yak nor a
    # Comment object supplied it; the NameError from the return is
    # converted into a clearer message.
    try:
        return comment_id, message_id
    except NameError:
        raise NameError("No Yak specified")
@staticmethod
def _convert_to_message_id(yak):
    """Return the message ID for yak (a Message object or an ID string)."""
    if isinstance(yak, Message):
        # A Message carries its own ID.
        return yak.message_id
    if isinstance(yak, basestring):
        # Already an ID string.
        return yak
    raise TypeError("yak is not Message/string: " + str(yak))
@staticmethod
def _convert_to_peek_id(peek):
    """Return the peek ID for peek.

    peek may be a PeekLocation object or a peek-ID string; anything
    else raises TypeError.
    """
    # If peek is a PeekLocation, use peek's peek_id
    if isinstance(peek, PeekLocation):
        peek_id = peek.peek_id
    # If peek is a string, treat peek as the peek_id
    elif isinstance(peek, basestring):
        peek_id = peek
    # Otherwise, TypeError
    else:
        # Fixed error message: it previously said "Message/string" and
        # lacked the ": " separator used by the sibling converters.
        raise TypeError("peek is not PeekLocation/string: " + str(peek))
    return peek_id
def _get_comment_list(self, raw_data):
    """Build a list of Comment objects from a raw server response."""
    try:
        payload = raw_data.json()["comments"]
        return [Comment(entry, self) for entry in payload]
    except (KeyError, ValueError):
        # Missing key or unparseable JSON => malformed response.
        raise ParsingResponseError("Getting comment list failed", raw_data)
def _get_notification_list(self, raw_data):
    """Build a list of Notification objects from a raw server response."""
    try:
        payload = raw_data.json()["data"]
        return [Notification(entry, self) for entry in payload]
    except (KeyError, ValueError):
        # Missing key or unparseable JSON => malformed response.
        raise ParsingResponseError("Getting notifs failed", raw_data)
def _get_peek_location_list(self, raw_data, categ):
    """Build the PeekLocation objects listed under key categ in raw_data."""
    try:
        payload = raw_data.json()[categ]
        return [PeekLocation(entry, self) for entry in payload]
    except (KeyError, ValueError):
        # Missing key or unparseable JSON => malformed response.
        raise ParsingResponseError("Getting peek list failed", raw_data)
def _get_yak_list(self, raw_data):
    """Return a list of Yaks from a raw server response.

    Raises ParsingResponseError on a malformed response and
    TooCloseToSchoolException when the server flags the location.
    """
    # Re-enabled the response-parsing guard that had been commented out:
    # malformed payloads now raise ParsingResponseError as the sibling
    # _get_*_list helpers do, instead of leaking raw KeyError/ValueError.
    try:
        yaks = [Yak(raw, self) for raw in raw_data.json()["messages"]]
    except (KeyError, ValueError):
        raise ParsingResponseError("Getting Yak list failed", raw_data)
    # If no Yaks, return empty list
    if not yaks:
        return yaks
    # The server signals special conditions via sentinel message IDs
    # in the first entry.
    if yaks[0].message_id == settings.NO_YAKS_MESSAGE_ID:
        # No user-created Yaks in the given area
        return []
    elif yaks[0].message_id == settings.TOO_CLOSE_TO_SCHOOL_MESSAGE_ID:
        # Too close to a school / invalid location
        raise TooCloseToSchoolException("School nearby/invalid location")
    return yaks
def _validate_basecamp(self, basecamp):
    """Guard basecamp operations: error out if requested but never set."""
    if not basecamp:
        return
    if self.basecamp_set:
        return
    raise NoBasecampSetError("Tried to use basecamp when not set")
def register(self, parse_register=True, yikyak_register=True):
    """Register with Parse (if parse_register is True) and Yik Yak (if
    yikyak_register is True).

    Fixed docstring: the old one claimed a True/False return, but the
    method returns None; failures surface via the underlying API calls.
    """
    # Parse registration happens first, matching the original order.
    if parse_register:
        parseapi.register_user(self.user_id)
    if yikyak_register:
        yikyakapi.register_user(self)
def update(self):
    """Refresh Yakarma and basecamp information from the server."""
    raw = yikyakapi.get_messages(self, self.location, basecamp=True)
    # Check if too close to school (raises via _get_yak_list)
    self._get_yak_list(raw)
    # Parse the JSON body once instead of re-parsing it for every field;
    # an unparseable body still surfaces as the Yakarma parse error.
    try:
        data = raw.json()
        self.yakarma = int(data["yakarma"])
    except (KeyError, ValueError):
        raise ParsingResponseError("Getting Yakarma failed", raw)
    try:
        self.basecamp_set = bool(int(data["bcEligible"]))
    except (KeyError, ValueError):
        raise ParsingResponseError("Getting bcEligible failed", raw)
    try:
        latitude = float(data["bcLat"])
        longitude = float(data["bcLong"])
        self.basecamp_name = data["bcName"]
        self.basecamp_location = Location(latitude, longitude)
    except (KeyError, ValueError):
        # Basecamp details are optional; their absence is not an error.
        pass
def get_yak(self, yak):
    """Return the Yak for yak (Message or ID string), or None if absent."""
    raw = yikyakapi.get_message(self, self._convert_to_message_id(yak))
    yaks = self._get_yak_list(raw)
    return yaks[0] if yaks else None
def get_yaks(self, location=None, basecamp=False):
    """Return the Yaks at a location (defaults to the user's own),
    optionally at basecamp."""
    # Fall back to the user's current location when none is given.
    where = location if location else self.location
    self._validate_basecamp(basecamp)
    return self._get_yak_list(yikyakapi.get_messages(self, where, basecamp))
def get_featured_peek_locations(self):
    """Return the featured peek locations near the user."""
    response = yikyakapi.get_messages(self, self.location)
    return self._get_peek_location_list(response, "featuredLocations")
def get_other_peek_locations(self):
    """Return the non-featured ("other") peek locations near the user."""
    response = yikyakapi.get_messages(self, self.location)
    return self._get_peek_location_list(response, "otherLocations")
def get_peek_yaks(self, location):
    """Return the Yaks at a Location or PeekLocation."""
    if isinstance(location, Location):
        # Plain coordinates: fetch via the generic yaks endpoint.
        raw = yikyakapi.yaks(self, location)
    elif isinstance(location, PeekLocation):
        # A named peek spot: fetch by its peek ID.
        raw = yikyakapi.get_peek_messages(self, location.peek_id)
    else:
        raise TypeError("location is not Location or PeekLocation")
    return self._get_yak_list(raw)
def get_top_yaks(self, location=None, basecamp=False):
    """Return the top ("hot") Yaks at location/basecamp."""
    # Fall back to the user's current location when none is given.
    where = location if location else self.location
    self._validate_basecamp(basecamp)
    return self._get_yak_list(yikyakapi.hot(self, where, basecamp))
def get_user_recent_yaks(self):
    """Return the Yaks this user posted recently."""
    return self._get_yak_list(yikyakapi.get_my_recent_yaks(self))
def get_user_recent_commented(self):
    """Return the Yaks this user recently commented on."""
    return self._get_yak_list(yikyakapi.get_my_recent_replies(self))
def get_user_top_yaks(self):
    """Return this user's top Yaks."""
    return self._get_yak_list(yikyakapi.get_my_tops(self))
def get_area_top_yaks(self):
    """Return the top Yaks in the surrounding area."""
    return self._get_yak_list(yikyakapi.get_area_tops(self))
def get_comment(self, comment, yak=None, basecamp=False):
    """Return the matching comment on a Yak, or None if it does not
    exist (optionally at basecamp)."""
    self._validate_basecamp(basecamp)
    comment_id, message_id = self._convert_to_comment_id(comment, yak)
    # Renamed the loop variable so it no longer shadows the parameter.
    for candidate in self.get_comments(message_id, basecamp=basecamp):
        if candidate.comment_id == comment_id:
            return candidate
    return None
def get_comments(self, yak, basecamp=False):
    """Return all comments on a Yak (optionally at basecamp)."""
    self._validate_basecamp(basecamp)
    raw = yikyakapi.get_comments(
        self, self._convert_to_message_id(yak), basecamp)
    return self._get_comment_list(raw)
def upvote(self, message, basecamp=False):
    """Upvote/unupvote a message (Yak or Comment), optionally at
    basecamp. Return True if successful, False if unsuccessful."""
    self._validate_basecamp(basecamp)
    if isinstance(message, Yak):
        return self.upvote_yak(message, basecamp=basecamp)
    elif isinstance(message, Comment):
        return self.upvote_comment(message, basecamp=basecamp)
    else:
        # Fixed misleading message (previously "yak is not Message").
        raise TypeError("message is not Yak/Comment: " + str(message))
def downvote(self, message, basecamp=False):
    """Downvote/undownvote a message (Yak or Comment), optionally at
    basecamp. Return True if successful, False if unsuccessful."""
    self._validate_basecamp(basecamp)
    if isinstance(message, Yak):
        return self.downvote_yak(message, basecamp=basecamp)
    elif isinstance(message, Comment):
        return self.downvote_comment(message, basecamp=basecamp)
    else:
        # Fixed misleading message (previously "yak is not Message").
        raise TypeError("message is not Yak/Comment: " + str(message))
def upvote_yak(self, yak, basecamp=False):
    """Upvote/unupvote a Yak (optionally at basecamp). Return True if
    the liked state changed, False otherwise."""
    self._validate_basecamp(basecamp)
    message_id = self._convert_to_message_id(yak)
    before = self.get_yak(message_id).liked
    yikyakapi.like_message(self, message_id, basecamp)
    self.update()
    after = self.get_yak(message_id).liked
    return after != before
def downvote_yak(self, yak, basecamp=False):
    """Downvote/undownvote a Yak (optionally at basecamp). Return True
    if the liked state changed, False otherwise."""
    self._validate_basecamp(basecamp)
    message_id = self._convert_to_message_id(yak)
    before = self.get_yak(message_id).liked
    yikyakapi.downvote_message(self, message_id, basecamp)
    self.update()
    after = self.get_yak(message_id).liked
    return after != before
def upvote_comment(self, comment, yak=None, basecamp=False):
    """Upvote/unupvote a comment (optionally at basecamp). Return True if
    successful, False if unsuccessful."""
    self._validate_basecamp(basecamp)
    (comment_id, message_id) = self._convert_to_comment_id(comment, yak)
    # BUGFIX: get_comment takes (comment, yak); the arguments were
    # previously passed swapped as (message_id, comment_id), which looked
    # up the wrong thread and the wrong comment.
    liked = self.get_comment(comment_id, message_id).liked
    yikyakapi.like_comment(self, comment_id, basecamp)
    self.update()
    return self.get_comment(comment_id, message_id).liked != liked
def downvote_comment(self, comment, yak=None, basecamp=False):
    """Downvote/undownvote a comment (optionally at basecamp). Return True
    if successful, False if unsuccessful."""
    self._validate_basecamp(basecamp)
    (comment_id, message_id) = self._convert_to_comment_id(comment, yak)
    # BUGFIX: get_comment takes (comment, yak); the arguments were
    # previously passed swapped as (message_id, comment_id), which looked
    # up the wrong thread and the wrong comment.
    liked = self.get_comment(comment_id, message_id).liked
    yikyakapi.downvote_comment(self, comment_id, basecamp)
    self.update()
    return self.get_comment(comment_id, message_id).liked != liked
def report(self, message, reason, basecamp=False):
    """Report a message (Yak or Comment) for reason (optionally at
    basecamp)."""
    self._validate_basecamp(basecamp)
    if isinstance(message, Yak):
        self.report_yak(message, reason, basecamp=basecamp)
    elif isinstance(message, Comment):
        self.report_comment(message, reason, basecamp=basecamp)
    else:
        # Fixed misleading message (previously "yak is not Message").
        raise TypeError("message is not Yak/Comment: " + str(message))
def report_yak(self, yak, reason, basecamp=False):
    """Report a Yak for the given reason (optionally at basecamp)."""
    self._validate_basecamp(basecamp)
    yikyakapi.report_message(
        self, self._convert_to_message_id(yak), reason, basecamp)
def report_comment(self, comment, reason, yak=None, basecamp=False):
    """Report a comment for the given reason (optionally at basecamp)."""
    self._validate_basecamp(basecamp)
    comment_id, message_id = self._convert_to_comment_id(comment, yak)
    yikyakapi.report_comment(self, comment_id, message_id, reason,
                             basecamp)
def delete(self, message, basecamp=False):
"""Delete a message (Yak/comment, optionally at basecamp). Return True
if successful, False if unsuccessful"""
self._validate_basecamp(basecamp)
if isinstance(message, Yak):
return self.delete_yak(message, basecamp=basecamp)
elif isinstance(message, Comment):
| |
the component
comment : str; default=''
a comment for the card
"""
aecomp = AECOMP(name, list_type, lists, comment=comment)
self._add_aecomp_object(aecomp)
return aecomp
def add_aecompl(self, name: str, labels: List[str], comment: str='') -> AECOMPL:
    """
    Creates an AECOMPL card.

    Parameters
    ----------
    name : str
        the name of the component
    labels : List[str, str, ...]; str
        A string of 8 characters referring to the names of other components
        defined by either AECOMP or other AECOMPL entries.
    comment : str; default=''
        a comment for the card
    """
    card = AECOMPL(name, labels, comment=comment)
    # AECOMPL entries are stored in the same container as AECOMP entries.
    self._add_aecomp_object(card)
    return card
def add_aestat(self, aestat_id: int, label: str, comment: str='') -> AESTAT:
    """
    Creates an AESTAT card, which defines a variable to be used in a
    TRIM analysis.

    Parameters
    ----------
    aestat_id : int
        unique id
    label : str
        name for the id
    comment : str; default=''
        a comment for the card
    """
    card = AESTAT(aestat_id, label, comment=comment)
    self._add_aestat_object(card)
    return card
def add_aelink(self, aelink_id: int, label: str,
               independent_labels: List[str], linking_coefficients: List[float],
               comment: str='') -> AELINK:
    """
    Creates an AELINK card, which defines an equation linking
    AESTAT and AESURF cards.

    Parameters
    ----------
    aelink_id : int
        unique id
    label : str
        name of the dependent AESURF card
    independent_labels : List[str, ..., str]
        name for the independent variables (AESTATs)
    linking_coefficients : List[float]
        linking coefficients
    comment : str; default=''
        a comment for the card
    """
    card = AELINK(aelink_id, label, independent_labels,
                  linking_coefficients, comment=comment)
    self._add_aelink_object(card)
    return card
def add_aelist(self, sid: int, elements: List[int], comment: str='') -> AELIST:
    """
    Creates an AELIST card, which defines the aero boxes for
    an AESURF/SPLINEx.

    Parameters
    ----------
    sid : int
        unique id
    elements : List[int, ..., int]
        list of box ids
    comment : str; default=''
        a comment for the card
    """
    card = AELIST(sid, elements, comment=comment)
    self._add_aelist_object(card)
    return card
def add_aefact(self, sid: int, fractions: List[float], comment: str='') -> AEFACT:
    """
    Creates an AEFACT card, which is used by the CAEROx / PAEROx card
    to adjust the spacing of the sub-paneling (and grid point
    paneling in the case of the CAERO3).

    Parameters
    ----------
    sid : int
        unique id
    fractions : List[float, ..., float]
        list of percentages
    comment : str; default=''
        a comment for the card
    """
    # Type annotations added for consistency with the sibling add_* methods.
    aefact = AEFACT(sid, fractions, comment=comment)
    self._add_aefact_object(aefact)
    return aefact
def add_diverg(self, sid: int, nroots: int, machs: List[float], comment: str='') -> DIVERG:
"""
Creates an DIVERG card, which is used in divergence
analysis (SOL 144).
Parameters
----------
sid : int
The name
nroots : int
the number of roots
machs : List[float, ..., float]
list of Mach numbers
comment : str; default=''
a comment for the card
"""
diverg = DIVERG(sid, nroots, machs, comment=comment)
self._add_diverg_object(diverg)
return diverg
def add_csschd(self, sid, aesid, lschd, lalpha=None, lmach=None,
comment='') -> CSSCHD:
"""
Creates an CSSCHD card, which defines a specified control surface
deflection as a function of Mach and alpha (used in SOL 144/146).
Parameters
----------
sid : int
the unique id
aesid : int
the control surface (AESURF) id
lalpha : int; default=None
the angle of attack profile (AEFACT) id
lmach : int; default=None
the mach profile (AEFACT) id
lschd : int; default=None
the control surface deflection profile (AEFACT) id
comment : str; default=''
a comment for the card
"""
csschd = CSSCHD(sid, aesid, lschd, lalpha=lalpha, lmach=lmach,
comment=comment)
self._add_csschd_object(csschd)
return csschd
def add_aesurf(self, aesid, label, cid1, alid1, cid2=None, alid2=None,
eff=1.0, ldw='LDW', crefc=1.0, crefs=1.0,
pllim=-np.pi/2., pulim=np.pi/2.,
hmllim=None, hmulim=None, # hinge moment lower/upper limits
tqllim=None, tqulim=None, # TABLEDi deflection limits vs. dynamic pressure
comment='') -> AESURF:
"""
Creates an AESURF card, which defines a control surface
Parameters
----------
aesid : int
controller number
label : str
controller name
cid1 / cid2 : int / None
coordinate system id for primary/secondary control surface
alid1 / alid2 : int / None
AELIST id for primary/secondary control surface
eff : float; default=1.0
Control surface effectiveness
ldw : str; default='LDW'
Linear downwash flag; ['LDW', 'NODLW']
crefc : float; default=1.0
reference chord for the control surface
crefs : float; default=1.0
reference area for the control surface
pllim / pulim : float; default=-pi/2 / pi/2
Lower/Upper deflection limits for the control surface in radians
hmllim / hmulim : float; default=None
Lower/Upper hinge moment limits for the control surface in
force-length units
tqllim / tqulim : int; default=None
Set identification numbers of TABLEDi entries that provide the
lower/upper deflection limits for the control surface as a
function of the dynamic pressure
comment : str; default=''
a comment for the card
"""
aesurf = AESURF(aesid, label, cid1, alid1, cid2=cid2, alid2=alid2,
eff=eff, ldw=ldw, crefc=crefc, crefs=crefs,
pllim=pllim, pulim=pulim,
hmllim=hmllim, hmulim=hmulim,
tqllim=tqllim, tqulim=tqulim, comment=comment)
self._add_aesurf_object(aesurf)
return aesurf
def add_aesurfs(self, aesid, label, list1, list2, comment='') -> AESURFS:
"""
Creates an AESURFS card
Parameters
----------
aesid : int
the unique id
label : str
the AESURF name
list1 / list2 : int / None
the list (SET1) of node ids for the primary/secondary
control surface(s) on the AESURF card
comment : str; default=''
a comment for the card
"""
aesurfs = AESURFS(aesid, label, list1, list2, comment=comment)
self._add_aesurfs_object(aesurfs)
return aesurfs
def add_aeparm(self, aeparm_id, label, units, comment='') -> AEPARM:
"""
Creates an AEPARM card, which defines a new trim variable.
Parameters
----------
aeparm_id : int
the unique id
label : str
the variable name
units : str
unused by Nastran
comment : str; default=''
a comment for the card
"""
aeparm = AEPARM(aeparm_id, label, units, comment=comment)
self._add_aeparm_object(aeparm)
return aeparm
def add_dtable(self, default_values: Dict[str, float], comment='') -> DTABLE:
"""
Creates a DTABLE card
Parameters
----------
default_values : dict
key : str
the parameter name
value : float
the value
comment : str; default=''
a comment for the card
"""
dtable = DTABLE(default_values, comment=comment)
self._add_dtable_object(dtable)
return dtable
def add_tabled1(self, tid: int,
x: np.ndarray, y: np.ndarray,
xaxis: str='LINEAR', yaxis: str='LINEAR', extrap: int=0,
comment: str='') -> TABLED1:
"""
Creates a TABLED1, which is a dynamic load card that is applied
by the DAREA card
Parameters
----------
tid : int
table id
x : List[float]
nvalues
y : List[float]
nvalues
xaxis : str
LINEAR, LOG
yaxis : str
LINEAR, LOG
extrap : int; default=0
Extrapolation method:
0 : linear
1 : constant
.. note:: this is NX specific
comment : str; default=''
a comment for the card
"""
table = TABLED1(tid, x, y, xaxis=xaxis, yaxis=yaxis,
extrap=extrap, comment=comment)
self._add_tabled_object(table)
return table
def add_tabled2(self, tid: int, x1: float,
x: np.ndarray, y: np.ndarray,
extrap: int=0, comment: str='') -> TABLED2:
"""Creates a TABLED2 card"""
table = TABLED2(tid, x1, x, y, extrap=extrap, comment=comment)
self._add_tabled_object(table)
return table
def add_tabled3(self, tid: int, x1: float, x2: float,
x: np.ndarray, y: np.ndarray,
extrap: int=0, comment: str='') -> TABLED3:
"""Creates a TABLED3 card"""
table = TABLED3(tid, x1, x2, x, y, extrap=extrap, comment=comment)
self._add_tabled_object(table)
return table
def add_tabled4(self, tid: int,
x1: float, x2: float, x3: float, x4: float,
a: List[float], comment: str='') -> TABLED4:
"""Creates a TABLED4 card"""
table = TABLED4(tid, x1, x2, x3, x4, a, comment=comment)
self._add_tabled_object(table)
return table
def add_tablem1(self, tid: int, x: np.ndarray, y: np.ndarray,
xaxis: str='LINEAR', yaxis: str='LINEAR',
extrap: int=0, comment: str='') -> TABLEM1:
"""Creates a TABLEM1 card"""
table = TABLEM1(tid, x, y, xaxis=xaxis, yaxis=yaxis, comment=comment)
self._add_tablem_object(table)
return table
def add_tablem2(self, tid: int, x1: float,
x: np.ndarray, y: np.ndarray,
extrap: int=0, comment: str='') -> TABLEM2:
"""Creates a TABLEM2 card"""
table = TABLEM2(tid, x1, x, y, extrap=extrap, comment=comment)
self._add_tablem_object(table)
return table
def add_tablem3(self, tid: int, x1: float, x2: float,
x: np.ndarray, y: np.ndarray,
extrap: int=0, comment: str='') -> TABLEM3:
"""Creates a TABLEM3 card"""
table = TABLEM3(tid, x1, x2, x, y, extrap=extrap, comment=comment)
self._add_tablem_object(table)
return table
def add_tablem4(self, tid: int,
x1: float, x2: float, x3: float, x4: float,
a: List[float], comment: str='') -> TABLEM4:
"""Creates a TABLEM4 card"""
table = TABLEM4(tid, x1, x2, x3, x4, a, comment=comment)
self._add_tablem_object(table)
return table
def add_tables1(self, tid: int, x: np.ndarray, y: np.ndarray,
Type: int=1, comment: str='') -> TABLES1:
"""
Adds a TABLES1 card, which defines a stress dependent material
Parameters
----------
tid : int
Table ID
Type : int; default=1
Type of | |
]
:param ssh_auth_map: SSH authentication information mapped to host names. Useful for complex SSH Proxy cases.
:type ssh_auth_map: typing.Optional[typing.Union[typing.Dict[str, ssh_auth.SSHAuth], ssh_auth.SSHAuthMapping]]
:param keepalive: keepalive period
:type keepalive: typing.Union[int, bool]
:return: new ssh client instance using current as a proxy
:rtype: SSHClientBase
.. note:: auth has priority over username/password
.. versionadded:: 6.0.0
"""
if isinstance(ssh_config, _ssh_helpers.HostsSSHConfigs):
parsed_ssh_config: _ssh_helpers.HostsSSHConfigs = ssh_config
else:
parsed_ssh_config = _ssh_helpers.parse_ssh_config(ssh_config, host)
hostname = parsed_ssh_config[host].hostname
sock: paramiko.Channel = self._get_proxy_channel(port=port, ssh_config=parsed_ssh_config[hostname])
cls: type[SSHClientBase] = self.__class__
return cls(
host=host,
port=port,
username=username,
password=password,
auth=auth,
verbose=verbose,
ssh_config=ssh_config,
sock=sock,
ssh_auth_map=ssh_auth_map if ssh_auth_map is not None else self.__auth_mapping,
keepalive=int(keepalive),
)
def execute_through_host(
self,
hostname: str,
command: CommandT,
*,
auth: ssh_auth.SSHAuth | None = None,
port: int | None = None,
verbose: bool = False,
timeout: OptionalTimeoutT = constants.DEFAULT_TIMEOUT,
stdin: OptionalStdinT = None,
open_stdout: bool = True,
log_stdout: bool = True,
open_stderr: bool = True,
log_stderr: bool = True,
log_mask_re: LogMaskReT = None,
get_pty: bool = False,
width: int = 80,
height: int = 24,
) -> exec_result.ExecResult:
"""Execute command on remote host through currently connected host.
:param hostname: target hostname
:type hostname: str
:param command: Command for execution
:type command: typing.Union[str, typing.Iterable[str]]
:param auth: credentials for target machine
:type auth: typing.Optional[ssh_auth.SSHAuth]
:param port: target port
:type port: typing.Optional[int]
:param verbose: Produce log.info records for command call and output
:type verbose: bool
:param timeout: Timeout for command execution.
:type timeout: typing.Union[int, float, None]
:param stdin: pass STDIN text to the process
:type stdin: typing.Union[bytes, str, bytearray, None]
:param open_stdout: open STDOUT stream for read
:type open_stdout: bool
:param log_stdout: log STDOUT during read
:type log_stdout: bool
:param open_stderr: open STDERR stream for read
:type open_stderr: bool
:param log_stderr: log STDERR during read
:type log_stderr: bool
:param log_mask_re: regex lookup rule to mask command for logger.
all MATCHED groups will be replaced by '<*masked*>'
:type log_mask_re: typing.Optional[str]
:param get_pty: open PTY on target machine
:type get_pty: bool
:param width: PTY width
:type width: int
:param height: PTY height
:type height: int
:return: Execution result
:rtype: ExecResult
:raises ExecHelperTimeoutError: Timeout exceeded
.. versionchanged:: 1.2.0 default timeout 1 hour
.. versionchanged:: 1.2.0 log_mask_re regex rule for masking cmd
.. versionchanged:: 3.2.0 Expose pty options as optional keyword-only arguments
.. versionchanged:: 4.0.0 Expose stdin and log_mask_re as optional keyword-only arguments
.. versionchanged:: 6.0.0 Move channel open to separate method and make proper ssh-proxy usage
.. versionchanged:: 6.0.0 only hostname and command are positional argument, target_port changed to port.
.. versionchanged:: 7.0.0 target_port argument removed
"""
conn: SSHClientBase
if auth is None:
auth = self.auth
with self.proxy_to(
host=hostname,
port=port,
auth=auth,
verbose=verbose,
ssh_config=self.ssh_config,
keepalive=False,
) as conn:
return conn(
command,
timeout=timeout,
stdin=stdin,
open_stdout=open_stdout,
log_stdout=log_stdout,
open_stderr=open_stderr,
log_stderr=log_stderr,
log_mask_re=log_mask_re,
get_pty=get_pty,
width=width,
height=height,
)
@classmethod
def execute_together(
cls,
remotes: typing.Iterable[SSHClientBase],
command: CommandT,
timeout: OptionalTimeoutT = constants.DEFAULT_TIMEOUT,
expected: ExpectedExitCodesT = (proc_enums.EXPECTED,),
raise_on_err: bool = True,
*,
stdin: OptionalStdinT = None,
open_stdout: bool = True,
open_stderr: bool = True,
chroot_path: str | None = None,
verbose: bool = False,
log_mask_re: LogMaskReT = None,
exception_class: type[exceptions.ParallelCallProcessError] = exceptions.ParallelCallProcessError,
**kwargs: typing.Any,
) -> dict[tuple[str, int], exec_result.ExecResult]:
"""Execute command on multiple remotes in async mode.
:param remotes: Connections to execute on
:type remotes: typing.Iterable[SSHClientBase]
:param command: Command for execution
:type command: typing.Union[str, typing.Iterable[str]]
:param timeout: Timeout for command execution.
:type timeout: typing.Union[int, float, None]
:param expected: expected return codes (0 by default)
:type expected: typing.Iterable[typing.Union[int, proc_enums.ExitCodes]]
:param raise_on_err: Raise exception on unexpected return code
:type raise_on_err: bool
:param stdin: pass STDIN text to the process
:type stdin: typing.Union[bytes, str, bytearray, None]
:param open_stdout: open STDOUT stream for read
:type open_stdout: bool
:param open_stderr: open STDERR stream for read
:type open_stderr: bool
:param chroot_path: chroot path override
:type chroot_path: typing.Optional[str]
:param verbose: produce verbose log record on command call
:type verbose: bool
:param log_mask_re: regex lookup rule to mask command for logger.
all MATCHED groups will be replaced by '<*masked*>'
:type log_mask_re: typing.Optional[str]
:param exception_class: Exception to raise on error. Mandatory subclass of exceptions.ParallelCallProcessError
:type exception_class: typing.Type[exceptions.ParallelCallProcessError]
:param kwargs: additional parameters for execute_async call.
:type kwargs: typing.Any
:return: dictionary {(hostname, port): result}
:rtype: typing.Dict[typing.Tuple[str, int], exec_result.ExecResult]
:raises ParallelCallProcessError: Unexpected any code at lest on one target
:raises ParallelCallExceptionsError: At lest one exception raised during execution (including timeout)
.. versionchanged:: 1.2.0 default timeout 1 hour
.. versionchanged:: 1.2.0 log_mask_re regex rule for masking cmd
.. versionchanged:: 3.2.0 Exception class can be substituted
.. versionchanged:: 3.4.0 Expected is not optional, defaults os dependent
.. versionchanged:: 4.0.0 Expose stdin and log_mask_re as optional keyword-only arguments
"""
def get_result(remote: SSHClientBase) -> exec_result.ExecResult:
"""Get result from remote call.
:param remote: SSH connection instance
:return: execution result
:raises ExecHelperTimeoutError: Timeout exceeded
"""
# pylint: disable=protected-access
cmd_for_log: str = remote._mask_command(cmd=cmd, log_mask_re=log_mask_re)
remote._log_command_execute(
command=cmd,
log_mask_re=log_mask_re,
log_level=log_level,
chroot_path=chroot_path,
**kwargs,
)
# pylint: enable=protected-access
with remote.open_execute_context(
cmd,
stdin=stdin,
open_stdout=open_stdout,
open_stderr=open_stderr,
chroot_path=chroot_path,
timeout=timeout,
**kwargs,
) as async_result:
done = async_result.interface.status_event.wait(timeout)
res = exec_result.ExecResult(cmd=cmd_for_log, stdin=stdin, started=async_result.started)
res.read_stdout(src=async_result.stdout)
res.read_stderr(src=async_result.stderr)
if done:
res.exit_code = async_result.interface.recv_exit_status()
return res
result.set_timestamp()
wait_err_msg: str = _log_templates.CMD_WAIT_ERROR.format(result=res, timeout=timeout)
remote.logger.debug(wait_err_msg)
raise exceptions.ExecHelperTimeoutError(result=res, timeout=timeout) # type: ignore[arg-type]
prep_expected: typing.Sequence[ExitCodeT] = proc_enums.exit_codes_to_enums(expected)
log_level: int = logging.INFO if verbose else logging.DEBUG
cmd = _helpers.cmd_to_string(command)
results: dict[tuple[str, int], exec_result.ExecResult] = {}
errors: dict[tuple[str, int], exec_result.ExecResult] = {}
raised_exceptions: dict[tuple[str, int], Exception] = {}
not_done: set[concurrent.futures.Future[exec_result.ExecResult]]
with concurrent.futures.ThreadPoolExecutor(thread_name_prefix="exec-helpers_ssh_multiple_poll_") as executor:
futures: dict[SSHClientBase, concurrent.futures.Future[exec_result.ExecResult]] = {
remote: executor.submit(get_result, remote) for remote in set(remotes)
} # Use distinct remotes
_done, not_done = concurrent.futures.wait(futures.values(), timeout=timeout)
for fut in not_done: # pragma: no cover
fut.cancel()
for remote, future in futures.items():
try:
result = future.result(timeout=0.1)
results[(remote.hostname, remote.port)] = result
if result.exit_code not in prep_expected:
errors[(remote.hostname, remote.port)] = result
except Exception as e:
raised_exceptions[(remote.hostname, remote.port)] = e
if raised_exceptions: # always raise
raise exceptions.ParallelCallExceptionsError(
command=cmd,
exceptions=raised_exceptions,
errors=errors,
results=results,
expected=prep_expected,
)
if errors and raise_on_err:
raise exception_class(cmd, errors, results, expected=prep_expected)
return results
def open(self, path: SupportPathT, mode: str = "r") -> paramiko.SFTPFile:
"""Open file on remote using SFTP session.
:param path: filesystem object path
:type path: typing.Union[str, pathlib.PurePath]
:param mode: open file mode ('t' is not supported)
:type mode: str
:return: file.open() stream
:rtype: paramiko.SFTPFile
"""
return self._sftp.open(pathlib.PurePath(path).as_posix(), mode) # pragma: no cover
def exists(self, path: SupportPathT) -> bool:
"""Check for file existence using SFTP session.
:param path: filesystem object path
:type path: typing.Union[str, pathlib.PurePath]
:return: path is valid (object exists)
:rtype: bool
"""
try:
self._sftp.lstat(pathlib.PurePath(path).as_posix())
return True
except OSError:
return False
def stat(self, path: SupportPathT) -> paramiko.sftp_attr.SFTPAttributes:
"""Get stat info for path with following symlinks.
:param path: filesystem object path
:type path: typing.Union[str, pathlib.PurePath]
:return: stat like information for remote path
:rtype: paramiko.sftp_attr.SFTPAttributes
"""
return self._sftp.stat(pathlib.PurePath(path).as_posix()) # pragma: no cover
def utime(self, path: SupportPathT, times: tuple[int, int] | None = None) -> None:
"""Set atime, mtime.
:param path: filesystem object path
:type path: typing.Union[str, pathlib.PurePath]
:param times: (atime, mtime)
:type times: typing.Optional[typing.Tuple[int, int]]
.. versionadded:: 1.0.0
"""
self._sftp.utime(pathlib.PurePath(path).as_posix(), times) # pragma: no cover
def isfile(self, path: SupportPathT) -> bool:
"""Check, that path is file using SFTP session.
:param path: remote path to validate
:type path: typing.Union[str, pathlib.PurePath]
:return: path is file
:rtype: bool
"""
try:
attrs: paramiko.sftp_attr.SFTPAttributes = self._sftp.lstat(pathlib.PurePath(path).as_posix())
if attrs.st_mode is None:
return False
return stat.S_ISREG(attrs.st_mode)
except (TypeError, OSError):
return False
def isdir(self, path: SupportPathT) -> bool:
"""Check, that path is directory using SFTP session.
:param path: remote path to validate
:type path: typing.Union[str, pathlib.PurePath]
:return: path is directory
:rtype: bool
"""
try:
attrs: paramiko.sftp_attr.SFTPAttributes = self._sftp.lstat(pathlib.PurePath(path).as_posix())
if attrs.st_mode is None:
return False
return stat.S_ISDIR(attrs.st_mode)
except (TypeError, OSError):
return False
def islink(self, path: SupportPathT) -> bool:
"""Check, that path is symlink using SFTP session.
:param path: remote path to validate
:type path: typing.Union[str, pathlib.PurePath]
:return: path is symlink
:rtype: bool
"""
try:
attrs: paramiko.sftp_attr.SFTPAttributes = self._sftp.lstat(pathlib.PurePath(path).as_posix())
if attrs.st_mode is None:
return False
return stat.S_ISLNK(attrs.st_mode)
except (TypeError, OSError):
return False
def symlink(self, source: SupportPathT, dest: SupportPathT) -> None:
"""Produce symbolic link like `os.symlink`.
:param source: source path
:type source: typing.Union[str, pathlib.PurePath]
| |
lane.
return None
if len(lanes) > 1:
# Multiple root lanes for a patron indicates a
# configuration problem, but we shouldn't make the patron
# pay the price -- just pick the first one.
logging.error(
"Multiple root lanes found for patron type %s.",
self.external_type
)
return lanes[0]
def work_is_age_appropriate(self, work_audience, work_target_age):
"""Is the given audience and target age an age-appropriate match for this Patron?
NOTE: What "age-appropriate" means depends on some policy questions
that have not been answered and may be library-specific. For
now, it is determined by comparing audience and target age to that of the
Patron's root lane.
This is designed for use when reasoning about works in
general. If you have a specific Work in mind, use
`Work.age_appropriate_for_patron`.
:param work_audience: One of the audience constants from
Classifier, representing the general reading audience to
which a putative work belongs.
:param work_target_age: A number or 2-tuple representing the target age
or age range of a putative work.
:return: A boolean
"""
root = self.root_lane
if not root:
# The patron has no root lane. They can interact with any
# title.
return True
# The patron can interact with a title if any of the audiences
# in their root lane (in conjunction with the root lane's target_age)
# are a match for the title's audience and target age.
return any(
self.age_appropriate_match(
work_audience, work_target_age,
audience, root.target_age
)
for audience in root.audiences
)
    @classmethod
    def age_appropriate_match(
        cls, work_audience, work_target_age,
        reader_audience, reader_age
    ):
        """Match the audience and target age of a work with that of a reader,
        and see whether they are an age-appropriate match.

        NOTE: What "age-appropriate" means depends on some policy
        questions that have not been answered and may be
        library-specific. For now, non-children's books are
        age-inappropriate for young children, and children's books are
        age-inappropriate for children too young to be in the book's
        target age range.

        :param work_audience: One of the audience constants from
            Classifier, representing the general reading audience to
            which the work belongs.
        :param work_target_age: A number or 2-tuple representing the
            target age or age range of the work.
        :param reader_audience: One of the audience constants from
            Classifier, representing the general reading audience to
            which the reader belongs.
        :param reader_age: A number or 2-tuple representing the age or
            age range of the reader.
        :return: A boolean
        """
        if reader_audience is None:
            # A patron with no particular audience restrictions
            # can see everything.
            #
            # This is by far the most common case, so we don't set up
            # logging until after running it.
            return True
        log = logging.getLogger("Age-appropriate match calculator")
        log.debug(
            "Matching work %s/%s to reader %s/%s" % (
                work_audience, work_target_age,
                reader_audience, reader_age
            )
        )
        if reader_audience not in Classifier.AUDIENCES_JUVENILE:
            log.debug("A non-juvenile patron can see everything.")
            return True
        if work_audience == Classifier.AUDIENCE_ALL_AGES:
            log.debug("An all-ages book is always age appropriate.")
            return True
        # At this point we know that the patron is a juvenile.
        def ensure_tuple(x):
            # Convert a potential NumericRange into a tuple.
            if isinstance(x, NumericRange):
                x = numericrange_to_tuple(x)
            return x
        reader_age = ensure_tuple(reader_age)
        if isinstance(reader_age, tuple):
            # A range was passed in rather than a specific age. Assume
            # the reader is at the top edge of the range.
            ignore, reader_age = reader_age
        work_target_age = ensure_tuple(work_target_age)
        if isinstance(work_target_age, tuple):
            # Pick the _bottom_ edge of a work's target age range --
            # the work is appropriate for anyone _at least_ that old.
            work_target_age, ignore = work_target_age
        # A YA reader is treated as an adult (with no reading
        # restrictions) if they have no associated age range, or their
        # age range includes ADULT_AGE_CUTOFF.
        if (reader_audience == Classifier.AUDIENCE_YOUNG_ADULT
            and (reader_age is None
                 or reader_age >= Classifier.ADULT_AGE_CUTOFF)):
            log.debug("YA reader to be treated as an adult.")
            return True
        # There are no other situations where a juvenile reader can access
        # non-juvenile titles.
        if work_audience not in Classifier.AUDIENCES_JUVENILE:
            log.debug("Juvenile reader cannot access non-juvenile title.")
            return False
        # At this point we know we have a juvenile reader and a
        # juvenile book.
        # NOTE(review): the parentheses around AUDIENCES_JUVENILE members
        # below do not create tuples (no trailing comma) -- `in` tests
        # membership in the AUDIENCES_YOUNG_CHILDREN collection itself.
        if (reader_audience == Classifier.AUDIENCE_YOUNG_ADULT
            and work_audience in (Classifier.AUDIENCES_YOUNG_CHILDREN)):
            log.debug("YA reader can access any children's title.")
            return True
        if (reader_audience in (Classifier.AUDIENCES_YOUNG_CHILDREN)
            and work_audience == Classifier.AUDIENCE_YOUNG_ADULT):
            log.debug("Child reader cannot access any YA title.")
            return False
        # At this point we either have a YA patron with a YA book, or
        # a child patron with a children's book. It comes down to a
        # question of the reader's age vs. the work's target age.
        if work_target_age is None:
            # This is a generic children's or YA book with no
            # particular target age. Assume it's age appropriate.
            log.debug(
                "Juvenile book with no target age is presumed age-appropriate."
            )
            return True
        if reader_age is None:
            # We have no idea how old the patron is, so any work with
            # the appropriate audience is considered age-appropriate.
            log.debug(
                "Audience matches, and no specific patron age information available: presuming age-appropriate."
            )
            return True
        if reader_age < work_target_age:
            # The audience for this book matches the patron's
            # audience, but the book has a target age that is too high
            # for the reader.
            log.debug(
                "Audience matches, but work's target age is too high for reader."
            )
            return False
        log.debug("Both audience and target age match; it's age-appropriate.")
        return True
# Composite indexes so that patron lookups scoped to a single library
# (library_id + one of the patron identifiers) can be served by an index.
Index("ix_patron_library_id_external_identifier", Patron.library_id, Patron.external_identifier)
Index("ix_patron_library_id_authorization_identifier", Patron.library_id, Patron.authorization_identifier)
Index("ix_patron_library_id_username", Patron.library_id, Patron.username)
# A Loan ties a patron (or an external integration client) to a
# LicensePool for a period of time.
class Loan(Base, LoanAndHoldMixin):
    __tablename__ = 'loans'
    id = Column(Integer, primary_key=True)
    # The borrower: either a local patron or an external integration client.
    patron_id = Column(Integer, ForeignKey('patrons.id'), index=True)
    integration_client_id = Column(Integer, ForeignKey('integrationclients.id'), index=True)
    # A Loan is always associated with a LicensePool.
    license_pool_id = Column(Integer, ForeignKey('licensepools.id'), index=True)
    # It may also be associated with an individual License if the source
    # provides information about individual licenses.
    license_id = Column(Integer, ForeignKey('licenses.id'), index=True, nullable=True)
    # FK into licensepooldeliveries -- presumably the delivery mechanism
    # chosen at fulfillment time; verify against callers.
    fulfillment_id = Column(Integer, ForeignKey('licensepooldeliveries.id'))
    start = Column(DateTime, index=True)
    end = Column(DateTime, index=True)
    # Some distributors (e.g. Feedbooks) may have an identifier that can
    # be used to check the status of a specific Loan.
    external_identifier = Column(Unicode, unique=True, nullable=True)
    # A patron can hold at most one loan per LicensePool.
    __table_args__ = (
        UniqueConstraint('patron_id', 'license_pool_id'),
    )
    def until(self, default_loan_period):
        """Give or estimate the time at which the loan will end.

        :param default_loan_period: Added to the start datetime when the
            loan has no explicit end (presumably a timedelta -- confirm
            against callers); ``None`` means the loan lasts forever.
        """
        if self.end:
            return self.end
        if default_loan_period is None:
            # This loan will last forever.
            return None
        # NOTE(review): utcnow() is naive UTC; this assumes stored
        # datetimes are naive UTC as well -- confirm.
        start = self.start or datetime.datetime.utcnow()
        return start + default_loan_period
class Hold(Base, LoanAndHoldMixin):
"""A patron is in line to check out a book.
"""
__tablename__ = 'holds'
id = Column(Integer, primary_key=True)
patron_id = Column(Integer, ForeignKey('patrons.id'), index=True)
integration_client_id = Column(Integer, ForeignKey('integrationclients.id'), index=True)
license_pool_id = Column(Integer, ForeignKey('licensepools.id'), index=True)
start = Column(DateTime, index=True)
end = Column(DateTime, index=True)
position = Column(Integer, index=True)
external_identifier = Column(Unicode, unique=True, nullable=True)
@classmethod
def _calculate_until(
self, start, queue_position, total_licenses, default_loan_period,
default_reservation_period):
"""Helper method for `Hold.until` that can be tested independently.
We have to wait for the available licenses to cycle a
certain number of times before we get a turn.
Example: 4 licenses, queue position 21
After 1 cycle: queue position 17
2 : queue position 13
3 : queue position 9
4 : queue position 5
5 : queue position 1
6 : available
The worst-case cycle time is the loan period plus the reservation
period.
"""
if queue_position == 0:
# The book is currently reserved to this patron--they need
# to hurry up and check it out.
return start + default_reservation_period
if total_licenses == 0:
# The book will never be available
return None
# If you are at the very front of the queue, the worst case
# time to get the book is is the time it takes for the person
# in front of you to get a reservation notification, borrow
# the book at the last minute, and keep the book for the
# maximum allowable time.
cycle_period = (default_reservation_period + default_loan_period)
# This will happen at least once.
cycles = 1
if queue_position <= total_licenses:
# But then the book will be available to you.
pass
else:
# This will happen more than once. After the first cycle,
# other people will be notified that it's their turn,
# they'll wait a while, get a reservation, and then keep
# the book for a while, and so on.
cycles += queue_position | |
# uncompyle6 version 3.7.4
# Python bytecode 3.5 (3350)
# Decompiled from: Python 3.8.5 (default, Jan 27 2021, 15:41:15)
# [GCC 9.3.0]
# Embedded file name: /home/docker/CSN/bin/gpu_ccsn.py
# Compiled at: 2021-03-31 16:15:26
# Size of source mod 2**32: 19699 bytes
"""
python version of csn algorithm
https://github.com/wys8c764/CSN
"""
import os, argparse, logging, pandas as pd, numpy as np
from scipy import sparse
from scipy import stats
import sys
sys.path.append('.')
import useful_functions as uf
def condition_g(adjmc, kk=50, dlimit=5):
    """Select conditional genes: degree >= ``dlimit``, top ``kk`` by degree.

    Parameters:
        adjmc : 2-D array-like adjacency matrix, rows = genes.
        kk : number of conditional genes to return (default 50).
        dlimit : minimum degree a gene must have to qualify (default 5).

    Returns:
        list of at most ``kk`` row indices of ``adjmc`` (original gene
        indices), ordered by descending degree.
    """
    degree = np.sum(adjmc, axis=1)
    eligible = np.argwhere(degree >= dlimit).flatten()
    # BUG FIX: the original returned positions *within* the eligible
    # subset (indices into the filtered array), not indices of adjmc
    # rows as the docstring promises. Map the sort order back through
    # ``eligible`` so callers receive real gene indices.
    order = np.argsort(degree[eligible])[::-1]
    return eligible[order[:kk]].tolist()
def get_data(csv):
    """Load an expression table: comma-separated for *.csv paths,
    tab-separated otherwise. First column is the index, first row the header."""
    separator = ',' if str(csv).endswith('csv') else '\t'
    return pd.read_csv(csv, index_col=0, header=0, sep=separator)
class SSN:
"""Construction of cell-specific networks
    Model construction uses all of the sample data; later prediction runs the
    transformation on the integrated full table, but only outputs the network
    and degree matrix for a small set of samples (cells.list).
    Matrix fusion is performed at the dblur-features level.
The function performs the transformation from gene expression matrix to cell-specific network (csn).
This is a groups style docs.
Parameters:
`data` Gene expression matrix, rows = genes, columns = cells
Returns: None
Raises: KeyError - raises an exception
"""
def __init__(self, data, outdir='./', log=None):
"""
default values when initialize. set log file
"""
self.outdir = outdir
self.tablename = data
uf.create_dir(self.outdir)
self.log = os.path.join(self.outdir, log) if log else os.path.join(self.outdir, '{}_{}.log'.format(os.path.basename(data), uf.now()))
self.logger = uf.create_logger(self.log)
self.logger.info('start reading data from {}, log file is {}'.format(data, self.log))
df = get_data(data)
self.data = df.loc[(df.sum(axis=1) != 0, df.sum(axis=0) != 0)]
self.csn = None
self.logger.info('finish reading data from {}'.format(data))
@uf.robust
def get_cells(self, cells=None):
"""
Get cells in list format
Parameters:
file cells.list
Returns:
cells in list format
Raises:
KeyError - raises an exception
"""
if not cells:
cells = list(self.data.columns)
else:
if isinstance(cells, list):
cells = cells
else:
if os.access(cells, os.R_OK):
cells = [cell.strip() for cell in open(cells).readlines()]
else:
print('cells must be list or file with one column')
return cells
@uf.robust
def csnet(self, cells=None, alpha=0.01, boxsize=0.1, edgeW=0, kk=0, dlimit=5, to_csv=0, average=1, *args, **kwargs):
"""
fcndm = cndm(data, 0.1, 0.1, 1) for test
Construct the CSN for sepecified cells
Parameters:
`cells` Construct the CSNs for all cells, set cells = None (Default) otherwise input cells.list
`alpha` Significant level (eg. 0.001, 0.01, 0.05 ...)
larger alpha leads to more edges, Default = 0.01
`boxsize` Size of neighborhood, the value between 1 to 2 is recommended, Default = 0.1,
`edgeW` 1 edge is weighted (statistic pxy(x))
0 edge is not weighted (Default)
`nodeW` 1 node is weighted (gene or otu abundance)
0 node is not wieghted (Default)
`csn` Cell-specific network, the kth CSN is in csn{k}
rows = genes, columns = genes
`kk` the number of conditional gene. when kk=0, the method is CSN
`dlimit` the min degree limitation of conditional genes.
`average` whether use the average(adjmc + adjmc1) network or intersection(adjmc.*adjmc1) network.
Returns:
csnet dict
Raises:
KeyError - raises an exception
Notes:
Too many cells or genes may lead to out of memory.
学习 dataframe 和array python的矩阵运算。
np index start from 0
每个new cell都要和原来所有的细胞一起计算lower upper边界矩阵,都要排序每个基因来计算。
如果数据库足够大,可以就用原来的边界矩阵,重新换算出upper和lower矩阵。带入new cell的基因表达数据就可以。
"""
self.logger.info('start construction cell-specific network ')
nr, nc = self.data.shape
data = self.data
upper = pd.DataFrame(np.zeros((nr, nc)), columns=data.columns, index=data.index)
lower = pd.DataFrame(np.zeros((nr, nc)), columns=data.columns, index=data.index)
for i in range(nr):
sort_gi = data.iloc[i, :].sort_values(axis=0, ascending=True)
s1 = sort_gi.values
s2 = sort_gi.index
n1 = sum(np.sign(s1))
n0 = nc - n1
h = round(boxsize * np.sqrt(n1))
k = 0
while k < nc:
s = 0
while k + s + 1 < nc and s1[(k + s + 1)] == s1[k]:
s = s + 1
if s >= h:
upper.loc[(data.index[i], s2[range(k, k + s + 1)])] = data.loc[(data.index[i], s2[k])]
lower.loc[(data.index[i], s2[range(k, k + s + 1)])] = data.loc[(data.index[i], s2[k])]
else:
upper.loc[(data.index[i], s2[range(k, k + s + 1)])] = data.loc[(data.index[i], s2[int(min(nc - 1, k + s + h))])]
lower.loc[(data.index[i], s2[range(k, k + s + 1)])] = data.loc[(data.index[i], s2[int(max(n0 * (n0 > h), k - h))])]
k = k + s + 1
# %If gene expression matrix is sparse, use the sparse matrix will accelerate
# %the calculation and reduce memory footprint
# %data = sparse(data); upper = sparse(upper); lower = sparse(lower);
self.logger.info('finish caculate the neighborhood of each gene for each cell')
cells = self.get_cells(cells=cells)
csn = dict()
B = pd.DataFrame(np.zeros((nr, nc)), columns=data.columns, index=data.index)
p = -stats.norm.ppf(q=alpha, loc=0, scale=1)
for k in cells:
for j in B.columns:
if average:
B.loc[:, j] = (data.loc[:, j] <= upper.loc[:, k]) & (data.loc[:, j] >= lower.loc[:, k]) & (data.loc[:, k] > 0)
else:
B.loc[:, j] = (data.loc[:, j] <= upper.loc[:, k]) & (data.loc[:, j] >= lower.loc[:, k])
B = B * 1
a = np.matrix(B.sum(axis=1))
csnk = (B.dot(B.T) * nc - a.T * a) / np.sqrt(np.multiply(a.T * a, (nc - a).T * (nc - a)) / (nc - 1) + np.spacing(1))
csnlink = (csnk > p) * 1
if csnlink.sum().sum() == 0:
self.logger.info('no genes in Cell {} has a link'.format(k))
continue
if kk != 0:
id = condition_g(csnlink, kk=kk, dlimit=dlimit)
csnlink = pd.DataFrame(np.zeros([nr, nr])) if average else pd.DataFrame(np.ones([nr, nr]))
for m in range(kk):
B_z = B.iloc[id[m], :] * B
idc = np.argwhere(B.iloc[id[m], :] != 0).flatten()
B_z = B_z.iloc[:, idc]
r = B_z.shape[1]
a_z = np.mat(B_z.sum(axis=1))
c_z = B_z @ B_z.T
csnk1 = (c_z * r - a_z.T * a_z) / np.sqrt(np.multiply(a_z.T * a_z, (r - a_z).T * (r - a_z)) / (r - 1) + np.spacing(1))
csnlink1 = (csnk1 > p) * 1
csnlink = csnlink + csnlink1 if average else csnlink * csnlink1
else:
kk = 1
csnlink = csnlink / kk if average else csnlink
csn[k] = csnlink
if to_csv:
filename = os.path.join(self.outdir, 'cellnws', '{}.nw.csv'.format(k))
uf.create_dir(self.outdir + '/cellnws')
csn[k].to_csv(path_or_buf=filename)
self.logger.info('Cell {} specific network is completed'.format(k))
self.logger.info('Finished constructing all {} cell specific networks'.format(len(cells)))
self.upper = upper
self.lower = lower
self.csn = csn
@uf.robust
def csndm(self, cells=None, normalize=1, to_csv=1, nodeW=0, *args, **kwargs):
"""Construction of network degree matrix
The function performs the transformation from gene expression matrix to network degree matrix (ndm).
Parameters:
`data` Gene expression matrix (TPM/RPKM/FPKM/count), rows = genes, columns = cells. otu_even.table
`alpha` Significant level (eg. 0.001, 0.01, 0.05 ...), Default = 0.01
`boxsize` Size of neighborhood, Default = 0.1 (nx(k) = ny(k) = 0.1*n)
`normalize`1 result is normalized (Default);
0 result is not normalized
Note:
If gene expression matrix is sparse, use the sparse matrix will accelerate the calculation and reduce memory footprint
data = sparse(data); upper = sparse(upper); lower = sparse(lower);
可用于机器学习,样品分类预测等
只输出指定 cells 的degree matrix ,不指定就输出所有cell的全部gene's dm
"""
data = self.data
self.logger.info('Constructing network degree matrix ...')
cells = self.get_cells(cells=cells)
nr, nc = self.data.shape
ndm = pd.DataFrame(np.zeros((nr, nc)), columns=data.columns, index=data.index)
csn = self.csn
celln = 0
for k in cells:
if k not in csn:
self.logger.info('Cell {} has no network'.format(k))
continue
if nodeW:
ndm.loc[:, k] = csn[k].sum(axis=1) * data.loc[:, k]
else:
ndm.loc[:, k] = csn[k].sum(axis=1)
celln += 1
self.logger.info('Network degree vector of cell {} is complete'.format(k))
if normalize:
self.logger.info('Normalizing network degree matrix ...')
a = ndm.mean(axis=0)
ndm = ndm.div(a + np.spacing(1), axis=1)
ndm = np.log(1 + ndm)
self.ndm = ndm
if to_csv:
filename = os.path.join(self.outdir, '{}.{}cells.nwdm.csv'.format(os.path.basename(self.tablename), celln))
ndm.to_csv(path_or_buf=filename)
self.logger.info('Finished network degree matrix, file: {}'.format(filename))
@uf.robust
def nfe(self, cells=None, to_csv=1, *args, **kwargs):
data = self.data
csn = self.csn
self.logger.info('caculate network_flow_entropy ...')
cells = self.get_cells(cells=cells)
nr, nc = data.shape
NFE = pd.DataFrame(np.zeros((nc, 1)), columns=['network_flow_entropy'], index=data.columns)
celln = 0
for k in cells:
if k not in csn:
self.logger.info('Cell {} has no network'.format(k))
NFE.loc[k] = None
continue
datak = np.mat(data.loc[:, k])
P = np.multiply(datak.T * datak, np.mat(csn[k]))
cc = P.sum(axis=1) != 0
idc = np.array(cc)[:, 0]
id = data.index[idc]
x = data.loc[(id, k)]
x_n = x / x.sum()
P1 = P[[id]][:, id]
P_n = P1 / P1.sum(axis=1)
x_p = pd.DataFrame(P_n) * np.array(x_n).reshape(-1, 1)
x_p[x_p == 0] = 1
NFE.loc[k] = -np.sum(np.sum(x_p * np.log(x_p)))
NFE.loc[k]
celln += 1
self.logger.info('network_flow_entropy of cell {} is {}'.format(k, NFE.loc[k][0]))
self.NFE = NFE
if to_csv:
filename = os.path.join(self.outdir, '{}.{}cells.NFE.csv'.format(os.path.basename(self.tablename), celln))
NFE.to_csv(path_or_buf=filename)
self.logger.info('Finished network_flow_entropy, output file: {}'.format(filename))
if __name__ == | |
<gh_stars>0
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This package contains a scaffold of a handler."""
import pprint
import time
from typing import Optional, Tuple, cast
from aea.configurations.base import ProtocolId
from aea.helpers.dialogue.base import DialogueLabel
from aea.helpers.search.models import Query
from aea.protocols.base import Message
from aea.protocols.default.message import DefaultMessage
from aea.protocols.signing.message import SigningMessage
from aea.skills.base import Handler
from packages.fetchai.protocols.fipa.message import FipaMessage
from packages.fetchai.protocols.oef_search.message import OefSearchMessage
from packages.fetchai.skills.tac_negotiation.dialogues import (
DefaultDialogues,
FipaDialogue,
FipaDialogues,
OefSearchDialogue,
OefSearchDialogues,
SigningDialogue,
SigningDialogues,
)
from packages.fetchai.skills.tac_negotiation.strategy import Strategy
from packages.fetchai.skills.tac_negotiation.transactions import Transactions
class FipaNegotiationHandler(Handler):
"""This class implements the fipa negotiation handler."""
SUPPORTED_PROTOCOL = FipaMessage.protocol_id # type: Optional[ProtocolId]
def setup(self) -> None:
"""
Implement the setup.
:return: None
"""
pass
def handle(self, message: Message) -> None:
"""
Dispatch message to relevant handler and respond.
:param message: the message
:return: None
"""
fipa_msg = cast(FipaMessage, message)
# recover dialogue
fipa_dialogues = cast(FipaDialogues, self.context.fipa_dialogues)
fipa_dialogue = cast(FipaDialogue, fipa_dialogues.update(fipa_msg))
if fipa_dialogue is None:
self._handle_unidentified_dialogue(fipa_msg)
return
self.context.logger.debug(
"handling FipaMessage of performative={}".format(fipa_msg.performative)
)
if fipa_msg.performative == FipaMessage.Performative.CFP:
self._on_cfp(fipa_msg, fipa_dialogue)
elif fipa_msg.performative == FipaMessage.Performative.PROPOSE:
self._on_propose(fipa_msg, fipa_dialogue)
elif fipa_msg.performative == FipaMessage.Performative.DECLINE:
self._on_decline(fipa_msg, fipa_dialogue)
elif fipa_msg.performative == FipaMessage.Performative.ACCEPT:
self._on_accept(fipa_msg, fipa_dialogue)
elif fipa_msg.performative == FipaMessage.Performative.MATCH_ACCEPT_W_INFORM:
self._on_match_accept(fipa_msg, fipa_dialogue)
def teardown(self) -> None:
"""
Implement the handler teardown.
:return: None
"""
pass
def _handle_unidentified_dialogue(self, fipa_msg: FipaMessage) -> None:
"""
Handle an unidentified dialogue.
Respond to the sender with a default message containing the appropriate error information.
:param msg: the message
:return: None
"""
self.context.logger.info(
"received invalid fipa message={}, unidentified dialogue.".format(fipa_msg)
)
default_dialogues = cast(DefaultDialogues, self.context.default_dialogues)
default_msg = DefaultMessage(
performative=DefaultMessage.Performative.ERROR,
dialogue_reference=default_dialogues.new_self_initiated_dialogue_reference(),
error_code=DefaultMessage.ErrorCode.INVALID_DIALOGUE,
error_msg="Invalid dialogue.",
error_data={"fipa_message": fipa_msg.encode()},
)
default_msg.counterparty = fipa_msg.counterparty
assert (
default_dialogues.update(default_msg) is not None
), "DefaultDialogue not constructed."
self.context.outbox.put_message(message=default_msg)
    def _on_cfp(self, cfp: FipaMessage, fipa_dialogue: FipaDialogue) -> None:
        """
        Handle a CFP (call for proposal).

        Asks the strategy for a proposal matching the query; replies with a
        PROPOSE (and records it as a pending, signed proposal) if one exists,
        otherwise replies with a DECLINE and records the dialogue end state.

        :param cfp: the fipa message containing the CFP
        :param fipa_dialogue: the fipa_dialogue
        :return: None
        """
        new_msg_id = cfp.message_id + 1
        query = cast(Query, cfp.query)
        strategy = cast(Strategy, self.context.strategy)
        proposal_description = strategy.get_proposal_for_query(
            query, cast(FipaDialogue.Role, fipa_dialogue.role)
        )
        if proposal_description is None:
            # nothing profitable to offer -> decline
            self.context.logger.debug(
                "sending to {} a Decline{}".format(
                    fipa_dialogue.dialogue_label.dialogue_opponent_addr[-5:],
                    pprint.pformat(
                        {
                            "msg_id": new_msg_id,
                            "dialogue_reference": cfp.dialogue_reference,
                            "origin": fipa_dialogue.dialogue_label.dialogue_opponent_addr[
                                -5:
                            ],
                            "target": cfp.target,
                        }
                    ),
                )
            )
            fipa_msg = FipaMessage(
                performative=FipaMessage.Performative.DECLINE,
                message_id=new_msg_id,
                dialogue_reference=fipa_dialogue.dialogue_label.dialogue_reference,
                target=cfp.message_id,
            )
            fipa_dialogues = cast(FipaDialogues, self.context.fipa_dialogues)
            fipa_dialogues.dialogue_stats.add_dialogue_endstate(
                FipaDialogue.EndState.DECLINED_CFP, fipa_dialogue.is_self_initiated
            )
        else:
            # sign the proposal terms and keep it pending until the
            # counterparty accepts or declines
            transactions = cast(Transactions, self.context.transactions)
            signing_msg = transactions.generate_signing_message(
                SigningMessage.Performative.SIGN_MESSAGE,
                proposal_description,
                fipa_dialogue.dialogue_label,
                cast(FipaDialogue.Role, fipa_dialogue.role),
                self.context.agent_address,
            )
            transactions.add_pending_proposal(
                fipa_dialogue.dialogue_label, new_msg_id, signing_msg
            )
            self.context.logger.info(
                "sending to {} a Propose {}".format(
                    fipa_dialogue.dialogue_label.dialogue_opponent_addr[-5:],
                    pprint.pformat(
                        {
                            "msg_id": new_msg_id,
                            "dialogue_reference": cfp.dialogue_reference,
                            "origin": fipa_dialogue.dialogue_label.dialogue_opponent_addr[
                                -5:
                            ],
                            "target": cfp.message_id,
                            "propose": proposal_description.values,
                        }
                    ),
                )
            )
            fipa_msg = FipaMessage(
                performative=FipaMessage.Performative.PROPOSE,
                message_id=new_msg_id,
                dialogue_reference=fipa_dialogue.dialogue_label.dialogue_reference,
                target=cfp.message_id,
                proposal=proposal_description,
            )
        # common send path for both Decline and Propose
        fipa_msg.counterparty = cfp.counterparty
        fipa_dialogue.update(fipa_msg)
        self.context.outbox.put_message(message=fipa_msg)
    def _on_propose(self, propose: FipaMessage, fipa_dialogue: FipaDialogue) -> None:
        """
        Handle a Propose.

        Signs the proposed terms, and — if the strategy deems them
        profitable — locks the transaction, records the pending acceptance
        and replies ACCEPT; otherwise replies DECLINE and records the
        dialogue end state.

        :param propose: the message containing the Propose
        :param fipa_dialogue: the fipa_dialogue
        :return: None
        """
        new_msg_id = propose.message_id + 1
        strategy = cast(Strategy, self.context.strategy)
        proposal_description = propose.proposal
        self.context.logger.debug("on Propose as {}.".format(fipa_dialogue.role))
        transactions = cast(Transactions, self.context.transactions)
        signing_msg = transactions.generate_signing_message(
            SigningMessage.Performative.SIGN_MESSAGE,
            proposal_description,
            fipa_dialogue.dialogue_label,
            cast(FipaDialogue.Role, fipa_dialogue.role),
            self.context.agent_address,
        )
        if strategy.is_profitable_transaction(
            signing_msg, role=cast(FipaDialogue.Role, fipa_dialogue.role)
        ):
            self.context.logger.info(
                "accepting propose (as {}).".format(fipa_dialogue.role)
            )
            # lock the tx so the goods/funds cannot be double-committed while
            # we wait for the counterparty's match-accept
            transactions.add_locked_tx(
                signing_msg, role=cast(FipaDialogue.Role, fipa_dialogue.role)
            )
            transactions.add_pending_initial_acceptance(
                fipa_dialogue.dialogue_label, new_msg_id, signing_msg
            )
            fipa_msg = FipaMessage(
                performative=FipaMessage.Performative.ACCEPT,
                message_id=new_msg_id,
                dialogue_reference=fipa_dialogue.dialogue_label.dialogue_reference,
                target=propose.message_id,
            )
        else:
            self.context.logger.info(
                "declining propose (as {})".format(fipa_dialogue.role)
            )
            fipa_msg = FipaMessage(
                performative=FipaMessage.Performative.DECLINE,
                message_id=new_msg_id,
                dialogue_reference=fipa_dialogue.dialogue_label.dialogue_reference,
                target=propose.message_id,
            )
            fipa_dialogues = cast(FipaDialogues, self.context.fipa_dialogues)
            fipa_dialogues.dialogue_stats.add_dialogue_endstate(
                FipaDialogue.EndState.DECLINED_PROPOSE, fipa_dialogue.is_self_initiated
            )
        # common send path for both Accept and Decline
        fipa_msg.counterparty = propose.counterparty
        fipa_dialogue.update(fipa_msg)
        self.context.outbox.put_message(message=fipa_msg)
    def _on_decline(self, decline: FipaMessage, fipa_dialogue: FipaDialogue) -> None:
        """
        Handle a Decline.

        ``decline.target`` identifies which of our messages was declined:
        1 = our CFP (nothing pending to clean up),
        2 = our Propose (drop the pending proposal),
        3 = our initial Accept (drop the pending acceptance and release the
        locked transaction). In every case the dialogue end state is recorded.

        :param decline: the Decline message
        :param fipa_dialogue: the fipa_dialogue
        :return: None
        """
        self.context.logger.debug(
            "on_decline: msg_id={}, dialogue_reference={}, origin={}, target={}".format(
                decline.message_id,
                decline.dialogue_reference,
                fipa_dialogue.dialogue_label.dialogue_opponent_addr,
                decline.target,
            )
        )
        target = decline.target
        fipa_dialogues = cast(FipaDialogues, self.context.fipa_dialogues)
        if target == 1:
            fipa_dialogues.dialogue_stats.add_dialogue_endstate(
                FipaDialogue.EndState.DECLINED_CFP, fipa_dialogue.is_self_initiated
            )
        elif target == 2:
            fipa_dialogues.dialogue_stats.add_dialogue_endstate(
                FipaDialogue.EndState.DECLINED_PROPOSE, fipa_dialogue.is_self_initiated
            )
            transactions = cast(Transactions, self.context.transactions)
            # discard the proposal we had kept pending; its return value is
            # intentionally unused
            signing_msg = transactions.pop_pending_proposal(
                fipa_dialogue.dialogue_label, target
            )
        elif target == 3:
            fipa_dialogues.dialogue_stats.add_dialogue_endstate(
                FipaDialogue.EndState.DECLINED_ACCEPT, fipa_dialogue.is_self_initiated
            )
            transactions = cast(Transactions, self.context.transactions)
            signing_msg = transactions.pop_pending_initial_acceptance(
                fipa_dialogue.dialogue_label, target
            )
            # release the goods/funds locked when we sent the Accept
            transactions.pop_locked_tx(signing_msg)
    def _on_accept(self, accept: FipaMessage, fipa_dialogue: FipaDialogue) -> None:
        """
        Handle an Accept.

        Pops the pending proposal the Accept refers to, re-checks its
        profitability, and either locks the transaction and forwards the
        signing message to the decision maker (non-contract case; the
        contract case is currently disabled), or replies DECLINE.

        :param accept: the Accept message
        :param fipa_dialogue: the fipa_dialogue
        :return: None
        """
        self.context.logger.debug(
            "on_accept: msg_id={}, dialogue_reference={}, origin={}, target={}".format(
                accept.message_id,
                accept.dialogue_reference,
                fipa_dialogue.dialogue_label.dialogue_opponent_addr,
                accept.target,
            )
        )
        new_msg_id = accept.message_id + 1
        transactions = cast(Transactions, self.context.transactions)
        signing_msg = transactions.pop_pending_proposal(
            fipa_dialogue.dialogue_label, accept.target
        )
        strategy = cast(Strategy, self.context.strategy)
        if strategy.is_profitable_transaction(
            signing_msg, role=cast(FipaDialogue.Role, fipa_dialogue.role)
        ):
            self.context.logger.info(
                "locking the current state (as {}).".format(fipa_dialogue.role)
            )
            transactions.add_locked_tx(
                signing_msg, role=cast(FipaDialogue.Role, fipa_dialogue.role)
            )
            if strategy.is_contract_tx:
                # ledger/contract flow is disabled; kept for reference
                pass
                # contract = cast(ERC1155Contract, self.context.contracts.erc1155)
                # if not contract.is_deployed:
                #     ledger_api = self.context.ledger_apis.get_api(strategy.ledger_id)
                #     contract_address = self.context.shared_state.get(
                #         "erc1155_contract_address", None
                #     )
                #     assert (
                #         contract_address is not None
                #     ), "ERC1155Contract address not set!"
                # tx_nonce = transaction_msg.skill_callback_info.get("tx_nonce", None)
                # assert tx_nonce is not None, "tx_nonce must be provided"
                # transaction_msg = contract.get_hash_batch_transaction_msg(
                #     from_address=accept.counterparty,
                #     to_address=self.context.agent_address,  # must match self
                #     token_ids=[
                #         int(key)
                #         for key in transaction_msg.terms.quantities_by_good_id.keys()
                #     ]
                #     + [
                #         int(key)
                #         for key in transaction_msg.terms.amount_by_currency_id.keys()
                #     ],
                #     from_supplies=[
                #         quantity if quantity > 0 else 0
                #         for quantity in transaction_msg.terms.quantities_by_good_id.values()
                #     ]
                #     + [
                #         value if value > 0 else 0
                #         for value in transaction_msg.terms.amount_by_currency_id.values()
                #     ],
                #     to_supplies=[
                #         -quantity if quantity < 0 else 0
                #         for quantity in transaction_msg.terms.quantities_by_good_id.values()
                #     ]
                #     + [
                #         -value if value < 0 else 0
                #         for value in transaction_msg.terms.amount_by_currency_id.values()
                #     ],
                #     value=0,
                #     trade_nonce=int(tx_nonce),
                #     ledger_api=self.context.ledger_apis.get_api(strategy.ledger_id),
                #     skill_callback_id=self.context.skill_id,
                #     skill_callback_info={
                #         "dialogue_label": fipa_dialogue.dialogue_label.json
                #     },
                # )
            else:
                # hand the signing message to the decision maker; the reply
                # continues in the signing-message handler
                signing_dialogues = cast(
                    SigningDialogues, self.context.signing_dialogues
                )
                signing_dialogue = cast(
                    Optional[SigningDialogue], signing_dialogues.update(signing_msg)
                )
                assert (
                    signing_dialogue is not None
                ), "Could not construct sigining dialogue."
                self.context.logger.info(
                    "sending signing_msg={} to decison maker following ACCEPT.".format(
                        signing_msg
                    )
                )
                self.context.decision_maker_message_queue.put(signing_msg)
        else:
            # no longer profitable -> decline the Accept
            self.context.logger.debug(
                "decline the Accept (as {}).".format(fipa_dialogue.role)
            )
            fipa_msg = FipaMessage(
                performative=FipaMessage.Performative.DECLINE,
                message_id=new_msg_id,
                dialogue_reference=fipa_dialogue.dialogue_label.dialogue_reference,
                target=accept.message_id,
            )
            fipa_msg.counterparty = accept.counterparty
            fipa_dialogue.update(fipa_msg)
            dialogues = cast(FipaDialogues, self.context.fipa_dialogues)
            dialogues.dialogue_stats.add_dialogue_endstate(
                FipaDialogue.EndState.DECLINED_ACCEPT, fipa_dialogue.is_self_initiated
            )
            self.context.outbox.put_message(message=fipa_msg)
def _on_match_accept(
self, match_accept: FipaMessage, fipa_dialogue: FipaDialogue
) -> None:
"""
Handle a matching Accept.
:param match_accept: the MatchAccept message
:param fipa_dialogue: the fipa_dialogue
:return: None
"""
self.context.logger.debug(
"on_match_accept: msg_id={}, dialogue_reference={}, origin={}, target={}".format(
match_accept.message_id,
match_accept.dialogue_reference,
fipa_dialogue.dialogue_label.dialogue_opponent_addr,
match_accept.target,
)
)
if match_accept.info.get("signature") is not None:
transactions = cast(Transactions, self.context.transactions)
signing_msg = transactions.pop_pending_initial_acceptance(
fipa_dialogue.dialogue_label, match_accept.target
)
strategy = cast(Strategy, self.context.strategy)
counterparty_signature = match_accept.info.get("signature")
if strategy.is_contract_tx:
pass
# contract = cast(ERC1155Contract, self.context.contracts.erc1155)
# if not contract.is_deployed:
# ledger_api = self.context.ledger_apis.get_api(strategy.ledger_id)
# contract_address = self.context.shared_state.get(
# "erc1155_contract_address", None
# )
# assert (
# contract_address is not None
# ), "ERC1155Contract address not set!"
# contract.set_deployed_instance(
# ledger_api, cast(str, contract_address),
# )
# strategy = cast(Strategy, self.context.strategy)
# tx_nonce = transaction_msg.skill_callback_info.get("tx_nonce", None)
# tx_signature = match_accept.info.get("tx_signature", None)
# assert (
# tx_nonce is not None and tx_signature is not None
# ), "tx_nonce or tx_signature not available"
# transaction_msg = contract.get_atomic_swap_batch_transaction_msg(
# from_address=self.context.agent_address,
# to_address=match_accept.counterparty,
# token_ids=[
# int(key)
# for key in transaction_msg.terms.quantities_by_good_id.keys()
# ]
# + [
# int(key)
# for key in transaction_msg.terms.amount_by_currency_id.keys()
# ],
# from_supplies=[
# -quantity if quantity < 0 else 0
# for quantity in transaction_msg.terms.quantities_by_good_id.values()
# ]
# + [
# -value if value < 0 else 0
# for value in transaction_msg.terms.amount_by_currency_id.values()
# ],
# to_supplies=[
# quantity if quantity > 0 else 0
# for quantity in transaction_msg.terms.quantities_by_good_id.values()
# ]
# + [
# value if value > 0 else 0
# for value in transaction_msg.terms.amount_by_currency_id.values()
# ],
# value=0,
# trade_nonce=int(tx_nonce),
# ledger_api=self.context.ledger_apis.get_api(strategy.ledger_id),
# skill_callback_id=self.context.skill_id,
# signature=tx_signature,
# skill_callback_info={
# "dialogue_label": dialogue.dialogue_label.json
# },
# )
else:
signing_msg.set(
"skill_callback_info",
{
**signing_msg.skill_callback_info,
**{"counterparty_signature": counterparty_signature},
},
)
signing_dialogues = cast(
SigningDialogues, self.context.signing_dialogues
)
signing_dialogue = cast(
| |
import configparser
# Read the deployment settings from production.ini and flatten them into a
# plain nested dict used by the rest of the application.
c = configparser.ConfigParser()
c.read("production.ini")
config = {}
# database connection parameters
config['host'] = c['dboption']['chost']
config['port'] = int(c['dboption']['cport'])
config['user'] = c['dboption']['cuser']
config['pw'] = c['dboption']['cpw']
config['db'] = c['dboption']['cdb']
# general portal options
config['homepath'] = c['option']['home']
config['hosturl'] = c['option']['hosturl']
# the whole [news] section is kept as-is (a configparser section proxy)
config['news'] = c['news']
# outgoing mail settings
config['smtp'] = {}
config['smtp']['sender'] = c['option']['smtp-sender']
config['smtp']['server'] = c['option']['smtp']
# collection-sheet template and upload locations
config['collection_table'] = {}
config['collection_table']['template'] = c['option']['template_collection_sheet']
config['collection_table']['ordered'] = c['option']['collection_table_ordered']
config['collection_table']['filled'] = c['option']['collection_table_filled']
# Diversity Workbench (DWB) connection
config['dwb'] = {}
config['dwb']['name_suffix'] = c['option']['dwb_name_suffix']
config['dwb']['connection_string'] = c['option']['dwb_connection_string']
config['dwb']['use_dwb'] = int(c['option']['use_dwb'])
# `dev_group` is mandatory: fail fast if it is missing.
# NOTE(review): `log` is not defined anywhere in this module — if this branch
# is taken, `log.critical` raises NameError before the intended NameError
# below; confirm where `log` is supposed to come from.
if not c.has_option('option', 'dev_group'):
    log.critical('Option `dev_group` is not defined in production.ini!\nPlease add at least one email to the list.')
    raise NameError('Option `dev_group` is not defined in production.ini!\nPlease add at least one email to the list.')
config['dev_group'] = c['option']['dev_group']
taxon_ids = """100408, 100430, 100431, 100451, 100453, 3000243, 3100522, 3200125,
3200126, 4000014, 4402020, 4403366, 4403382, 4403383, 4404012,
4404135, 4404679, 4405947, 4406565, 4407062, 4408012, 5000093,
5000095, 5000203, 5009403, 5009532, 5100497, 5200013, 5210014,
5220011, 5400004, 5401236, 5413793, 5416518, 5416650, 5426341,
5428084, 5428327, 5428727, 5428849, 5428977, 5429029, 5429176,
5429405, 5430460, 5431215"""
# German federal-state display names, index-aligned across the two languages
# (index 0 is the Europe-wide pseudo-entry).
states = {'de': ["Europa",
                 "Baden-Württemberg",
                 "Bayern",
                 "Berlin",
                 "Brandenburg",
                 "Bremen",
                 "Hamburg",
                 "Hessen",
                 "Mecklenburg-Vorpommern",
                 "Niedersachsen",
                 "Nordrhein-Westfalen",
                 "Rheinland-Pfalz",
                 "Saarland",
                 "Sachsen",
                 "Sachsen-Anhalt",
                 "Schleswig-Holstein",
                 "Thüringen"],
          'en': ["Europe",
                 "Baden-Württemberg",
                 "Bavaria",
                 "Berlin",
                 "Brandenburg",
                 "Bremen",
                 "Hamburg",
                 "Hesse",
                 "Mecklenburg-Vorpommern",
                 "Lower Saxony",
                 "North Rhine Westphalia",
                 # BUG FIX: was the malformed label "RhinelandPalatinate"
                 "Rhineland-Palatinate",
                 "Saarland",
                 "Saxony",
                 "Saxony-Anhalt",
                 "Schleswig-Holstein",
                 "Thuringia"]}
# User-facing interface texts, keyed by message id and then by language code
# ('de' / 'en'). Placeholders follow str.format conventions: some templates
# use positional fields ({0}, {1}, ...), others named fields — each template
# must stay consistent with how its caller formats it.
messages = {}
messages['results'] = {}
messages['results']['choose_taxa'] = {'de': '- Bitte wählen Sie ein Taxon aus -',
                                      'en': '- Please choose a taxon -'}
messages['results']['choose_states'] = {'de': '- Bitte wählen Sie ein Bundesland aus -',
                                        'en': '- Please choose a state -'}
messages['news_edit'] = {'de': ' Bearbeiten ', 'en': ' Edit '}
messages['news_reset'] = {'de': " Zurücksetzen ", 'en': " Reset "}
messages['news_reset_html'] = {'de': "<h2><strong>Titel</strong></h2><p>Inhalt</p>",
                               'en': "<h2><strong>Title</strong></h2><p>Content</p>"}
messages['news_message_saved'] = {'de': "News gespeichert!", 'en': "News saved!"}
messages['news_message_updated'] = {'de': "News bearbeitet!", 'en': "News updated!"}
messages['news_message_empty'] = {'de': "Bitte geben Sie Titel und Inhalt des neuen Newsbeitrages ein!",
                                  'en': "Please enter title and content of the news posting!"}
messages['news_cancel'] = {'de': " Abbrechen ", 'en': " Cancel "}
messages['contact'] = {'de': 'Bitte überprüfen Sie die eingegebenen Daten.', 'en': 'Please check the data entered.'}
messages['contact_send'] = {'de': 'Die Mail wurde versandt!', 'en': 'Send mail was successful!'}
messages['letter_sender'] = {'de': 'Absender', 'en': 'Sender'}
messages['letter_send_to'] = {'de': 'Empfänger', 'en': 'Send to'}
messages['letter_order_no'] = {'de': 'Auftragsnummer {0}', 'en': 'Order no. {0}'}
messages['letter_no_samples'] = {'de': 'Anzahl Proben: {0}', 'en': 'No. samples: {0}'}
messages['letter_body1'] = {'de': 'Hinweis: Bitte drucken Sie das Anschreiben aus oder notieren Sie alternativ die ',
                            'en': 'Please print this cover letter or write the'}
messages['letter_body2'] = {'de': 'Auftragsnummer auf einem Zettel und legen diesen dem Probenpaket bei.',
                            'en': 'order number on a slip and send it together with your parcel '
                                  'containing the samples.'}
messages['pls_select'] = {'de': 'Bitte wählen', 'en': 'Please select'}
messages['wrong_credentials'] = {'de': 'Falscher Benutzer oder Passwort!', 'en': 'Wrong user or password!'}
messages['still_locked'] = {'de': 'Sie wurden noch nicht von einem Koordinator freigeschaltet!',
                            'en': 'Your account must be unlocked by the Administrator!'}
messages['required_fields'] = {'de': 'Bitte alle Pflichtfelder ausfüllen!',
                               'en': 'Please fill out all required fields!'}
messages['username_present'] = {'de': 'Nutzername schon vorhanden, bitte wählen Sie einen anderen.',
                                'en': 'Username already present, please choose another one.'}
# BUG FIX: "Benachichtigung" -> "Benachrichtigung" (typo in user-facing text)
messages['user_created'] = {'de': 'Ihre Registrierungsanfrage wird bearbeitet. Sie werden in Kürze eine Email '
                                  'Benachrichtigung zum Stand Ihrer Freigabe für das GBOL Webportal erhalten.',
                            'en': 'User created. Please wait for unlock of your account by the administrator.'}
messages['reg_exp_mail_subject'] = {'de': 'Ihre Registrierung beim GBOL Webportal',
                                    'en': 'Your Registration at GBOL Webportal'}
# BUG FIX: the English template used the positional field {3} amid named
# fields, which raises IndexError when formatted with keyword arguments only;
# also "have send" -> "have sent".
messages['reg_exp_mail_body'] = {'de': 'Hallo {salutation} {title} {vorname} {nachname},\n\n'
                                       'wir haben Ihre Registrierung für die taxonomische Expertise {expertisename} '
                                       'erhalten und an die entsprechenden Koordinatoren weitergeleitet.\n\n'
                                       'Viele Grüße\nIhr GBOL Team',
                                 'en': 'Hello {salutation} {title} {vorname} {nachname},\n\n'
                                       'We have received Your registration for the taxonomic expertise {expertisename} and '
                                       'have sent them to the corresponding GBOL-taxon coordinators.\n\n'
                                       'Best regards,\nYour GBOL team'}
messages['reg_exp_chg_mail_body'] = {'de': 'Hallo {tk_user},\n\n{req_user} hat sich für die Expertise {expertisename} '
                                           'registriert.\nBitte prüfen Sie die Angaben und zertifizieren die '
                                           'Expertise anschließend.\n\nViele Grüße\nIhr GBOL Team',
                                     'en': 'Hello {tk_user},\n\n{req_user} applies for the taxonomic expertise '
                                           '{expertisename}.\nPlease check the data and approve or decline the request.'
                                           '\n\nBest regards, Your GBOL team'}
messages['reg_exp_accept'] = {'de': """Hallo {3} {1} {2},
die Expertise {0} in Ihrem GBOL Konto wurde erfolgreich von einem Koordinator freigegeben.
Viele Grüße
Ihr GBOL Team
""", 'en': """Hello {3} {1} {2}
The expertise {0} of your GBOL account has been approved by the coordinator.
Best regards,
The GBOL Team
"""}
messages['reg_exp_decline'] = {'de': """Hallo {3} {1} {2},
die Expertise {0} in Ihrem GBOL Konto wurde von einem Koordinator abgelehnt.
Sie können sich bei Fragen im Kontakt-Bereich bei uns melden.
Viele Grüße
Ihr GBOL Team
""", 'en': """Hello {3} {1} {2}
The expertise {0} of your GBOL account has been refused by the coordinator.
If You have any questions regarding the GBOL approval process, please send us a note in the contact area.
We will answer Your inquiry as soon as possible.
Best regards,
The GBOL Team
"""}
messages['pwd_forgot_email_body'] = {'de': """{0},
eine Anfrage zum Zurücksetzen des Passworts für Ihr Benutzerkonto auf
dem German Barcode of Life Webportal wurde gestellt.
Sie können Ihr Passwort mit einem Klick auf folgenden Link ändern:
http://{1}/sammeln/change-password?link={2}
Ihr Benutzername lautet: {3}
Dieser Link kann nur einmal verwendet werden und leitet Sie zu einer Seite,
auf der Sie ein neues Passwort festlegen können. Er ist einen Tag lang gültig
und läuft automatisch aus, falls Sie ihn nicht verwenden.
Viele Grüße
Das Team von German Barcode of Life""",
                                     'en': """{0},
a request for password reset for your useraccount on the
German Barcode of Life webportal has been posed.
You can change your password with the following link:
http://{1}/sammeln/change-password?link={2}
Your user name is: {3}
Please note: this link can only be used once. The link will direct you to a
website where you can enter a new password.
The link is valid for one day.
Best wishes,
Your team from German Barcode of Life"""}
messages['pwd_forgot_email_subject'] = {'de': 'Neue Login-Daten für {0} auf German Barcode of Life',
                                        'en': 'New login data for your user {0} on German Barcode of '
                                              'Life webportal'}
# BUG FIX: "werde sent" (stray German word) -> "were sent"
messages['pwd_forgot_sent'] = {'de': 'Das Passwort und weitere Hinweise wurden an '
                                     'die angegebene Email-Adresse gesendet.',
                               'en': 'The password and further tips were sent to your email address.'}
messages['pwd_forgot_not_found'] = {'de': 'Es wurde kein Benutzer mit eingegebenem Namen bzw. Email gefunden.',
                                    'en': 'No user found with the name or the email entered.'}
messages['pwd_unmatch'] = {'de': 'Die beiden Passwörter stimmen nicht überein.', 'en': 'Passwords do not match.'}
messages['pwd_saved'] = {'de': 'Neues Passwort gespeichert.', 'en': 'New password saved'}
messages['pwd__link_used'] = {'de': 'Link wurde bereits benutzt.', 'en': 'The link has been used already'}
messages['pwd__link_invalid'] = {'de': 'Kein gültiger Link.', 'en': 'Link invalid'}
messages['pwd__link_timeout'] = {'de': 'Link ist nicht mehr gültig.', 'en': 'Link has timed out'}
messages['order_success'] = {'de': 'Danke, Ihre Bestellung wurde entgegengenommen.',
                             'en': 'Thank You, the order has been received.'}
messages['order_info_missing'] = {'de': 'Bitte füllen Sie alle Felder aus.', 'en': 'Please fill out all fields.'}
messages['edt_no_passwd'] = {'de': 'Bitte geben Sie Ihr Passwort an, um das Benutzerprofil zu ändern.',
                             'en': 'Please provide your password in order to change the userprofile.'}
messages['edt_passwd_wrong'] = {'de': 'Falsches Passwort.', 'en': 'Wrong password.'}
messages['edt_passwd_mismatch'] = {'de': 'Die beiden neuen Passwörter stimmen nicht überein.',
                                   'en': 'Both new passwords do not match.'}
messages['edt_success'] = {'de': 'Benutzerprofil erfolgreich geändert', 'en': 'Userprofile updated.'}
# BUG FIX: "occured" -> "occurred"
messages['err_upload'] = {'de': 'Ein Fehler ist beim Hochladen der Sammeltabelle aufgetreten. '
                                'Bitte schicken Sie Ihre Sammeltabelle per E-Mail an den Taxonkoordinator.',
                          'en': 'An error occurred when uploading the collection sheet. Please sent it to the '
                                'taxon coordinator via e-mail.'}
messages['succ_upload'] = {'de': 'Die Sammeltabelle wurde erfolgreich hochgeladen!',
                           'en': 'Collection sheet uploaded successfully!'}
messages['download'] = {'de': 'Herunterladen', 'en': 'Download'}
messages['cert'] = {'de': 'zertifiziert', 'en': 'certified'}
messages['subm'] = {'de': 'beantragt', 'en': 'submitted'}
messages['select'] = {'de': 'Auswahl', 'en': 'Please select'}
messages['robot'] = {'de': 'Registrierung konnte nicht durchgeführt werden!', 'en': 'Could not process registration!'}
messages['email_reg_subject'] = {'de': 'GBOL Registrierung', 'en': 'GBOL Registration'}
# BUG FIX: the German templates below started with a stray quote
# (""""Hallo -> """Hallo), which put a literal '"' into the mail body.
messages['email_reg_body'] = {'de': """Hallo {4} {2} {3}
ihr GBOL Konto {0} wurde erfolgreich von einem Koordinator freigegeben.
Sie können sich nun im dem Experten-Bereich anmelden.
Viele Grüße
Ihr GBOL Team
""", 'en': """Hello {4} {2} {3}
Your GBOL account has been approved by the coordinator.
You can now login into the expert area.
Best regards,
The GBOL Team
"""}
# BUG FIX: stray quote removed (see above); the English closing
# "Best regards," inside the German body replaced by "Viele Grüße".
messages['email_reg_body_decline'] = {'de': """Hallo {4} {2} {3}
ihr GBOL Konto {0} wurde von einem Koordinator abgelehnt.
Sie können sich bei Fragen im Kontakt-Bereich von GBOL bei uns melden.
Viele Grüße
Ihr GBOL Team
""", 'en': """Hello {4} {2} {3}
Your GBoL account has been refused by the coordinator.
If You have any questions regarding the GBoL approval process, please send us a note in the contact area.
We will answer Your inquiry as soon as possible.
Best regards,
The GBOL Team
"""}
messages['states'] = {'de': | |
return True
else:
return False
    def export(
        self,
        outfile,
        level,
        namespaceprefix_="",
        namespacedef_="",
        name_="CmDomainQType",
        pretty_print=True,
    ):
        """Write this element as XML to *outfile*, indented to *level*.

        Generated (generateDS-style) serializer; delegates attributes to
        _exportAttributes and child elements to _exportChildren.
        """
        # A namespace definition registered for this class overrides the
        # caller-supplied one.
        imported_ns_def_ = GenerateDSNamespaceDefs_.get("CmDomainQType")
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = "\n"
        else:
            eol_ = ""
        # When built from a document, re-emit the tag name actually parsed.
        if self.original_tagname_ is not None and name_ == "CmDomainQType":
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ":"
        showIndent(outfile, level, pretty_print)
        outfile.write(
            "<%s%s%s"
            % (
                namespaceprefix_,
                name_,
                namespacedef_ and " " + namespacedef_ or "",
            )
        )
        already_processed = set()
        self._exportAttributes(
            outfile, level, already_processed, namespaceprefix_, name_="CmDomainQType"
        )
        # Children present -> open/close tag pair; otherwise a self-closing tag.
        if self._hasContent():
            outfile.write(">%s" % (eol_,))
            self._exportChildren(
                outfile,
                level + 1,
                namespaceprefix_,
                namespacedef_,
                name_="CmDomainQType",
                pretty_print=pretty_print,
            )
            showIndent(outfile, level, pretty_print)
            outfile.write("</%s%s>%s" % (namespaceprefix_, name_, eol_))
        else:
            outfile.write("/>%s" % (eol_,))
    def _exportAttributes(
        self, outfile, level, already_processed, namespaceprefix_="", name_="CmDomainQType"
    ):
        # CmDomainQType declares no XML attributes.
        pass
    def _exportChildren(
        self,
        outfile,
        level,
        namespaceprefix_="",
        namespacedef_="",
        name_="CmDomainQType",
        fromsubclass_=False,
        pretty_print=True,
    ):
        """Export the optional Question, ExampleWords and ExampleSentences
        children, in schema order.  Each child serializes itself via export().
        """
        if pretty_print:
            eol_ = "\n"
        else:
            eol_ = ""
        if self.Question is not None:
            # A captured per-child namespace prefix wins when enabled.
            namespaceprefix_ = (
                self.Question_nsprefix_ + ":"
                if (UseCapturedNS_ and self.Question_nsprefix_)
                else ""
            )
            self.Question.export(
                outfile,
                level,
                namespaceprefix_,
                namespacedef_="",
                name_="Question",
                pretty_print=pretty_print,
            )
        if self.ExampleWords is not None:
            namespaceprefix_ = (
                self.ExampleWords_nsprefix_ + ":"
                if (UseCapturedNS_ and self.ExampleWords_nsprefix_)
                else ""
            )
            self.ExampleWords.export(
                outfile,
                level,
                namespaceprefix_,
                namespacedef_="",
                name_="ExampleWords",
                pretty_print=pretty_print,
            )
        if self.ExampleSentences is not None:
            namespaceprefix_ = (
                self.ExampleSentences_nsprefix_ + ":"
                if (UseCapturedNS_ and self.ExampleSentences_nsprefix_)
                else ""
            )
            self.ExampleSentences.export(
                outfile,
                level,
                namespaceprefix_,
                namespacedef_="",
                name_="ExampleSentences",
                pretty_print=pretty_print,
            )
    def build(self, node, gds_collector_=None):
        """Populate this instance from an ElementTree *node*; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self._buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace from the tag to get the local element name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def _buildAttributes(self, node, attrs, already_processed):
        # CmDomainQType declares no XML attributes.
        pass
    def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        # Dispatch on the child element's local name, build the matching
        # generated type, and remember the tag actually seen for re-export.
        if nodeName_ == "Question":
            obj_ = QuestionType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.Question = obj_
            obj_.original_tagname_ = "Question"
        elif nodeName_ == "ExampleWords":
            obj_ = ExampleWordsType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.ExampleWords = obj_
            obj_.original_tagname_ = "ExampleWords"
        elif nodeName_ == "ExampleSentences":
            obj_ = ExampleSentencesType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.ExampleSentences = obj_
            obj_.original_tagname_ = "ExampleSentences"
# end class CmDomainQType
class QuestionsType(GeneratedsSuper):
    """Generated container for a repeated sequence of <CmDomainQ> children.

    generateDS-style XML binding: export()/build() serialize to and
    deserialize from ElementTree nodes.
    """
    __hash__ = GeneratedsSuper.__hash__
    subclass = None
    superclass = None
    def __init__(self, CmDomainQ=None, gds_collector_=None, **kwargs_):
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get("parent_object_")
        self.ns_prefix_ = None
        # Repeated child element: default to a fresh list per instance.
        if CmDomainQ is None:
            self.CmDomainQ = []
        else:
            self.CmDomainQ = CmDomainQ
        self.CmDomainQ_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # Honor an externally registered subclass module first, then the
        # 'subclass' hook, else instantiate this class directly.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(CurrentSubclassModule_, QuestionsType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if QuestionsType.subclass:
            return QuestionsType.subclass(*args_, **kwargs_)
        else:
            return QuestionsType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_CmDomainQ(self):
        return self.CmDomainQ
    def set_CmDomainQ(self, CmDomainQ):
        self.CmDomainQ = CmDomainQ
    def add_CmDomainQ(self, value):
        self.CmDomainQ.append(value)
    def insert_CmDomainQ_at(self, index, value):
        self.CmDomainQ.insert(index, value)
    def replace_CmDomainQ_at(self, index, value):
        self.CmDomainQ[index] = value
    def _hasContent(self):
        # A non-empty child list means the tag must not be self-closing.
        if self.CmDomainQ:
            return True
        else:
            return False
    def export(
        self,
        outfile,
        level,
        namespaceprefix_="",
        namespacedef_="",
        name_="QuestionsType",
        pretty_print=True,
    ):
        """Write this element and its children as XML to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get("QuestionsType")
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = "\n"
        else:
            eol_ = ""
        # When built from a document, re-emit the tag name actually parsed.
        if self.original_tagname_ is not None and name_ == "QuestionsType":
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ":"
        showIndent(outfile, level, pretty_print)
        outfile.write(
            "<%s%s%s"
            % (
                namespaceprefix_,
                name_,
                namespacedef_ and " " + namespacedef_ or "",
            )
        )
        already_processed = set()
        self._exportAttributes(
            outfile, level, already_processed, namespaceprefix_, name_="QuestionsType"
        )
        if self._hasContent():
            outfile.write(">%s" % (eol_,))
            self._exportChildren(
                outfile,
                level + 1,
                namespaceprefix_,
                namespacedef_,
                name_="QuestionsType",
                pretty_print=pretty_print,
            )
            showIndent(outfile, level, pretty_print)
            outfile.write("</%s%s>%s" % (namespaceprefix_, name_, eol_))
        else:
            outfile.write("/>%s" % (eol_,))
    def _exportAttributes(
        self, outfile, level, already_processed, namespaceprefix_="", name_="QuestionsType"
    ):
        # QuestionsType declares no XML attributes.
        pass
    def _exportChildren(
        self,
        outfile,
        level,
        namespaceprefix_="",
        namespacedef_="",
        name_="QuestionsType",
        fromsubclass_=False,
        pretty_print=True,
    ):
        if pretty_print:
            eol_ = "\n"
        else:
            eol_ = ""
        # Serialize each repeated CmDomainQ child in document order.
        for CmDomainQ_ in self.CmDomainQ:
            namespaceprefix_ = (
                self.CmDomainQ_nsprefix_ + ":"
                if (UseCapturedNS_ and self.CmDomainQ_nsprefix_)
                else ""
            )
            CmDomainQ_.export(
                outfile,
                level,
                namespaceprefix_,
                namespacedef_="",
                name_="CmDomainQ",
                pretty_print=pretty_print,
            )
    def build(self, node, gds_collector_=None):
        """Populate this instance from an ElementTree *node*; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self._buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace to get the local element name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def _buildAttributes(self, node, attrs, already_processed):
        pass
    def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        if nodeName_ == "CmDomainQ":
            obj_ = CmDomainQType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.CmDomainQ.append(obj_)
            obj_.original_tagname_ = "CmDomainQ"
# end class QuestionsType
class CmSemanticDomainType(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
    def __init__(
        self,
        guid=None,
        Name=None,
        Abbreviation=None,
        Description=None,
        Questions=None,
        SubPossibilities=None,
        gds_collector_=None,
        **kwargs_,
    ):
        """Generated binding for a <CmSemanticDomain> element.

        'guid' is the element's only XML attribute; the remaining arguments
        are optional child-element objects.
        """
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get("parent_object_")
        self.ns_prefix_ = None
        self.guid = _cast(None, guid)
        self.guid_nsprefix_ = None
        # Each child slot has a companion *_nsprefix_ holding any captured
        # namespace prefix for re-export.
        self.Name = Name
        self.Name_nsprefix_ = None
        self.Abbreviation = Abbreviation
        self.Abbreviation_nsprefix_ = None
        self.Description = Description
        self.Description_nsprefix_ = None
        self.Questions = Questions
        self.Questions_nsprefix_ = None
        self.SubPossibilities = SubPossibilities
        self.SubPossibilities_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # Honor an externally registered subclass module first, then the
        # 'subclass' hook, else instantiate this class directly.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(CurrentSubclassModule_, CmSemanticDomainType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if CmSemanticDomainType.subclass:
            return CmSemanticDomainType.subclass(*args_, **kwargs_)
        else:
            return CmSemanticDomainType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessor pairs for each child element and the guid attribute.
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_Name(self):
        return self.Name
    def set_Name(self, Name):
        self.Name = Name
    def get_Abbreviation(self):
        return self.Abbreviation
    def set_Abbreviation(self, Abbreviation):
        self.Abbreviation = Abbreviation
    def get_Description(self):
        return self.Description
    def set_Description(self, Description):
        self.Description = Description
    def get_Questions(self):
        return self.Questions
    def set_Questions(self, Questions):
        self.Questions = Questions
    def get_SubPossibilities(self):
        return self.SubPossibilities
    def set_SubPossibilities(self, SubPossibilities):
        self.SubPossibilities = SubPossibilities
    def get_guid(self):
        return self.guid
    def set_guid(self, guid):
        self.guid = guid
    def _hasContent(self):
        # Any child element present means the tag must not be self-closing.
        if (
            self.Name is not None
            or self.Abbreviation is not None
            or self.Description is not None
            or self.Questions is not None
            or self.SubPossibilities is not None
        ):
            return True
        else:
            return False
    def export(
        self,
        outfile,
        level,
        namespaceprefix_="",
        namespacedef_="",
        name_="CmSemanticDomainType",
        pretty_print=True,
    ):
        """Write this element (guid attribute plus children) as XML."""
        # A namespace definition registered for this class overrides the
        # caller-supplied one.
        imported_ns_def_ = GenerateDSNamespaceDefs_.get("CmSemanticDomainType")
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = "\n"
        else:
            eol_ = ""
        # When built from a document, re-emit the tag name actually parsed.
        if self.original_tagname_ is not None and name_ == "CmSemanticDomainType":
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ":"
        showIndent(outfile, level, pretty_print)
        outfile.write(
            "<%s%s%s"
            % (
                namespaceprefix_,
                name_,
                namespacedef_ and " " + namespacedef_ or "",
            )
        )
        already_processed = set()
        self._exportAttributes(
            outfile, level, already_processed, namespaceprefix_, name_="CmSemanticDomainType"
        )
        # Children present -> open/close tag pair; otherwise a self-closing tag.
        if self._hasContent():
            outfile.write(">%s" % (eol_,))
            self._exportChildren(
                outfile,
                level + 1,
                namespaceprefix_,
                namespacedef_,
                name_="CmSemanticDomainType",
                pretty_print=pretty_print,
            )
            showIndent(outfile, level, pretty_print)
            outfile.write("</%s%s>%s" % (namespaceprefix_, name_, eol_))
        else:
            outfile.write("/>%s" % (eol_,))
    def _exportAttributes(
        self, outfile, level, already_processed, namespaceprefix_="", name_="CmSemanticDomainType"
    ):
        # Emit the 'guid' attribute once, quoted and encoded.
        if self.guid is not None and "guid" not in already_processed:
            already_processed.add("guid")
            outfile.write(
                " guid=%s"
                % (
                    self.gds_encode(
                        self.gds_format_string(quote_attrib(self.guid), input_name="guid")
                    ),
                )
            )
    def _exportChildren(
        self,
        outfile,
        level,
        namespaceprefix_="",
        namespacedef_="",
        name_="CmSemanticDomainType",
        fromsubclass_=False,
        pretty_print=True,
    ):
        """Export each optional child element that is set, in schema order:
        Name, Abbreviation, Description, Questions, SubPossibilities."""
        if pretty_print:
            eol_ = "\n"
        else:
            eol_ = ""
        if self.Name is not None:
            # A captured per-child namespace prefix wins when enabled.
            namespaceprefix_ = (
                self.Name_nsprefix_ + ":" if (UseCapturedNS_ and self.Name_nsprefix_) else ""
            )
            self.Name.export(
                outfile,
                level,
                namespaceprefix_,
                namespacedef_="",
                name_="Name",
                pretty_print=pretty_print,
            )
        if self.Abbreviation is not None:
            namespaceprefix_ = (
                self.Abbreviation_nsprefix_ + ":"
                if (UseCapturedNS_ and self.Abbreviation_nsprefix_)
                else ""
            )
            self.Abbreviation.export(
                outfile,
                level,
                namespaceprefix_,
                namespacedef_="",
                name_="Abbreviation",
                pretty_print=pretty_print,
            )
        if self.Description is not None:
            namespaceprefix_ = (
                self.Description_nsprefix_ + ":"
                if (UseCapturedNS_ and self.Description_nsprefix_)
                else ""
            )
            self.Description.export(
                outfile,
                level,
                namespaceprefix_,
                namespacedef_="",
                name_="Description",
                pretty_print=pretty_print,
            )
        if self.Questions is not None:
            namespaceprefix_ = (
                self.Questions_nsprefix_ + ":"
                if (UseCapturedNS_ and self.Questions_nsprefix_)
                else ""
            )
            self.Questions.export(
                outfile,
                level,
                namespaceprefix_,
                namespacedef_="",
                name_="Questions",
                pretty_print=pretty_print,
            )
        if self.SubPossibilities is not None:
            namespaceprefix_ = (
                self.SubPossibilities_nsprefix_ + ":"
                if (UseCapturedNS_ and self.SubPossibilities_nsprefix_)
                else ""
            )
            self.SubPossibilities.export(
                outfile,
                level,
                namespaceprefix_,
                namespacedef_="",
                name_="SubPossibilities",
                pretty_print=pretty_print,
            )
    def build(self, node, gds_collector_=None):
        """Populate this instance from an ElementTree *node*; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self._buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace from the tag to get the local element name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def _buildAttributes(self, node, attrs, already_processed):
        # Pick up the 'guid' XML attribute when present.
        value = find_attr_value_("guid", node)
        if value is not None and "guid" not in already_processed:
            already_processed.add("guid")
            self.guid = value
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == "Name":
obj_ = NameType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Name = obj_
obj_.original_tagname_ = "Name"
elif nodeName_ == "Abbreviation":
obj_ = AbbreviationType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Abbreviation = obj_
obj_.original_tagname_ = "Abbreviation"
elif nodeName_ == "Description":
obj_ = DescriptionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Description = obj_
obj_.original_tagname_ = "Description"
elif nodeName_ == "Questions":
obj_ = QuestionsType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Questions = | |
import numpy as np
import pygame as pg
import gym
from gym import spaces
class Ris(gym.Env):
    """Minimal falling-block (Tetris-like) environment with a gym interface.

    Observation: np.array of shape (1, 4, height, width) stacking
    [settled board, falling piece, incoming-garbage preview, gravity-tick flag].
    Actions: NOP / LEFT / RIGHT / ROT.  Gravity applies once every
    ``subframes`` calls to step().

    NOTE(review): only action_space is declared; observation_space is not —
    confirm downstream code does not rely on it.
    """
    NOP, LEFT, RIGHT, ROT = range(4)
    def __init__(self, scale, width=8, height=16, piece_set='lettris'):
        """scale: pixel size of one cell when rendering; piece_set selects
        a shape list from Ris.piece_sets."""
        self.action_space = spaces.Discrete(4)
        board = np.zeros((height, width))
        self.width = width
        self.height = height
        self.time = 0
        self.cutoff = 4000                     # episode step limit
        self.board = np.array(board, dtype=int)
        self.size = len(board)
        self.cell_size = scale
        self.piece_types = Ris.piece_sets[piece_set]
        self.falling_piece_pos = (np.random.randint(0, self.width - 3), 0)
        self.falling_piece_shape = self.piece_types[np.random.randint(0, len(self.piece_types))]
        self.subframe = 0
        self.subframes = 5                     # gravity period in sub-frames
        self.screen = None                     # assigned externally before render()
        self.incoming_garbage = 0
    def _piece_layer(self):
        """Return a board-sized 0/1 layer with the falling piece stamped at
        its current position (shared by reset/step/step_new)."""
        layer = np.zeros((self.height, self.width))
        for i in range(4):
            for j in range(4):
                if self.falling_piece_shape[j][i] == 1:
                    layer[j + self.falling_piece_pos[1]][i + self.falling_piece_pos[0]] = 1
        return layer
    def reset(self):
        """Clear the board, spawn a fresh piece and return the initial state."""
        board = np.zeros((self.height, self.width))
        self.board = np.array(board, dtype=int)
        self.falling_piece_pos = (np.random.randint(0, self.width - 3), 0)
        self.falling_piece_shape = self.piece_types[np.random.randint(0, len(self.piece_types))]
        self.subframe = 0
        self.time = 0
        state = np.array([[
            self.board,
            self._piece_layer(),
            np.zeros((self.height, self.width)),
            np.zeros((self.height, self.width))
        ]])
        return state
    def resolve_lines(self):
        """Remove every completed row; rows above fall one step. Returns the
        number of rows removed."""
        removed = 0
        for i in range(len(self.board)):
            line = self.board[i]
            if all(x == 1 for x in line):
                removed = removed + 1
                # FIX: the old loop `for j in range(i - 1)` stopped one row
                # short (row 1 never received row 0) and never cleared the
                # top row, duplicating it instead.  Shift all rows above row
                # i down by one and blank the new top row.
                self.board[1:i + 1] = self.board[0:i].copy()
                self.board[0] = 0
        return removed
    def apply_garbage(self, n_lines):
        """Insert n_lines garbage rows (one random hole each) at the bottom.

        Returns True when the board is too full to accept the garbage
        (top-out).  FIX: this flag was previously computed and discarded.
        """
        done = False
        if n_lines > 0:
            if np.any(self.board[n_lines - 1]):
                done = True
            else:
                # Shift the whole board up, then fill the freed bottom rows.
                self.board = np.roll(self.board, -n_lines, axis=0)
                for i in range(n_lines):
                    garbage_line = np.ones(self.width)
                    garbage_line[np.random.randint(0, self.width)] = 0
                    self.board[self.height - 1 - i] = garbage_line
        return done
    def step(self, action):
        """Advance one sub-frame; returns (state, reward, done, info)."""
        self.time = self.time + 1
        self.subframe = self.subframe + 1
        done = False
        reward = 0
        lines_cleared = 0
        # --- horizontal movement: blocked by walls and settled blocks.
        if action == Ris.LEFT:
            coll = False
            for i in range(4):
                for j in range(4):
                    if not coll and self.falling_piece_shape[j][i] == 1:
                        pos_left = (i + self.falling_piece_pos[0] - 1, j + self.falling_piece_pos[1])
                        if pos_left[0] < 0 or self.board[pos_left[1]][pos_left[0]] != 0:
                            coll = True
            if not coll:
                self.falling_piece_pos = (self.falling_piece_pos[0] - 1, self.falling_piece_pos[1])
        if action == Ris.RIGHT:
            coll = False
            for i in range(4):
                for j in range(4):
                    if not coll and self.falling_piece_shape[j][i] == 1:
                        pos_left = (i + self.falling_piece_pos[0] + 1, j + self.falling_piece_pos[1])
                        if pos_left[0] >= len(self.board[0]) or self.board[pos_left[1]][pos_left[0]] != 0:
                            coll = True
            if not coll:
                self.falling_piece_pos = (self.falling_piece_pos[0] + 1, self.falling_piece_pos[1])
        if action == Ris.ROT:
            # Rotation is rejected on any overlap or out-of-bounds cell.
            rotated = np.rot90(self.falling_piece_shape)
            coll = False
            for i in range(4):
                for j in range(4):
                    if not coll and rotated[j][i] == 1:
                        pos = (i + self.falling_piece_pos[0], j + self.falling_piece_pos[1])
                        if pos[0] not in range(0, len(self.board[0])) or \
                           pos[1] not in range(0, len(self.board)) or \
                           self.board[pos[1]][pos[0]] != 0:
                            coll = True
            if not coll:
                self.falling_piece_shape = rotated
        # --- gravity tick once per `subframes` sub-frames.
        if self.subframe == self.subframes - 1:
            self.subframe = 0
            coll = False
            for i in range(4):
                for j in range(4):
                    if not coll and self.falling_piece_shape[j][i] == 1:
                        pos_below = (i + self.falling_piece_pos[0], j + self.falling_piece_pos[1] + 1)
                        if pos_below[1] >= len(self.board) or self.board[pos_below[1]][pos_below[0]] != 0:
                            coll = True
            if coll:
                # Lock the piece into the board.
                for i in range(4):
                    for j in range(4):
                        if self.falling_piece_shape[j][i] == 1:
                            pos = (i + self.falling_piece_pos[0], j + self.falling_piece_pos[1])
                            self.board[pos[1]][pos[0]] = 1
                lines_cleared = self.resolve_lines()
                if lines_cleared > 0:
                    reward = (2 + lines_cleared) ** 2
                else:
                    # NOTE(review): apply_garbage now reports top-out, but the
                    # flag is deliberately still ignored here to preserve the
                    # existing episode dynamics — TODO confirm intent.
                    self.apply_garbage(self.incoming_garbage)
                    self.incoming_garbage = 0
                if self.falling_piece_pos[1] == 0:
                    # Piece locked in the spawn row: game over.
                    done = True
                    reward = -10
                self.falling_piece_pos = (np.random.randint(0, self.width - 3), 0)
                self.falling_piece_shape = np.rot90(self.piece_types[np.random.randint(0, len(self.piece_types))], k=np.random.randint(0, 4))
            else:
                self.falling_piece_pos = (self.falling_piece_pos[0], self.falling_piece_pos[1] + 1)
        # --- observation layers.
        timing_layer = np.zeros((self.height, self.width))
        if self.subframe == self.subframes - 2:
            # Flags the sub-frame right before the next gravity tick.
            timing_layer = np.ones((self.height, self.width))
        garbage_layer = np.zeros((self.height, self.width))
        for i in range(self.incoming_garbage):
            garbage_layer[self.height - 1 - i] = np.ones(self.width)
        state = np.array([[
            self.board,
            self._piece_layer(),
            garbage_layer,
            timing_layer
        ]])
        if self.time > self.cutoff:
            done = True
        return state, reward, done, { 'lines_cleared' : lines_cleared }
    def step_new(self, action):
        """Partially vectorised variant of step(); same contract."""
        self.time = self.time + 1
        self.subframe = self.subframe + 1
        done = False
        reward = 0
        lines_cleared = 0
        # Stamp the falling piece into an int layer and record its bounding box.
        dynamic_layer = np.array(np.zeros((self.height, self.width)), dtype=int)
        lbound, rbound, ubound, dbound = (self.width, 0, self.height, 0)
        for c in range(4):
            for r in range(4):
                if self.falling_piece_shape[r][c] == 1:
                    pos = (c + self.falling_piece_pos[0], r + self.falling_piece_pos[1])
                    dynamic_layer[pos[1]][pos[0]] = 1
                    if pos[0] < lbound: lbound = pos[0]
                    if pos[0] > rbound: rbound = pos[0]
                    if pos[1] < ubound: ubound = pos[1]
                    if pos[1] > dbound: dbound = pos[1]
        if action == Ris.LEFT:
            if lbound > 0:
                # FIX: roll along the row axis (axis=1); the default rolled
                # the flattened array and wrapped cells across row boundaries.
                preview_layer = np.roll(dynamic_layer, -1, axis=1)
                # FIX: a move collides when ANY cell is occupied in both
                # layers; the old `all(xor)` test was effectively always
                # false and let the piece move through settled blocks.
                if not np.any(np.bitwise_and(self.board, preview_layer)):
                    self.falling_piece_pos = (self.falling_piece_pos[0] - 1, self.falling_piece_pos[1])
        if action == Ris.RIGHT:
            if rbound < self.width - 1:
                preview_layer = np.roll(dynamic_layer, 1, axis=1)
                if not np.any(np.bitwise_and(self.board, preview_layer)):
                    self.falling_piece_pos = (self.falling_piece_pos[0] + 1, self.falling_piece_pos[1])
        if action == Ris.ROT:
            rotated = np.rot90(self.falling_piece_shape)
            coll = False
            for i in range(4):
                for j in range(4):
                    if not coll and rotated[j][i] == 1:
                        pos = (i + self.falling_piece_pos[0], j + self.falling_piece_pos[1])
                        if pos[0] not in range(0, len(self.board[0])) or \
                           pos[1] not in range(0, len(self.board)) or \
                           self.board[pos[1]][pos[0]] != 0:
                            coll = True
            if not coll:
                self.falling_piece_shape = rotated
        if self.subframe == self.subframes - 1:
            self.subframe = 0
            coll = False
            for i in range(4):
                for j in range(4):
                    if not coll and self.falling_piece_shape[j][i] == 1:
                        pos_below = (i + self.falling_piece_pos[0], j + self.falling_piece_pos[1] + 1)
                        if pos_below[1] >= len(self.board) or self.board[pos_below[1]][pos_below[0]] != 0:
                            coll = True
            if coll:
                for i in range(4):
                    for j in range(4):
                        if self.falling_piece_shape[j][i] == 1:
                            pos = (i + self.falling_piece_pos[0], j + self.falling_piece_pos[1])
                            self.board[pos[1]][pos[0]] = 1
                lines_cleared = self.resolve_lines()
                if lines_cleared > 0:
                    reward = (2 + lines_cleared) ** 2
                else:
                    self.apply_garbage(self.incoming_garbage)
                    self.incoming_garbage = 0
                if self.falling_piece_pos[1] == 0:
                    done = True
                    reward = -10
                self.falling_piece_pos = (np.random.randint(0, self.width - 3), 0)
                self.falling_piece_shape = np.rot90(self.piece_types[np.random.randint(0, len(self.piece_types))], k=np.random.randint(0, 4))
            else:
                self.falling_piece_pos = (self.falling_piece_pos[0], self.falling_piece_pos[1] + 1)
        timing_layer = np.zeros((self.height, self.width))
        if self.subframe == self.subframes - 2:
            timing_layer = np.ones((self.height, self.width))
        garbage_layer = np.zeros((self.height, self.width))
        for i in range(self.incoming_garbage):
            garbage_layer[self.height - 1 - i] = np.ones(self.width)
        state = np.array([[
            self.board,
            self._piece_layer(),
            garbage_layer,
            timing_layer
        ]])
        if self.time > self.cutoff:
            done = True
        return state, reward, done, { 'lines_cleared' : lines_cleared }
    def draw(self, screen, heatmap=None):
        """Render settled cells (green) and the falling piece onto *screen*.
        `heatmap` is currently unused — presumably reserved for debugging."""
        for i in range(len(self.board[0])):
            for j in range(len(self.board)):
                cell = pg.Rect(self.cell_size * i, self.cell_size * j, self.cell_size, self.cell_size)
                if self.board[j][i] == 1:
                    pg.draw.rect(screen, (0, 100, 0), cell)
                    pg.draw.rect(screen, (0, 90, 0), cell, 1)
                else:
                    pg.draw.rect(screen, (64, 64, 64), cell)
                    pg.draw.rect(screen, (58, 58, 58), cell, 1)
        # Draw the falling piece in a brighter green.
        for i in range(4):
            for j in range(4):
                cell = pg.Rect(self.cell_size * (i + self.falling_piece_pos[0]), self.cell_size * (j + self.falling_piece_pos[1]), self.cell_size, self.cell_size)
                if self.falling_piece_shape[j][i] == 1:
                    pg.draw.rect(screen, (0, 120, 0), cell)
                    pg.draw.rect(screen, (0, 110, 0), cell, 1)
    def render(self, mode=''):
        # NOTE(review): self.screen is None after __init__ and must be set
        # to a pygame surface by the caller before render() — confirm.
        self.draw(self.screen)
        pg.display.flip()
    def play(self, framerate=30):
        """Interactive keyboard loop (arrow keys); resets on game over."""
        clock = pg.time.Clock()
        while True:
            self.render()
            action = Ris.NOP
            events = pg.event.get()
            for event in events:
                if event.type == pg.KEYDOWN:
                    if event.key == pg.K_LEFT:
                        action = Ris.LEFT
                    if event.key == pg.K_RIGHT:
                        action = Ris.RIGHT
                    if event.key == pg.K_UP:
                        action = Ris.ROT
            state, reward, done, _ = self.step(action)
            if done: self.reset()
            clock.tick(int(framerate))
    # 4x4 shape grids per named piece set ('lettris': single domino;
    # 'koktris': the seven classic tetrominoes).
    piece_sets = {
        'lettris' : [
            [[0,0,0,0],
             [0,0,0,0],
             [0,1,1,0],
             [0,0,0,0]]
        ],
        'koktris' : [
            [[0,0,0,0],
             [0,0,1,0],
             [1,1,1,0],
             [0,0,0,0]],
            [[0,0,0,0],
             [0,1,0,0],
             [0,1,1,1],
             [0,0,0,0]],
            [[0,0,0,0],
             [0,1,1,0],
             [0,1,1,0],
             [0,0,0,0]],
            [[0,0,0,0],
             [0,1,0,0],
             [1,1,1,0],
             [0,0,0,0]],
            [[0,0,0,0],
             [1,1,0,0],
             [0,1,1,0],
             [0,0,0,0]],
            [[0,0,0,0],
             [0,0,1,1],
             [0,1,1,0],
             [0,0,0,0]],
            [[0,0,0,0],
             [0,0,0,0],
             [1,1,1,1],
             [0,0,0,0]],
        ]
    }
if __name__ == "__main__":
cell_size = 24
env = Ris(cell_size, 10, 20, piece_set='koktris')
screen | |
'a2', mingot],
['lista', 'noun', 'a2', mingot],
['literario', 'adjective', 'a2', mingot],
['localidad', 'noun', 'a2', mingot],
['lote', 'noun', 'a2', mingot],
['llegada', 'noun', 'a2', mingot],
['llorar', 'verb', 'a2', mingot],
['lluvia', 'noun', 'a2', mingot],
['madrina', 'noun', 'a2', mingot],
['maravilloso', 'adjective', 'a2', mingot],
['marcha', 'noun', 'a2', mingot],
['media', 'noun', 'a2', mingot],
['mejorar', 'verb', 'a2', mingot],
['memoria', 'noun', 'a2', mingot],
['mencionar', 'verb', 'a2', mingot],
['merecer', 'verb', 'a2', mingot],
['mérito', 'noun', 'a2', mingot],
['metro', 'noun', 'a2', mingot],
['mesa', 'noun', 'a2', mingot],
['mina', 'noun', 'a2', mingot],
['mínimo', 'adjective', 'a2', mingot],
['mínimo', 'noun', 'a2', mingot],
['minuto', 'noun', 'a2', mingot],
['moderno', 'adjective', 'a2', mingot],
['monte', 'noun', 'a2', mingot],
['mostrar', 'verb', 'a2', mingot],
['mueble', 'adjective', 'a2', mingot, '?'],
['mueble', 'noun', 'a2', mingot],
['municipal', 'adjective', 'a2', mingot],
['negar', 'verb', 'a2', mingot],
['normal', 'adjective', 'a2', mingot],
['notar', 'verb', 'a2', mingot],
['novedad', 'noun', 'a2', mingot],
['observar', 'verb', 'a2', mingot],
['ordenar', 'verb', 'a2', mingot],
['página', 'noun', 'a2', mingot],
['pago', 'noun', 'a2', mingot],
['pan', 'noun', 'a2', mingot],
['párroco', 'noun', 'a2', mingot],
['pensamiento', 'noun', 'a2', mingot],
['periodista', 'noun', 'a2', mingot],
['periodo', 'noun', 'a2', mingot],
['permanecer', 'verb', 'a2', mingot],
['personalidad', 'noun', 'a2', mingot],
['pertenecer', 'verb', 'a2', mingot],
['peso', 'noun', 'a2', mingot],
['piedra', 'noun', 'a2', mingot],
['piso', 'noun', 'a2', mingot],
['planta', 'noun', 'a2', mingot],
['pleno', 'adjective', 'a2', mingot],
['pleno', 'noun', 'a2', mingot],
['población', 'noun', 'a2', mingot],
['posibilidad', 'noun', 'a2', mingot],
['potencia', 'noun', 'a2', mingot],
['precioso', 'adjective', 'a2', mingot],
['precisamente', 'adverb', 'a2', mingot],
['presentación', 'noun', 'a2', mingot],
['preocupación', 'noun', 'a2', mingot],
['procurar', 'verb', 'a2', mingot],
['producto', 'noun', 'a2', mingot],
['profesión', 'noun', 'a2', mingot],
['profundo', 'adjective', 'a2', mingot],
['prometer', 'verb', 'a2', mingot],
['propaganda', 'noun', 'a2', mingot],
['propiedad', 'noun', 'a2', mingot],
['proponer', 'verb', 'a2', mingot],
['proporcionar', 'verb', 'a2', mingot],
['propósito', 'noun', 'a2', mingot],
['propuesta', 'noun', 'a2', mingot],
['puerto', 'noun', 'a2', mingot],
['puesto', 'noun', 'a2', mingot],
['puro', 'adjective', 'a2', mingot],
['puro', 'noun', 'a2', mingot, '?'],
['quince', 'adjective', 'a2', mingot],
['quince', 'noun', 'a2', mingot],
['rato', 'noun', 'a2', mingot],
['realización', 'noun', 'a2', mingot],
['recomendar', 'verb', 'a2', mingot],
['rechazar', 'verb', 'a2', mingot],
['resto', 'noun', 'a2', mingot],
['retirar', 'verb', 'a2', mingot],
['rogar', 'verb', 'a2', mingot],
['rojo', 'adjective', 'a2', mingot],
['soia', 'noun', 'a2', mingot, '?'],
['sanitario', 'adjective', 'a2', mingot],
['sanitario', 'noun', 'a2', mingot],
['sector', 'noun', 'a2', mingot],
['sede', 'noun', 'a2', mingot],
['seguidamente', 'adverb', 'a2', mingot],
['servidor', 'noun', 'a2', mingot, '?'],
['significar', 'verb', 'a2', mingot],
['simpático', 'adjective', 'a2', mingot],
['sitio', 'noun', 'a2', mingot],
['sobretodo', 'noun', 'a2', mingot],
['solución', 'noun', 'a2', mingot],
['someter', 'verb', 'a2', mingot],
['sorprender', 'verb', 'a2', mingot],
['suceder', 'verb', 'a2', mingot],
['sueño', 'noun', 'a2', mingot],
['surgir', 'verb', 'a2', mingot],
['taller', 'noun', 'a2', mingot],
['tardar', 'verb', 'a2', mingot],
['tarea', 'noun', 'a2', mingot],
['técnica', 'noun', 'a2', mingot],
['telegrama', 'noun', 'a2', mingot],
['templo', 'noun', 'a2', mingot],
['temporada', 'noun', 'a2', mingot],
['tensión', 'noun', 'a2', mingot],
['terreno', 'noun', 'a2', mingot],
['tipo', 'noun', 'a2', mingot],
['torero', 'noun', 'a2', mingot],
['traje', 'noun', 'a2', mingot],
['transcurrir', 'verb', 'a2', mingot],
['transporte', 'noun', 'a2', mingot],
['tras', 'preposition', 'a2', mingot],
['trasladar', 'verb', 'a2', mingot],
['traslado', 'noun', 'a2', mingot],
['tratado', 'adjective', 'a2', mingot],
['tratado', 'noun', 'a2', mingot],
['triunfo', 'noun', 'a2', mingot],
['tropa', 'noun', 'a2', mingot],
['urgente', 'adjective', 'a2', mingot],
['usar', 'verb', 'a2', mingot],
['utilizar', 'verb', 'a2', mingot],
['veinte', 'adjective', 'a2', mingot],
['veinte', 'noun', 'a2', mingot],
['vencer', 'verb', 'a2', mingot],
['vestido', 'adjective', 'a2', mingot],
['vestido', 'noun', 'a2', mingot],
['viajar', 'verb', 'a2', mingot],
['victoria', 'noun', 'a2', mingot],
['vuelta', 'noun', 'a2', mingot],
# sexto nivel
['abajo', 'adverb', 'a2', mingot],
['abogado', 'noun', 'a2', mingot],
['abogado', 'adjective', 'a2', mingot, '?'],
['acaso', 'adverb', 'a2', mingot],
['acertar', 'verb', 'a2', mingot],
['acoger', 'verb', 'a2', mingot],
['acompañado', 'adjective', 'a2', mingot],
['actualidad', 'noun', 'a2', mingot],
['actualmente', 'adverb', 'a2', mingot],
['acusar', 'verb', 'a2', mingot],
['adecuado', 'adjective', 'a2', mingot],
['adelantar', 'verb', 'a2', mingot],
['adquisición', 'noun', 'a2', mingot],
['afán', 'noun', 'a2', mingot],
['agradecimiento', 'noun', 'a2', mingot],
['ah', 'exclamation', 'a2', mingot],
['alegre', 'adjective', 'a2', mingot],
['alejar', 'verb', 'a2', mingot],
['alumno', 'noun', 'a2', mingot],
['amado', 'adjective', 'a2', mingot],
['amado', 'noun', 'a2', mingot],
['ambiente', 'noun', 'a2', mingot],
['amenazar', 'verb', 'a2', mingot],
['amistad', 'noun', 'a2', mingot],
['ampliar', 'verb', 'a2', mingot],
['anciano', 'adjective', 'a2', mingot],
['anciano', 'noun', 'a2', mingot],
['ángel', 'noun', 'a2', mingot],
['animal', 'adjective', 'a2', mingot],
['animal', 'noun', 'a2', mingot],
['ánimo', 'noun', 'a2', mingot],
['ánimo', 'exclamation', 'a2', mingot, '?'],
['anteriormente', 'adverb', 'a2', mingot],
['aparato', 'noun', 'a2', mingot],
['apartado', 'adjective', 'a2', mingot],
['apartado', 'noun', 'a2', mingot],
['apóstol', 'noun', 'a2', mingot],
['apoyo', 'noun', 'a2', mingot],
['apreciar', 'verb', 'a2', mingot],
['aprender', 'verb', 'a2', mingot],
['artista', 'noun', 'a2', mingot],
['arriba', 'adverb', 'a2', mingot],
['arriba', 'exclamation', 'a2', mingot],
['ateneo', 'noun', 'a2', mingot],
['aumento', 'noun', 'a2', mingot],
['automóvil', 'noun', 'a2', mingot],
['auxiliar', 'adjective', 'a2', mingot],
['auxiliar', 'noun', 'a2', mingot],
['auxiliar', 'verb', 'a2', mingot],
['avanzar', 'verb', 'a2', mingot],
['aventura', 'noun', 'a2', mingot],
['aviso', 'noun', 'a2', mingot],
['azul', 'adjective', 'a2', mingot],
['azul', 'noun', 'a2', mingot],
['balcón', 'noun', 'a2', mingot],
['bandera', 'noun', 'a2', mingot],
['barco', 'noun', 'a2', mingot],
['bendecir', 'verb', 'a2', mingot],
['bienestar', 'noun', 'a2', mingot],
['caber', 'verb', 'a2', mingot],
['calcular', 'verb', 'a2', mingot],
['calor', 'noun', 'a2', mingot],
['capaz', 'adjective', 'a2', mingot],
['capilla', 'noun', 'a2', mingot],
['carbón', 'noun', 'a2', mingot],
['catedral', 'noun', 'a2', mingot],
['celestial', 'adjective', 'a2', mingot],
['cerrar', 'verb', 'a2', mingot],
['ciclo', 'noun', 'a2', mingot],
['círculo', 'noun', 'a2', mingot],
['clásico', 'adjective', 'a2', mingot],
['cobrar', 'verb', 'a2', mingot],
['colaboración', 'noun', 'a2', mingot],
['competencia', 'noun', 'a2', mingot],
['complacer', 'verb', 'a2', mingot],
['completamente', 'adverb', 'a2', mingot],
['comunicación', 'noun', 'a2', mingot],
['confirmar', 'verb', 'a2', mingot],
['conforme', 'adjective', 'a2', mingot],
['consistir', 'verb', 'a2', mingot],
['constante', 'adjective', 'a2', mingot],
['constar', 'verb', 'a2', mingot],
['consuelo', 'noun', 'a2', mingot],
['consultar', 'verb', 'a2', mingot],
['convencer', 'verb', 'a2', mingot],
['convento', 'noun', 'a2', mingot],
['conversación', 'noun', 'a2', mingot],
['copia', 'noun', 'a2', mingot],
['corona', 'noun', 'a2', mingot],
['correspondencia', 'noun', 'a2', mingot],
['cortar', 'verb', 'a2', mingot],
['corto', 'adjective', 'a2', mingot],
['corto', 'noun', 'a2', mingot, '?'],
['costar', 'verb', 'a2', mingot],
['crónica', 'noun', 'a2', mingot],
['dato', 'noun', 'a2', mingot],
['debido', 'noun', 'a2', mingot],
['decisión', 'noun', 'a2', mingot],
['definitivo', 'adjective', 'a2', mingot],
['delante', 'adverb', 'a2', mingot],
['depender', 'verb', 'a2', mingot],
['deporte', 'noun', 'a2', mingot],
['descansar', 'verb', 'a2', mingot],
['descanso', 'noun', 'a2', mingot],
['desconocido', 'adjective', 'a2', mingot],
['desconocido', 'noun', 'a2', mingot],
['describir', 'verb', 'a2', mingot],
['descubrir', 'verb', 'a2', mingot],
['desgracia', 'noun', 'a2', mingot],
['despacho', 'noun', 'a2', mingot],
['detrás', 'adverb', 'a2', mingot],
['dibujo', 'noun', 'a2', mingot],
['dictar', 'verb', 'a2', mingot],
['dicha', 'noun', 'a2', mingot],
['diferencia', 'noun', 'a2', mingot],
['digno', 'adjective', 'a2', mingot],
['directamente', 'adverb', 'a2', mingot],
['disgusto', 'noun', 'a2', mingot],
['dispuesto', 'adjective', 'a2', mingot],
['divertir', 'verb', 'a2', mingot],
['dividir', 'verb', 'a2', mingot],
['dudar', 'verb', 'a2', mingot],
['dulce', 'adjective', 'a2', mingot],
['dulce', 'noun', 'a2', mingot],
['durar', 'verb', 'a2', mingot],
['economía', 'noun', 'a2', mingot],
['eficaz', 'noun', 'a2', mingot],
['ejecutar', 'verb', 'a2', mingot],
['ejemplar', 'adjective', 'a2', mingot],
['ejemplar', 'noun', 'a2', mingot],
['emprender', 'verb', 'a2', mingot],
['encargo', 'noun', 'a2', mingot],
['encima', 'adverb', 'a2', mingot],
['enseguida', 'adverb', 'a2', mingot],
['escolar', 'adjective', 'a2', mingot],
['escolar', 'noun', 'a2', mingot],
['espléndido', 'adjective', 'a2', mingot],
['estación', 'noun', 'a2', mingot],
['estrecho', 'adjective', 'a2', mingot],
['estrecho', 'noun', 'a2', mingot],
['evitar', 'verb', 'a2', mingot],
['ex', 'adjective', 'a2', mingot],
['ex', 'noun', 'a2', mingot],
['exacto', 'adjective', 'a2', mingot],
['extensión', 'noun', 'a2', mingot],
['factor', 'noun', 'a2', mingot],
['fallecer', 'verb', 'a2', mingot],
['favorable', 'adjective', 'a2', mingot],
['filosofía', 'noun', 'a2', mingot],
['fórmula', 'noun', 'a2', mingot],
['frecuencia', 'noun', 'a2', mingot],
['fundar', 'verb', 'a2', mingot],
['género', 'noun', 'a2', mingot],
['gratitud', 'noun', 'a2', mingot],
['grato', 'adjective', 'a2', mingot],
['herida', 'noun', 'a2', mingot],
['hierro', 'noun', 'a2', mingot],
['hombre', 'noun', 'a2', mingot],
['hospital', 'noun', 'a2', mingot],
['humilde', 'adjective', 'a2', mingot],
['ignorar', 'verb', 'a2', mingot],
['ilustre', 'adjective', 'a2', mingot],
['imagen', 'noun', 'a2', mingot],
['imaginar', 'verb', 'a2', mingot],
['impedir', 'verb', 'a2', mingot],
['impresión', 'noun', 'a2', mingot],
['incluir', 'verb', 'a2', mingot],
['influencia', 'noun', 'a2', mingot],
['ingeniero', 'noun', 'a2', mingot],
['iniciar', 'verb', 'a2', mingot],
['inmenso', 'adjective', 'a2', mingot],
['insistir', 'verb', 'a2', mingot],
['instalar', 'verb', 'a2', mingot],
['instante', 'noun', 'a2', mingot],
['inscribir', 'verb', 'a2', mingot],
['intención', 'noun', 'a2', mingot],
['intentar', 'verb', 'a2', mingot],
['interpretación', 'noun', 'a2', mingot],
['invitar', 'verb', 'a2', mingot],
['izquierda', 'noun', 'a2', mingot],
['jardín', 'noun', 'a2', mingot],
['jerarquía', 'noun', 'a2', mingot],
['jornada', 'noun', 'a2', mingot],
['juego', 'noun', 'a2', mingot],
['juez', 'noun', 'a2', mingot],
['juicio', 'noun', 'a2', mingot],
['justo', 'adjective', 'a2', mingot],
['justo', 'adverb', 'a2', mingot],
['juzgar', 'verb', 'a2', mingot],
['lector', 'noun', 'a2', mingot],
['lectura', 'noun', 'a2', mingot],
['leche', 'noun', 'a2', mingot],
['lengua', 'noun', 'a2', mingot],
['licencia', 'noun', 'a2', mingot],
['luchar', 'verb', 'a2', mingot],
['madrileño', 'adjective', 'a2', mingot],
['madrileño', 'noun', 'a2', mingot],
['mando', 'noun', 'a2', mingot],
['marcar', 'verb', 'a2', mingot],
['matrimonio', 'noun', 'a2', mingot],
['mayoría', 'noun', 'a2', mingot],
['meter', 'verb', 'a2', mingot],
['miedo', 'noun', 'a2', mingot],
['misionero', 'adjective', 'a2', mingot],
['misionero', 'noun', 'a2', mingot],
['misterio', 'noun', 'a2', mingot],
['modelo', 'noun', 'a2', mingot],
['modificación', 'noun', 'a2', mingot],
['molestar', 'verb', 'a2', mingot],
['molestia', 'noun', 'a2', mingot],
['monasterio', 'noun', 'a2', mingot],
['monja', 'noun', 'a2', mingot],
['montar', 'verb', 'a2', mingot],
['muestra', 'noun', 'a2', mingot],
['multitud', 'noun', 'a2', mingot],
['navidad', 'noun', 'a2', mingot],
['negocio', 'noun', 'a2', mingot],
['notable', 'adjective', 'a2', mingot],
['notable', 'noun', 'a2', mingot, '?'],
['objetivo', 'adjective', 'a2', mingot, '?'],
['objetivo', 'noun', 'a2', mingot],
['ocupación', 'noun', 'a2', mingot],
['olvido', 'noun', 'a2', mingot],
['opinión', 'noun', 'a2', mingot],
['oportunidad', 'noun', 'a2', mingot],
['ordinario', 'adjective', 'a2', mingot],
['orientar', 'verb', 'a2', mingot],
['otoño', 'noun', 'a2', mingot],
['par', 'adjective', 'a2', mingot],
['par', 'noun', 'a2', mingot],
['parar', 'verb', 'a2', mingot],
['participar', 'verb', 'a2', mingot],
['paseo', 'noun', 'a2', mingot],
['pescado', 'noun', 'a2', mingot],
['peligroso', 'adjective', 'a2', mingot],
['pelo', 'noun', 'a2', mingot],
['pérdida', 'noun', 'a2', mingot],
['peregrinación', 'noun', 'a2', mingot],
['perfectamente', 'adverb', 'a2', mingot],
['permiso', 'noun', 'a2', mingot],
['plano', 'adjective', 'a2', mingot],
['plano', 'noun', 'a2', mingot],
['plata', 'noun', 'a2', mingot],
['poderoso', 'adjective', 'a2', mingot],
['posición', 'noun', 'a2', mingot],
['práctico', 'adjective', 'a2', mingot],
['precisar', 'verb', 'a2', mingot],
['preocupar', 'verb', 'a2', mingot],
['presencia', 'noun', 'a2', mingot],
['presupuesto', 'noun', 'a2', mingot],
['princesa', 'noun', 'a2', mingot],
['principalmente', 'adverb', 'a2', mingot],
['probar', 'verb', 'a2', mingot],
['proseguir', 'verb', 'a2', mingot],
['proteger', 'verb', 'a2', mingot],
['proyectar', 'verb', 'a2', mingot],
['puesto que', 'conjunction', 'a2', mingot],
['puntuación', 'noun', 'a2', mingot],
['quinta', 'noun', 'a2', mingot],
['quinto', 'adjective', 'a2', mingot],
['rama', 'noun', 'a2', mingot],
['ramo', 'noun', 'a2', mingot],
['rápido', 'adjective', 'a2', mingot],
['realmente', 'adverb', 'a2', mingot],
['rebelde', 'adjective', 'a2', mingot],
['reciente', 'adjective', 'a2', mingot],
['reclamar', 'verb', 'a2', mingot],
['recto', 'adjective', 'a2', mingot],
['recto', 'noun', 'a2', mingot, '?'],
['redactar', 'verb', 'a2', mingot],
['referencia', 'noun', 'a2', mingot],
['regalo', 'noun', 'a2', mingot],
['regir', 'verb', 'a2', mingot],
['relacionar', 'verb', 'a2', mingot],
['relativo', 'adjective', 'a2', mingot],
['remedio', 'noun', 'a2', mingot],
['remitir', 'verb', 'a2', mingot],
['rendir', 'verb', 'a2', mingot],
['reservar', 'verb', 'a2', mingot],
['resolución', 'noun', 'a2', mingot],
['respectivo', 'adjective', 'a2', mingot],
['respecto a', 'preposition', 'a2', mingot],
['respecto de', 'preposition', 'a2', mingot],
['responsabilidad', 'noun', 'a2', mingot],
['revista', 'noun', 'a2', mingot],
['revolución', 'noun', 'a2', mingot],
['rezar', 'verb', 'a2', mingot],
['robo', 'noun', 'a2', mingot],
['rodear', 'verb', 'a2', mingot],
['romano', 'adjective', 'a2', mingot],
['romano', | |
<filename>Protheus_WebApp/Modules/SIGAFIN/FINA080TESTCASE.py<gh_stars>10-100
import unittest
import time
from tir import Webapp
from datetime import datetime
# Current system date formatted as dd/mm/yyyy — used as the base date handed to oHelper.Setup().
DateSystem = datetime.today().strftime('%d/%m/%Y')
class FINA080(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup("SIGAFIN", DateSystem, "T1", "D MG 01 ", "06")
inst.oHelper.Program("FINA080")
#{Protheus.doc} test_FINA080_CT110
#Baixa de título Moeda 2, com banco moeda 1 informando cheque
#author <NAME>
#since 16/04/2020
#version 1.0
#See https://jiraproducao.totvs.com.br/secure/Tests.jspa#/project/10231/testCase?folderId=17808
    def test_FINA080_CT110(self):
        """CT110: settle a currency-2 title through a currency-1 bank, entering a cheque.

        First opens the routine parameters (F12) and disables the accounting
        prompts for the whole session, then posts a NORMAL settlement and
        verifies the on-screen converted amounts.
        """
        prefixo = "TIR"
        titulo = "F080CT110"
        parcela = " "
        tipo = "NF "
        banco = "001"
        agencia = "08000"
        conta = "080110"
        cheque = "F080CT110"
        self.oHelper.WaitShow("Baixa de Titulos")
        # Open the routine parameters (F12) and turn off accounting dialogs;
        # later test cases rely on these settings persisting.
        self.oHelper.SetKey("F12")
        self.oHelper.WaitShow("Parametros")
        self.oHelper.SetValue("Mostra Lanc Contab ?" ,"Nao")
        self.oHelper.SetValue("Contabiliza On Line ?" ,"Nao")
        self.oHelper.SetValue("Utiliza Banco Anterior ?" ,"Nao")
        self.oHelper.SetButton("Ok")
        self.oHelper.SearchBrowse(f"D MG 01 {prefixo}{titulo}{parcela}{tipo}")
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "NORMAL")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        self.oHelper.SetValue("Cheque No.", cheque)
        # Verify the contracted rate and the converted totals shown on screen.
        self.oHelper.CheckResult("Taxa contratada", "5,0000")
        self.oHelper.CheckResult("= Valor Pago", "5.000,00")
        self.oHelper.CheckResult("Valor US$", "1.000,00")
        self.oHelper.CheckResult("+ Corr.Monetária", "0,00")
        self.oHelper.SetButton("Salvar")
        self.oHelper.AssertTrue()
#{Protheus.doc} test_FINA080_CT113
#Baixa de título Moeda 2 (com banco moeda 1 cheque cadastrado)
#author <NAME>
#since 16/04/2020
#version 1.0
#See https://jiraproducao.totvs.com.br/secure/Tests.jspa#/project/10231/testCase?folderId=17808
    def test_FINA080_CT113(self):
        """CT113: settle a currency-2 title (currency-1 bank, cheque already registered).

        Unlike CT110 the cheque already exists, so agency/account/cheque come
        pre-filled and are only verified, not typed.
        """
        prefixo = "TIR"
        titulo = "F080CT112"
        parcela = " "
        tipo = "NF "
        banco = "001"
        agencia = "08000"
        conta = "080110"
        cheque = "F080CT112"
        self.oHelper.WaitShow("Baixa de Titulos")
        # Routine parameters already set in CT110 (kept for reference)
        #self.oHelper.SetKey("F12")
        #self.oHelper.WaitShow("Parametros")
        #self.oHelper.SetValue("Mostra Lanc Contab ?" ,"Nao")
        #self.oHelper.SetValue("Contabiliza On Line ?" ,"Nao")
        #self.oHelper.SetValue("Utiliza Banco Anterior ?" ,"Nao")
        #self.oHelper.SetButton("Ok")
        self.oHelper.SearchBrowse(f"D MG 01 {prefixo}{titulo}{parcela}{tipo}")
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "NORMAL")
        self.oHelper.SetValue("Banco", banco)
        # Agency, account and cheque are expected to be auto-filled from the
        # registered cheque, so they are asserted rather than entered.
        self.oHelper.CheckResult("Agência", agencia)
        self.oHelper.CheckResult("Conta", conta)
        self.oHelper.CheckResult("Cheque No.", cheque)
        self.oHelper.CheckResult("Taxa contratada", "5,0000")
        self.oHelper.CheckResult("= Valor Pago", "10.000,00")
        self.oHelper.CheckResult("Valor US$", "2.000,00")
        self.oHelper.CheckResult("+ Corr.Monetária", "0,00")
        self.oHelper.SetButton("Salvar")
        self.oHelper.AssertTrue()
#{Protheus.doc} test_FINA080_CT114
#Baixa total de título Moeda 2, com taxa de permanência informada, (banco moeda 1, taxa do dia, – TIR)
#author <NAME>
#since 16/04/2020
#version 1.0
#See https://jiraproducao.totvs.com.br/secure/Tests.jspa#/testCase/GTSER-T50384
    def test_FINA080_CT114(self):
        """CT114: full settlement of a currency-2 title with an entered permanence
        fee (currency-1 bank, day rate).

        Re-opens the environment at a fixed base date (16/04/2020) so the
        day-rate conversion is deterministic, then checks the computed totals.
        """
        prefixo = "TIR"
        titulo = "F080CT114"
        parcela = " "
        tipo = "NF "
        banco = "001"
        agencia = "08000"
        conta = "080110"
        data = "16/04/2020"
        # Default parameterization (kept for reference)
        #self.oHelper.AddParameter("MV_TIPOCM", "", "T", "T", "T")
        #self.oHelper.SetParameters()
        # Close the current program and reopen the environment at the fixed
        # base date so exchange-rate lookups are reproducible.
        time.sleep(5)
        self.oHelper.SetButton("x")
        self.oHelper.ChangeEnvironment("16/04/2020","T1", "D MG 01 ","06")
        self.oHelper.Program("FINA080")
        self.oHelper.WaitShow("Baixa de Titulos")
        # Routine parameters already set in CT110 (kept for reference)
        #self.oHelper.SetKey("F12")
        #self.oHelper.WaitShow("Parametros")
        #self.oHelper.SetValue("Mostra Lanc Contab ?" ,"Nao")
        #self.oHelper.SetValue("Contabiliza On Line ?" ,"Nao")
        #self.oHelper.SetValue("Utiliza Banco Anterior ?" ,"Nao")
        #self.oHelper.SetButton("Ok")
        self.oHelper.SearchBrowse(f"D MG 01 {prefixo}{titulo}{parcela}{tipo}")
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "DEBITO CC")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        self.oHelper.CheckResult("Data Pagto.", data)
        self.oHelper.CheckResult("Data Debito", data)
        # Enter rate and permanence fee manually, then verify derived totals.
        self.oHelper.SetValue("Taxa contratada", "140,0000")
        self.oHelper.SetValue("+ Tx.Permanenc.", "140,00")
        self.oHelper.CheckResult("= Valor Pago", "140.140,00")
        self.oHelper.CheckResult("Valor US$", "1.001,00")
        self.oHelper.CheckResult("+ Corr.Monetária", "1.011,01")
        self.oHelper.SetButton("Salvar")
        self.oHelper.AssertTrue()
#{Protheus.doc} test_FINA080_CT115
#Baixa total de título Moeda 2, com taxa de permanência calculada (banco moeda 1, taxa do Contratada – TIR)
#author <NAME>
#since 16/04/2020
#version 1.0
#See https://jiraproducao.totvs.com.br/secure/Tests.jspa#/testCase/GTSER-T50385
    def test_FINA080_CT115(self):
        """CT115: full settlement of a currency-2 title with the permanence fee
        calculated by the system (currency-1 bank, contracted rate).

        Re-opens the environment at 26/04/2020; only the contracted rate is
        entered and the permanence fee is expected to be derived.
        """
        prefixo = "TIR"
        titulo = "F080CT115"
        parcela = " "
        tipo = "NF "
        banco = "001"
        agencia = "08000"
        conta = "080110"
        self.oHelper.WaitShow("Baixa de Titulos")
        # Default parameterization (kept for reference)
        #self.oHelper.AddParameter("MV_TIPOCM", "", "T", "T", "T")
        #self.oHelper.SetParameters()
        # Reopen the environment at a later base date so the permanence fee accrues.
        time.sleep(5)
        self.oHelper.SetButton("x")
        self.oHelper.ChangeEnvironment("26/04/2020","T1", "D MG 01 ","06")
        self.oHelper.Program("FINA080")
        # Routine parameters already set in CT110 (kept for reference)
        #self.oHelper.SetKey("F12")
        #self.oHelper.WaitShow("Parametros")
        #self.oHelper.SetValue("Mostra Lanc Contab ?" ,"Nao")
        #self.oHelper.SetValue("Contabiliza On Line ?" ,"Nao")
        #self.oHelper.SetValue("Utiliza Banco Anterior ?" ,"Nao")
        #self.oHelper.SetButton("Ok")
        self.oHelper.SearchBrowse(f"D MG 01 {prefixo}{titulo}{parcela}{tipo}")
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "DEBITO CC")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        self.oHelper.SetValue("Taxa contratada", "5,0000")
        # Permanence fee is calculated by the routine, not typed in.
        self.oHelper.CheckResult("+ Tx.Permanenc.", "50,00")
        self.oHelper.CheckResult("= Valor Pago", "10.050,00")
        self.oHelper.CheckResult("Valor US$", "2.010,00")
        self.oHelper.CheckResult("+ Corr.Monetária", "1.005,00")
        self.oHelper.SetButton("Salvar")
        self.oHelper.AssertTrue()
#{Protheus.doc} test_FINA080_CT116
#Baixa Parcial de título Moeda 2 com desconto informado, (banco moeda 1, taxa do dia – TIR)
#author <NAME>
#since 16/04/2020
#version 1.0
#See https://jiraproducao.totvs.com.br/secure/Tests.jspa#/testCase/GTSER-T50386
    def test_FINA080_CT116(self):
        """CT116: partial settlement of a currency-2 title with an entered
        discount (currency-1 bank, day rate).

        Performs two settlements of the same title: a first partial payment,
        then a second one with a manual discount, verifying the carried-over
        partial-payment amount and the resulting totals.
        """
        prefixo = "TIR"
        titulo = "F080CT116"
        parcela = " "
        tipo = "NF "
        banco = "001"
        agencia = "08000"
        conta = "080110"
        # Default parameterization (kept for reference)
        #self.oHelper.AddParameter("MV_TIPOCM", "", "T", "T", "T")
        #self.oHelper.SetParameters()
        # Reopen the environment at the fixed base date (16/04/2020).
        time.sleep(5)
        self.oHelper.SetButton("x")
        self.oHelper.ChangeEnvironment("16/04/2020","T1", "D MG 01 ","06")
        self.oHelper.Program("FINA080")
        self.oHelper.WaitShow("Baixa de Titulos")
        # Routine parameters already set in CT110 (kept for reference)
        #self.oHelper.SetKey("F12")
        #self.oHelper.WaitShow("Parametros")
        #self.oHelper.SetValue("Mostra Lanc Contab ?" ,"Nao")
        #self.oHelper.SetValue("Contabiliza On Line ?" ,"Nao")
        #self.oHelper.SetValue("Utiliza Banco Anterior ?" ,"Nao")
        #self.oHelper.SetButton("Ok")
        self.oHelper.SearchBrowse(f"D MG 01 {prefixo}{titulo}{parcela}{tipo}")
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "DEBITO CC")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        # First (partial) settlement: enter rate and paid amount explicitly.
        self.oHelper.SetValue("Taxa contratada", "140,0000")
        self.oHelper.SetValue("= Valor Pago", "56.000,00")
        self.oHelper.CheckResult("Valor US$", "400,00")
        self.oHelper.CheckResult("+ Corr.Monetária", "404,00")
        self.oHelper.SetButton("Salvar")
        self.oHelper.WaitShow("Baixa de Titulos")
        # Second settlement: different rate plus a manual discount.
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "DEBITO CC")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        self.oHelper.SetValue("Taxa contratada", "135,0000")
        self.oHelper.CheckResult("- Pagtos.Parciais", "400,00")
        self.oHelper.SetValue("- Descontos", "1000,00")
        self.oHelper.CheckResult("= Valor Pago", "80.000,00")
        self.oHelper.CheckResult("Valor US$", "592,59")
        self.oHelper.CheckResult("+ Corr.Monetária", "-2.364,43")
        self.oHelper.SetButton("Salvar")
        self.oHelper.AssertTrue()
#{Protheus.doc} test_FINA080_CT117
#Baixa parcial de título Moeda 2 com acréscimo (banco moeda 1, taxa do Contratada – TIR)
#author <NAME>
#since 16/04/2020
#version 1.0
#See https://jiraproducao.totvs.com.br/secure/Tests.jspa#/testCase/GTSER-T50387
    def test_FINA080_CT117(self):
        """CT117: partial settlement of a currency-2 title with a surcharge
        (currency-1 bank, contracted rate).

        Two settlements of the same title: the first consumes the surcharge,
        the second verifies it is zeroed and the partial payment carried over.
        """
        prefixo = "TIR"
        titulo = "F080CT117"
        parcela = " "
        tipo = "NF "
        banco = "001"
        agencia = "08000"
        conta = "080110"
        self.oHelper.WaitShow("Baixa de Titulos")
        # Default parameterization (kept for reference)
        #self.oHelper.AddParameter("MV_TIPOCM", "", "T", "T", "T")
        #self.oHelper.SetParameters()
        # Routine parameters already set in CT110 (kept for reference)
        #self.oHelper.SetKey("F12")
        #self.oHelper.WaitShow("Parametros")
        #self.oHelper.SetValue("Mostra Lanc Contab ?" ,"Nao")
        #self.oHelper.SetValue("Contabiliza On Line ?" ,"Nao")
        #self.oHelper.SetValue("Utiliza Banco Anterior ?" ,"Nao")
        #self.oHelper.SetButton("Ok")
        self.oHelper.SearchBrowse(f"D MG 01 {prefixo}{titulo}{parcela}{tipo}")
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "DEBITO CC")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        # First (partial) settlement: surcharge shown, paid amount entered.
        self.oHelper.SetValue("Taxa contratada", "5,0000")
        self.oHelper.CheckResult("+ Acrescimo", "100,00")
        self.oHelper.SetValue("= Valor Pago", "2.000,00")
        self.oHelper.CheckResult("Valor US$", "400,00")
        self.oHelper.CheckResult("+ Corr.Monetária", "200,00")
        self.oHelper.SetButton("Salvar")
        self.oHelper.WaitShow("Baixa de Titulos")
        # Second settlement: surcharge already consumed, remainder is paid.
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "DEBITO CC")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        self.oHelper.SetValue("Taxa contratada", "4,8000")
        self.oHelper.CheckResult("+ Acrescimo", "0,00")
        self.oHelper.CheckResult("- Pagtos.Parciais", "300,00")
        self.oHelper.CheckResult("= Valor Pago", "8.160,00")
        self.oHelper.CheckResult("Valor US$", "1700,00")
        self.oHelper.CheckResult("+ Corr.Monetária", "510,00")
        self.oHelper.SetButton("Salvar")
        self.oHelper.AssertTrue()
#{Protheus.doc} test_FINA080_CT120
#Baixa total de título Moeda 2 (banco moeda 2, taxa do dia – TIR)
#author <NAME>
#since 16/04/2020
#version 1.0
#See https://jiraproducao.totvs.com.br/secure/Tests.jspa#/testCase/GTSER-T50467
    def test_FINA080_CT120(self):
        """CT120: full settlement of a currency-2 title through a currency-2 bank
        (day rate).

        With bank and title in the same currency no conversion applies, so the
        paid amount equals the US$ amount and monetary correction is zero.
        """
        prefixo = "TIR"
        titulo = "F080CT120"
        parcela = " "
        tipo = "NF "
        banco = "002"
        agencia = "08000"
        conta = "080110"
        # Base date defined in CT114; reopen the environment at 16/04/2020.
        time.sleep(5)
        self.oHelper.SetButton("x")
        self.oHelper.ChangeEnvironment("16/04/2020","T1", "D MG 01 ","06")
        self.oHelper.Program("FINA080")
        self.oHelper.WaitShow("Baixa de Titulos")
        # Routine parameters already set in CT110 (kept for reference)
        #self.oHelper.SetKey("F12")
        #self.oHelper.WaitShow("Parametros")
        #self.oHelper.SetValue("Mostra Lanc Contab ?" ,"Nao")
        #self.oHelper.SetValue("Contabiliza On Line ?" ,"Nao")
        #self.oHelper.SetValue("Utiliza Banco Anterior ?" ,"Nao")
        #self.oHelper.SetButton("Ok")
        self.oHelper.SearchBrowse(f"D MG 01 {prefixo}{titulo}{parcela}{tipo}")
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "DEBITO CC")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        self.oHelper.CheckResult("= Valor Pago", "1.000,00")
        self.oHelper.CheckResult("Valor US$", "1.000,00")
        self.oHelper.CheckResult("+ Corr.Monetária", "0,00")
        self.oHelper.SetButton("Salvar")
        self.oHelper.AssertTrue()
#{Protheus.doc} test_FINA080_CT121
#Baixa Parcial de título Moeda 2 (banco moeda 2, taxa do Contratada – TIR)
#author <NAME>
#since 16/04/2020
#version 1.0
#See https://jiraproducao.totvs.com.br/secure/Tests.jspa#/testCase/GTSER-T50468
    def test_FINA080_CT121(self):
        """CT121: partial settlement of a currency-2 title through a currency-2
        bank (contracted rate).

        First pays 600,00 partially, then settles the remaining 1.400,00 in a
        second pass; no currency conversion or correction is expected.
        """
        prefixo = "TIR"
        titulo = "F080CT121"
        parcela = " "
        tipo = "NF "
        banco = "002"
        agencia = "08000"
        conta = "080110"
        self.oHelper.WaitShow("Baixa de Titulos")
        # Routine parameters already set in CT110 (kept for reference)
        #self.oHelper.SetKey("F12")
        #self.oHelper.WaitShow("Parametros")
        #self.oHelper.SetValue("Mostra Lanc Contab ?" ,"Nao")
        #self.oHelper.SetValue("Contabiliza On Line ?" ,"Nao")
        #self.oHelper.SetValue("Utiliza Banco Anterior ?" ,"Nao")
        #self.oHelper.SetButton("Ok")
        self.oHelper.SearchBrowse(f"D MG 01 {prefixo}{titulo}{parcela}{tipo}")
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "DEBITO CC")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        # First pass: enter the partial amount explicitly.
        self.oHelper.SetValue("= Valor Pago", "600,00")
        self.oHelper.CheckResult("Valor US$", "600,00")
        self.oHelper.CheckResult("+ Corr.Monetária", "0,00")
        self.oHelper.SetButton("Salvar")
        self.oHelper.WaitShow("Baixa de Titulos")
        # Second pass: the remaining balance is proposed automatically.
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "DEBITO CC")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        self.oHelper.CheckResult("= Valor Pago", "1.400,00")
        self.oHelper.CheckResult("Valor US$", "1.400,00")
        self.oHelper.CheckResult("+ Corr.Monetária", "0,00")
        self.oHelper.SetButton("Salvar")
        self.oHelper.AssertTrue()
#{Protheus.doc} test_FINA080_CT122
#Baixa em lote de título Moeda 2 (banco moeda 2, taxa do dia – TIR)
#author <NAME>
#since 16/04/2020
#version 1.0
#See https://jiraproducao.totvs.com.br/secure/Tests.jspa#/testCase/GTSER-T50469
    def test_FINA080_CT122(self):
        """CT122: batch ("Lote") settlement of two currency-2 titles through a
        currency-2 bank (day rate).

        Opens the batch dialog, filters by bank/account/due-date/nature,
        selects two titles and settles each in sequence, then closes the
        final TOTGERAL help dialog.
        """
        banco = "002"
        agencia = "08000"
        conta = "080110"
        ntitulos = '2'
        lote ='LTC122'
        natureza ='FIN080SIMP'
        data = "16/04/2020"
        titulo1 = "LOTACT122"
        titulo2 = "LOTBCT122"
        # Base date already set in CT114 (kept for reference)
        #time.sleep(5)
        #self.oHelper.SetButton("x")
        #self.oHelper.ChangeEnvironment("16/04/2020","T1", "D MG 01 ","06")
        #self.oHelper.Program("FINA080")
        self.oHelper.SetButton("Outras Ações", "Lote")
        # Batch filter: fields addressed by their HTML name attribute.
        self.oHelper.SetValue("cBanco", banco, name_attr=True)
        self.oHelper.SetValue("cAgencia", agencia, name_attr=True)
        self.oHelper.SetValue("cConta", conta, name_attr=True)
        self.oHelper.SetValue("nNroTit", ntitulos, name_attr=True)
        self.oHelper.SetValue("cLoteFin", lote, name_attr=True)
        self.oHelper.SetValue("DVENCINI", data, name_attr=True)
        self.oHelper.SetValue("DVENCFIM", data, name_attr=True)
        self.oHelper.SetValue("cNatDe", natureza, name_attr=True)
        self.oHelper.SetValue("cNatAte", natureza, name_attr=True)
        self.oHelper.SetButton("Ok")
        self.oHelper.WaitShow("Baixa de Titulos - LOTE")
        self.oHelper.ClickBox("No. Titulo", titulo1)
        self.oHelper.ClickBox("No. Titulo", titulo2)
        self.oHelper.SetButton("Salvar")
        # First selected title.
        self.oHelper.WaitShow("Baixa de Titulos - LOTE")
        self.oHelper.SetValue("Mot.Baixa", "DEBITO CC")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        self.oHelper.CheckResult("= Valor Pago", "1.000,00")
        self.oHelper.CheckResult("Valor US$", "1.000,00")
        self.oHelper.CheckResult("+ Corr.Monetária", "0,00")
        self.oHelper.SetButton("Salvar")
        # Second selected title.
        self.oHelper.WaitShow("Baixa de Titulos - LOTE")
        self.oHelper.SetValue("Mot.Baixa", "DEBITO CC")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        self.oHelper.CheckResult("= Valor Pago", "2.000,00")
        self.oHelper.CheckResult("Valor US$", "2.000,00")
        self.oHelper.CheckResult("+ Corr.Monetária", "0,00")
        self.oHelper.SetButton("Salvar")
        # Batch summary dialog (TOTGERAL) pops up at the end; dismiss it.
        self.oHelper.CheckHelp(text='TOTGERAL',button='Fechar')
        self.oHelper.AssertTrue()
#{Protheus.doc} test_FINA080_CT123
#Baixa lote de título Moeda 2 com multa informada (banco moeda 1, taxa do Contratada – TIR)
#author <NAME>
#since 16/04/2020
#version 1.0
#See https://jiraproducao.totvs.com.br/secure/Tests.jspa#/testCase/GTSER-T50470
    def test_FINA080_CT123(self):
        """CT123: batch settlement of two currency-2 titles with a manually
        entered fine (currency-1 bank, contracted rate).

        Same batch flow as CT122, but each title gets a contracted rate and a
        "+ Multa" (fine) typed in before saving.
        """
        banco = "001"
        agencia = "08000"
        conta = "080110"
        ntitulos = '2'
        lote ='LTC123'
        natureza ='FIN080SIMP'
        data = "16/04/2020"
        titulo1 = "LOTACT123"
        titulo2 = "LOTBCT123"
        # Default parameterization (kept for reference)
        #self.oHelper.AddParameter("MV_TIPOCM", "", "T", "T", "T")
        #self.oHelper.SetParameters()
        self.oHelper.SetButton("Outras Ações", "Lote")
        # Batch filter: fields addressed by their HTML name attribute.
        self.oHelper.SetValue("cBanco", banco, name_attr=True)
        self.oHelper.SetValue("cAgencia", agencia, name_attr=True)
        self.oHelper.SetValue("cConta", conta, name_attr=True)
        self.oHelper.SetValue("nNroTit", ntitulos, name_attr=True)
        self.oHelper.SetValue("cLoteFin", lote, name_attr=True)
        self.oHelper.SetValue("DVENCINI", data, name_attr=True)
        self.oHelper.SetValue("DVENCFIM", data, name_attr=True)
        self.oHelper.SetValue("cNatDe", natureza, name_attr=True)
        self.oHelper.SetValue("cNatAte", natureza, name_attr=True)
        self.oHelper.SetButton("Ok")
        self.oHelper.WaitShow("Baixa de Titulos - LOTE")
        self.oHelper.ClickBox("No. Titulo", titulo1)
        self.oHelper.ClickBox("No. Titulo", titulo2)
        self.oHelper.SetButton("Salvar")
        # First selected title: rate + fine of 20,00.
        self.oHelper.WaitShow("Baixa de Titulos - LOTE")
        self.oHelper.SetValue("Mot.Baixa", "DEBITO CC")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        self.oHelper.SetValue("Data Pagto.", data)
        self.oHelper.SetValue("Taxa contratada", "5,0000")
        self.oHelper.SetValue("+ Multa", "20,00")
        self.oHelper.CheckResult("= Valor Pago", "5.020,00")
        self.oHelper.CheckResult("Valor US$", "1.004,00")
        self.oHelper.CheckResult("+ Corr.Monetária", "502,00")
        self.oHelper.SetButton("Salvar")
        # Second selected title: rate + fine of 30,00.
        self.oHelper.WaitShow("Baixa de Titulos - LOTE")
        self.oHelper.SetValue("Mot.Baixa", "DEBITO CC")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        self.oHelper.SetValue("Data Pagto.", data)
        self.oHelper.SetValue("Taxa contratada", "5,0000")
        self.oHelper.SetValue("+ Multa", "30,00")
        self.oHelper.CheckResult("= Valor Pago", "10.030,00")
        self.oHelper.CheckResult("Valor US$", "2.006,00")
        self.oHelper.CheckResult("+ Corr.Monetária", "0,00")
        self.oHelper.SetButton("Salvar")
        # Batch summary dialog (TOTGERAL) pops up at the end; dismiss it.
        self.oHelper.CheckHelp(text='TOTGERAL',button='Fechar')
        self.oHelper.AssertTrue()
#{Protheus.doc} test_FINA080_CT124
#Baixa total de título (Sem impostos, com acréscimo, valores acessórios - Moeda 1 – TIR)
#author <NAME>
#since 16/04/2020
#version 1.0
#See https://jiraproducao.totvs.com.br/secure/Tests.jspa#/testCase/GTSER-T50488
    def test_FINA080_CT124(self):
        """CT124: full settlement of a currency-1 title with no taxes, with a
        surcharge and accessory values.

        Verifies that surcharge (100,00) and accessory values (35,00) are added
        to the proposed paid total (10.135,00) before saving.
        """
        prefixo = "TIR"
        titulo = "F080CT124"
        parcela = " "
        tipo = "NF "
        banco = "001"
        agencia = "08000"
        conta = "080110"
        self.oHelper.WaitShow("Baixa de Titulos")
        # Routine parameters already set in CT110 (kept for reference)
        #self.oHelper.SetKey("F12")
        #self.oHelper.WaitShow("Parametros")
        #self.oHelper.SetValue("Mostra Lanc Contab ?" ,"Nao")
        #self.oHelper.SetValue("Contabiliza On Line ?" ,"Nao")
        #self.oHelper.SetValue("Utiliza Banco Anterior ?" ,"Nao")
        #self.oHelper.SetButton("Ok")
        self.oHelper.SearchBrowse(f"D MG 01 {prefixo}{titulo}{parcela}{tipo}")
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "NORMAL")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        self.oHelper.CheckResult("+ Acrescimo", "100,00")
        self.oHelper.CheckResult("+ Valores Acessórios", "35,00")
        self.oHelper.CheckResult("= Valor Pago", "10.135,00")
        self.oHelper.SetButton("Salvar")
        self.oHelper.AssertTrue()
#{Protheus.doc} test_FINA080_CT125
#Baixa parcial de título (Sem impostos, com acréscimo, valores acessórios - Moeda 1 – TIR)
#author <NAME>
#since 16/04/2020
#version 1.0
#See https://jiraproducao.totvs.com.br/secure/Tests.jspa#/testCase/GTSER-T50489
    def test_FINA080_CT125(self):
        """CT125: partial settlement of a currency-1 title with no taxes, with a
        surcharge and accessory values.

        First pays 600,00 partially (surcharge 90,00 + accessory 15,00 shown),
        then settles the remainder verifying that both extras were consumed and
        the partial payment of 495,00 is carried over.
        """
        prefixo = "TIR"
        titulo = "F080CT125"
        parcela = " "
        tipo = "NF "
        banco = "001"
        agencia = "08000"
        conta = "080110"
        self.oHelper.WaitShow("Baixa de Titulos")
        # Routine parameters already set in CT110 (kept for reference)
        #self.oHelper.SetKey("F12")
        #self.oHelper.WaitShow("Parametros")
        #self.oHelper.SetValue("Mostra Lanc Contab ?" ,"Nao")
        #self.oHelper.SetValue("Contabiliza On Line ?" ,"Nao")
        #self.oHelper.SetValue("Utiliza Banco Anterior ?" ,"Nao")
        #self.oHelper.SetButton("Ok")
        self.oHelper.SearchBrowse(f"D MG 01 {prefixo}{titulo}{parcela}{tipo}")
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "DEBITO CC")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        # First (partial) settlement: extras shown, paid amount typed in.
        self.oHelper.CheckResult("+ Acrescimo", "90,00")
        self.oHelper.CheckResult("+ Valores Acessórios", "15,00")
        self.oHelper.SetValue("= Valor Pago", "600,00")
        self.oHelper.SetButton("Salvar")
        self.oHelper.WaitShow("Baixa de Titulos")
        # Second settlement: extras already consumed in the first pass.
        self.oHelper.SearchBrowse(f"D MG 01 {prefixo}{titulo}{parcela}{tipo}")
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "DEBITO CC")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        self.oHelper.CheckResult("- Pagtos.Parciais", "495,00")
        self.oHelper.CheckResult("+ Acrescimo", "0,00")
        self.oHelper.CheckResult("+ Valores Acessórios", "0,00")
        self.oHelper.CheckResult("= Valor Pago", "1.505,00")
        self.oHelper.SetButton("Salvar")
        self.oHelper.AssertTrue()
#{Protheus.doc} test_FINA080_CT126
#Baixa total de título (Com motivo de baixa DACAO – TIR)
#author <NAME>
#since 16/04/2020
#version 1.0
#See https://jiraproducao.totvs.com.br/secure/Tests.jspa#/testCase/GTSER-T50490
    def test_FINA080_CT126(self):
        """CT126: full settlement of a title with settlement reason DACAO.

        With DACAO no bank data is entered; only the proposed paid total is
        verified before saving.
        """
        prefixo = "TIR"
        titulo = "F080CT126"
        parcela = " "
        tipo = "NF "
        self.oHelper.WaitShow("Baixa de Titulos")
        # Routine parameters already set in CT110 (kept for reference)
        #self.oHelper.SetKey("F12")
        #self.oHelper.WaitShow("Parametros")
        #self.oHelper.SetValue("Mostra Lanc Contab ?" ,"Nao")
        #self.oHelper.SetValue("Contabiliza On Line ?" ,"Nao")
        #self.oHelper.SetValue("Utiliza Banco Anterior ?" ,"Nao")
        #self.oHelper.SetButton("Ok")
        self.oHelper.SearchBrowse(f"D MG 01 {prefixo}{titulo}{parcela}{tipo}")
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "DACAO")
        self.oHelper.CheckResult("= Valor Pago", "1.000,00")
        self.oHelper.SetButton("Salvar")
        self.oHelper.AssertTrue()
#{Protheus.doc} test_FINA080_CT127
#Baixa total de título Moeda 1 (Motivo de baixa VENDOR – TIR)
#author <NAME>
#since 16/04/2020
#version 1.0
#See https://jiraproducao.totvs.com.br/secure/Tests.jspa#/testCase/GTSER-T50491
    def test_FINA080_CT127(self):
        """CT127: full settlement of a currency-1 title with settlement reason
        VENDOR.

        Fills the vendor-contract dialog (contract, bank, generated title data),
        saves the settlement, then opens the generated VEN/DP title in view mode
        and checks its balance (E2_SALDO) and surcharge (E2_ACRESC).
        """
        prefixo = "TIR"
        titulo = "F080CT127"
        parcela = " "
        tipo = "NF "
        banco = "001"
        agencia = "08000"
        Contrato = "000000000000001"
        data = "16/04/2020"
        PrefVen = "VEN"
        tipoven = "DP"
        # Base date already set in CT114 (kept for reference)
        #time.sleep(5)
        #self.oHelper.SetButton("x")
        #self.oHelper.ChangeEnvironment("16/04/2020","T1", "D MG 01 ","06")
        #self.oHelper.Program("FINA080")
        self.oHelper.WaitShow("Baixa de Titulos")
        # Routine parameters already set in CT110 (kept for reference)
        #self.oHelper.SetKey("F12")
        #self.oHelper.WaitShow("Parametros")
        #self.oHelper.SetValue("Mostra Lanc Contab ?" ,"Nao")
        #self.oHelper.SetValue("Contabiliza On Line ?" ,"Nao")
        #self.oHelper.SetValue("Utiliza Banco Anterior ?" ,"Nao")
        #self.oHelper.SetButton("Ok")
        self.oHelper.SearchBrowse(f"D MG 01 {prefixo}{titulo}{parcela}{tipo}")
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Mot.Baixa", "VENDOR")
        # Vendor dialog: fields addressed by their HTML name attribute.
        self.oHelper.SetValue("cContrato", Contrato, name_attr=True)
        self.oHelper.SetValue("cBancoV", banco, name_attr=True)
        self.oHelper.SetValue("cAgenciaV", agencia, name_attr=True)
        self.oHelper.SetValue("cPrefV", PrefVen, name_attr=True)
        self.oHelper.SetValue("cNumV", titulo, name_attr=True)
        self.oHelper.SetValue("cTipV", tipoven, name_attr=True)
        self.oHelper.SetValue("dDataVencV", data, name_attr=True)
        self.oHelper.CheckResult("nTxAcresV", "5,0000", name_attr=True)
        self.oHelper.CheckResult("cNaturV", "VENDOR", name_attr=True)
        self.oHelper.CheckResult("nValTitV", "3.000,00", name_attr=True)
        self.oHelper.SetButton("Ok")
        self.oHelper.SetValue("Data Pagto.", data)
        self.oHelper.SetValue("Data Debito", data)
        self.oHelper.CheckResult("= Valor Pago", "3.000,00")
        self.oHelper.SetButton("Salvar")
        self.oHelper.WaitShow("Baixa de Titulos")
        # Inspect the vendor title generated by the settlement (VEN/DP).
        self.oHelper.SearchBrowse(f"D MG 01 {PrefVen}{titulo}{parcela}{tipoven}")
        self.oHelper.SetButton("Visualizar")
        self.oHelper.WaitShow("Baixa de Titulos - VISUALIZAR")
        self.oHelper.CheckResult("E2_SALDO", "3.000,00")
        self.oHelper.ClickFolder('Administrativo')
        self.oHelper.CheckResult("E2_ACRESC", "150,00")
        self.oHelper.SetButton("Cancelar")
        self.oHelper.AssertTrue()
#{Protheus.doc} test_FINA080_CT128
#Baixa lote de título Moeda 1 (Sem impostos, com | |
__all__ = ["SemiDataSetInterface", "MedicalDatasetSemiInterface"]
from abc import abstractmethod
from copy import deepcopy as dcp
from itertools import repeat
from typing import Tuple, Callable, List, Type, Dict, Union
import numpy as np
from PIL import Image
from deepclustering.augment import SequentialWrapper
from deepclustering.dataloader.dataset import CombineDataset
from deepclustering.dataloader.sampler import InfiniteRandomSampler
from deepclustering.dataset.segmentation import (
MedicalImageSegmentationDataset,
PatientSampler,
)
from deepclustering.decorator import FixRandomSeed
from torch import Tensor
from torch.utils.data import Dataset, DataLoader, Subset
# this function splits, however, we want to conserve the unlabeled dataset
def _draw_indices(
    targets: np.ndarray,
    labeled_sample_num: int,
    class_nums: int = 10,
    validation_num: int = 5000,
    verbose: bool = True,
    seed: int = 1,
) -> Tuple[List[int], List[int], List[int]]:
    """
    draw indices for labeled and unlabeled dataset separations.
    :param targets: `torch.utils.data.Dataset.targets`-like numpy ndarray with all labels, used to split into labeled, unlabeled and validation dataset.
    :param labeled_sample_num: labeled sample number
    :param class_nums: num of classes in the target.
    :param validation_num: num of validation set, usually we split the big training set into `labeled`, `unlabeled`, `validation` sets, the `test` set is taken directly from the big test set.
    :param verbose: whether to print information while running.
    :param seed: random seed to draw indices
    :return: labeled indices, unlabeled indices and validation indices
    """
    labeled_sample_per_class = int(labeled_sample_num / class_nums)
    validation_sample_per_class = int(validation_num / class_nums) if class_nums else 0
    targets = np.array(targets)
    train_labeled_idxs: List[int] = []
    train_unlabeled_idxs: List[int] = []
    val_idxs: List[int] = []
    with FixRandomSeed(seed):
        for i in range(class_nums):
            idxs = np.where(targets == i)[0]
            np.random.shuffle(idxs)
            train_labeled_idxs.extend(idxs[:labeled_sample_per_class])
            if validation_sample_per_class > 0:
                train_unlabeled_idxs.extend(
                    idxs[labeled_sample_per_class:-validation_sample_per_class]
                )
                val_idxs.extend(idxs[-validation_sample_per_class:])
            else:
                # BUGFIX: the original always sliced `idxs[labeled:-validation_sample_per_class]`
                # and `idxs[-validation_sample_per_class:]`. With validation_sample_per_class == 0
                # (e.g. validation_num=0) that is `[labeled:-0]` == empty and `[-0:]` == everything,
                # i.e. the whole class landed in the validation split and the unlabeled split was empty.
                train_unlabeled_idxs.extend(idxs[labeled_sample_per_class:])
        np.random.shuffle(train_labeled_idxs)
        np.random.shuffle(val_idxs)
        # highlight: this is to meet the UDA paper: unlabeled data is the true unlabeled_data + labeled_data, and there is no val_data
        # train_unlabeled_idxs = train_labeled_idxs + train_unlabeled_idxs + val_idxs
        # highlight: this leads to bad performance, using unlabeled = unlabeled + val
        train_unlabeled_idxs = train_unlabeled_idxs + val_idxs
        np.random.shuffle(train_unlabeled_idxs)
    # assert train_unlabeled_idxs.__len__() == len(targets)
    # NOTE(review): this assert only holds when class_nums divides labeled_sample_num evenly.
    assert len(train_labeled_idxs) == labeled_sample_num
    if verbose:
        print(
            f">>>Generating {len(train_labeled_idxs)} labeled data, {len(train_unlabeled_idxs)} unlabeled data, and {len(val_idxs)} validation data."
        )
    return train_labeled_idxs, train_unlabeled_idxs, val_idxs
class SemiDataSetInterface:
    """
    Semi supervised dataloader creator interface
    """
    def __init__(
        self,
        DataClass: Type[Dataset],
        data_root: str,
        labeled_sample_num: int,
        validation_num=5000,
        seed: int = 0,
        batch_size: int = 1,
        labeled_batch_size: int = None,
        unlabeled_batch_size: int = None,
        val_batch_size: int = None,
        shuffle: bool = False,
        num_workers: int = 1,
        pin_memory: bool = True,
        drop_last=False,
        verbose: bool = True,
    ) -> None:
        """
        when batch_size is not `None`, we do not consider `labeled_batch_size`, `unlabeled_batch_size`, and `val_batch_size`
        when batch_size is `None`, `labeled_batch_size`,`unlabeled_batch_size` and `val_batch_size` should be all int and >=1
        :param validation_num:
        """
        super().__init__()
        self.data_root = data_root
        self.DataClass = DataClass
        self.seed = seed
        self.labeled_sample_num = labeled_sample_num
        self.validation_num = validation_num
        self.verbose = verbose
        # True when all three per-split batch sizes were given; they then override batch_size.
        self._if_use_indiv_bz: bool = self._use_individual_batch_size(
            batch_size,
            labeled_batch_size,
            unlabeled_batch_size,
            val_batch_size,
            verbose,
        )
        # Per-split batch sizes; only consulted when _if_use_indiv_bz is True.
        self.batch_params = {
            "labeled_batch_size": labeled_batch_size,
            "unlabeled_batch_size": unlabeled_batch_size,
            "val_batch_size": val_batch_size,
        }
        # Keyword arguments forwarded to every DataLoader built by this interface.
        self.dataloader_params = {
            "batch_size": batch_size,
            "shuffle": shuffle,
            "num_workers": num_workers,
            "pin_memory": pin_memory,
            "drop_last": drop_last,
        }
    def _init_labeled_unlabled_val_and_test_sets(
        self,
    ) -> Tuple[Subset, Subset, Subset, Dataset]: # type: ignore
        """
        :param args: unknown args
        :param kwargs: unknown kwargs
        :return: Tuple of dataset, Labeled Dataset, Unlabeled Dataset, Val Dataset
        """
        train_set, test_set = self._init_train_test_sets()
        # NOTE(review): class_nums=10 is hard-wired — the split assumes a 10-class
        # dataset (CIFAR-10/MNIST style); confirm before reusing for other datasets.
        labeled_index, unlabeled_index, val_index = _draw_indices(
            train_set.targets,
            self.labeled_sample_num,
            class_nums=10,
            validation_num=self.validation_num,
            seed=self.seed,
            verbose=self.verbose,
        )
        # todo: to verify if here the dcp is necessary
        # (each Subset wraps its own deep copy so override_transforms on one split
        # cannot clobber the transforms of another split)
        labeled_set = Subset(dcp(train_set), labeled_index)
        unlabeled_set = Subset(dcp(train_set), unlabeled_index)
        val_set = Subset(dcp(train_set), val_index)
        del train_set
        return labeled_set, unlabeled_set, val_set, test_set
    @staticmethod
    def _use_individual_batch_size(
        batch_size, l_batch_size, un_batch_size, val_batch_size, verbose
    ):
        """Decide whether per-split batch sizes take effect.

        Returns True when all three per-split sizes are ints >= 1, False when a
        single shared batch_size >= 1 is used, and raises ValueError otherwise.
        """
        if (
            isinstance(l_batch_size, int)
            and isinstance(un_batch_size, int)
            and isinstance(val_batch_size, int)
        ):
            assert (
                l_batch_size >= 1 and un_batch_size >= 1 and val_batch_size >= 1
            ), "batch_size should be greater than 1."
            if verbose:
                print(
                    f"Using labeled_batch_size={l_batch_size}, unlabeled_batch_size={un_batch_size}, val_batch_size={val_batch_size}"
                )
            return True
        elif isinstance(batch_size, int) and batch_size >= 1:
            if verbose:
                print(f"Using all same batch size of {batch_size}")
            return False
        else:
            raise ValueError(
                f"batch_size setting error, given batch_size={batch_size}, labeled_batch_size={l_batch_size}, "
                f"unlabeled_batch_size={un_batch_size}, val_batch_size={val_batch_size}."
            )
    @abstractmethod
    def _init_train_test_sets(self) -> Tuple[Dataset, Dataset]:
        # Subclasses must return the raw (train, test) datasets for the concrete dataset.
        raise NotImplementedError("train and test set initialization must be override")
    def _create_semi_supervised_datasets(
        self,
        labeled_transform: Callable[[Image.Image], Tensor],
        unlabeled_transform: Callable[[Image.Image], Tensor],
        val_transform: Callable[[Image.Image], Tensor],
        test_transform: Callable[[Image.Image], Tensor],
        target_transform: Callable[[Tensor], Tensor] = None,
    ) -> Tuple[Subset, Subset, Subset, Dataset]:
        """Split the training set into labeled/unlabeled/val and install per-split transforms."""
        (
            labeled_set,
            unlabeled_set,
            val_set,
            test_set,
        ) = self._init_labeled_unlabled_val_and_test_sets()
        labeled_set = self.override_transforms(
            labeled_set, labeled_transform, target_transform
        )
        unlabeled_set = self.override_transforms(
            unlabeled_set, unlabeled_transform, target_transform
        )
        val_set = self.override_transforms(val_set, val_transform, target_transform)
        test_set = self.override_transforms(test_set, test_transform, target_transform)
        return labeled_set, unlabeled_set, val_set, test_set
    @staticmethod
    def override_transforms(dataset, img_transform, target_transform):
        """Set transform/target_transform on a Dataset, or on a Subset's wrapped dataset."""
        assert isinstance(dataset, (Dataset, Subset))
        if isinstance(dataset, Subset):
            # NOTE(review): mutating dataset.dataset is only safe because each Subset
            # wraps its own deep copy (see _init_labeled_unlabled_val_and_test_sets).
            dataset.dataset.transform = img_transform
            dataset.dataset.target_transform = target_transform
        else:
            dataset.transform = img_transform
            dataset.target_transform = target_transform
        return dataset
    def SemiSupervisedDataLoaders(
        self,
        labeled_transform: Callable[[Image.Image], Tensor],
        unlabeled_transform: Callable[[Image.Image], Tensor],
        val_transform: Callable[[Image.Image], Tensor],
        test_transform: Callable[[Image.Image], Tensor],
        target_transform: Callable[[Tensor], Tensor] = None,
    ) -> Tuple[DataLoader, DataLoader, DataLoader, DataLoader]:
        """Build the four DataLoaders (labeled, unlabeled, val, test)."""
        # Work on a copy: _dataloader_params is mutated in-place below for each loader.
        _dataloader_params = dcp(self.dataloader_params)
        (
            labeled_set,
            unlabeled_set,
            val_set,
            test_set,
        ) = self._create_semi_supervised_datasets(
            labeled_transform=labeled_transform,
            unlabeled_transform=unlabeled_transform,
            val_transform=val_transform,
            test_transform=test_transform,
            target_transform=target_transform,
        )
        if self._if_use_indiv_bz:
            _dataloader_params.update(
                {"batch_size": self.batch_params.get("labeled_batch_size")}
            )
        labeled_loader = DataLoader(labeled_set, **_dataloader_params)
        if self._if_use_indiv_bz:
            _dataloader_params.update(
                {"batch_size": self.batch_params.get("unlabeled_batch_size")}
            )
        unlabeled_loader = DataLoader(unlabeled_set, **_dataloader_params)
        # Evaluation loaders are deterministic: no shuffling, keep every sample.
        _dataloader_params.update({"shuffle": False, "drop_last": False})
        if self._if_use_indiv_bz:
            _dataloader_params.update(
                {"batch_size": self.batch_params.get("val_batch_size")}
            )
        val_loader = DataLoader(val_set, **_dataloader_params)
        test_loader = DataLoader(test_set, **_dataloader_params)
        del _dataloader_params
        return labeled_loader, unlabeled_loader, val_loader, test_loader
    def SemiSupervisedParallelDataLoaders(
        self,
        labeled_transforms: List[Callable[[Image.Image], Tensor]],
        unlabeled_transforms: List[Callable[[Image.Image], Tensor]],
        val_transforms: List[Callable[[Image.Image], Tensor]],
        test_transforms: List[Callable[[Image.Image], Tensor]],
        target_transform: Callable[[Tensor], Tensor] = None,
        use_infinite_sampler: bool = False,
    ) -> Tuple[DataLoader, DataLoader, DataLoader, DataLoader]:
        """Like SemiSupervisedDataLoaders, but each split yields several parallel
        views of the same sample (one per transform) via CombineDataset."""
        _dataloader_params = dcp(self.dataloader_params)
        def _override_transforms(dataset, img_transform_list, target_transform_list):
            # here deep copying the datasets are needed.
            return [
                self.override_transforms(dcp(dataset), img_trans, target_trans)
                for img_trans, target_trans in zip(
                    img_transform_list, target_transform_list
                )
            ]
        (
            labeled_set,
            unlabeled_set,
            val_set,
            test_set,
        ) = self._init_labeled_unlabled_val_and_test_sets()
        # repeat() pairs the single target_transform with every image transform in zip().
        target_transform_list = repeat(target_transform)
        labeled_sets = _override_transforms(
            labeled_set, labeled_transforms, target_transform_list
        )
        unlabeled_sets = _override_transforms(
            unlabeled_set, unlabeled_transforms, target_transform_list
        )
        val_sets = _override_transforms(val_set, val_transforms, target_transform_list)
        test_sets = _override_transforms(
            test_set, test_transforms, target_transform_list
        )
        labeled_set = CombineDataset(*labeled_sets)
        unlabeled_set = CombineDataset(*unlabeled_sets)
        val_set = CombineDataset(*val_sets)
        test_set = CombineDataset(*test_sets)
        if self._if_use_indiv_bz:
            _dataloader_params.update(
                {"batch_size": self.batch_params.get("labeled_batch_size")}
            )
        if use_infinite_sampler:
            # DataLoader forbids passing sampler together with shuffle=True, so the
            # shuffle flag is moved onto the InfiniteRandomSampler instead.
            _shuffle = _dataloader_params.get("shuffle", False)
            _dataloader_params.update({"shuffle": False})
            labeled_loader = DataLoader(
                labeled_set,
                **_dataloader_params,
                sampler=InfiniteRandomSampler(labeled_set, shuffle=_shuffle),
            )
        else:
            labeled_loader = DataLoader(labeled_set, **_dataloader_params)
        if self._if_use_indiv_bz:
            _dataloader_params.update(
                {"batch_size": self.batch_params.get("unlabeled_batch_size")}
            )
        if use_infinite_sampler:
            _shuffle = _dataloader_params.get("shuffle", False)
            _dataloader_params.update({"shuffle": False})
            unlabeled_loader = DataLoader(
                unlabeled_set,
                **_dataloader_params,
                sampler=InfiniteRandomSampler(unlabeled_set, shuffle=_shuffle),
            )
        else:
            unlabeled_loader = DataLoader(unlabeled_set, **_dataloader_params)
        # Deterministic evaluation loaders, as in SemiSupervisedDataLoaders.
        _dataloader_params.update({"shuffle": False, "drop_last": False})
        if self._if_use_indiv_bz:
            _dataloader_params.update(
                {"batch_size": self.batch_params.get("val_batch_size")}
            )
        val_loader = DataLoader(val_set, **_dataloader_params)
        test_loader = DataLoader(test_set, **_dataloader_params)
        return labeled_loader, unlabeled_loader, val_loader, test_loader
class MedicalDatasetSemiInterface:
"""
Semi-supervised interface for datasets using `MedicalImageSegmentationDataset`
"""
def __init__(
self,
DataClass: Type[MedicalImageSegmentationDataset],
root_dir: str,
labeled_data_ratio: float,
unlabeled_data_ratio: float,
seed: int = 0,
verbose: bool = True,
) -> None:
super().__init__()
self.DataClass = DataClass
self.root_dir = root_dir
assert (
labeled_data_ratio + unlabeled_data_ratio
) <= 1, f"`labeled_data_ratio` + `unlabeled_data_ratio` should be less than 1.0, given {labeled_data_ratio + unlabeled_data_ratio}"
self.labeled_ratio = labeled_data_ratio
self.unlabeled_ratio = unlabeled_data_ratio
self.val_ratio = 1 - (labeled_data_ratio + unlabeled_data_ratio)
self.seed = seed
self.verbose = verbose
    def compile_dataloader_params(
        self,
        batch_size: int = 1,
        labeled_batch_size: int = None,
        unlabeled_batch_size: int = None,
        val_batch_size: int = None,
        shuffle: bool = False,
        num_workers: int = 1,
        pin_memory: bool = True,
        drop_last=False,
    ):
        """Validate and store DataLoader keyword arguments for later loader construction.

        Either one shared `batch_size` is used for every loader, or — when all of
        `labeled_batch_size`, `unlabeled_batch_size`, `val_batch_size` are ints >= 1 —
        the per-split batch sizes take precedence.
        """
        # NOTE(review): `_use_individual_batch_size` is not defined in the visible part
        # of this class — presumably the same validation helper as on
        # SemiDataSetInterface; confirm.
        self._if_use_indiv_bz: bool = self._use_individual_batch_size(
            batch_size,
            labeled_batch_size,
            unlabeled_batch_size,
            val_batch_size,
            self.verbose,
        )
        # Unlike SemiDataSetInterface.__init__, batch_params only exists when the
        # individual batch sizes are in effect.
        if self._if_use_indiv_bz:
            self.batch_params = {
                "labeled_batch_size": labeled_batch_size,
                "unlabeled_batch_size": unlabeled_batch_size,
                "val_batch_size": val_batch_size,
            }
        self.dataloader_params = {
            "batch_size": batch_size,
            "shuffle": shuffle,
            "num_workers": num_workers,
            "pin_memory": pin_memory,
            "drop_last": drop_last,
        }
def SemiSupervisedDataLoaders(
self,
labeled_transform: SequentialWrapper = None,
unlabeled_transform: SequentialWrapper = None,
val_transform: SequentialWrapper = None,
group_labeled=False,
group_unlabeled=False,
group_val=True,
use_infinite_sampler: bool = False,
) -> Tuple[DataLoader, DataLoader, DataLoader]:
_dataloader_params = dcp(self.dataloader_params)
labeled_set, unlabeled_set, val_set = self._create_semi_supervised_datasets(
labeled_transform=labeled_transform,
unlabeled_transform=unlabeled_transform,
val_transform=val_transform,
)
# labeled_dataloader
if self._if_use_indiv_bz:
_dataloader_params.update(
{"batch_size": self.batch_params.get("labeled_batch_size")}
)
if use_infinite_sampler:
labeled_loader = (
DataLoader(
labeled_set,
sampler=InfiniteRandomSampler(
labeled_set, shuffle=_dataloader_params.get("shuffle", False)
),
**{k: v for k, v in _dataloader_params.items() if k != "shuffle"},
)
if not group_labeled
else self._grouped_dataloader(
labeled_set, use_infinite_sampler=True, **_dataloader_params
)
)
else:
labeled_loader = (
DataLoader(labeled_set, **_dataloader_params)
if not group_labeled
else self._grouped_dataloader(
labeled_set, use_infinite_sampler=False, **_dataloader_params
)
)
# unlabeled_dataloader
if self._if_use_indiv_bz:
_dataloader_params.update(
{"batch_size": self.batch_params.get("unlabeled_batch_size")}
)
if use_infinite_sampler:
unlabeled_loader = (
DataLoader(
unlabeled_set,
sampler=InfiniteRandomSampler(
unlabeled_set, shuffle=_dataloader_params.get("shuffle", False)
),
**{k: v for k, v in _dataloader_params.items() if k != "shuffle"},
)
if not group_unlabeled
else self._grouped_dataloader(
unlabeled_set, use_infinite_sampler=True, **_dataloader_params
)
)
else:
unlabeled_loader = (
DataLoader(unlabeled_set, **_dataloader_params)
if not group_unlabeled
else self._grouped_dataloader(
unlabeled_set, use_infinite_sampler=True, | |
row the first element is the image of the background, the second row is the platform areas that player can jump on, the third element
#is the name of the music for that background.
# Each backlist entry is a tuple: (background surface, list of platform Rects the
# player can stand on, music filename for that stage).
# NOTE(review): `image`, `Rect` etc. come from an import outside this view —
# presumably pygame's star import; confirm.
backlist=[(image.load("Ninja Academy.png"),[Rect(234, 527, 277, 14),Rect(270, 415, 203, 16),Rect(305, 307, 142, 14),Rect(740, 306, 143, 13),Rect(706, 417, 206, 13),Rect(670, 526, 277, 15)],"academy.mp3"),
(image.load("TrainingField.png"),[Rect(375, 542, 20, 9),Rect(490, 546, 25, 8),Rect(607, 544, 16, 11),Rect(16, 400, 132, 12),Rect(149, 391, 37, 19),Rect(190, 381, 22, 12),Rect(793, 380, 24, 12),Rect(830, 396, 61, 14),Rect(907, 398, 75, 20)],"training.mp3"),
(image.load("valley of the end.png"),[Rect(3, 387, 63, 21),Rect(10, 596, 108, 13),Rect(335, 568, 67, 21),Rect(488, 628, 40, 17),Rect(626, 558, 44, 15),Rect(905, 607, 89, 19),Rect(913, 398, 82, 21),Rect(310, 469, 399, 16)],"valley.mp3"),
(image.load("UchihaHideout.png"),[Rect(0, 489, 315, 16),Rect(683, 492, 316, 14)],"uchiha.mp3"),
(image.load("ramen shop.png"),[Rect(0, 283, 138, 11),Rect(2, 452, 178, 9),Rect(435, 305, 200, 14),Rect(569, 333, 36, 15),Rect(405, 462, 287, 10),Rect(876, 284, 123, 12),Rect(855, 450, 144, 12),Rect(1, 120, 255, 6),Rect(361, 121, 363, 5),Rect(825, 121, 174, 6)],"ramen.mp3")]
#starting background
background = backlist[0][0]
platforms = backlist[0][1]
# index variable — usage not visible in this chunk; confirm against the menu code.
column=0
#the next few functions are universal to all players; they are the basic movements and attacks
# this is a basic function: it blits every picture in the stance row; there are two different options,
#one where the character is facing right, the other where they are facing left
def stance(x1,y1,direction,row):
    """Draw player 1's idle-stance animation frame at (x1, y1) and update the hit area."""
    global frame,pics,parea #the "row" refers to which character it is, so first we go into character, than the 0 makes us go to the place with all the stanc pictures
    cpics = pics[row][0]
    if frame>len(cpics)-1: #when we blit every picture, we start over again
        frame=0
    # NOTE(review): transform.flip's second argument should be a bool flip-x flag;
    # x1 is passed instead and works only because a positive x1 is truthy — confirm.
    if direction=="right":screen.blit( cpics[frame] ,(x1,y1-cpics[frame].get_size()[1])) #y1-cpics[frame].get_size()[1]), y1 is where the bottom of the feet should be, cpics[frame].get_size()[1]) gives us the picture length, so if we subtract them we get to top y, this is so the feet is always on the bottom
    else: screen.blit(transform.flip( cpics[frame],x1,0),(x1,y1-cpics[frame].get_size()[1])) #facing left #transform.flip() flips all the pictures
    parea=Rect(x1,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1]) #the player area, where the character is standing, used to see if they are being attacked
#this is the same as before except we have x+=3 and x-=3 so the character moves
def walk(x1,y1,direction,row):
    """Draw player 1's walk frame and move the global x by 3 px in the facing direction."""
    global frame,pics,x,parea
    cpics = pics[row][1]
    if frame>len(cpics)-1:
        frame=0
    if direction=="right":
        screen.blit(cpics[frame],(x1,y1-cpics[frame].get_size()[1]))
        x+=3
    else:
        screen.blit(transform.flip(cpics[frame],x1,0),(x1,y1-cpics[frame].get_size()[1]))
        x-=3
    parea=Rect(x1,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1])
#same as walk except character moves faster
def run(x1,y1,direction,row):
    """Same as walk but uses the run sprites (row 11) and moves 5 px per frame."""
    global frame,pics,x,parea
    cpics = pics[row][11]
    if frame>len(cpics)-1:
        frame=0
    if direction=="right":
        screen.blit(cpics[frame],(x1,y1-cpics[frame].get_size()[1]))
        x+=5
    else:
        screen.blit(transform.flip(cpics[frame],x1,0),(x1,y1-cpics[frame].get_size()[1]))
        x-=5
    parea=Rect(x1,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1])
#blits only one picture, the crouch
def crouch(x1,y1,direction,row):
    """Draw player 1's single crouch sprite and refresh the player hit area."""
    global parea
    sprite = pics[row][2][0]
    width, height = sprite.get_size()
    top = y1 - height
    if direction == "right":
        screen.blit(sprite, (x1, top))
    else:
        screen.blit(transform.flip(sprite, x1, 0), (x1, top))
    parea = Rect(x1, top, width, height)
#same as stance except the end
def jump(x1,y1,direction,row):
    """Draw player 1's jump frame; holds the last-but-one frame and flags jumpdone on landing."""
    global pics,frame,jumpdone,parea,yvalue
    cpics = pics[row][3]
    if frame>len(cpics)-2:
        frame=len(cpics)-2
    if direction=="right":screen.blit(cpics[frame],(x1,y1-cpics[frame].get_size()[1]))
    else: screen.blit(transform.flip(cpics[frame],x1,0),(x1,y1-cpics[frame].get_size()[1]))
    parea=Rect(x1,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1])
    if y1>=yvalue: #when the character hits the ground again, the move stops. # yvalue is the value of the platform
        jumpdone=True #jumpdone tell us when the moves is done, so the player doesn't have to hold the key
#same as basic funtion with an attackarea
def punch(x1,y1,direction,row):
    """Draw player 1's punch frame; the whole sprite rect doubles as the attack area."""
    global frame,pics,parea,attackarea
    cpics = pics[row][7]
    if frame>len(cpics)-1:
        frame=0
    if direction=="right":screen.blit(cpics[frame],(x1,y1-cpics[frame].get_size()[1]))
    else: screen.blit(transform.flip(cpics[frame],x1,0),(x1,y1-cpics[frame].get_size()[1]))
    parea=Rect(x1,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1])
    attackarea=Rect(x1,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1]) #to see if the opponent is in this area, if they are they loose health
def hardpunch(x1,y1,direction,row):
    """Draw player 1's heavy punch; madara's version launches a projectile that travels forward."""
    global frame,pics,parea,attackarea,char
    cpics = pics[row][8]
    if frame>len(cpics)-1:
        frame=0
        # NOTE(review): this assigns a *local* that is never read; `move` is not in the
        # global statement above, so this looks like a bug (intended `global move`) — confirm.
        move="stance"
    if char=="madara": #madara has a different punch than others
        if direction=="right":screen.blit(cpics[0],(x1,y1-cpics[0].get_size()[1]))#this is madara himself, the below picture is a dragon that moves forward
        else: screen.blit(transform.flip(cpics[0],x1,0),(x1,y1-cpics[0].get_size()[1]))
        if frame>0:
            if direction=="right":
                screen.blit(cpics[frame],(x1+10+frame*50,y1-cpics[frame].get_size()[1])) #his punch must move forward, so each time we incease the fram, we blit it further
                attackarea=Rect(x1+10+frame*50,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1])
            else:
                screen.blit(transform.flip(cpics[frame],x1-50-frame*50,0),(x1-50-frame*50,y1-cpics[frame].get_size()[1]))
                attackarea=Rect(x1-50-frame*50,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1])
        parea=Rect(x1,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1])
    else: #this part is same as punch
        if direction=="right":screen.blit(cpics[frame],(x1,y1-cpics[frame].get_size()[1]))
        else: screen.blit(transform.flip(cpics[frame],x1,0),(x1,y1-cpics[frame].get_size()[1]))
        parea=Rect(x1,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1])
        attackarea=Rect(x1,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1])
#same as punch
def combo(x1,y1,direction,row):
    """Draw player 1's combo attack frame (same shape as punch, sprite row 9)."""
    global frame,pics,parea,attackarea
    cpics = pics[row][9]
    if frame>len(cpics)-1:
        frame=0
    if direction=="right":screen.blit(cpics[frame],(x1,y1-cpics[frame].get_size()[1]))
    else: screen.blit(transform.flip(cpics[frame],x1,0),(x1,y1-cpics[frame].get_size()[1]))
    parea=Rect(x1,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1])
    attackarea=Rect(x1,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1])
#this is if the player gets hitm, they fall down or move back based on the level of the attack
def takedamage(x1,y1,direction,row,dtype):
    """Draw player 1's hit reaction: dtype 1 = light hit (knockback, first frames only),
    anything else = full knockdown animation; sets the matching *done flag when finished."""
    global frame,pics,parea,attackarea,takedamagedonelevel1,takedamagedonelevel2,x,direc2
    cpics = pics[row][14]
    if dtype==1: #weak attack, only move back, don't fall
        if frame>1:
            frame=0
        if frame==0 or frame==1 or frame==2: #if the opponent is on the ground they should not move, in frames 0,1 and 2 the player is never down
            if direc2=="right": #if the opponent is attacking to the right than this player is going to fall to the right
                x+=1
            else:
                x-=1
        if direction=="right":screen.blit(cpics[frame],(x1,y1-cpics[frame].get_size()[1]))
        else: screen.blit(transform.flip(cpics[frame],x1,0),(x1,y1-cpics[frame].get_size()[1]))
        parea=Rect(x1,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1])
        if frame>=1: #we only want the first few frames where the character moves back
            takedamagedonelevel1=True #keeps track of wether the move is done
    else:
        if frame>len(cpics)-1:
            frame=0
        if frame==0 or frame==1 or frame==2: #if the opponent is on the ground they should not move, in frames 0,1 and 2 the player is never down
            if direc2=="right": #if the opponent is attacking to the right than this player is going to fall to the right
                x+=1
            else:
                x-=1
        if direction=="right":screen.blit(cpics[frame],(x1,y1-cpics[frame].get_size()[1]))
        else: screen.blit(transform.flip(cpics[frame],x1,0),(x1,y1-cpics[frame].get_size()[1]))
        parea=Rect(x1,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1])
        if frame+1==len(cpics): #if each picture in this is done, the move is over
            takedamagedonelevel2=True #keeps track of wether the move is done
#just blits one picture, the gaurd picture
def guard(x1,y1,direction,row):
    """Draw player 1's single guard sprite and refresh the player hit area."""
    global parea
    sprite = pics[row][10][0]
    width, height = sprite.get_size()
    top = y1 - height
    if direction == "right":
        screen.blit(sprite, (x1, top))
    else:
        screen.blit(transform.flip(sprite, x1, 0), (x1, top))
    parea = Rect(x1, top, width, height)
# Player 2 -------------------------------------------------------------------
#there is no need to comment player twos functions because they are the same as player one with different variables, most variables have a two after them
def stance2(x1,y1,direction,row):
    """Draw player 2's idle-stance frame (player-2 mirror of stance())."""
    global frame2,pics,parea2
    # NOTE(review): the `row` parameter is ignored; all player-2 helpers index the
    # module-level `row2` instead — appears deliberate but confirm.
    cpics = pics[row2][0]
    if frame2>len(cpics)-1:
        frame2=0
    if direction=="right":screen.blit(cpics[frame2],(x1,y1-cpics[frame2].get_size()[1])) #y1-cpics[frame].get_size()[1]), this allows us to blit all picture so that the bottom is at the y level(400)
    else: screen.blit(transform.flip(cpics[frame2],x1,0),(x1,y1-cpics[frame2].get_size()[1]))
    parea2=Rect(x1,y1-cpics[frame2].get_size()[1],cpics[frame2].get_size()[0],cpics[frame2].get_size()[1])
def walk2(x1,y1,direction,row):
    """Draw player 2's walk frame and move the global x2 by 3 px."""
    global frame2,pics,x2,parea2
    cpics = pics[row2][1]
    if frame2>len(cpics)-1:
        frame2=0
    if direction=="right":
        screen.blit(cpics[frame2],(x1,y1-cpics[frame2].get_size()[1]))
        x2+=3
    else:
        screen.blit(transform.flip(cpics[frame2],x1,0),(x1,y1-cpics[frame2].get_size()[1]))
        x2-=3
    parea2=Rect(x1,y1-cpics[frame2].get_size()[1],cpics[frame2].get_size()[0],cpics[frame2].get_size()[1])
def run2(x1,y1,direction,row):
    """Draw player 2's run frame (sprite row 11) and move x2 by 5 px."""
    global frame2,pics,x2,parea2
    cpics = pics[row2][11]
    if frame2>len(cpics)-1:
        frame2=0
    if direction=="right":
        screen.blit(cpics[frame2],(x1,y1-cpics[frame2].get_size()[1]))
        x2+=5
    else:
        screen.blit(transform.flip(cpics[frame2],x1,0),(x1,y1-cpics[frame2].get_size()[1]))
        x2-=5
    parea2=Rect(x1,y1-cpics[frame2].get_size()[1],cpics[frame2].get_size()[0],cpics[frame2].get_size()[1])
def crouch2(x1,y1,direction,row):
    """Draw player 2's single crouch sprite and refresh the hit area."""
    global parea2
    cpics = pics[row2][2]
    if direction=="right":screen.blit(cpics[0],(x1,y1-cpics[0].get_size()[1]))
    else: screen.blit(transform.flip(cpics[0],x1,0),(x1,y1-cpics[0].get_size()[1]))
    parea2=Rect(x1,y1-cpics[0].get_size()[1],cpics[0].get_size()[0],cpics[0].get_size()[1])
def guard2(x1,y1,direction,row):
    """Draw player 2's single guard sprite and refresh the hit area."""
    global parea2
    cpics = pics[row2][10]
    if direction=="right":screen.blit(cpics[0],(x1,y1-cpics[0].get_size()[1]))
    else: screen.blit(transform.flip(cpics[0],x1,0),(x1,y1-cpics[0].get_size()[1]))
    parea2=Rect(x1,y1-cpics[0].get_size()[1],cpics[0].get_size()[0],cpics[0].get_size()[1])
def jump2(x1,y1,direction,row):
    """Draw player 2's jump frame; flags jumpdone2 once y1 reaches the platform level."""
    global pics,frame2,jumpdone2,parea2,yvalue2
    cpics = pics[row2][3]
    if frame2>len(cpics)-2:
        frame2=len(cpics)-2
    if direction=="right":screen.blit(cpics[frame2],(x1,y1-cpics[frame2].get_size()[1]))
    else: screen.blit(transform.flip(cpics[frame2],x1,0),(x1,y1-cpics[frame2].get_size()[1]))
    if y1>=yvalue2:
        jumpdone2=True
    parea2=Rect(x1,y1-cpics[frame2].get_size()[1],cpics[frame2].get_size()[0],cpics[frame2].get_size()[1])
def punch2(x1,y1,direction,row):
    """Draw player 2's punch frame; the sprite rect doubles as attackarea2."""
    global frame2,pics,parea2,attackarea2
    cpics = pics[row2][7]
    if frame2>len(cpics)-1:
        frame2=0
    if direction=="right":screen.blit(cpics[frame2],(x1,y1-cpics[frame2].get_size()[1]))
    else: screen.blit(transform.flip(cpics[frame2],x1,0),(x1,y1-cpics[frame2].get_size()[1]))
    parea2=Rect(x1,y1-cpics[frame2].get_size()[1],cpics[frame2].get_size()[0],cpics[frame2].get_size()[1])
    attackarea2=Rect(x1,y1-cpics[frame2].get_size()[1],cpics[frame2].get_size()[0],cpics[frame2].get_size()[1])
def hardpunch2(x1,y1,direction,row):
    """Draw player 2's heavy punch; madara's version launches a traveling projectile."""
    global frame2,pics,parea2,attackarea2,char2
    cpics = pics[row2][8]
    if frame2>len(cpics)-1:
        frame2=0
        # NOTE(review): assigns a local that is never read and is not declared global —
        # mirrors the same suspect line in hardpunch(); confirm intent.
        move2="stance"
    if char2=="madara":
        if direction=="right":screen.blit(cpics[0],(x1,y1-cpics[0].get_size()[1]))
        else: screen.blit(transform.flip(cpics[0],x1,0),(x1,y1-cpics[0].get_size()[1]))
        if frame2>0:
            if direction=="right":
                screen.blit(cpics[frame2],(x1+10+frame2*50,y1-cpics[frame2].get_size()[1]))
                attackarea2=Rect(x1+10+frame2*50,y1-cpics[frame2].get_size()[1],cpics[frame2].get_size()[0],cpics[frame2].get_size()[1])
            else:
                screen.blit(transform.flip(cpics[frame2],x1-50-frame2*50,0),(x1-50-frame2*50,y1-cpics[frame2].get_size()[1]))
                attackarea2=Rect(x1-50-frame2*50,y1-cpics[frame2].get_size()[1],cpics[frame2].get_size()[0],cpics[frame2].get_size()[1])
        parea2=Rect(x1,y1-cpics[frame2].get_size()[1],cpics[frame2].get_size()[0],cpics[frame2].get_size()[1])
    else:
        if direction=="right":screen.blit(cpics[frame2],(x1,y1-cpics[frame2].get_size()[1]))
        else: screen.blit(transform.flip(cpics[frame2],x1,0),(x1,y1-cpics[frame2].get_size()[1]))
        parea2=Rect(x1,y1-cpics[frame2].get_size()[1],cpics[frame2].get_size()[0],cpics[frame2].get_size()[1])
        attackarea2=Rect(x1,y1-cpics[frame2].get_size()[1],cpics[frame2].get_size()[0],cpics[frame2].get_size()[1])
def combo2(x1,y1,direction,row):
    """Draw player 2's combo attack frame (sprite row 9)."""
    global frame2,pics,parea2,attackarea2
    cpics = pics[row2][9]
    if frame2>len(cpics)-1:
        frame2=0
    if direction=="right":screen.blit(cpics[frame2],(x1,y1-cpics[frame2].get_size()[1]))
    else: screen.blit(transform.flip(cpics[frame2],x1,0),(x1,y1-cpics[frame2].get_size()[1]))
    parea2=Rect(x1,y1-cpics[frame2].get_size()[1],cpics[frame2].get_size()[0],cpics[frame2].get_size()[1])
    attackarea2=Rect(x1,y1-cpics[frame2].get_size()[1],cpics[frame2].get_size()[0],cpics[frame2].get_size()[1])
def takedamage2(x1,y1,direction,row,dtype):
    """Player 2's hit reaction: dtype 1 = light knockback, otherwise full knockdown."""
    global frame2,pics,parea2,takedamagedonelevel12,takedamagedonelevel22,x2,direc
    # NOTE(review): this is the only player-2 helper indexing pics[row] (the parameter)
    # instead of pics[row2] — possible copy/paste inconsistency; confirm which is intended.
    cpics = pics[row][14]
    if dtype==1:
        if frame2>1:
            frame2=0
        if frame2==0 or frame2==1 or frame2==2:
            if direc=="right":
                x2+=1
            else:
                x2-=1
        if direction=="right":screen.blit(cpics[frame2],(x1,y1-cpics[frame2].get_size()[1]))
        else: screen.blit(transform.flip(cpics[frame2],x1,0),(x1,y1-cpics[frame2].get_size()[1]))
        parea2=Rect(x1,y1-cpics[frame2].get_size()[1],cpics[frame2].get_size()[0],cpics[frame2].get_size()[1])
        if frame2>=1:
            takedamagedonelevel12=True
    else:
        if frame2>len(cpics)-1:
            frame2=0
        if frame2==0 or frame2==1 or frame2==2:
            if direc=="right":
                x2+=1
            else:
                x2-=1
        if direction=="right":screen.blit(cpics[frame2],(x1,y1-cpics[frame2].get_size()[1]))
        else: screen.blit(transform.flip(cpics[frame2],x1,0),(x1,y1-cpics[frame2].get_size()[1]))
        parea2=Rect(x1,y1-cpics[frame2].get_size()[1],cpics[frame2].get_size()[0],cpics[frame2].get_size()[1])
        if frame2+1==len(cpics):
            takedamagedonelevel22=True
#-----------------------------------------------------
#collision is when two special attacks collide, then those two moves stop and there is an explosion
def collision(x1,y1):
    """Play the explosion animation at (x1, y1) when two special attacks collide.

    Blocks the game loop for the duration (one display flip plus 50 ms wait per
    explosion frame) so neither character can move while it plays.
    """
    # BUGFIX: the original declared `global collision`, i.e. the function's own name.
    # Nothing assigns it, so it was a harmless but dangerous footgun (any assignment
    # would have replaced the function); only screencopy needs the declaration.
    global screencopy
    for i in collisionl: #this way the characters can't move while this is happening
        screen.blit(screencopy,(0,0))
        screen.blit(i,(x1,y1-i.get_size()[1]))
        display.flip()
        time.wait(50)
#special moves: most special moves are different so each one requires it own code
#each character has three special moves, but they need six function, 3 for player one and three for player two
#the attack done variables keep track of the attack, so if the character presses the attack key once it continues till it is over
#this is same as the basic functions, except he moves in some frames so we change the x
def narutoattack1(x1,y1,direction):
    """Naruto's first special: from frame 7 he dashes 10 px per tick and his whole
    sprite becomes the attack area; ends automatically after 150 ticks."""
    global frame,pics,count,attack1done,x,attackarea
    cpics = pics[0][4]
    if frame>len(cpics)-2:
        frame=len(cpics)-2
    if frame>=7:
        # NOTE(review): movement keys off the global `direc` while drawing keys off the
        # `direction` parameter — confirm the two are always in sync for player 1.
        if direc=="right": x+=10
        else: x-=10
    if direction=="right":screen.blit(cpics[frame],(x1,y1-cpics[frame].get_size()[1]))
    else: screen.blit(transform.flip(cpics[frame],x1,0),(x1,y1-cpics[frame].get_size()[1]))
    if frame>=7:
        attackarea=Rect(x1,y1-cpics[frame].get_size()[1],cpics[frame].get_size()[0],cpics[frame].get_size()[1])
    count+=1
    if count==150: #after a while the move will stop, count keeps track of the time of this move in action
        attack1done=True
        count=0
#till the 43 frame, the character does a few movements, than after that the ball moves towards the opponent in a linear fashion
#that is, the ball moves only on the ground does not go up and down
def narutoattack2(x1,y1,direction):
global frame,pics,first,rincrease,lincrease,count,attack2done,attackarea,x,x2
cpics = pics[0][5]
if first: #when the player first clicks the key to launch the move first becomes true, so we can reset a few variables
frame=0
rincrease=0 #to see where the ball shoud go
lincrease=0
if frame<=43: #the first 43 | |
# repository: An-dz/tilecutter
# TileCutter Project Module (Old version)
import logging, os, sys
import wx
import config
from tc import Paths
from environment import getenvvar
config = config.Config()
paths = Paths()
# project[view][season][frame][image][xdim][ydim][zdim]
# view=NSEW, 0,1,2,3 - array - controlled by global enable
# season=summer/winter, 0,1 - array - controlled by global bool enable
# frame=0,++ - array - controlled by global number of frames variable
# image=back/front, 0,1 - array - controlled by global bool enable
class ProjectImage(object):
    """An individual image object, consisting of a cached image, path to that image and offset dimensions"""
    def __init__(self, parent, b):
        """Initialise default values, new image, empty path, zeroed offsets

        parent -- owning ProjectFrame, used to bubble change notifications
        b      -- truthy for a back image, falsy for a front image
        """
        self.parent = parent
        # Also needs some provision for setting the cutting mask on a per-image basis (like the offset)
        # given that fine-tuning of the mask is a desirable feature
        # Robustness fix: the old `b in [True, 1]` / `b in [False, 0]` tests
        # left self.b unset for any other value, deferring the failure to an
        # AttributeError at first use of back(); coerce explicitly instead.
        self.b = bool(b)
        # Whatever is in the path entry box
        self.value_path = ""
        # Last valid/real path entered
        self.value_valid_path = ""
        self.reload_image()
        self.offset = [0, 0]
        self.cutimageset = None
    def __getitem__(self, key):
        # Delegate indexing to the cut imageset produced by cut_image()
        return self.cutimageset[key]
    def cut_image(self, cutting_function, dims, p):
        """Generates an array of cut images based on this image
        using the cutting routine

        cutting_function -- callable(bitmap, dims, offset, p) -> imageset
        dims             -- dimensions tuple forwarded to the cutter
        p                -- paksize
        """
        self.reload_image()
        self.cutimageset = cutting_function(self.bitmap(), dims, self.offset, p)
    def image(self):
        """Return a wxImage representation of the cached image"""
        if self.value_image is None:
            self.reload_image()
        return self.value_image
    def bitmap(self):
        """Return a wxBitmap representation of the cached image"""
        if self.value_bitmap is None:
            self.reload_image()
        return self.value_bitmap
    def del_image(self):
        """Delete stored images, to enable pickling"""
        self.value_image = None
        self.value_bitmap = None
        self.cutimageset = None
    def reload_image(self):
        """Refresh the cached image (1x1 placeholder when no valid path is set)"""
        if self.value_valid_path == "":
            self.value_image = wx.Image(1, 1)
            self.value_bitmap = wx.Bitmap(self.value_image)
        else:
            # Image paths are stored relative to the project save file
            abspath = paths.join_paths(self.parent.parent.parent.savefile(), self.value_valid_path)
            self.value_image = wx.Image(1, 1)
            self.value_image.LoadFile(abspath, wx.BITMAP_TYPE_ANY)
            self.value_bitmap = wx.Bitmap(self.value_image)
    def valid_path(self):
        """Return the valid/real path of this image"""
        return self.value_valid_path
    def path(self, path=None):
        """Set or return the path of this image as entered

        With an argument: records the raw entry and, when it resolves to an
        existing input file (or is empty), also updates the valid path,
        reloads the cached image and fires on_change().  Without an
        argument: returns the raw entered path.
        """
        if path is not None:
            self.value_path = path
            logging.debug("value_path set to: '%s'" % self.value_path)
            abspath = paths.join_paths(self.parent.parent.parent.savefile(), self.value_path)
            if (paths.is_input_file(abspath) and os.path.exists(abspath)) or path == "":
                self.value_valid_path = path
                self.reload_image()
                logging.debug("Valid image path set to '%s', new cached image will be loaded" % str(self.value_valid_path))
                self.on_change()
        else:
            return self.value_path
    def back(self):
        """Returns True if this is a backimage, false if it is a frontimage"""
        return self.b
    def on_change(self):
        """Propagate a change notification up to the parent frame"""
        self.parent.on_change()
class ProjectFrame(object):
    """Contains a single frame of the project, with a front and back image"""
    def __init__(self, parent):
        """Create the frame's two images: flag 0 (front) and flag 1 (back)."""
        self.parent = parent
        self.images = [ProjectImage(self, flag) for flag in (0, 1)]
    def __getitem__(self, key):
        """Return the image at the given index (0 or 1)."""
        return self.images[key]
    def __len__(self):
        """Return the number of images held by this frame."""
        return len(self.images)
    def on_change(self):
        """Propagate a project-changed notification up to the parent."""
        self.parent.on_change()
class ProjectFrameset(object):
    """Contains a sequence of ProjectFrame objects for each animation frame of this direction/season combination"""
    def __init__(self, parent, season):
        """Start with a single frame; season is 0 (summer) or 1 (winter)."""
        self.parent = parent
        self.season = season
        self.frames = [ProjectFrame(self)]
    def __getitem__(self, key):
        """Return the animation frame at the given index."""
        return self.frames[key]
    def __len__(self):
        """Return the number of animation frames."""
        return len(self.frames)
    # Needs methods to add a frame, remove a frame, move frames up/down etc. (To be added with animation support)
    def on_change(self):
        """Propagate a project-changed notification up to the parent."""
        self.parent.on_change()
class Project(object):
"""Model containing all information about a project."""
    def __init__(self, parent):
        """Initialise this project, and set default values

        parent -- application object; must provide project_has_changed(),
        called whenever any nested value changes (see on_change).
        """
        self.parent = parent
        # Create a 4/2 array of ProjectImages arrays, which can then contain a variable number of
        # Frame objects (each of which contains a Front and Back Image)
        # [0]->South, [1]->East, [2]->North, [3]->West
        # [0][0]->Summer, [0][1]->Winter
        self.images = []
        for _a in range(4):
            b = []
            b.append(ProjectFrameset(self, 0))
            b.append(ProjectFrameset(self, 1))
            self.images.append(b)
        # Holders for x/y/z/paksize dimensions, file info and the active-image
        # selection (classes defined elsewhere in this module)
        self.dims = ProjectDims(self)
        self.files = ProjectFiles(self)
        self.active = ActiveImage(self)
        # Default placeholder .dat properties (see temp_dat_properties)
        self.val_temp_dat = "Obj=building\nName=test_1\nType=cur\nPassengers=100\nintro_year=1900\nchance=100"
def on_change(self):
# When something in the project has changed, notify containing app to
# allow for updating of UI
logging.info("Root on_change triggered, sending message to App")
self.parent.project_has_changed()
    def __getitem__(self, key):
        # Index into the direction array: [0]->South, [1]->East, [2]->North, [3]->West
        return self.images[key]
def temp_dat_properties(self, value=None):
"""References a string containing arbitrary dat properties for the project"""
if value is not None:
self.val_temp_dat = value
logging.debug("TEMP dat properties set to %s" % self.val_temp_dat)
self.on_change()
return 0
else:
return self.val_temp_dat
def set_all_images(self, path):
"""Set the path for all images to the same path"""
for d in range(len(self.images)):
for s in range(len(self.images[d])):
for f in range(len(self.images[d][s])):
for i in range(len(self.images[d][s][f])):
self.images[d][s][f][i].path(path)
self.on_change()
def cut_images(self, cutting_function):
"""Produce cut imagesets for all images in this project"""
# Can make this work conditionally based on which images are enabled later
for d in range(len(self.images)):
for s in range(len(self.images[d])):
for f in range(len(self.images[d][s])):
for i in range(len(self.images[d][s][f])):
self.images[d][s][f][i].cut_image(cutting_function, (self.x(), self.y(), self.z(), d), self.paksize())
def del_images(self):
"""Delete all image data representations, ready for pickling"""
for d in range(len(self.images)):
for s in range(len(self.images[d])):
for f in range(len(self.images[d][s])):
for i in range(len(self.images[d][s][f])):
self.images[d][s][f][i].del_image()
    def prep_serialise(self):
        """Prepare this object for serialisation.

        Returns a single-element list containing the parent reference,
        which post_serialise() expects back as its ``params`` argument.
        """
        # Remove images as we cannot pickle these and do not want to
        self.del_images()
        # Return parent reference so it can be added back by post_serialise
        parent = self.parent
        self.del_parent()
        return [parent]
    def post_serialise(self, params):
        """After serialisation re-add parameters removed by prep_serialise

        params -- the list returned by prep_serialise(); params[0] is the
                  parent reference.
        """
        self.set_parent(params[0])
    def del_parent(self):
        """Delete the parent reference ready for pickling"""
        # Cleared so the pickled project does not drag the parent object along
        self.parent = None
    def set_parent(self, parent):
        """Set the parent for Event references"""
        # Restores the change-notification target used by on_change()
        self.parent = parent
    def offset(self, x=None, y=None):
        """Increases/decreases the offset for the active image, if set to 0 that offset dimension is reset

        Returns the current [x, y] offset list when called with no
        arguments; otherwise returns 1 if the offset actually changed
        value, or 0 if it did not (e.g. a nudge clamped back to its old
        value).
        """
        old_x = self.active.image.offset[0]
        old_y = self.active.image.offset[1]
        changed = False
        if x == 0:
            # 0 is a reset request, not a zero-length nudge
            self.active.image.offset[0] = 0
            changed = True
        elif x is not None:
            self.active.image.offset[0] += x
            if not config.negative_offset_allowed:
                if self.active.image.offset[0] < 0:
                    self.active.image.offset[0] = 0 # Limit to 0
            changed = True
        if y == 0:
            # As above: 0 resets the y offset
            self.active.image.offset[1] = 0
            changed = True
        elif y is not None:
            self.active.image.offset[1] += y
            if not config.negative_offset_allowed:
                if self.active.image.offset[1] < 0:
                    self.active.image.offset[1] = 0 # Limit to 0
            changed = True
        if changed is True:
            logging.debug("Active Image offset changed to: %s" % str(self.active.image.offset))
            self.on_change()
            # `changed` only says a set was attempted; comparing against the
            # old values determines whether anything really moved
            if old_x != self.active.image.offset[0] or old_y != self.active.image.offset[1]:
                return 1
            else:
                return 0
        else:
            return self.active.image.offset
    def active_image_path(self, path=None):
        """Set or return the path of the active image

        Delegates to ProjectImage.path() on the currently active image.
        """
        return self.active_image().path(path)
    def active_image(self, direction=None, season=None, frame=None, layer=None):
        """Set or return the currently active image

        When called with no arguments (or with selectors equal to the
        current ones) the active ProjectImage is returned.  When any
        selector actually changes, the cached image is refreshed via
        update_image() and None is returned (note the asymmetry).
        """
        # If parameters have been changed at all, update
        changed = False
        if direction is not None and direction != self.active.direction:
            self.active.direction = direction
            changed = True
            logging.debug("Active Image direction changed to: %s" % str(self.active.direction))
        if season is not None and season != self.active.season:
            self.active.season = season
            changed = True
            logging.debug("Active Image season changed to: %s" % str(self.active.season))
        if frame is not None and frame != self.active.frame:
            self.active.frame = frame
            changed = True
            logging.debug("Active Image frame changed to: %s" % str(self.active.frame))
        if layer is not None and layer != self.active.layer:
            self.active.layer = layer
            changed = True
            logging.debug("Active Image layer changed to: %s" % str(self.active.layer))
        if changed is True:
            self.active.update_image()
        else:
            return self.active.image
def x(self, value=None):
"""Set or return X dimension"""
if value is not None:
if value in config.choicelist_dims:
self.dims.x = int(value)
logging.info("X dimension set to %i" % self.dims.x)
self.on_change()
return 0
else:
logging.warn("Attempt to set X dimension failed - Value (%s) outside of acceptable range" % str(value))
return 1
else:
return self.dims.x
def y(self, value=None):
"""Set or return Y dimension"""
if value is not None:
if value in config.choicelist_dims:
self.dims.y = int(value)
logging.info("Y dimension set to %i" % self.dims.y)
self.on_change()
return 0
else:
logging.warn("Attempt to set Y dimension failed - Value (%s) outside of acceptable range" % str(value))
return 1
else:
return self.dims.y
def z(self, value=None):
"""Set or return Z dimension"""
if value is not None:
if value in config.choicelist_dims_z:
self.dims.z = int(value)
logging.info("Z dimension set to %i" % self.dims.z)
self.on_change()
return 0
else:
logging.warn("Attempt to set Z dimension failed - Value (%s) outside of acceptable range" % str(value))
return 1
else:
return self.dims.z
def paksize(self, value=None):
"""Set or return paksize"""
if value is not None:
if int(value) in range(16, 32766):
self.dims.paksize = int(value)
logging.info("Paksize set to | |
title, description, configuration, dry_run=True):
"""Creates a new Scan process with the given configuration and returns
the new Scan model. All changes to the database will occur in an atomic transaction.
:param name: The identifying name of this Scan process
:type name: string
:param title: The human-readable name of this Scan process
:type title: string
:param description: A description of this Scan process
:type description: string
:param configuration: The Scan configuration
:type configuration: dict
:param dry_run: Whether the scan will execute as a dry run
:type dry_run: bool
:returns: The new Scan process
:rtype: :class:`ingest.models.Scan`
:raises :class:`ingest.scan.configuration.exceptions.InvalidScanConfiguration`: If the configuration is
invalid.
"""
# Validate the configuration, no exception is success
config = ScanConfiguration(configuration)
config.validate()
scan = Scan()
scan.name = name
scan.title = title
scan.description = description
scan.configuration = config.get_dict()
scan.save()
return scan
    @transaction.atomic
    def edit_scan(self, scan_id, title=None, description=None, configuration=None):
        """Edits the given Scan process and saves the changes in the database. All database changes occur in an atomic
        transaction. An argument of None for a field indicates that the field should not change.

        :param scan_id: The unique identifier of the Scan process to edit
        :type scan_id: int
        :param title: The human-readable name of this Scan process
        :type title: string
        :param description: A description of this Scan process
        :type description: string
        :param configuration: The Scan process configuration
        :type configuration: dict
        :raises :class:`ingest.scan.configuration.exceptions.InvalidScanConfiguration`: If the configuration is
        invalid.

        Also raises ScanIngestJobAlreadyLaunched when the Scan's ingest job
        has already been queued (the Scan is then immutable).
        """
        # select_for_update: hold a row-level lock for the whole transaction
        scan = Scan.objects.select_for_update().get(pk=scan_id)
        # Once the real ingest job has been launched the Scan cannot be edited
        if scan.job:
            raise ScanIngestJobAlreadyLaunched
        # Validate the configuration, no exception is success
        if configuration:
            config = ScanConfiguration(configuration)
            config.validate()
            scan.configuration = config.get_dict()
        # Update editable fields
        if title:
            scan.title = title
        if description:
            scan.description = description
        scan.save()
def get_scan_job_type(self):
"""Returns the Scale Scan job type
:returns: The Scan job type
:rtype: :class:`job.models.JobType`
"""
return JobType.objects.get(name='scale-scan', version='1.0')
def get_scans(self, started=None, ended=None, names=None, order=None):
"""Returns a list of Scan processes within the given time range.
:param started: Query Scan processes updated after this amount of time.
:type started: :class:`datetime.datetime`
:param ended: Query Scan processes updated before this amount of time.
:type ended: :class:`datetime.datetime`
:param names: Query Scan processes associated with the name.
:type names: list[string]
:param order: A list of fields to control the sort order.
:type order: list[string]
:returns: The list of Scan processes that match the time range.
:rtype: list[:class:`ingest.models.Scan`]
"""
# Fetch a list of scans
scans = Scan.objects.select_related('job', 'job__job_type') \
.select_related('dry_run_job', 'dry_run_job__job_type') \
.defer('configuration')
# Apply time range filtering
if started:
scans = scans.filter(last_modified__gte=started)
if ended:
scans = scans.filter(last_modified__lte=ended)
# Apply additional filters
if names:
scans = scans.filter(name__in=names)
# Apply sorting
if order:
scans = scans.order_by(*order)
else:
scans = scans.order_by('last_modified')
return scans
def get_details(self, scan_id):
"""Returns the Scan process for the given ID with all detail fields included.
:param scan_id: The unique identifier of the Scan process.
:type scan_id: int
:returns: The Scan process with all detail fields included.
:rtype: :class:`ingest.models.Scan`
"""
scan = Scan.objects.select_related('job', 'job__job_type')
scan = scan.select_related('dry_run_job', 'dry_run_job__job_type')
scan = scan.get(pk=scan_id)
return scan
    @transaction.atomic
    def queue_scan(self, scan_id, dry_run=True):
        """Retrieves a Scan model and uses metadata to place a job to run the
        Scan process on the queue. All changes to the database will occur in an
        atomic transaction.

        :param scan_id: The unique identifier of the Scan process.
        :type scan_id: int
        :param dry_run: Whether the scan will execute as a dry run
        :type dry_run: bool
        :returns: The new Scan process
        :rtype: :class:`ingest.models.Scan`
        """
        # Lock the Scan row while the queued job is attached
        scan = Scan.objects.select_for_update().get(pk=scan_id)
        scan_type = self.get_scan_job_type()
        job_data = JobData()
        job_data.add_property_input('Scan ID', str(scan.id))
        job_data.add_property_input('Dry Run', str(dry_run))
        event_description = {'scan_id': scan.id}
        # Once a real (non-dry-run) job exists this Scan can never be queued
        # again -- not even as a dry run
        if scan.job:
            raise ScanIngestJobAlreadyLaunched
        if dry_run:
            event = TriggerEvent.objects.create_trigger_event('DRY_RUN_SCAN_CREATED', None, event_description, now())
            scan.dry_run_job = Queue.objects.queue_new_job(scan_type, job_data, event)
        else:
            event = TriggerEvent.objects.create_trigger_event('SCAN_CREATED', None, event_description, now())
            scan.job = Queue.objects.queue_new_job(scan_type, job_data, event)
        scan.save()
        return scan
class Scan(models.Model):
    """Represents an instance of a Scan process which will run and detect files
    in a workspace for ingest

    :keyword name: The identifying name of this Scan process
    :type name: :class:`django.db.models.CharField`
    :keyword title: The human-readable name of this Scan process
    :type title: :class:`django.db.models.CharField`
    :keyword description: An optional description of this Scan process
    :type description: :class:`django.db.models.TextField`
    :keyword configuration: JSON configuration for this Scan process
    :type configuration: :class:`django.contrib.postgres.fields.JSONField`
    :keyword dry_run_job: The job that is performing the Scan process as dry run
    :type dry_run_job: :class:`django.db.models.ForeignKey`
    :keyword job: The job that is performing the Scan process with ingests
    :type job: :class:`django.db.models.ForeignKey`
    :keyword file_count: Number of files identified by last execution of Scan
    :type file_count: :class:`django.db.models.BigIntegerField`
    :keyword created: When the Scan process was created
    :type created: :class:`django.db.models.DateTimeField`
    :keyword last_modified: When the Scan process was last modified
    :type last_modified: :class:`django.db.models.DateTimeField`
    """
    name = models.CharField(max_length=50, unique=True)
    title = models.CharField(blank=True, max_length=50, null=True)
    description = models.TextField(blank=True, null=True)
    configuration = django.contrib.postgres.fields.JSONField(default=dict)
    # PROTECT: a Scan's jobs cannot be deleted while the Scan references them
    dry_run_job = models.ForeignKey('job.Job', blank=True, null=True, on_delete=models.PROTECT, related_name='+')
    job = models.ForeignKey('job.Job', blank=True, null=True, on_delete=models.PROTECT, related_name='+')
    file_count = models.BigIntegerField(blank=True, null=True)
    created = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)
    # Custom manager supplying the create/edit/queue operations (ScanManager)
    objects = ScanManager()
    def get_scan_configuration(self):
        """Returns the configuration for this Scan process

        :returns: The configuration for this Scan process
        :rtype: :class:`ingest.scan.configuration.scan_configuration.ScanConfiguration`
        """
        return ScanConfiguration(self.configuration)
    def get_scan_configuration_as_dict(self):
        """Returns the configuration for this Scan process as a dict

        :returns: The configuration for this Scan process
        :rtype: dict
        """
        return self.get_scan_configuration().get_dict()
    class Meta(object):
        """meta information for database"""
        db_table = 'scan'
# Validation result triple for Strike configurations: (is_valid, errors, warnings)
StrikeValidation = namedtuple('StrikeValidation', ['is_valid', 'errors', 'warnings'])
class StrikeManager(models.Manager):
"""Provides additional methods for handling Strike processes
"""
    @transaction.atomic
    def create_strike(self, name, title, description, configuration):
        """Creates a new Strike process with the given configuration and returns the new Strike model. The Strike model
        will be saved in the database and the job to run the Strike process will be placed on the queue. All changes to
        the database will occur in an atomic transaction.

        :param name: The identifying name of this Strike process
        :type name: string
        :param title: The human-readable name of this Strike process
        :type title: string
        :param description: A description of this Strike process
        :type description: string
        :param configuration: The Strike configuration
        :type configuration: dict
        :returns: The new Strike process
        :rtype: :class:`ingest.models.Strike`
        :raises :class:`ingest.strike.configuration.exceptions.InvalidStrikeConfiguration`: If the configuration is
        invalid.
        """
        # Validate the configuration, no exception is success
        config = StrikeConfiguration(configuration)
        config.validate()
        # First save gives the Strike a primary key to reference in the job input
        strike = Strike()
        strike.name = name
        strike.title = title
        strike.description = description
        strike.configuration = config.get_dict()
        strike.save()
        strike_type = self.get_strike_job_type()
        job_data = JobData()
        # NOTE(review): `unicode` is Python-2-only; this will need str() when
        # the module is ported to Python 3 -- confirm the target runtime.
        job_data.add_property_input('Strike ID', unicode(strike.id))
        event_description = {'strike_id': strike.id}
        event = TriggerEvent.objects.create_trigger_event('STRIKE_CREATED', None, event_description, now())
        strike.job = Queue.objects.queue_new_job(strike_type, job_data, event)
        # Second save records the queued job on the Strike row
        strike.save()
        return strike
    @transaction.atomic
    def create_strike_v6(self, name, title, description, configuration):
        """Creates a new Strike process with the given configuration and returns the new Strike model. The Strike model
        will be saved in the database and the job to run the Strike process will be placed on the queue. All changes to
        the database will occur in an atomic transaction.

        Identical to create_strike() except the configuration is parsed with
        the v6 schema class (StrikeConfigurationV6).

        :param name: The identifying name of this Strike process
        :type name: string
        :param title: The human-readable name of this Strike process
        :type title: string
        :param description: A description of this Strike process
        :type description: string
        :param configuration: The Strike configuration
        :type configuration: dict
        :returns: The new Strike process
        :rtype: :class:`ingest.models.Strike`
        :raises :class:`ingest.strike.configuration.exceptions.InvalidStrikeConfiguration`: If the configuration is
        invalid.
        """
        # Validate the configuration, no exception is success
        config = StrikeConfigurationV6(configuration)
        config.validate()
        # First save gives the Strike a primary key to reference in the job input
        strike = Strike()
        strike.name = name
        strike.title = title
        strike.description = description
        strike.configuration = config.get_dict()
        strike.save()
        strike_type = self.get_strike_job_type()
        job_data = JobData()
        # NOTE(review): `unicode` is Python-2-only; this will need str() when
        # the module is ported to Python 3 -- confirm the target runtime.
        job_data.add_property_input('Strike ID', unicode(strike.id))
        event_description = {'strike_id': strike.id}
        event = TriggerEvent.objects.create_trigger_event('STRIKE_CREATED', None, event_description, now())
        strike.job = Queue.objects.queue_new_job(strike_type, job_data, event)
        # Second save records the queued job on the Strike row
        strike.save()
        return strike
@transaction.atomic
def edit_strike(self, strike_id, title=None, description=None, configuration=None):
"""Edits the given Strike process and saves the changes in the database. All database changes occur in an atomic
transaction. An argument of None for a field indicates that the field should not change.
:param strike_id: The unique identifier of the Strike process to edit
:type strike_id: int
:param title: The human-readable name of this Strike process
:type title: string
:param description: A description of this Strike process
:type description: string
:param configuration: The Strike process configuration
:type configuration: dict
:raises :class:`ingest.strike.configuration.exceptions.InvalidStrikeConfiguration`: If the configuration is
invalid.
"""
strike | |
import cv2
import os
import sys
import math
import json
import shutil
import random
import argparse
import matplotlib
from xml.dom import minidom
import numpy as np
from matplotlib.patches import Polygon
from shapely.geometry import Polygon as shape_poly
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from shapely.geometry import Polygon as shape_poly
from collections import defaultdict
from PIL import Image, ImageDraw
from utils.utils import BBox,polygon_to_bbox,has_overlap,get_volume, semmap_to_lightmap
from utils.svg_utils import PolygonWall, get_polygon, get_icon, get_points, get_direction
from utils.semantics import *
import igibson
# Command-line interface for converting one Cubicasa5k model directory
# (containing model.svg / F1_scaled.png) into iGibson scene data.
parser = argparse.ArgumentParser("Convert Cubicasa5k...")
parser.add_argument('--model_dir', dest='model_dir')  # source model folder
parser.add_argument('--viz', dest='viz', action='store_true')  # boolean flag, defaults to False
parser.add_argument('--save_root', dest='save_root',
                    default=os.path.join(igibson.cubicasa_dataset_path, 'scenes'))
def get_z_dim(e):
    """Parse the vertical-dimension annotation of a furniture element.

    The element's <desc> child must hold exactly four space-separated
    "Key:value" tokens.  Returns them as a {key: float} dict.  Raises
    ValueError when the annotation is missing or malformed, or when the
    required 'Height'/'Elevation' keys are absent.
    """
    desc_nodes = [node for node in e.childNodes if node.nodeName == 'desc']
    if not desc_nodes:
        raise ValueError('Furniture has no z dim annotation, should skip')
    tokens = desc_nodes[0].firstChild.nodeValue.split(' ')
    if len(tokens) != 4:
        raise ValueError('Furniture should have 4 dimension info. {}'.format(tokens))
    dims = {}
    for token in tokens:
        if ":" not in token or len(token.split(':')) != 2:
            raise ValueError('Furniture dim should have : as separator, and two elements. {}'.format(tokens))
        key, raw_value = token.split(':')
        dims[key] = float(raw_value)
    if 'Height' not in dims or 'Elevation' not in dims:
        raise ValueError('Height/Elevantion key not present in {}'.format(tokens))
    return dims
def get_transformation_matrix(ee):
    """Build the 3x3 affine matrix from an element's SVG transform attribute.

    Expects transform="matrix(a,b,c,d,e,f)".  When the element belongs to
    a "FixedFurnitureSet" group, the parent's matrix is composed on the
    left so the returned matrix maps element coordinates to the document.
    """
    def _parse(attr):
        # "matrix(a,b,c,d,e,f)" -> 3x3 homogeneous matrix
        parts = attr.split(',')
        a = float(parts[0][7:])     # strip the leading "matrix("
        b = float(parts[1])
        c = float(parts[2])
        d = float(parts[3])
        e = float(parts[-2])
        f = float(parts[-1][:-1])   # strip the trailing ")"
        return np.array([[a, c, e],
                         [b, d, f],
                         [0, 0, 1]])

    matrix = _parse(ee.getAttribute("transform"))
    if ee.parentNode.getAttribute("class") == "FixedFurnitureSet":
        matrix = np.matmul(_parse(ee.parentNode.getAttribute("transform")), matrix)
    return matrix
def get_rotation_angle(ee):
    """Recover the rotation angle (radians) from the element's transform.

    cos values drifting outside [-1, 1] are clamped: positive overflow
    maps to 0, negative to pi.  NOTE(review): the sign of the sin term is
    ignored, so the result is always in [0, pi] -- confirm callers only
    need that range.
    """
    cos_term = get_transformation_matrix(ee)[0, 0]
    if abs(cos_term) > 1.:
        return 0 if cos_term > 0 else math.pi
    return np.arccos(cos_term)
def get_category(e):
    """Extract the furniture category from a "FixedFurniture <cat>" class attr.

    Returns None for non-furniture elements and for empty or unparseable
    categories.  The "ElectricalAppliance" prefix is stripped, so e.g.
    "ElectricalApplianceFridge" yields "Fridge".
    """
    class_attr = e.getAttribute("class")
    if "FixedFurniture " not in class_attr:
        return None
    category = class_attr.split("FixedFurniture ")[-1]
    if category.startswith('ElectricalAppliance'):
        pieces = category.split('ElectricalAppliance')
        if len(pieces) != 2:
            return None
        category = pieces[1].strip()
    if category == "":
        return None
    return category
def get_polygon(e):
    """Return (X, Y) vertex arrays of the element's <polygon> child, scaled by 1/100.

    Point coordinates in the SVG are "y,x" pairs; each coordinate is
    rounded before scaling.  The trailing empty token produced by the
    final space in the points attribute is dropped.
    NOTE(review): this definition shadows the get_polygon imported from
    utils.svg_utils at the top of the file -- confirm which one later
    code intends to use.
    """
    poly_node = next(p for p in e.childNodes if p.nodeName == "polygon")
    raw_points = poly_node.getAttribute("points").split(' ')[:-1]
    xs, ys = np.array([]), np.array([])
    for pair in raw_points:
        y_str, x_str = pair.split(',')
        xs = np.append(xs, np.round(float(x_str)))
        ys = np.append(ys, np.round(float(y_str)))
    return xs / 100., ys / 100.
def get_wall(svg, shape):
    """Collect wall bounding boxes and rasterise them into a binary mask.

    svg   -- parsed SVG DOM document
    shape -- (height, width) of the floorplan image in pixels
    Returns (wall_bboxes, wall_image), where wall_image is a PIL mode-'1'
    image with the wall polygons filled in.
    """
    wall_bboxes = []
    height, width = shape
    # NOTE(review): PIL Image.new expects (width, height); (height, width) is
    # passed here, matching the axis swap used throughout this script --
    # confirm behaviour on non-square floorplans.
    wall_image = Image.new('1', (height, width), 0)
    d = ImageDraw.Draw(wall_image)
    for e in svg.getElementsByTagName('g'):
        try:
            if e.getAttribute("id") == "Wall":
                # Walls get a fixed (0, 2.4) vertical extent
                wall = PolygonWall(e, 1, shape)
                bbox = polygon_to_bbox(wall.Y, wall.X, (0,2.4), None)
                wall_bboxes.append(bbox)
                # Scale coordinates back up by 100 to draw in pixel space
                coords = bbox.get_coords()*100.
                y = coords[:,0]
                x = coords[:,1]
                d.polygon(list(zip(y,x)),fill=1)
        except ValueError as k:
            # PolygonWall signals degenerate walls with ValueError('small wall');
            # those are silently skipped, any other error is re-raised
            if str(k) != 'small wall':
                raise k
            continue
    return wall_bboxes, wall_image
def get_window_door(svg):
    """Collect window and door bounding boxes from the SVG document.

    Windows get a fixed (0.8, 2.1) vertical span; doors span (0, 2.2).
    Returns (window_bboxes, door_bboxes).
    """
    windows, doors = [], []
    for e in svg.getElementsByTagName('g'):
        element_id = e.getAttribute("id")
        if element_id == "Window":
            X, Y = get_points(e)
            windows.append(polygon_to_bbox(Y, X, (0.8, 2.1), None))
        elif element_id == "Door":
            # Doors are represented as full-height empty spans from the floor
            X, Y = get_points(e)
            doors.append(polygon_to_bbox(Y, X, (0, 2.2), None))
    return windows, doors
def get_furniture(svg, wall_image):
    """Extract fixed-furniture bounding boxes from the SVG document.

    Skips unknown or ignored categories and non-quadrilateral icons; the
    vertical extent comes from each element's Height/Elevation annotation.
    Returns a list of (mapped_category, bbox) tuples.
    """
    furniture = []
    for e in svg.getElementsByTagName('g'):
        if "FixedFurniture " not in e.getAttribute("class"):
            continue
        category = get_category(e)
        if category is None or category in cubi_to_skip:
            continue
        rr, cc, X, Y = get_icon(e)
        if len(X) != 4:
            # Only simple four-point icons are handled
            continue
        z_dim = get_z_dim(e)
        z_min = z_dim['Elevation'] / 100.
        z_max = z_min + z_dim['Height'] / 100.
        bbox = polygon_to_bbox(Y, X, (z_min, z_max), get_rotation_angle(e), flip_image=wall_image)
        furniture.append((cubi_cat_mapping[category], bbox))
    return furniture
def get_floor(svg):
    """Collect (room_type, polygon) pairs for every Space element.

    Room types not present in cubi_all_rooms map to 'undefined'; polygon
    vertices are returned as an (N, 2) array of (y, x) scaled by 1/100.
    """
    floors = []
    for e in svg.getElementsByTagName('g'):
        if "Space " not in e.getAttribute("class"):
            continue
        raw_type = e.getAttribute("class").split(" ")[1]
        if raw_type in cubi_all_rooms:
            room_type = cubi_all_rooms[raw_type]
        else:
            room_type = 'undefined'
        X, Y = get_points(e)
        floors.append((room_type,
                       np.vstack([Y / 100., X / 100.]).transpose()))
    return floors
def main():
args = parser.parse_args()
model_dir = os.path.normpath(args.model_dir)
print(model_dir)
# model_id = "_".join(model_dir.split('/')[-2:])
model_id = os.path.basename(os.path.normpath(model_dir))
svg_file = os.path.join(model_dir, 'model.svg')
img_path = os.path.join(model_dir, 'F1_scaled.png')
svg = minidom.parse(svg_file) # parseString also exists
fplan = cv2.imread(img_path)
fplan = cv2.cvtColor(fplan, cv2.COLOR_BGR2RGB) # correct color channels
height, width, nchannel = fplan.shape
shape = height, width
wall_bboxes, wall_image = get_wall(svg, shape)
window_bboxes, door_bboxes = get_window_door(svg)
furniture_bboxes = get_furniture(svg, wall_image)
floor_polygons = get_floor(svg)
overlaps = defaultdict(lambda : [])
for i in range(len(furniture_bboxes)):
for j in range(i+1, len(furniture_bboxes)):
if has_overlap(furniture_bboxes[i][1],
furniture_bboxes[j][1]):
overlaps[(i,furniture_bboxes[i][1])].append((j,furniture_bboxes[j][1]))
overlaps[(j,furniture_bboxes[j][1])].append((i,furniture_bboxes[i][1]))
to_delete = []
for _ in range(len(overlaps)):
overlaps_list = list(overlaps.items())
overlaps_list.sort(key=lambda x:-get_volume(x[0][1]))
overlaps_list.sort(key=lambda x:-len(x[1]))
o = overlaps_list[0]
# try shrinking
shrink_success = False
bbox = o[0][1]
edge_x_og = bbox.edge_x[:]
edge_y_og = bbox.edge_y[:]
for scale_factor in range(20):
scale = 1. - 0.01 * scale_factor
# try scale edge_x
bbox.edge_x = edge_x_og * scale
overlap = False
for i in o[1]:
if has_overlap(bbox, i[1]):
overlap=True
break
if not overlap:
shrink_success = True
break
bbox.edge_x = edge_x_og
# try scale edge_y
bbox.edge_y = edge_y_og * scale
overlap = False
for i in o[1]:
if has_overlap(bbox, i[1]):
overlap=True
break
if not overlap:
shrink_success = True
break
bbox.edge_y = edge_y_og
# try scale both
bbox.edge_y = edge_y_og * scale
bbox.edge_x = edge_x_og * scale
overlap = False
for i in o[1]:
if has_overlap(bbox, i[1]):
overlap=True
break
if not overlap:
shrink_success = True
break
if shrink_success:
furniture_bboxes[o[0][0]] = (furniture_bboxes[o[0][0]][0], bbox)
else:
# add to delete
to_delete.append(o[0][0])
# update graph
for j in o[1]:
overlaps[j].remove(o[0])
del overlaps[o[0]]
for i in sorted(to_delete, reverse=True):
del furniture_bboxes[i]
##################################
# Splitting into separate floors #
##################################
total_image = Image.new('1', (height, width), 0)
d = ImageDraw.Draw(total_image)
for group in [wall_bboxes, door_bboxes, window_bboxes]:
for bbox in group:
coords = bbox.get_coords()*100.
y = coords[:,0]
x = coords[:,1]
d.polygon(list(zip(y,x)),fill=1)
for _,bbox in furniture_bboxes:
coords = bbox.get_coords()*100.
y = coords[:,0]
x = coords[:,1]
d.polygon(list(zip(y,x)),fill=1)
for _,coords in floor_polygons:
y = coords[:,0]*100.
x = coords[:,1]*100.
d.polygon(list(zip(y,x)),fill=1)
int_image = np.array(total_image).astype(np.uint8)
binary = cv2.threshold(int_image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
ret, labels = cv2.connectedComponents(binary)
masks = []
for label in range(1,ret):
mask = np.zeros_like(int_image)
mask[labels == label] = 1
masks.append(mask)
# iterate through list of masks, each of which is a floor
for floor_i,mask_i in enumerate(masks):
wall_bboxes_i = []
window_bboxes_i = []
door_bboxes_i= []
furniture_bboxes_i = []
floor_polygons_i = []
for bbox in wall_bboxes:
y,x = (bbox.center*100.).astype(int)
if y >= height:
y = height - 1
if x >= width:
x = width- 1
if mask_i[x,y] == 1:
wall_bboxes_i.append(bbox)
for bbox in door_bboxes:
y,x = (bbox.center*100.).astype(int)
if y >= height:
y = height - 1
if x >= width:
x = width- 1
if mask_i[x,y] == 1:
door_bboxes_i.append(bbox)
for bbox in window_bboxes:
y,x = (bbox.center*100.).astype(int)
if y >= height:
y = height - 1
if x >= width:
x = width- 1
if mask_i[x,y] == 1:
window_bboxes_i.append(bbox)
for c,bbox in furniture_bboxes:
y,x = (bbox.center*100.).astype(int)
if y >= height:
y = height - 1
if x >= width:
x = width- 1
if mask_i[x,y] == 1:
furniture_bboxes_i.append((c,bbox))
for poly in floor_polygons:
y,x = (np.array(
shape_poly(poly[1]).representative_point())*100.
).astype(int)
if y >= height:
y = height - 1
if x >= width:
x = width- 1
if mask_i[x,y] == 1:
floor_polygons_i.append(poly)
if len(wall_bboxes_i) < 4 or len(floor_polygons_i) < 1:
# This suggests that the mask doesn't represent a floor
continue
model_dir = os.path.join(args.save_root,
'{}_floor_{}'.format(model_id,floor_i))
os.makedirs(model_dir, exist_ok=True)
save_dir = os.path.join(model_dir, 'misc')
os.makedirs(save_dir, exist_ok=True)
with open(os.path.join(save_dir, 'wall.json'), 'w') as fp:
json.dump([bbox.as_dict() for bbox in wall_bboxes_i], fp)
with open(os.path.join(save_dir, 'window.json'), 'w') as fp:
json.dump([bbox.as_dict() for bbox in window_bboxes_i], fp)
with open(os.path.join(save_dir, 'door.json'), 'w') as fp:
json.dump([bbox.as_dict() for bbox in door_bboxes_i], fp)
with open(os.path.join(save_dir, 'furniture.json'), 'w') as fp:
json.dump([(cat,bbox.as_dict()) for
cat,bbox in furniture_bboxes_i], fp)
with open(os.path.join(save_dir, 'floor.json'), 'w') as fp:
json.dump([(cat,poly.tolist()) for cat,poly in
floor_polygons_i], fp)
layout_dir = os.path.join(model_dir, 'layout')
os.makedirs(layout_dir, exist_ok=True)
coords = [bbox.get_coords() for bbox in wall_bboxes_i]
stacked = np.vstack(coords)
xmin, ymin = stacked.min(axis=0)
xmax, ymax = stacked.max(axis=0)
max_length = np.max([np.abs(xmin), np.abs(ymin), np.abs(xmax), np.abs(ymax)])
max_length = np.ceil(max_length).astype(np.int)
ins_image = Image.new('L', (2 * max_length * 100, 2 * max_length * 100), 0)
d1 = ImageDraw.Draw(ins_image)
sem_image = Image.new('L', (2 * max_length * 100, 2 * max_length * 100), 0)
d2 = ImageDraw.Draw(sem_image)
for i, (cat, poly) in enumerate(floor_polygons_i):
room_id = rooms.index(cat)
pts = [((x + max_length)*100.,(y + max_length)*100.)
for x,y in poly]
d1.polygon(pts,fill=i+1)
d2.polygon(pts,fill=room_id+1)
ins_image.save(os.path.join(layout_dir, 'floor_insseg_0.png'))
sem_image.save(os.path.join(layout_dir, 'floor_semseg_0.png'))
padded_image = Image.new('L', (3000, 3000), 0)
og_size = sem_image.size
padded_image.paste(sem_image,
((3000-og_size[0])//2,
(3000-og_size[1])//2))
light_image = semmap_to_lightmap(np.array(padded_image))
light_image.save(os.path.join(layout_dir, | |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Core Classes and Functions.
This file stores the core classes and functions for the MRBLEs Analysis module.
"""
# [File header] | Copy and edit for each file in this project!
# title : core.py
# description : MRBLEs - Core Functions
# author : <NAME>
# credits : <NAME>, <NAME>
# date : 20160308
# [Future imports]
from __future__ import (absolute_import, division, print_function)
from builtins import (super, range, zip, round, int, object)
# [Modules]
# General Python
import multiprocessing as mp
import sys
import types
import warnings
from math import ceil, sqrt
# Other
import cv2
import numpy as np
import pandas as pd
import photutils
import skimage as sk
import skimage.morphology
import skimage.segmentation
import xarray as xr
from matplotlib import pyplot as plt
from packaging import version
from scipy import ndimage as ndi
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.mixture import GaussianMixture
# Intra-Package dependencies
from mrbles.data import ImageDataFrame
# Function compatibility between Python 2.x and 3.x
if sys.version_info < (3, 0):
    # Parallel bead-finding (multiprocessing) is only supported on Python 3.
    warnings.warn(
        "mrbles: Please use Python >3.6 for multiprocessing.")
# NumPy compatibility: the default of the ``rcond`` argument changed in
# NumPy 1.14.  RCOND is presumably forwarded as ``rcond=`` to
# ``np.linalg.lstsq`` calls elsewhere in this module -- TODO confirm.
if version.parse(np.__version__) < version.parse("1.14.0"):
    warnings.warn('mrbles: Please upgrade module NumPy >1.14.0!')
    RCOND = -1
else:
    RCOND = None
# Decorators
def accepts(*types):  # NOQA
    """Check input parameters for data types.

    Parameters
    ----------
    *types : type
        One expected type per positional argument of the decorated
        function; each argument is checked with ``isinstance``.

    Raises
    ------
    AssertionError
        If the number of declared types does not match the function's
        positional argument count, or a call argument fails its check.
    """
    import functools

    def _check_accepts(func):
        # One declared type per positional argument.
        assert len(types) == func.__code__.co_argcount

        # functools.wraps preserves __name__, __doc__ etc. on the wrapper
        # (the old code only set the Python-2-era ``func_name`` attribute).
        @functools.wraps(func)
        def _new_func(*args, **kwds):
            for (arg_f, type_f) in zip(args, types):
                assert isinstance(arg_f, type_f), \
                    "arg %r does not match %s" % (arg_f, type_f)
            return func(*args, **kwds)
        # Kept for backward compatibility with the previous implementation.
        _new_func.func_name = func.__name__
        return _new_func
    return _check_accepts
# Classes
class FindBeadsImaging(ImageDataFrame):
"""Find and identify beads and their regions using imaging.
Parallel computing version.
Parameters
----------
bead_size : int
Approximate width of beads (circles) in pixels.
border_clear : boolean
Beads touching border or ROI will be removed.
Defaults to True.
circle_size : int
Set circle size for auto find circular ROI.
Attributes
----------
area_min : float
Sets the minimum area in pixels. Set to minimum size inside of ring.
Defaults to 0.1 * area of set bead_size.
area_max : float
Sets the maximum area in pixels. Set maximum size outside of ring.
Defaults to 1.5 * area of set bead_size.
eccen_max : float
Get or set maximum eccentricity of beads in value 0 to 1, where a
perfect circle is 0.
Defaults to 0.65.
"""
def __init__(self, bead_size,
border_clear=True, circle_size=None, parallelize=False):
"""Find and identify beads and their regions using imaging."""
super(FindBeadsImaging, self).__init__()
self._bead_size = bead_size
self.border_clear = border_clear
self.circle_size = circle_size
self.parallelize = parallelize
# Default values for filtering
self._area_min = 0.25 * self.circle_area(bead_size)
self._area_max = 1.5 * self.circle_area(bead_size)
self._eccen_max = 0.65
# Default values for local background
self.mask_bkg_size = 11
self.mask_bkg_buffer = 2
# Data set
self._dataframe = None
self._bead_dims = None
# Adaptive Thersholding
self.thr_block = 15
self.thr_c = 11
    # Properties - Settings
    @property
    def bead_size(self):
        """Get approximate width of beads (circles) in pixels.

        Read-only: set via the ``bead_size`` constructor argument.
        """
        return self._bead_size
    @property
    def area_min(self):
        """Get or set minimum area of beads (circles) in pixels."""
        return self._area_min
    @area_min.setter
    def area_min(self, value):
        self._area_min = value
    @property
    def area_max(self):
        """Get or set maximum area of beads (circles) in pixels."""
        return self._area_max
    @area_max.setter
    def area_max(self, value):
        self._area_max = value
    @property
    def eccen_max(self):
        """Get or set maximum eccentricity of beads from 0 to 1.

        A perfect circle is 0 and parabola is 1.
        """
        return self._eccen_max
    @eccen_max.setter
    def eccen_max(self, value):
        self._eccen_max = value
# Main function
def find(self, image):
"""Execute finding beads image(s)."""
if image.ndim == 3:
if (sys.version_info >= (3, 0)) and (self.parallelize is True):
mp_worker = mp.Pool()
result = mp_worker.map(self._find, image)
mp_worker.close()
mp_worker.join()
else:
result = list(map(self._find, image))
r_m = [i[0] for i in result]
r_d = [i[1] for i in result]
self._dataframe = xr.concat(r_m, dim='f')
self._bead_dims = pd.concat(r_d,
keys=list(range(len(r_d))),
names=['f', 'bead_index'])
else:
self._dataframe, self._bead_dims = self._find(image)
    def _find(self, image):
        """Process one frame: segment beads and build the per-bead masks.

        Returns a list ``[masks, bead_dims]`` where ``masks`` is an
        ``xarray.DataArray`` (uint16, dims c/y/x, one channel per mask
        type) and ``bead_dims`` holds the found bead dimensions (or
        ``None`` when nothing usable was found).
        """
        # Optionally restrict the analysis to an auto-detected circular ROI.
        if self.circle_size is not None:
            img, roi_mask = self.circle_roi(image, self.circle_size)
        else:
            img = self._img2ubyte(image)
        bin_img = self.img2bin(img, self.thr_block, self.thr_c)
        mask_inside, _ = self._find_inside(bin_img)
        if np.unique(mask_inside).size <= 1:
            # No bead cores found at all: return all-blank masks.
            blank_img = np.zeros_like(bin_img)
            mask_bead = blank_img
            mask_ring = blank_img
            mask_outside = blank_img
            mask_inside = blank_img
            mask_bkg = blank_img
            bead_dims = None
            overlay_image = blank_img
        else:
            # Grow the cores to full beads via watershed on the binary image.
            mask_bead, mask_bead_neg = self._find_watershed(mask_inside,
                                                            bin_img)
            # Create and update final masks
            mask_ring = mask_bead - mask_inside
            mask_ring[mask_ring < 0] = 0
            # Drop core pixels that belong to beads rejected by filtering
            # (negative labels in mask_bead_neg).
            mask_inside[mask_bead_neg < 0] = 0
            # Create outside and buffered background areas around bead
            mask_outside = self.make_mask_outside(mask_bead,
                                                  self.mask_bkg_size,
                                                  buffer=0)
            mask_bkg = self.make_mask_outside(mask_bead_neg,
                                              self.mask_bkg_size,
                                              buffer=self.mask_bkg_buffer)
            if self.circle_size is not None:
                # Background must stay inside the circular ROI.
                mask_bkg[~roi_mask] = 0
            # Remove negative (filtered-out) labels from the background.
            mask_bkg[mask_bkg < 0] = 0
            bead_dims = self.get_dimensions(mask_bead)
            if bead_dims is None:
                # Dimension extraction failed: fall back to blank output.
                blank_img = np.zeros_like(bin_img)
                mask_bead = blank_img
                mask_ring = blank_img
                mask_outside = blank_img
                mask_inside = blank_img
                mask_bkg = blank_img
                bead_dims = None
                overlay_image = blank_img
            else:
                # Visual check image: found circles drawn over the input.
                bead_dims_overlay = bead_dims.loc[:, ('x_centroid',
                                                      'y_centroid',
                                                      'radius')]
                overlay_image = self.cross_overlay(img,
                                                   bead_dims_overlay,
                                                   color=False)
        # Stack all masks into one DataArray, keyed by channel coordinate 'c'.
        masks = xr.DataArray(data=np.array([mask_bead,
                                            mask_ring,
                                            mask_inside,
                                            mask_outside,
                                            mask_bkg,
                                            overlay_image],
                                           dtype=np.uint16),
                             dims=['c', 'y', 'x'],
                             coords={'c': ['mask_full',
                                           'mask_ring',
                                           'mask_inside',
                                           'mask_outside',
                                           'mask_bkg',
                                           'mask_check']})
        return [masks, bead_dims]
def _find_inside(self, bin_img):
seg_img = self._bin2seg(bin_img)
filter_params_inside = [[self._area_min, self._area_max]]
filter_names_inside = ['area']
slice_types_inside = ['outside']
mask_inside, mask_inside_neg = self.filter_mask(seg_img,
filter_params_inside,
filter_names_inside,
slice_types_inside,
border_clear=False)
return mask_inside, mask_inside_neg
    def _find_watershed(self, mask_inside, bin_img):
        """Expand bead cores to whole beads via watershed, then filter.

        NOTE(review): ``sk.morphology.watershed`` was relocated to
        ``skimage.segmentation.watershed`` in newer scikit-image
        releases -- confirm the pinned skimage version still ships it.
        """
        bin_img_invert = self._img_invert(bin_img)
        # Union of the cores and the inverted binary image bounds the growth.
        mask_all_bin = mask_inside + bin_img_invert
        mask_all_bin[mask_all_bin > 0] = 1
        dist_trans = ndi.distance_transform_edt(mask_all_bin, sampling=3)
        # Watershed from the core markers on the negated distance map.
        mask_full = sk.morphology.watershed(np.negative(dist_trans),
                                            markers=mask_inside,
                                            mask=mask_all_bin)
        # Reject blobs that are too eccentric or outside the area limits.
        filter_params = [self._eccen_max,
                         [self.area_min, self.area_max]]
        filter_names = ['eccentricity', 'area']
        slice_types = ['up', 'outside']
        mask_bead, mask_bead_neg = self.filter_mask(mask_full,
                                                    filter_params,
                                                    filter_names,
                                                    slice_types,
                                                    self.border_clear)
        return mask_bead, mask_bead_neg
# Functions
def _data_return(self, value):
if self._dataframe.ndim > 3:
data = self._dataframe.loc[:, value].values
else:
data = self._dataframe.loc[value].values
return data
    def mask(self, mask_type):
        """Return labeled mask of the specified mask type.

        ``mask_type`` must be one of the names listed by ``mask_types``.
        """
        return self._data_return(mask_type)
    @property
    def mask_types(self):
        """Return list of mask types."""
        return self._dataframe.c.values.tolist()
    # Properties - Output values
    @property
    def bead_num(self):
        """Return the number of beads found in the labeled mask."""
        return self.get_unique_count(self._data_return("mask_full"))
    @property
    def bead_labels(self):
        """Return all positive labels of labeled mask."""
        return self.get_unique_values(self._data_return("mask_full"))
    @property
    def bead_dims(self):
        """Return found bead dimensions."""
        return self._bead_dims
# Class methods
@classmethod
def make_mask_outside(cls, mask, size, buffer=0):
"""Return labeled mask of area around bead."""
if buffer > 0:
mask_min = cls._morph_mask_step(buffer, mask)
else:
mask_min = mask
mask_outside = cls._morph_mask_step(size, mask)
mask_outside[mask_min > 0] = 0
return mask_outside
@classmethod
def img2bin(cls, image, thr_block=15, thr_c=11):
"""Convert and adaptive threshold image."""
img = cls._img2ubyte(image)
img_thr = cv2.adaptiveThreshold(src=img,
maxValue=1,
adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C, # NOQA
thresholdType=cv2.THRESH_BINARY,
blockSize=thr_block,
C=thr_c)
return img_thr
    @classmethod
    def filter_mask(cls, mask, filter_params, filter_names, slice_types,
                    border_clear=False):
        """Filter labeled mask based on provided parameters.

        Returns ``(mask_pos, mask_neg)``: in ``mask_pos`` rejected labels
        are zeroed, in ``mask_neg`` they are negated so callers can still
        identify which beads were filtered out.
        """
        # Get dimensions from the mask
        props = cls.get_dimensions(mask)
        # Get labels to be removed
        lbls_out = cls.filter_properties(
            props, filter_params, filter_names, slice_types)
        # Create new masks
        mask_pos = mask.copy()
        mask_neg = mask.copy()
        # Set mask to 0 or negative label for labels outside limits.
        if lbls_out.size > 0:
            for lbl in lbls_out:
                mask_pos[mask == lbl] = 0
                mask_neg[mask == lbl] = -lbl
        if border_clear is True:
            # bgval=-1 keeps border-touching beads distinguishable in the
            # negative mask.
            sk.segmentation.clear_border(mask_pos, in_place=True)
            sk.segmentation.clear_border(mask_neg, bgval=-1, in_place=True)
        return mask_pos, mask_neg
@classmethod
def filter_properties(cls, properties, filter_params, filter_names,
slice_types):
"""Get labels of areas outside of limits."""
lbls_out_tmp = [cls.filter_property(properties, param, name, stype)
for param, name, stype in zip(filter_params,
filter_names,
slice_types)]
lbls_out = np.unique(np.hstack(lbls_out_tmp))
return lbls_out
@classmethod
def circle_roi(cls, image, circle_size, hough_settings=None):
"""Apply a circular image ROI.
Parameters
----------
image : NumPy array image
hough_settings : list, int
Settings for HoughCircles in list.
list[0] = dp, list[1] = param1, list[2] = param2
"""
img = cls._img2ubyte(image)
# Default Hough settings
if hough_settings is None:
hough_dp = 2
hough_param1 = 10
hough_param2 = 7
else:
hough_dp = hough_settings[0]
hough_param1 = hough_settings[1]
hough_param2 = hough_settings[2]
dims = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT,
dp=hough_dp,
minDist=img.shape[0],
minRadius=circle_size,
maxRadius=img.shape[0],
param1=hough_param1,
param2=hough_param2)
if len(dims[0]) != 1:
mask_img = img
mask = np.zeros_like(img, dtype=np.uint8)
warnings.warn("No circular ROI found. Defaulting to whole image. "
"Please adjust circle_size, not use circle_size, or "
"crop images.")
else:
circle_y, circle_x, _ = np.round(np.ravel(dims[0])).astype(np.int)
mask = cls.sector_mask(img.shape,
[circle_x, circle_y],
circle_size)
mask_img = img.copy()
mask_img[~mask] = 0
return mask_img, mask
# Static methods
@staticmethod
def sector_mask(shape, center, radius):
"""Return a boolean mask for a circular ROI."""
mesh_x, mesh_y = np.ogrid[:shape[0], :shape[1]]
center_x, center_y = center
# convert cartesian --> polar coordinates
r_2 = (mesh_x - center_x) * (mesh_x - center_x) + \
(mesh_y - center_y) * (mesh_y - center_y)
circmask = r_2 <= radius * radius
return | |
to tell the model.simulate() function that it should assume explicit time',
'dependence by calling it as',
'``model.simulate(...,rates_have_explicit_time_dependence=True)``',
]))
self.quadratic_event_updates = quadratic_event_updates
self.quadratic_rate_functions = quadratic_rate_functions
self.quadratic_events = quadratic_events
return self
def dydt(self,t,y):
"""
Compute the current momenta of the epidemiological model.
Parameters
----------
t : :obj:`float`
Current time
y : numpy.ndarray
The entries correspond to the compartment frequencies
(or counts, depending on population size).
"""
ynew = sum([r(t,y) * dy for dy, r in zip(self.linear_event_updates, self.linear_rate_functions)])
ynew += sum([r(t,y) * dy for dy, r in zip(self.birth_event_updates, self.birth_rate_functions)])
if self.correct_for_dynamical_population_size:
population_size = y.sum()
else:
population_size = self.initial_population_size
ynew += sum([r(t,y)/population_size * dy for dy, r in zip(self.quadratic_event_updates, self.quadratic_rate_functions)])
return ynew
    def get_numerical_dydt(self):
        """
        Return a function that obtains ``t`` and ``y`` as an input and
        returns ``dydt`` of this system (suitable for ODE integrators).
        """
        return self.dydt
    def get_time_leap_and_proposed_compartment_changes(self,
                                                       t,
                                                       current_event_rates = None,
                                                       get_event_rates = None,
                                                       get_compartment_changes = None,
                                                       use_ivp_solver = None,
                                                       rates_have_explicit_time_dependence = None,
                                                       ):
        """
        For the current event rates, obtain a proposed
        time leap and concurrent state change vector.

        This method is needed for stochastic simulations.

        Parameters
        ----------
        t : float
            current time
        current_event_rates : list, default = None
            A list of constant rate values.
            Will be ignored if
            ``self.rates_have_explicit_time_dependence`` is ``True``,
            which is why ``None`` is a valid value.
        get_event_rates : function, default = None
            A function that takes time ``t`` and current
            state ``y`` as input and computes the rates of
            all possible events.
            If ``None``, will attempt
            to set this to self.get_event_rates().
        get_compartment_changes : function, default = None
            A function that takes computed event rates
            and returns a random state change with
            probability proportional to its rate.
            If ``None``, will attempt
            to set this to self.get_compartment_changes().
        use_ivp_solver : bool, default = None
            Whether or not to use an initial value problem solver
            to obtain a time leap in explicitly time-dependent
            problems.
            If ``None``, will use the value
            of the class attribute ``self.use_ivp_solver``.
        rates_have_explicit_time_dependence : bool, default = None
            Whether or not the problem is explicitly time-dependent.
            If ``None``, will use the value
            of the class attribute ``self.rates_have_explicit_time_dependence``.

        Returns
        -------
        tau : float
            A time leap.
        dy : numpy.ndarray
            A state change vector.
        """
        # Fall back to instance-level defaults for anything not supplied.
        if get_event_rates is None:
            get_event_rates = self.get_event_rates
        if get_compartment_changes is None:
            get_compartment_changes = self.get_compartment_changes
        if use_ivp_solver is None:
            use_ivp_solver = self.use_ivp_solver
        if rates_have_explicit_time_dependence is None:
            rates_have_explicit_time_dependence = self.rates_have_explicit_time_dependence
        if rates_have_explicit_time_dependence:
            # solve the integral numerically
            if use_ivp_solver:
                new_t = time_leap_ivp(t, self.y0, get_event_rates)
            else:
                new_t = time_leap_newton(t, self.y0, get_event_rates)
            tau = new_t - t
            # The rates at the proposed event time pick the event type.
            proposed_event_rates = get_event_rates(new_t, self.y0)
            dy = get_compartment_changes(proposed_event_rates)
        else:
            # Constant rates: standard Gillespie exponential waiting time.
            # NOTE(review): this branch assumes ``current_event_rates`` was
            # passed in -- a ``None`` here raises AttributeError; confirm
            # all callers provide it when rates are time-independent.
            total_event_rate = current_event_rates.sum()
            tau = np.random.exponential(1/total_event_rate)
            dy = get_compartment_changes(current_event_rates)
        return tau, dy
def get_compartment_changes(self, rates):
"""
Sample a state change vector with probability
proportional to its rate in ``rates``.
Needed for stochastic simulations.
Parameters
==========
rates : numpy.ndarray
A non-zero list of rates.
Expects ``rates`` to be sorted according
to
``self.birth_event_updates + self.linear_event_updates + self.quadratic_event_updates``.
Returns
=======
dy : numpy.ndarray
A state change vector.
"""
idy = custom_choice(rates/rates.sum())
if idy < len(self.birth_event_updates):
return self.birth_event_updates[idy]
elif idy < len(self.birth_event_updates) + len(self.linear_event_updates):
idy -= len(self.birth_event_updates)
return self.linear_event_updates[idy]
else:
idy -= (len(self.birth_event_updates) + len(self.linear_event_updates))
return self.quadratic_event_updates[idy]
def get_event_rates(self, t, y):
"""
Get a list of rate values corresponding to the previously
set events.
Parameters
----------
t : float
Current time
y : numpy.ndarray
Current state vector
Returns
-------
rates : list
A list of rate values corresponding to rates.
Ordered as ``birth_rate_functions +
linear_rate_functions + quadratic_rate_functions``.
"""
rates = [r(t,y) for r in self.birth_rate_functions]
rates += [r(t,y) for r in self.linear_rate_functions]
if self.correct_for_dynamical_population_size:
population_size = self.y0.sum()
else:
population_size = self.initial_population_size
rates += [ r(t,self.y0)/population_size for r in self.quadratic_rate_functions ]
rates = np.array(rates)
return rates
    def get_numerical_event_and_rate_functions(self):
        """
        This function is needed to generalize
        stochastic simulations for child classes.

        Returns
        -------
        get_event_rates : func
            A function that takes the current time ``t`` and
            state vector ``y``
            and returns numerical event rate lists.
        get_compartment_changes : func
            A function that takes a numerical list of event ``rates``
            and returns a random event state change vector
            with probability proportional to its entry in ``rates``.
        """
        return self.get_event_rates, self.get_compartment_changes
    def simulate(self,
                 tmax,
                 return_compartments=None,
                 sampling_dt=None,
                 sampling_callback=None,
                 adopt_final_state=False,
                 use_ivp_solver=None,
                 rates_have_explicit_time_dependence=None,
                 ignore_warnings=False,
                 ):
        """
        Run a stochastic (Gillespie-style) simulation until ``tmax`` and
        return the sampled times together with a time series of counts
        for each requested compartment.

        Parameters
        ----------
        tmax : float
            maximum length of the simulation
        return_compartments : list of compartments, default = None:
            The compartments for which to return time series.
            If ``None``, all compartments will be returned.
        sampling_dt : float, default = None
            Temporal distance between samples of the compartment counts.
            If ``None``, every change will be returned.
        sampling_callback : function, default = None
            A function that's called when a sample is taken
        adopt_final_state : bool, default = False
            Whether to keep the final state and time in ``self.y0`` and
            ``self.t0`` when the simulation ends; otherwise the initial
            condition is restored.
        use_ivp_solver : bool, default = None
            Whether or not to use an initial value problem solver
            to obtain a time leap in explicitly time-dependent
            problems.
            If ``None``, will use the value
            of the class attribute ``self.use_ivp_solver``.
        rates_have_explicit_time_dependence : bool, default = None
            Whether or not the problem is explicitly time-dependent.
            If ``None``, will use the value
            of the class attribute ``self.rates_have_explicit_time_dependence``.
        ignore_warnings : bool, default = False
            whether or not to raise warnings about unset explicit time.

        Returns
        -------
        t : numpy.ndarray
            times at which compartment counts have been sampled
        result : dict
            Dictionary mapping a compartment to a time series of its count.
        """
        if return_compartments is None:
            return_compartments = self.compartments
        if sampling_callback is not None and sampling_dt is None:
            raise ValueError('A sampling callback function can only be set if sampling_dt is set, as well.')
        ndx = [self.get_compartment_id(C) for C in return_compartments]
        current_state = self.y0.copy()
        compartments = [ current_state.copy() ]
        if not adopt_final_state:
            # Remember the initial condition so it can be restored later.
            initial_state = current_state.copy()
            initial_time = self.t0
        t = self.t0
        time = [self.t0]
        get_event_rates, get_compartment_changes = self.get_numerical_event_and_rate_functions()
        current_event_rates = get_event_rates(t, self.y0)
        total_event_rate = current_event_rates.sum()
        if sampling_callback is not None:
            sampling_callback()
        # Warn when rates are functional but no explicit time dependence
        # has been declared (either per call or on the instance).
        if self.rates_have_functional_dependence and\
           (\
                ((rates_have_explicit_time_dependence is not None) and (not rates_have_explicit_time_dependence))\
                or ((rates_have_explicit_time_dependence is None) and (not self.rates_have_explicit_time_dependence))\
           ):
            if not ignore_warnings:
                warnings.warn('Some rates have a functional dependence but no explicit time dependence was detected or set. '+\
                              'In case you know that these rates change depending on time explicitly, call this function with keyword '+\
                              '``rates_have_explicit_time_dependence=True`` or set ``model.rates_have_explicit_time_dependence=True.`` '+\
                              'You can suppress this warning by calling this function with keyword '+\
                              '``ignore_warnings=True``.',
                              )
        # Check for a) zero event rate and b) zero possibility for any nodes being changed still.
        # This is important because it might happen that nodes
        # have a non-zero reaction rate but no targets left
        # at which point the simulation will never halt.
        while t < tmax and \
              total_event_rate > 0:
            # sample and advance time according to current total rate
            tau, dy = self.get_time_leap_and_proposed_compartment_changes(t,
                              current_event_rates=current_event_rates,
                              get_event_rates=get_event_rates,
                              get_compartment_changes=get_compartment_changes,
                              use_ivp_solver=use_ivp_solver,
                              rates_have_explicit_time_dependence=rates_have_explicit_time_dependence,
                              )
            new_t = t + tau
            # break if simulation time is reached
            if new_t >= tmax:
                break
            # sampling
            if sampling_dt is not None:
                # sample all the time steps that were demanded in between the two events
                last_sample_dt = time[-1]
                for idt in range(1,int(np.ceil((new_t-last_sample_dt)/sampling_dt))):
                    time.append(last_sample_dt+idt*sampling_dt)
                    compartments.append(current_state.copy())
                    if sampling_callback is not None:
                        sampling_callback()
            # write losses and gains into the current state vector
            current_state += dy
            # save the current state if sampling_dt wasn't specified
            if sampling_dt is None:
                time.append(new_t)
                compartments.append(current_state.copy())
            # save current state
            self.t0 = new_t
            self.y0 = current_state.copy()
            current_event_rates = get_event_rates(new_t, self.y0)
            total_event_rate = current_event_rates.sum()
            # advance time
            t = new_t
        if sampling_dt is not None:
            # Pad one final demanded sample if the loop ended before tmax.
            next_sample = time[-1] + sampling_dt
            if next_sample <= tmax:
                time.append(next_sample)
                compartments.append(current_state)
                if sampling_callback is not None:
                    sampling_callback()
        # convert to result dictionary
        time = np.array(time)
        result = np.array(compartments)
        if not adopt_final_state:
            # Restore the pre-simulation initial condition.
            self.y0 = initial_state
            self.t0 = initial_time
        else:
            self.t0 = tmax
        return time, { compartment: result[:,c_ndx] for c_ndx, compartment in zip(ndx, return_compartments) }
class SIModel(EpiModel):
"""
An SI model derived from :class:`epipack.numeric_epi_models.EpiModel`.
"""
def __init__(self, infection_rate, | |
"raiseInTry" :setvar(); raise MyErr1
setvar()
if myraise7 == "Unhandled" :setvar(); raise MyErr4
setvar()
except MyErr1:
setvar()
if myraise8 == "Unhandled": setvar(); raise MyErr4
setvar()
except :setvar() # should never be executed
else :
setvar()
if myraise8 == "Unhandled": setvar(); raise MyErr4
setvar()
finally :
setvar()
if myraise9 == "Unhandled": setvar(); raise MyErr4
setvar()
#uncomment the following 2 lines once we have the fix for PS:1752
#and accordingly adjust the final expected result value
#yield 1; setvar()
#yield 2; setvar()
myraise1 = ["raiseInTry","outerTry","Unhandled","None"]
myraise2 = ["raiseInExcept", "raiseInElse","Unhandled","None"]
myraise3 = ["raiseInFinally","Unhandled","None"]
myraise4 = ["raiseInTry","Unhandled","None"]
myraise5 = ["Unhandled","None"]
myraise6 = ["Unhandled","None"]
myraise7 = ["raiseInTry","Unhandled","None"]
myraise8 = ["Unhandled","None"]
myraise9 = ["Unhandled","None"]
def fun():
for a in myraise1:
for b in myraise2:
for c in myraise3:
for d in myraise4:
for e in myraise5:
for f in myraise6:
for g in myraise7:
for h in myraise8:
for i in myraise9:
k = TestUnifiedTry(a,b,c,d,e,f,g,h,i)
while(True):
try:
k.next()
except MyErr4: setvar();break
except StopIteration: setvar();break
fun()
self.assertEqual(globals()["gblvar"],141985)
    def test_try_catch_finally_on_targets(self):
        """Exercise continue/break/return/raise from every clause of a
        try/except/else/finally inside nested while loops; the exact
        number of executed steps is checked via a global counter."""
        #test try-catch-finally on targets
        globals()["gblvar"] = 1
        def setvar() : globals()["gblvar"] += 1
        def TestTargets(ret):
            # x / y / z drive the inner / middle / outer while loops.
            x = 0
            y = 0
            z = 0
            setvar()
            while( z < 6 ) :
                z += 1
                while( y < 8 ) :
                    y += 1
                    while( x < 20 ) :
                        x += 1
                        setvar()
                        try:
                            setvar()
                            if not x % 3 : setvar();continue
                            if not x % 4 : setvar();break
                            if not x % 5 : setvar();1 / 0
                            if not x % 7 and ret == "try" : setvar();return
                            setvar()
                        except:
                            setvar()
                            if not y % 3 : setvar();continue
                            if not y % 4 : setvar();break
                            if not y % 7 and ret == "except" : setvar();return
                            setvar()
                        else:
                            setvar()
                            if not x % 11 : setvar();continue
                            if not x % 13 : setvar();break
                            if not x % 19 and ret == "else" : setvar();return
                            setvar()
                        finally:
                            setvar()
                            #IPy does support continue under finally, just for CPy compatibility we do not test it here
                            #if z % 2 : setvar();continue
                            if not z % 2 : setvar();break
                            if not z % 5 and ret == "finally" : setvar();return
                            setvar()
                        setvar()
            return
        ret = ["try","except","else","finally"]
        for r in ret:
            TestTargets(r)
        # Expected step count for the four scenarios combined.
        self.assertEqual(globals()["gblvar"],403)
    def test_yield_in_finally(self):
        """test yield in finally"""
        globals()["gblvar"] = 1
        def setvar() : globals()["gblvar"] += 1
        def test_yield_finally():
            # Generator that raises inside try, handles it, then yields
            # twice from the finally clause before finishing.
            setvar()
            try: setvar();1/0
            except:setvar()
            else: setvar()
            finally:
                setvar();yield 100
                setvar();yield 100
                setvar()
            setvar()
        try:
            k = test_yield_finally()
            while(1):
                next(k)
        except StopIteration: pass
        # 1 initial + 7 setvar() calls on the executed path (else skipped).
        self.assertEqual(globals()["gblvar"],8)
def test_string_partition(self):
self.assertEqual('http://www.codeplex.com/WorkItem/List.aspx?ProjectName=IronPython'.partition('://'), ('http','://','www.codeplex.com/WorkItem/List.aspx?ProjectName=IronPython'))
self.assertEqual('http://www.codeplex.com/WorkItem/List.aspx?ProjectName=IronPython'.partition('stringnotpresent'), ('http://www.codeplex.com/WorkItem/List.aspx?ProjectName=IronPython','',''))
self.assertEqual('stringisnotpresent'.partition('presentofcoursenot'), ('stringisnotpresent','',''))
self.assertEqual(''.partition('stringnotpresent'), ('','',''))
self.assertEqual('onlymatchingtext'.partition('onlymatchingtext'), ('','onlymatchingtext',''))
self.assertEqual('alotoftextherethatisapartofprefixonlyprefix_nosuffix'.partition('_nosuffix'), ('alotoftextherethatisapartofprefixonlyprefix','_nosuffix',''))
self.assertEqual('noprefix_alotoftextherethatisapartofsuffixonlysuffix'.partition('noprefix_'), ('','noprefix_','alotoftextherethatisapartofsuffixonlysuffix'))
self.assertEqual('\0'.partition('\0'), ('','\0',''))
self.assertEqual('\00\ff\67\56\d8\89\33\09\99\ee\20\00\56\78\45\77\e9'.partition('\00\56\78'), ('\00\ff\67\56\d8\89\33\09\99\ee\20','\00\56\78','\45\77\e9'))
self.assertEqual('\00\ff\67\56\d8\89\33\09\99\ee\20\00\56\78\45\77\e9'.partition('\78\45\77\e9'), ('\00\ff\67\56\d8\89\33\09\99\ee\20\00\56','\78\45\77\e9',''))
self.assertEqual('\ff\67\56\d8\89\33\09\99\ee\20\00\56\78\45\77\e9'.partition('\ff\67\56\d8\89\33\09\99'), ('','\ff\67\56\d8\89\33\09\99','\ee\20\00\56\78\45\77\e9'))
self.assertEqual(u'\ff\67\56\d8\89\33\09\99some random 8-bit text here \ee\20\00\56\78\45\77\e9'.partition('random'), (u'\ff\67\56\d8\89\33\09\99some ','random',' 8-bit text here \ee\20\00\56\78\45\77\e9'))
self.assertEqual(u'\ff\67\56\d8\89\33\09\99some random 8-bit text here \ee\20\00\56\78\45\77\e9'.partition(u'\33\09\99some r'), (u'\ff\67\56\d8\89','\33\09\99some r','andom 8-bit text here \ee\20\00\56\78\45\77\e9'))
self.assertRaises(ValueError,'sometextheretocauseanexeption'.partition,'')
self.assertRaises(ValueError,''.partition,'')
self.assertRaises(TypeError,'some\90text\ffhere\78to\88causeanexeption'.partition,None)
self.assertRaises(TypeError,''.partition,None)
prefix = """ this is some random text
and it has lots of text
"""
sep = """
that is multilined
and includes unicode \00 \56
\01 \02 \06 \12\33\67\33\ff \ee also"""
suffix = """
\78\ff\43\12\23ok"""
str = prefix + sep + suffix
self.assertEqual(str.partition(sep),(prefix,sep,suffix))
self.assertEqual(str.partition('nomatch'),(str,'',''))
self.assertRaises(TypeError,str.partition,None)
self.assertRaises(ValueError,str.partition,'')
def test_string_rpartition(self):
self.assertEqual('http://www.codeplex.com/WorkItem/List.aspx?Project://Name=IronPython'.rpartition('://'), ('http://www.codeplex.com/WorkItem/List.aspx?Project','://','Name=IronPython'))
self.assertEqual('http://www.codeplex.com/WorkItem/List.aspx?ProjectName=IronPython'.rpartition('stringnotpresent'), ('', '', 'http://www.codeplex.com/WorkItem/List.aspx?ProjectName=IronPython'))
self.assertEqual('stringisnotpresent'.rpartition('presentofcoursenot'), ('','', 'stringisnotpresent'))
self.assertEqual(''.rpartition('stringnotpresent'), ('','',''))
self.assertEqual('onlymatchingtext'.rpartition('onlymatchingtext'), ('','onlymatchingtext',''))
self.assertEqual('alotoftextherethatisapartofprefixonlyprefix_nosuffix'.rpartition('_nosuffix'), ('alotoftextherethatisapartofprefixonlyprefix','_nosuffix',''))
self.assertEqual('noprefix_alotoftextherethatisapartofsuffixonlysuffix'.rpartition('noprefix_'), ('','noprefix_','alotoftextherethatisapartofsuffixonlysuffix'))
self.assertEqual('\0'.partition('\0'), ('','\0',''))
self.assertEqual('\00\ff\67\56\d8\89\33\09\99\ee\20\00\56\78\00\56\78\45\77\e9'.rpartition('\00\56\78'), ('\00\ff\67\56\d8\89\33\09\99\ee\20\00\56\78','\00\56\78','\45\77\e9'))
self.assertEqual('\00\ff\67\56\d8\89\33\09\99\ee\20\00\56\78\45\77\e9\78\45\77\e9'.rpartition('\78\45\77\e9'), ('\00\ff\67\56\d8\89\33\09\99\ee\20\00\56\78\45\77\e9','\78\45\77\e9',''))
self.assertEqual('\ff\67\56\d8\89\33\09\99\ee\20\00\56\78\45\77\e9'.rpartition('\ff\67\56\d8\89\33\09\99'), ('','\ff\67\56\d8\89\33\09\99','\ee\20\00\56\78\45\77\e9'))
self.assertEqual(u'\ff\67\56\d8\89\33\09\99some random 8-bit text here \ee\20\00\56\78\45\77\e9'.rpartition('random'), (u'\ff\67\56\d8\89\33\09\99some ','random',' 8-bit text here \ee\20\00\56\78\45\77\e9'))
self.assertEqual(u'\ff\67\56\d8\89\33\09\99some random 8-bit text here \ee\20\00\56\78\45\77\e9'.rpartition(u'\33\09\99some r'), (u'\ff\67\56\d8\89','\33\09\99some r','andom 8-bit text here \ee\20\00\56\78\45\77\e9'))
self.assertRaises(ValueError,'sometextheretocauseanexeption'.rpartition,'')
self.assertRaises(ValueError,''.rpartition,'')
self.assertRaises(TypeError,'some\90text\ffhere\78to\88causeanexeption'.rpartition,None)
self.assertRaises(TypeError,''.rpartition,None)
prefix = """ this is some random text
and it has lots of text
"""
sep = """
that is multilined
and includes unicode \00 \56
\01 \02 \06 \12\33\67\33\ff \ee also"""
suffix = """
\78\ff\43\12\23ok"""
str = prefix + sep + suffix
self.assertEqual(str.rpartition(sep),(prefix,sep,suffix))
self.assertEqual(str.rpartition('nomatch'),('','', str))
self.assertRaises(TypeError,str.rpartition,None)
self.assertRaises(ValueError,str.rpartition,'')
def test_string_startswith(self):
    """Exercise ``str.startswith`` with tuple prefixes and start/end bounds.

    Covers: TypeError for non-string (or nested-tuple) prefix entries,
    lazy left-to-right checking of tuple elements (entries after the
    first match are never inspected, so trailing junk is legal), and
    every sign combination of the optional ``start``/``end`` arguments,
    including clamping of out-of-range indices.
    """
    class A:pass  # arbitrary non-string object used as an invalid prefix entry
    # failure scenarios
    self.assertRaises(TypeError,'string'.startswith,None)
    self.assertRaises(TypeError,'string'.startswith,(None,"strin","str"))
    self.assertRaises(TypeError,'string'.startswith,(None,))
    self.assertRaises(TypeError,'string'.startswith,(["this","is","invalid"],"str","stri"))
    self.assertRaises(TypeError,'string'.startswith,(("string","this is invalid","this is also invalid",),))
    self.assertRaises(TypeError,''.startswith,None)
    self.assertRaises(TypeError,''.startswith,(None,"strin","str"))
    self.assertRaises(TypeError,''.startswith,(None,))
    self.assertRaises(TypeError,''.startswith,(["this","is","invalid"],"str","stri"))
    self.assertRaises(TypeError,''.startswith,(("string","this is invalid","this is also invalid",),))
    # success scenarios
    self.assertEqual('no matching string'.startswith(("matching","string","here")),False)
    self.assertEqual('here matching string'.startswith(("matching","string","here")), True)
    self.assertEqual('here matching string'.startswith(("here", "matching","string","here")), True)
    self.assertEqual('here matching string'.startswith(("matching","here","string",)), True)
    self.assertEqual('here matching string'.startswith(("here matching string","here matching string","here matching string",)), True)
    # NOTE: \12, \34, \e5 etc. are octal / literal backslash escapes — the
    # exact bytes matter, do not "fix" them.
    s = 'here \12 \34 \ff \e5 \45 matching string'
    m = "here \12 \34 \ff \e5 \45 "
    m1 = " \12 \34 \ff \e5 \45 "
    n = "here \12 \34 \ff \e5 \46 "
    n1 = " \12 \34 \ff \e5 \46 "
    # Tuple entries are tried left to right; once one matches, the rest
    # (even non-strings) are never type-checked.
    self.assertEqual(s.startswith((m,None)), True)
    self.assertEqual(s.startswith((m,123, ["here","good"])), True)
    self.assertEqual(s.startswith(("nomatch",m,123, ["here","good"])), True)
    # with start parameter = 0
    self.assertEqual(s.startswith((m,None),0), True)
    self.assertEqual(s.startswith((n,"nomatch"),0), False)
    self.assertEqual(s.startswith((s,"nomatch"),0), True)
    self.assertEqual(s.startswith((s + "a","nomatch"),0), False)
    # Here the string entries do NOT match, so the invalid entry is reached.
    self.assertRaises(TypeError, s.startswith,(n,None),0)
    self.assertRaises(TypeError, s.startswith,(None, n),0)
    self.assertRaises(TypeError, s.startswith,(A, None, m),0)
    # with start parameter > 0
    self.assertEqual(s.startswith((m1,None),4), True)
    self.assertEqual(s.startswith((m,"nomatch"),4), False)
    self.assertEqual(s.startswith((n1,"nomatch"),4), False)
    self.assertEqual(s.startswith((" \12 \34 \fd \e5 \45 ","nomatch"),4), False)
    self.assertEqual(s.startswith((s," \12 \34 \ff \e5 \45 matching string"),4), True)
    self.assertEqual(s.startswith((" \12 \34 \ff \e5 \45 matching string" + "a","nomatch"),4), False)
    self.assertRaises(TypeError, s.startswith,(n1,None),4)
    self.assertRaises(TypeError, s.startswith,(None, n1),4)
    self.assertRaises(TypeError, s.startswith,(A, None, m1),4)
    self.assertEqual(s.startswith(("g",None),len(s) - 1), True)
    self.assertEqual(s.startswith(("g","nomatch"),len(s)), False)
    self.assertEqual(s.startswith(("g","nomatch"),len(s) + 400), False)
    # with start parameter < 0
    self.assertEqual(s.startswith(("string",None),-6), True)
    self.assertEqual(s.startswith(("stro","nomatch"),-6), False)
    self.assertEqual(s.startswith(("strong","nomatch"),-6), False)
    self.assertEqual(s.startswith(("stringandmore","nomatch"),-6), False)
    self.assertEqual(s.startswith(("prefixandstring","nomatch"),-6), False)
    self.assertRaises(TypeError, s.startswith,("string000",None),-6)
    self.assertRaises(TypeError, s.startswith,(None, "string"),-6)
    self.assertRaises(TypeError, s.startswith,(A, None, "string"),-6)
    # start below -len(s) clamps to the beginning of the string
    self.assertEqual(s.startswith(("here",None),-len(s)), True)
    self.assertEqual(s.startswith((s,None),-len(s) - 1 ), True)
    self.assertEqual(s.startswith(("here",None),-len(s) - 400), True)
    # with start and end parameters
    # with +ve start , +ve end
    # end > start
    self.assertEqual(s.startswith((m1,None),4,len(s)), True)
    self.assertEqual(s.startswith((m1,None),4,len(s) + 100), True)
    self.assertEqual(s.startswith((n1,"nomatch"),len(s)), False)
    self.assertRaises(TypeError, s.startswith,(n1,None),4, len(s))
    self.assertRaises(TypeError, s.startswith,(None, n1),4 , len(s) + 100)
    self.assertRaises(TypeError, s.startswith,(A, None, m1),4, len(s))
    # end < start
    self.assertRaises(TypeError, s.startswith, (m1,None),4,3)
    self.assertRaises(TypeError, s.startswith, (m1,None),4,2)
    self.assertRaises(TypeError, s.startswith, (n1,None),4, 3)
    self.assertRaises(TypeError, s.startswith, (None, n1),4 , 3)
    self.assertRaises(TypeError, s.startswith, (A, None, m1),4, 0)
    # end == start
    # an empty slice only matches the empty prefix
    self.assertEqual(s.startswith(("",None),4,4), True)
    self.assertEqual(s.startswith((m1,),4,4), False)
    self.assertRaises(TypeError, s.startswith,(n1,None),4, 4)
    self.assertRaises(TypeError, s.startswith,(None, n1),4 , 4)
    self.assertRaises(TypeError, s.startswith,(A, None, m1),4, 4)
    # with -ve start , +ve end
    # end > start
    self.assertEqual(s.startswith(("string",None),-6, len(s)), True)
    self.assertEqual(s.startswith(("string",None),-6, len(s) + 100), True)
    self.assertEqual(s.startswith(("string","nomatch"),-6, len(s) -2), False)
    self.assertEqual(s.startswith(("stro","nomatch"),-6, len(s)-1), False)
    self.assertEqual(s.startswith(("strong","nomatch"),-6,len(s)), False)
    self.assertRaises(TypeError, s.startswith,("string000",None),-6,len(s) + 3)
    self.assertRaises(TypeError, s.startswith,(None, "string"),-6, len(s))
    self.assertRaises(TypeError, s.startswith,(A, None, "string"),-6,len(s))
    self.assertEqual(s.startswith(("here",None),-len(s), 5), True)
    self.assertEqual(s.startswith(("here","nomatch"),-len(s), 2), False)
    self.assertEqual(s.startswith(("here",None),-len(s) - 1, 4 ), True)
    self.assertEqual(s.startswith(("here","nomatch"),-len(s) - 1, 2 ), False)
    # end < start
    self.assertRaises(TypeError, s.startswith, ("string",None),-6, 10)
    self.assertRaises(TypeError, s.startswith, ("string000",None),-6,10)
    self.assertRaises(TypeError, s.startswith, (None, "string"),-6, 10)
    self.assertRaises(TypeError, s.startswith, (A, None, "string"),-6,10)
    self.assertEqual(s.startswith(("stro","nomatch"),-6, 10), False)
    self.assertEqual(s.startswith(("strong","nomatch"),-6,10), False)
    # end == start
    self.assertRaises(TypeError,s.startswith, ("string",None),-6, len(s) -6)
    self.assertEqual(s.startswith(("",None),-6, len(s) -6), True)
    # with +ve start , -ve end
    # end > start
    self.assertEqual(s.startswith((m1,None),4,-5 ), True)
    self.assertEqual(s.startswith((m1,"nomatch"),4,-(4 + len(m) +1) ), False)
    self.assertRaises(TypeError, s.startswith,(n1,None),4, -5)
    self.assertRaises(TypeError, s.startswith,(None, n1),4 , -5)
    self.assertRaises(TypeError, s.startswith,(A, None, m1),4, -5)
    # end < start
    self.assertRaises(TypeError, s.startswith, (m1,None),4,-len(s) + 1)
    self.assertRaises(TypeError, s.startswith, (n1,None),4, -len(s))
    self.assertRaises(TypeError, s.startswith, (None, n1),4 , -len(s))
    self.assertRaises(TypeError, s.startswith, (A, None, m1),4, -len(s))
    self.assertEqual(s.startswith((m1,),4,-len(s) + 1), False)
    self.assertEqual(s.startswith((m1,),4,-500), False)
    # end == start
    self.assertEqual(s.startswith(("",None),4,-len(s) + 4), True)
    self.assertEqual(s.startswith((m1,"nomatch"),4,-len(s) + 4), False)
    self.assertRaises(TypeError, s.startswith,(n1,None),4, -len(s) + 4)
    self.assertRaises(TypeError, s.startswith,(None, n1),4 , -len(s) + 4)
    self.assertRaises(TypeError, s.startswith,(A, None, m1),4, -len(s) + 4)
    # with -ve start , -ve end
    # end > start
    self.assertEqual(s.startswith(("stri",None),-6, -2), True)
    self.assertEqual(s.startswith(("string","nomatch"),-6, -1), False)
    self.assertEqual(s.startswith(("stro","nomatch"),-6, -1), False)
    self.assertEqual(s.startswith(("strong","nomatch"),-6,-1), False)
    self.assertEqual(s.startswith(("stringand","nomatch"),-6,-1), False)
    self.assertRaises(TypeError, s.startswith,("string000",None),-6, -1)
    self.assertRaises(TypeError, s.startswith,(None, "string"),-6, -1)
    self.assertRaises(TypeError, s.startswith,(A, None, "string"),-6,-1)
    self.assertEqual(s.startswith(("here","nomatch"),-len(s), -5), True)
    self.assertEqual(s.startswith(("here","nomatch"),-len(s), -len(s) + 2), False)
    self.assertEqual(s.startswith(("here","nomatch"),-len(s) - 1, -5 ), True)
    self.assertEqual(s.startswith(("here","nomatch"),-len(s) - 1, -len(s) + 2), False)
    # end < start
    self.assertRaises(TypeError, s.startswith, ("string",None),-6, -7)
    self.assertRaises(TypeError, s.startswith, ("string000",None),-6,-8)
    self.assertRaises(TypeError, s.startswith, (None, "string"),-6, -8)
    self.assertRaises(TypeError, s.startswith, (A, None, "string"),-6,-8)
    self.assertEqual(s.startswith(("stro","nomatch"),-6, -8), False)
    self.assertEqual(s.startswith(("strong","nomatch"),-6,-8), False)
    # end == start
    self.assertEqual(s.startswith(("string","nomatch"),-6, -6), False)
    self.assertEqual(s.startswith(("",None),-6, -6), True)
def test_string_endswith(self):
#failue scenarios
class A:pass
self.assertRaises(TypeError,'string'.endswith,None)
self.assertRaises(TypeError,'string'.endswith,(None,"tring","ing"))
self.assertRaises(TypeError,'string'.endswith,(None,))
self.assertRaises(TypeError,'string'.endswith,(["this","is","invalid"],"ring","ing"))
self.assertRaises(TypeError,'string'.endswith,(("string","this is invalid","this is also invalid",),))
self.assertRaises(TypeError,''.endswith,None)
self.assertRaises(TypeError,''.endswith,(None,"tring","ring"))
self.assertRaises(TypeError,''.endswith,(None,))
self.assertRaises(TypeError,''.endswith,(["this","is","invalid"],"tring","ring"))
self.assertRaises(TypeError,''.endswith,(("string","this is invalid","this is also invalid",),))
#Positive scenarios
self.assertEqual('no matching string'.endswith(("matching","no","here")),False)
self.assertEqual('here matching string'.endswith(("string", "matching","nomatch")), True)
self.assertEqual('here matching string'.endswith(("string", "matching","here","string")), True)
self.assertEqual('here matching string'.endswith(("matching","here","string",)), True)
self.assertEqual('here matching string'.endswith(("here matching string","here matching string","here matching string",)), True)
s = 'here \12 \34 | |
<filename>multiple-languages/python/ros-cdk-cxapi-1.0.6/src/ros_cdk_cxapi/__init__.py
'''
# `@alicloud/ros-cdk-cxapi`
> TODO: description
## Usage
```
const rosCxapi = require('@alicloud/ros-cdk-cxapi');
// TODO: DEMONSTRATE API
```
'''
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import ros_cdk_assembly_schema
@jsii.data_type(
    jsii_type="@alicloud/ros-cdk-cxapi.AliyunRosStackProperties",
    jsii_struct_bases=[],
    name_mapping={
        "template_file": "templateFile",
        "parameters": "parameters",
        "stack_name": "stackName",
    },
)
class AliyunRosStackProperties:
    # NOTE(review): jsii-generated struct. Values are kept in the private
    # ``_values`` dict (only explicitly-supplied optionals are stored) and
    # exposed read-only through the properties below; ``name_mapping``
    # ties the Python names to the JavaScript-side camelCase names.
    def __init__(
        self,
        *,
        template_file: builtins.str,
        parameters: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
        stack_name: typing.Optional[builtins.str] = None,
    ) -> None:
        '''Artifact properties for ROS stacks.
        :param template_file: A file relative to the assembly root which contains the ROS template for this stack.
        :param parameters: Values for ROS stack parameters that should be passed when the stack is deployed.
        :param stack_name: The name to use for the ROS stack. Default: - name derived from artifact ID
        '''
        # Omitted optionals are left out of the dict so __eq__/__repr__
        # reflect only the fields that were actually set.
        self._values: typing.Dict[str, typing.Any] = {
            "template_file": template_file,
        }
        if parameters is not None:
            self._values["parameters"] = parameters
        if stack_name is not None:
            self._values["stack_name"] = stack_name

    @builtins.property
    def template_file(self) -> builtins.str:
        '''A file relative to the assembly root which contains the ROS template for this stack.'''
        result = self._values.get("template_file")
        # NOTE(review): sanity check only — __init__ always stores the key,
        # and this assert is stripped under ``python -O``.
        assert result is not None, "Required property 'template_file' is missing"
        return typing.cast(builtins.str, result)

    @builtins.property
    def parameters(self) -> typing.Optional[typing.Mapping[builtins.str, builtins.str]]:
        '''Values for ROS stack parameters that should be passed when the stack is deployed.'''
        result = self._values.get("parameters")
        return typing.cast(typing.Optional[typing.Mapping[builtins.str, builtins.str]], result)

    @builtins.property
    def stack_name(self) -> typing.Optional[builtins.str]:
        '''The name to use for the ROS stack.
        :default: - name derived from artifact ID
        '''
        result = self._values.get("stack_name")
        return typing.cast(typing.Optional[builtins.str], result)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        # Structs compare by exact class and stored values.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return "AliyunRosStackProperties(%s)" % ", ".join(
            k + "=" + repr(v) for k, v in self._values.items()
        )
@jsii.data_type(
    jsii_type="@alicloud/ros-cdk-cxapi.AssemblyBuildOptions",
    jsii_struct_bases=[],
    name_mapping={"runtime_info": "runtimeInfo"},
)
class AssemblyBuildOptions:
    """Options accepted when building a cloud assembly."""

    def __init__(self, *, runtime_info: typing.Optional["RuntimeInfo"] = None) -> None:
        '''
        :param runtime_info: Include the specified runtime information (module versions) in manifest. Default: - if this option is not specified, runtime info will not be included
        '''
        # A plain mapping is promoted to the typed RuntimeInfo struct.
        if isinstance(runtime_info, dict):
            runtime_info = RuntimeInfo(**runtime_info)
        values: typing.Dict[str, typing.Any] = {}
        if runtime_info is not None:
            values["runtime_info"] = runtime_info
        self._values = values

    @builtins.property
    def runtime_info(self) -> typing.Optional["RuntimeInfo"]:
        '''Include the specified runtime information (module versions) in manifest.
        :default: - if this option is not specified, runtime info will not be included
        '''
        return typing.cast(typing.Optional["RuntimeInfo"], self._values.get("runtime_info"))

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        # Equal when the other object is the same struct type with the same values.
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        fields = ", ".join(f"{k}={v!r}" for k, v in self._values.items())
        return "AssemblyBuildOptions(%s)" % fields
class CloudArtifact(
    metaclass=jsii.JSIIMeta,
    jsii_type="@alicloud/ros-cdk-cxapi.CloudArtifact",
):
    '''Represents an artifact within a cloud assembly.'''
    # NOTE(review): jsii proxy class — every method and property defers to
    # the JavaScript implementation through the jsii kernel
    # (jsii.create / jsii.invoke / jsii.sinvoke / jsii.get).
    def __init__(
        self,
        assembly: "CloudAssembly",
        id: builtins.str,
        *,
        type: ros_cdk_assembly_schema.ArtifactType,
        dependencies: typing.Optional[typing.Sequence[builtins.str]] = None,
        metadata: typing.Optional[typing.Mapping[builtins.str, typing.Sequence[ros_cdk_assembly_schema.MetadataEntry]]] = None,
        properties: typing.Optional[typing.Union[ros_cdk_assembly_schema.AliyunRosStackProperties, ros_cdk_assembly_schema.TreeArtifactProperties, ros_cdk_assembly_schema.NestedCloudAssemblyProperties]] = None,
    ) -> None:
        '''
        :param assembly: -
        :param id: -
        :param type: The type of artifact.
        :param dependencies: IDs of artifacts that must be deployed before this artifact. Default: - no dependencies.
        :param metadata: Associated metadata. Default: - no metadata.
        :param properties: The set of properties for this artifact (depends on type). Default: - no properties.
        '''
        # The keyword arguments are bundled into a manifest struct before
        # handing off to the jsii kernel.
        manifest = ros_cdk_assembly_schema.ArtifactManifest(
            type=type,
            dependencies=dependencies,
            metadata=metadata,
            properties=properties,
        )
        jsii.create(self.__class__, self, [assembly, id, manifest])

    @jsii.member(jsii_name="fromManifest") # type: ignore[misc]
    @builtins.classmethod
    def from_manifest(
        cls,
        assembly: "CloudAssembly",
        id: builtins.str,
        *,
        type: ros_cdk_assembly_schema.ArtifactType,
        dependencies: typing.Optional[typing.Sequence[builtins.str]] = None,
        metadata: typing.Optional[typing.Mapping[builtins.str, typing.Sequence[ros_cdk_assembly_schema.MetadataEntry]]] = None,
        properties: typing.Optional[typing.Union[ros_cdk_assembly_schema.AliyunRosStackProperties, ros_cdk_assembly_schema.TreeArtifactProperties, ros_cdk_assembly_schema.NestedCloudAssemblyProperties]] = None,
    ) -> typing.Optional["CloudArtifact"]:
        '''Returns a subclass of ``CloudArtifact`` based on the artifact type defined in the artifact manifest.
        :param assembly: The cloud assembly from which to load the artifact.
        :param id: The artifact ID.
        :param type: The type of artifact.
        :param dependencies: IDs of artifacts that must be deployed before this artifact. Default: - no dependencies.
        :param metadata: Associated metadata. Default: - no metadata.
        :param properties: The set of properties for this artifact (depends on type). Default: - no properties.
        :return: the ``CloudArtifact`` that matches the artifact type or ``undefined`` if it's an artifact type that is unrecognized by this module.
        '''
        artifact = ros_cdk_assembly_schema.ArtifactManifest(
            type=type,
            dependencies=dependencies,
            metadata=metadata,
            properties=properties,
        )
        # Static invoke on the JS side performs the type dispatch.
        return typing.cast(typing.Optional["CloudArtifact"], jsii.sinvoke(cls, "fromManifest", [assembly, id, artifact]))

    @jsii.member(jsii_name="findMetadataByType")
    def find_metadata_by_type(
        self,
        type: builtins.str,
    ) -> typing.List["MetadataEntryResult"]:
        '''
        :param type: -
        :return: all the metadata entries of a specific type in this artifact.
        '''
        return typing.cast(typing.List["MetadataEntryResult"], jsii.invoke(self, "findMetadataByType", [type]))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="assembly")
    def assembly(self) -> "CloudAssembly":
        # The assembly this artifact belongs to.
        return typing.cast("CloudAssembly", jsii.get(self, "assembly"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="id")
    def id(self) -> builtins.str:
        # The artifact's identifier within the assembly.
        return typing.cast(builtins.str, jsii.get(self, "id"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="manifest")
    def manifest(self) -> ros_cdk_assembly_schema.ArtifactManifest:
        '''The artifact's manifest.'''
        return typing.cast(ros_cdk_assembly_schema.ArtifactManifest, jsii.get(self, "manifest"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="messages")
    def messages(self) -> typing.List["SynthesisMessage"]:
        '''The set of messages extracted from the artifact's metadata.'''
        return typing.cast(typing.List["SynthesisMessage"], jsii.get(self, "messages"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="dependencies")
    def dependencies(self) -> typing.Optional[typing.List["CloudArtifact"]]:
        '''Returns all the artifacts that this artifact depends on.'''
        return typing.cast(typing.Optional[typing.List["CloudArtifact"]], jsii.get(self, "dependencies"))
class CloudAssembly(
    metaclass=jsii.JSIIMeta,
    jsii_type="@alicloud/ros-cdk-cxapi.CloudAssembly",
):
    '''Represents a deployable cloud application.'''
    # NOTE(review): jsii proxy class — all behavior lives on the JavaScript
    # side; these members marshal arguments through jsii.invoke / jsii.get.
    def __init__(self, directory: builtins.str) -> None:
        '''Reads a cloud assembly from the specified directory.
        :param directory: The root directory of the assembly.
        '''
        jsii.create(self.__class__, self, [directory])

    @jsii.member(jsii_name="getNestedAssembly")
    def get_nested_assembly(self, artifact_id: builtins.str) -> "CloudAssembly":
        '''Returns a nested assembly.
        :param artifact_id: The artifact ID of the nested assembly.
        '''
        return typing.cast("CloudAssembly", jsii.invoke(self, "getNestedAssembly", [artifact_id]))

    @jsii.member(jsii_name="getNestedAssemblyArtifact")
    def get_nested_assembly_artifact(
        self,
        artifact_id: builtins.str,
    ) -> "NestedCloudAssemblyArtifact":
        '''Returns a nested assembly artifact.
        :param artifact_id: The artifact ID of the nested assembly.
        '''
        return typing.cast("NestedCloudAssemblyArtifact", jsii.invoke(self, "getNestedAssemblyArtifact", [artifact_id]))

    @jsii.member(jsii_name="getStack")
    def get_stack(self, stack_name: builtins.str) -> "RosStackArtifact":
        '''Returns a ROS stack artifact by name from this assembly.
        Deprecated renamed to 'getStackByName' (or 'getStackArtifact(id)')
        :param stack_name: -
        '''
        # Deprecated per the docstring above — kept for backward compatibility.
        return typing.cast("RosStackArtifact", jsii.invoke(self, "getStack", [stack_name]))

    @jsii.member(jsii_name="getStackArtifact")
    def get_stack_artifact(self, artifact_id: builtins.str) -> "RosStackArtifact":
        '''Returns a ROS stack artifact from this assembly.
        Param artifactId the artifact id of the stack (can be obtained through 'stack.artifactId').
        Throws if there is no stack artifact with that id
        Returns a 'RosStackArtifact' object.
        :param artifact_id: -
        '''
        return typing.cast("RosStackArtifact", jsii.invoke(self, "getStackArtifact", [artifact_id]))

    @jsii.member(jsii_name="getStackByName")
    def get_stack_by_name(self, stack_name: builtins.str) -> "RosStackArtifact":
        '''Returns a ROS stack artifact from this assembly.
        Will only search the current assembly.
        Param stackName the name of the ROS stack.
        Throws if there is no stack artifact by that name
        Throws if there is more than one stack with the same stack name. You can
        use 'getStackArtifact - stack.artifactId' instead.
        Returns a 'RosStackArtifact' object.
        :param stack_name: -
        '''
        return typing.cast("RosStackArtifact", jsii.invoke(self, "getStackByName", [stack_name]))

    @jsii.member(jsii_name="tree")
    def tree(self) -> typing.Optional["TreeCloudArtifact"]:
        '''Returns the tree metadata artifact from this assembly.
        Throws if there is no metadata artifact by that name
        Returns a 'TreeCloudArtifact' object if there is one defined in the manifest, 'undefined' otherwise.
        '''
        return typing.cast(typing.Optional["TreeCloudArtifact"], jsii.invoke(self, "tree", []))

    @jsii.member(jsii_name="tryGetArtifact")
    def try_get_artifact(self, id: builtins.str) -> typing.Optional[CloudArtifact]:
        '''Attempts to find an artifact with a specific identity.
        Returns A 'CloudArtifact' object or 'undefined' if the artifact does not exist in this assembly.
        Param id The artifact ID
        :param id: -
        '''
        return typing.cast(typing.Optional[CloudArtifact], jsii.invoke(self, "tryGetArtifact", [id]))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="artifacts")
    def artifacts(self) -> typing.List[CloudArtifact]:
        '''All artifacts included in this assembly.'''
        return typing.cast(typing.List[CloudArtifact], jsii.get(self, "artifacts"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="directory")
    def directory(self) -> builtins.str:
        '''The root directory of the cloud assembly.'''
        return typing.cast(builtins.str, jsii.get(self, "directory"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="manifest")
    def manifest(self) -> ros_cdk_assembly_schema.AssemblyManifest:
        '''The raw assembly manifest.'''
        return typing.cast(ros_cdk_assembly_schema.AssemblyManifest, jsii.get(self, "manifest"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="runtime")
    def runtime(self) -> ros_cdk_assembly_schema.RuntimeInfo:
        '''Runtime information such as module versions used to synthesize this assembly.'''
        return typing.cast(ros_cdk_assembly_schema.RuntimeInfo, jsii.get(self, "runtime"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="stacks")
    def stacks(self) -> typing.List["RosStackArtifact"]:
        '''
        :return: all the ROS stack artifacts that are included in this assembly.
        '''
        return typing.cast(typing.List["RosStackArtifact"], jsii.get(self, "stacks"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="version")
    def version(self) -> builtins.str:
        '''The schema version of the assembly manifest.'''
        return typing.cast(builtins.str, jsii.get(self, "version"))
class CloudAssemblyBuilder(
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-cxapi.CloudAssemblyBuilder",
):
'''Can be used to build a cloud assembly.'''
def __init__(self, outdir: typing.Optional[builtins.str] = None) -> None:
    '''Initializes a cloud assembly builder.
    :param outdir: The output directory, uses temporary directory if undefined.
    '''
    # Construction is delegated to the JavaScript implementation via the
    # jsii kernel; ``None`` is forwarded as ``undefined``.
    jsii.create(self.__class__, self, [outdir])
@jsii.member(jsii_name="addArtifact")
def add_artifact(
self,
id: builtins.str,
*,
type: ros_cdk_assembly_schema.ArtifactType,
| |
= ANTLRv4Parser.SetElementRefContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 559
localctx.value = self.match(ANTLRv4Parser.TOKEN_REF)
self.state = 561
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 560
self.elementOptions()
pass
elif la_ == 2:
localctx = ANTLRv4Parser.SetElementLitContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 563
localctx.value = self.match(ANTLRv4Parser.STRING_LITERAL)
self.state = 565
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 564
self.elementOptions()
pass
elif la_ == 3:
localctx = ANTLRv4Parser.SetElementRangeContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 567
self.characterRange()
pass
elif la_ == 4:
localctx = ANTLRv4Parser.SetElementCharSetContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 568
localctx.value = self.match(ANTLRv4Parser.LEXER_CHAR_SET)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlockContext(ParserRuleContext):
    # Parse-tree context for the ``block`` rule:
    #   LPAREN (optionsSpec? ruleAction* COLON)? altList RPAREN
    # (shape inferred from the accessors here and the ``block`` method).
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def LPAREN(self):
        # Opening parenthesis token.
        return self.getToken(ANTLRv4Parser.LPAREN, 0)

    def altList(self):
        return self.getTypedRuleContext(ANTLRv4Parser.AltListContext,0)

    def RPAREN(self):
        # Closing parenthesis token.
        return self.getToken(ANTLRv4Parser.RPAREN, 0)

    def COLON(self):
        # Colon terminating the optional options/actions prequel.
        return self.getToken(ANTLRv4Parser.COLON, 0)

    def optionsSpec(self):
        return self.getTypedRuleContext(ANTLRv4Parser.OptionsSpecContext,0)

    def ruleAction(self, i:int=None):
        # With no index, all ruleAction sub-contexts; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.RuleActionContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.RuleActionContext,i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_block

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterBlock" ):
            listener.enterBlock(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitBlock" ):
            listener.exitBlock(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitBlock" ):
            return visitor.visitBlock(self)
        else:
            return visitor.visitChildren(self)
def block(self):
    # Parses the ``block`` rule:
    #   LPAREN (optionsSpec? ruleAction* COLON)? altList RPAREN
    # Generated ATN code — the self.state numbers track error-recovery
    # positions and must not be reordered.
    localctx = ANTLRv4Parser.BlockContext(self, self._ctx, self.state)
    self.enterRule(localctx, 108, self.RULE_block)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 571
        self.match(ANTLRv4Parser.LPAREN)
        self.state = 582
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional prequel is taken when the lookahead is OPTIONS, AT, or
        # the terminating COLON itself.
        if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.OPTIONS) | (1 << ANTLRv4Parser.COLON) | (1 << ANTLRv4Parser.AT))) != 0):
            self.state = 573
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.OPTIONS:
                self.state = 572
                self.optionsSpec()

            self.state = 578
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Zero or more @-prefixed rule actions.
            while _la==ANTLRv4Parser.AT:
                self.state = 575
                self.ruleAction()
                self.state = 580
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            self.state = 581
            self.match(ANTLRv4Parser.COLON)

        self.state = 584
        self.altList()
        self.state = 585
        self.match(ANTLRv4Parser.RPAREN)
    except RecognitionException as re:
        # Standard generated recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RulerefContext(ParserRuleContext):
    """Parse-tree context for ``ruleref``: a RULE_REF token, optionally
    followed by an argument action block and element options."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.value = None # Token label set by the parser on match

    def RULE_REF(self):
        return self.getToken(ANTLRv4Parser.RULE_REF, 0)

    def argActionBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext, 0)

    def elementOptions(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_ruleref

    def enterRule(self, listener:ParseTreeListener):
        # Only dispatch when the listener implements the hook.
        if not hasattr(listener, "enterRuleref"):
            return
        listener.enterRuleref(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitRuleref"):
            return
        listener.exitRuleref(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Fall back to the generic child visit when no specific visitor exists.
        if not hasattr(visitor, "visitRuleref"):
            return visitor.visitChildren(self)
        return visitor.visitRuleref(self)
def ruleref(self):
    # Parses ``ruleref``: RULE_REF argActionBlock? elementOptions?
    # Generated ATN code — state numbers and statement order are load-bearing.
    localctx = ANTLRv4Parser.RulerefContext(self, self._ctx, self.state)
    self.enterRule(localctx, 110, self.RULE_ruleref)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 587
        localctx.value = self.match(ANTLRv4Parser.RULE_REF)
        self.state = 589
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional argument action block, e.g. ``[...]`` after the rule name.
        if _la==ANTLRv4Parser.BEGIN_ARGUMENT:
            self.state = 588
            self.argActionBlock()

        self.state = 592
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional ``<...>`` element options.
        if _la==ANTLRv4Parser.LT:
            self.state = 591
            self.elementOptions()

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class CharacterRangeContext(ParserRuleContext):
    """Parse-tree context for ``characterRange``:
    STRING_LITERAL RANGE STRING_LITERAL, with the two literals labeled
    ``start`` and ``end``."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.start = None # Token label: left-hand literal
        self.end = None # Token label: right-hand literal

    def RANGE(self):
        return self.getToken(ANTLRv4Parser.RANGE, 0)

    def STRING_LITERAL(self, i:int=None):
        # Indexed access returns the i-th literal; no index returns them all.
        if i is not None:
            return self.getToken(ANTLRv4Parser.STRING_LITERAL, i)
        return self.getTokens(ANTLRv4Parser.STRING_LITERAL)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_characterRange

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterCharacterRange"):
            return
        listener.enterCharacterRange(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitCharacterRange"):
            return
        listener.exitCharacterRange(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitCharacterRange"):
            return visitor.visitChildren(self)
        return visitor.visitCharacterRange(self)
def characterRange(self):
    # Parses ``characterRange``: STRING_LITERAL RANGE STRING_LITERAL,
    # labeling the two literals as ``start`` and ``end`` on the context.
    localctx = ANTLRv4Parser.CharacterRangeContext(self, self._ctx, self.state)
    self.enterRule(localctx, 112, self.RULE_characterRange)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 594
        localctx.start = self.match(ANTLRv4Parser.STRING_LITERAL)
        self.state = 595
        self.match(ANTLRv4Parser.RANGE)
        self.state = 596
        localctx.end = self.match(ANTLRv4Parser.STRING_LITERAL)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class TerminalContext(ParserRuleContext):
    # Base context for the ``terminal`` rule; actual parses are re-labeled
    # as TerminalRefContext or TerminalLitContext via copyFrom.
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_terminal

    def copyFrom(self, ctx:ParserRuleContext):
        # Copy parser state from the generic context when re-labeling.
        super().copyFrom(ctx)
class TerminalRefContext(TerminalContext):
    """Labeled alternative of ``terminal``: a TOKEN_REF, optionally
    followed by ``<...>`` element options."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.TerminalContext
        super().__init__(parser)
        self.value = None # Token label set by the parser on match
        self.copyFrom(ctx)

    def TOKEN_REF(self):
        return self.getToken(ANTLRv4Parser.TOKEN_REF, 0)

    def elementOptions(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterTerminalRef"):
            return
        listener.enterTerminalRef(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitTerminalRef"):
            return
        listener.exitTerminalRef(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Fall back to the generic child visit when no specific visitor exists.
        if not hasattr(visitor, "visitTerminalRef"):
            return visitor.visitChildren(self)
        return visitor.visitTerminalRef(self)
class TerminalLitContext(TerminalContext):
    """Labeled alternative of ``terminal``: a STRING_LITERAL, optionally
    followed by ``<...>`` element options."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.TerminalContext
        super().__init__(parser)
        self.value = None # Token label set by the parser on match
        self.copyFrom(ctx)

    def STRING_LITERAL(self):
        return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)

    def elementOptions(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterTerminalLit"):
            return
        listener.enterTerminalLit(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitTerminalLit"):
            return
        listener.exitTerminalLit(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Fall back to the generic child visit when no specific visitor exists.
        if not hasattr(visitor, "visitTerminalLit"):
            return visitor.visitChildren(self)
        return visitor.visitTerminalLit(self)
    # NOTE(review): generated rule method for 'terminal'. The numeric
    # self.state assignments are ATN state ids and must stay in sync with the
    # serialized ATN -- do not edit by hand.
    def terminal(self):
        localctx = ANTLRv4Parser.TerminalContext(self, self._ctx, self.state)
        self.enterRule(localctx, 114, self.RULE_terminal)
        self._la = 0 # Token type
        try:
            self.state = 606
            self._errHandler.sync(self)
            token = self._input.LA(1)
            # Alternative 1: TOKEN_REF, optionally followed by <elementOptions>.
            if token in [ANTLRv4Parser.TOKEN_REF]:
                localctx = ANTLRv4Parser.TerminalRefContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 598
                localctx.value = self.match(ANTLRv4Parser.TOKEN_REF)
                self.state = 600
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==ANTLRv4Parser.LT:
                    self.state = 599
                    self.elementOptions()
                pass
            # Alternative 2: STRING_LITERAL, optionally followed by <elementOptions>.
            elif token in [ANTLRv4Parser.STRING_LITERAL]:
                localctx = ANTLRv4Parser.TerminalLitContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 602
                localctx.value = self.match(ANTLRv4Parser.STRING_LITERAL)
                self.state = 604
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==ANTLRv4Parser.LT:
                    self.state = 603
                    self.elementOptions()
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # NOTE(review): generated context for 'elementOptions'
    # (i.e. "LT elementOption (COMMA elementOption)* GT").
    class ElementOptionsContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def LT(self):
            return self.getToken(ANTLRv4Parser.LT, 0)
        def elementOption(self, i:int=None):
            # i=None returns all child elementOption contexts; otherwise the i-th.
            if i is None:
                return self.getTypedRuleContexts(ANTLRv4Parser.ElementOptionContext)
            else:
                return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionContext,i)
        def GT(self):
            return self.getToken(ANTLRv4Parser.GT, 0)
        def COMMA(self, i:int=None):
            if i is None:
                return self.getTokens(ANTLRv4Parser.COMMA)
            else:
                return self.getToken(ANTLRv4Parser.COMMA, i)
        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_elementOptions
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterElementOptions" ):
                listener.enterElementOptions(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitElementOptions" ):
                listener.exitElementOptions(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitElementOptions" ):
                return visitor.visitElementOptions(self)
            else:
                return visitor.visitChildren(self)
    # NOTE(review): generated rule method for 'elementOptions'; parses
    # "< elementOption (, elementOption)* >". ATN state ids -- do not hand-edit.
    def elementOptions(self):
        localctx = ANTLRv4Parser.ElementOptionsContext(self, self._ctx, self.state)
        self.enterRule(localctx, 116, self.RULE_elementOptions)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 608
            self.match(ANTLRv4Parser.LT)
            self.state = 609
            self.elementOption()
            self.state = 614
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Zero or more ", elementOption" repetitions.
            while _la==ANTLRv4Parser.COMMA:
                self.state = 610
                self.match(ANTLRv4Parser.COMMA)
                self.state = 611
                self.elementOption()
                self.state = 616
                self._errHandler.sync(self)
                _la = self._input.LA(1)
            self.state = 617
            self.match(ANTLRv4Parser.GT)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # NOTE(review): generated context for 'elementOption'
    # (a bare identifier, or "identifier = identifier|STRING_LITERAL").
    class ElementOptionContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def identifier(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext)
            else:
                return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i)
        def ASSIGN(self):
            return self.getToken(ANTLRv4Parser.ASSIGN, 0)
        def STRING_LITERAL(self):
            return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)
        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_elementOption
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterElementOption" ):
                listener.enterElementOption(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitElementOption" ):
                listener.exitElementOption(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitElementOption" ):
                return visitor.visitElementOption(self)
            else:
                return visitor.visitChildren(self)
    # NOTE(review): generated rule method for 'elementOption'. Alternative is
    # chosen by adaptive prediction (decision 84). ATN states -- do not hand-edit.
    def elementOption(self):
        localctx = ANTLRv4Parser.ElementOptionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 118, self.RULE_elementOption)
        try:
            self.state = 626
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,84,self._ctx)
            # Alt 1: a bare identifier.
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 619
                self.identifier()
                pass
            # Alt 2: identifier '=' (identifier | STRING_LITERAL).
            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 620
                self.identifier()
                self.state = 621
                self.match(ANTLRv4Parser.ASSIGN)
                self.state = 624
                self._errHandler.sync(self)
                token = self._input.LA(1)
                if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
                    self.state = 622
                    self.identifier()
                    pass
                elif token in [ANTLRv4Parser.STRING_LITERAL]:
                    self.state = 623
                    self.match(ANTLRv4Parser.STRING_LITERAL)
                    pass
                else:
                    raise NoViableAltException(self)
                pass
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # NOTE(review): generated base context for the 'identifier' rule; concrete
    # alternatives are RuleRefIdentifierContext / TokenRefIdentifierContext.
    class IdentifierContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_identifier
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    # NOTE(review): generated context for the RULE_REF alternative of 'identifier'.
    class RuleRefIdentifierContext(IdentifierContext):
        def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.IdentifierContext
            super().__init__(parser)
            self.value = None # Token
            self.copyFrom(ctx)
        def RULE_REF(self):
            return self.getToken(ANTLRv4Parser.RULE_REF, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRuleRefIdentifier" ):
                listener.enterRuleRefIdentifier(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRuleRefIdentifier" ):
                listener.exitRuleRefIdentifier(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRuleRefIdentifier" ):
                return visitor.visitRuleRefIdentifier(self)
            else:
                return visitor.visitChildren(self)
    # NOTE(review): generated context for the TOKEN_REF alternative of 'identifier'.
    class TokenRefIdentifierContext(IdentifierContext):
        def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.IdentifierContext
            super().__init__(parser)
            self.value = None # Token
            self.copyFrom(ctx)
        def TOKEN_REF(self):
            return self.getToken(ANTLRv4Parser.TOKEN_REF, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterTokenRefIdentifier" ):
                listener.enterTokenRefIdentifier(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitTokenRefIdentifier" ):
                listener.exitTokenRefIdentifier(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitTokenRefIdentifier" ):
                return visitor.visitTokenRefIdentifier(self)
            else:
                return visitor.visitChildren(self)
def identifier(self):
localctx = ANTLRv4Parser.IdentifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 120, self.RULE_identifier)
try:
self.state = 630
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.RULE_REF]:
localctx = ANTLRv4Parser.RuleRefIdentifierContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 628
localctx.value = self.match(ANTLRv4Parser.RULE_REF)
pass
elif token in [ANTLRv4Parser.TOKEN_REF]:
localctx = ANTLRv4Parser.TokenRefIdentifierContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 629
localctx.value = self.match(ANTLRv4Parser.TOKEN_REF)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
| |
imp.cache_from_source to have
# their own cached file format, this block of code will most likely
# throw an exception.
data = bytearray(imp.get_magic())
data.extend(marshal._w_long(source_mtime))
data.extend(marshal.dumps(code_object))
try:
self.set_data(bytecode_path, data)
except NotImplementedError:
pass
return code_object
    def load_module(self, fullname):
        """Concrete implementation of Loader.load_module.
        Requires ExecutionLoader.get_filename and ResourceLoader.get_data to be
        implemented to load source code. Use of bytecode is dictated by whether
        get_code uses/writes bytecode.
        """
        # Delegate to the shared loading helper; _load_module is provided by a
        # base class defined elsewhere in this file (not visible in this chunk).
        return self._load_module(fullname)
class _FileLoader:

    """Shared behaviour for loaders whose modules live on the file system.

    Subclasses mix this in to get the filename/data half of the loader
    protocol; the finder supplies the name and path up front.
    """

    def __init__(self, fullname, path):
        """Remember the module name and file path the finder located."""
        self._path = path
        self._name = fullname

    @_check_name
    def get_filename(self, fullname):
        """Return the file system path associated with the module."""
        return self._path

    def get_data(self, path):
        """Read and return the raw bytes stored at *path*."""
        with _io.FileIO(path, 'r') as fp:
            return fp.read()
class _SourceFileLoader(_FileLoader, SourceLoader):
    """Concrete implementation of SourceLoader using the file system."""
    def path_mtime(self, path):
        """Return the modification time for the path."""
        # Truncated to int so it matches the 32-bit mtime stored in .pyc files.
        return int(_os.stat(path).st_mtime)
    def set_data(self, path, data):
        """Write bytes data to a file."""
        # Walk up from the target until an existing directory is found,
        # collecting the missing path components along the way.
        parent, _, filename = path.rpartition(path_sep)
        path_parts = []
        # Figure out what directories are missing.
        while parent and not _path_isdir(parent):
            parent, _, part = parent.rpartition(path_sep)
            path_parts.append(part)
        # Create needed directories.
        for part in reversed(path_parts):
            parent = _path_join(parent, part)
            try:
                _os.mkdir(parent)
            except OSError as exc:
                # Probably another Python process already created the dir.
                if exc.errno == errno.EEXIST:
                    continue
                else:
                    raise
            except IOError as exc:
                # If can't get proper access, then just forget about writing
                # the data.
                if exc.errno == errno.EACCES:
                    return
                else:
                    raise
        # Best-effort write: bytecode caching is an optimization, so a
        # permission failure is swallowed rather than aborting the import.
        try:
            with _io.FileIO(path, 'wb') as file:
                file.write(data)
        except IOError as exc:
            # Don't worry if you can't write bytecode.
            if exc.errno == errno.EACCES:
                return
            else:
                raise
class _SourcelessFileLoader(_FileLoader, _LoaderBasics):

    """Loader for modules that ship only as bytecode (no source file)."""

    def load_module(self, fullname):
        # Force the sourceless path through the shared loading machinery.
        return self._load_module(fullname, sourceless=True)

    def get_code(self, fullname):
        """Unmarshal and return the code object stored in the bytecode file."""
        bytecode_path = self.get_filename(fullname)
        raw = self.get_data(bytecode_path)
        code = marshal.loads(self._bytes_from_bytecode(fullname, raw, None))
        if not isinstance(code, code_type):
            raise ImportError("Non-code object in {}".format(bytecode_path))
        return code

    def get_source(self, fullname):
        """Bytecode-only modules have no source; always return None."""
        return None
class _ExtensionFileLoader:
    """Loader for extension modules.
    The constructor is designed to work with FileFinder.
    """
    def __init__(self, name, path):
        """Store the module name and path to the extension file.

        NOTE(review): the previous docstring referred to an ``is_pkg``
        argument that this constructor does not take.
        """
        self._name = name
        self._path = path
    @_check_name
    @set_package
    @set_loader
    def load_module(self, fullname):
        """Load an extension module."""
        is_reload = fullname in sys.modules
        try:
            return imp.load_dynamic(fullname, self._path)
        except:
            # A failed *fresh* import must not leave a partial entry in
            # sys.modules; a failed reload keeps the existing module.
            if not is_reload and fullname in sys.modules:
                del sys.modules[fullname]
            raise
    @_check_name
    def is_package(self, fullname):
        """Return False as an extension module can never be a package."""
        return False
    @_check_name
    def get_code(self, fullname):
        """Return None as an extension module cannot create a code object."""
        return None
    @_check_name
    def get_source(self, fullname):
        """Return None as extension modules have no source code."""
        return None
# Finders #####################################################################
class PathFinder:

    """Meta path finder driven by sys.path, sys.path_hooks and
    sys.path_importer_cache."""

    @classmethod
    def _path_hooks(cls, path, hooks=None):
        """Return the finder produced by the first hook that accepts 'path'.

        Searches sys.path_hooks when 'hooks' is falsy; raises ImportError
        when every hook declines the path.
        """
        candidates = hooks if hooks else sys.path_hooks
        for hook in candidates:
            try:
                return hook(path)
            except ImportError:
                continue
        raise ImportError("no path hook found for {0}".format(path))

    @classmethod
    def _path_importer_cache(cls, path, default=None):
        """Return the finder cached for 'path', populating the cache lazily.

        A cache miss consults the path hooks and stores the result.  A
        cached None is replaced through 'default' (which raises ImportError
        on failure) when one is supplied; NullImporter semantics mean some
        finder is normally returned.
        """
        if path not in sys.path_importer_cache:
            finder = cls._path_hooks(path)
            sys.path_importer_cache[path] = finder
            return finder
        finder = sys.path_importer_cache[path]
        if finder is None and default:
            # Raises ImportError on failure.
            finder = default(path)
            sys.path_importer_cache[path] = finder
        return finder

    @classmethod
    def find_module(cls, fullname, path=None):
        """Search 'path' (sys.path when not given) for a loader for
        'fullname' via the hook/cache machinery; None when nothing matches."""
        entries = path if path else sys.path
        for entry in entries:
            try:
                finder = cls._path_importer_cache(entry)
            except ImportError:
                continue
            if not finder:
                continue
            loader = finder.find_module(fullname)
            if loader:
                return loader
        return None
class _FileFinder:

    """File-based finder.

    Each 'detail' argument names a loader class, the file suffixes it
    handles, and whether that loader may be used for packages.
    """

    def __init__(self, path, *details):
        """Record (suffix, loader) pairs for modules and for packages."""
        self.packages = [(suffix, detail.loader)
                         for detail in details if detail.supports_packages
                         for suffix in detail.suffixes]
        self.modules = [(suffix, detail.loader)
                        for detail in details
                        for suffix in detail.suffixes]
        self.path = path

    def find_module(self, fullname):
        """Return a loader for 'fullname', or None if nothing matches."""
        modname = fullname.rpartition('.')[2]
        pkg_dir = _path_join(self.path, modname)
        # A matching directory is tried as a package first.
        if _path_isdir(pkg_dir) and _case_ok(self.path, modname):
            for suffix, loader in self.packages:
                init_filename = '__init__' + suffix
                init_path = _path_join(pkg_dir, init_filename)
                if (_path_isfile(init_path) and
                        _case_ok(pkg_dir, init_filename)):
                    return loader(fullname, init_path)
            # Directory matched but contained no recognized __init__ file.
            msg = "Not importing directory {}: missing __init__"
            _warnings.warn(msg.format(pkg_dir), ImportWarning)
        # Fall back to a plain module file next to the directory candidates.
        for suffix, loader in self.modules:
            mod_filename = modname + suffix
            full_path = _path_join(self.path, mod_filename)
            if _path_isfile(full_path) and _case_ok(self.path, mod_filename):
                return loader(fullname, full_path)
        return None
class _SourceFinderDetails:

    """Finder details for plain Python source files."""

    supports_packages = True
    loader = _SourceFileLoader

    def __init__(self):
        # Suffixes come from the interpreter's registered source suffixes.
        self.suffixes = _suffix_list(imp.PY_SOURCE)
class _SourcelessFinderDetails:

    """Finder details for bytecode-only (sourceless) files."""

    supports_packages = True
    loader = _SourcelessFileLoader

    def __init__(self):
        # Suffixes come from the interpreter's registered bytecode suffixes.
        self.suffixes = _suffix_list(imp.PY_COMPILED)
class _ExtensionFinderDetails:

    """Finder details for C extension modules (which cannot be packages)."""

    supports_packages = False
    loader = _ExtensionFileLoader

    def __init__(self):
        # Suffixes come from the interpreter's registered extension suffixes.
        self.suffixes = _suffix_list(imp.C_EXTENSION)
# Import itself ###############################################################
def _file_path_hook(path):
    """Path hook: return a file-based finder for a directory path.

    Raises ImportError for anything that is not a directory, per the
    path-hook protocol.
    """
    if not _path_isdir(path):
        raise ImportError("only directories are supported")
    return _FileFinder(path, _ExtensionFinderDetails(),
                       _SourceFinderDetails(),
                       _SourcelessFinderDetails())
# Implicit fallback path hook; consulted by _DefaultPathFinder when
# sys.path_hooks yields nothing for a path entry.
_DEFAULT_PATH_HOOK = _file_path_hook
class _DefaultPathFinder(PathFinder):

    """PathFinder variant implementing __import__'s implicit semantics."""

    @classmethod
    def _path_hooks(cls, path):
        """Try sys.path_hooks first, then fall back to the implicit hooks."""
        try:
            return super()._path_hooks(path)
        except ImportError:
            # File-system hook first, NullImporter as the hook of last resort.
            return super()._path_hooks(path,
                                       [_DEFAULT_PATH_HOOK, imp.NullImporter])

    @classmethod
    def _path_importer_cache(cls, path):
        """Cache lookup that substitutes the default hook for a cached None."""
        return super()._path_importer_cache(path, _DEFAULT_PATH_HOOK)
class _ImportLockContext:
    """Context manager for the import lock."""
    # __enter__ deliberately returns None; usage is "with _ImportLockContext():".
    def __enter__(self):
        """Acquire the import lock."""
        imp.acquire_lock()
    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Release the import lock regardless of any raised exceptions."""
        imp.release_lock()
# Finders implicitly appended after sys.meta_path during import (see _gcd_import).
_IMPLICIT_META_PATH = [BuiltinImporter, FrozenImporter, _DefaultPathFinder]
# Format template for module-not-found error messages.
_ERR_MSG = 'No module named {}'
def _gcd_import(name, package=None, level=0):
"""Import and return the module based on its name, the package the call is
being made from, and the level adjustment.
This function represents the greatest common denominator of functionality
between import_module and __import__. This includes settting __package__ if
the loader did not.
"""
if package:
if not hasattr(package, 'rindex'):
raise ValueError("__package__ not set to a string")
elif package not in sys.modules:
msg = ("Parent module {0!r} not loaded, cannot perform relative "
"import")
raise SystemError(msg.format(package))
if not name and level == 0:
raise ValueError("Empty module name")
if level > 0:
dot = len(package)
for x in range(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond "
"top-level package")
if name:
name = "{0}.{1}".format(package[:dot], name)
else:
name = package[:dot]
with _ImportLockContext():
try:
module = sys.modules[name]
if module is None:
message = ("import of {} halted; "
"None in sys.modules".format(name))
raise ImportError(message)
return module
except KeyError:
pass
parent = name.rpartition('.')[0]
path = None
if parent:
if parent not in sys.modules:
_gcd_import(parent)
# Backwards-compatibility; be nicer to skip the dict lookup.
parent_module = sys.modules[parent]
try:
path = parent_module.__path__
except AttributeError:
msg = (_ERR_MSG + '; {} is not a package').format(name, parent)
raise ImportError(msg)
meta_path = sys.meta_path + _IMPLICIT_META_PATH
for finder in meta_path:
loader = finder.find_module(name, path)
if loader is not None:
# The parent import may have already imported this module.
if name not in sys.modules:
loader.load_module(name)
break
else:
raise ImportError(_ERR_MSG.format(name))
# Backwards-compatibility; be nicer to skip the dict lookup.
module = sys.modules[name]
if parent:
# Set the module as an attribute on its parent.
setattr(parent_module, name.rpartition('.')[2], module)
# Set __package__ if the loader did not.
if not hasattr(module, '__package__') or module.__package__ is None:
# Watch out for what comes out of sys.modules to not be a module,
# e.g. an int.
try:
module.__package__ = | |
{
0x01 : "Intruder", 0x02 : "Intruder", 0x03 : "Intruder", 0x04 : "Intruder", 0x05 : "Intruder", 0x06 : "Tamper",
0x07 : "Tamper", 0x08 : "Tamper", 0x09 : "Tamper", 0x0B : "Panic", 0x0C : "Panic", 0x20 : "Fire",
0x23 : "Emergency", 0x49 : "Gas", 0x4D : "Flood"
}
# Trouble category reported for a given panel event code.
# NOTE(review): key meanings inferred from the table's name -- confirm
# against the panel protocol documentation.
pmPanelTroubleType_t = {
    0x0A : "Communication",
    0x0F : "General",
    0x29 : "Battery",
    0x2B : "Power",
    0x2D : "Battery",
    0x2F : "Jamming",
    0x31 : "Communication",
    0x33 : "Telephone",
    0x36 : "Power",
    0x38 : "Battery",
    0x3B : "Battery",
    0x3C : "Battery",
    0x40 : "Battery",
    0x43 : "Battery"
}
# Human-readable model name for each panel-type id (0-8).
pmPanelType_t = {
    0 : "PowerMax",
    1 : "PowerMax+",
    2 : "PowerMax Pro",
    3 : "PowerMax Complete",
    4 : "PowerMax Pro Part",
    5 : "PowerMax Complete Part",
    6 : "PowerMax Express",
    7 : "PowerMaster10",
    8 : "PowerMaster30"
}
# Config for each panel type (1-9)
# Each tuple holds one capacity per panel-type id 0-8 (see pmPanelType_t),
# e.g. pmPanelConfig_t["CFG_SIRENS"][8] is the siren count for a PowerMaster30.
pmPanelConfig_t = {
    "CFG_PARTITIONS"  : (   1,   1,   1,   1,   3,   3,   1,   3,    3 ),
    "CFG_EVENTS"      : ( 250, 250, 250, 250, 250, 250, 250, 250, 1000 ),
    "CFG_KEYFOBS"     : (   8,   8,   8,   8,   8,   8,   8,   8,   32 ),
    "CFG_1WKEYPADS"   : (   8,   8,   8,   8,   8,   8,   8,   0,    0 ),
    "CFG_2WKEYPADS"   : (   2,   2,   2,   2,   2,   2,   2,   8,   32 ),
    "CFG_SIRENS"      : (   2,   2,   2,   2,   2,   2,   2,   4,    8 ),
    "CFG_USERCODES"   : (   8,   8,   8,   8,   8,   8,   8,   8,   48 ),
    "CFG_PROXTAGS"    : (   0,   0,   8,   0,   8,   8,   0,   8,   32 ),
    # NOTE(review): original carried an unexplained "30, 64" note on this row.
    "CFG_WIRELESS"    : (  28,  28,  28,  28,  28,  28,  28,  29,   62 ),
    "CFG_WIRED"       : (   2,   2,   2,   2,   2,   2,   1,   1,    2 ),
    "CFG_ZONECUSTOM"  : (   0,   5,   5,   5,   5,   5,   5,   5,    5 )
}
# PMAX EEPROM CONFIGURATION version 1_2
# One row of the EEPROM settings decode table (used by DecodePanelSettings):
# display flag, repeat count, value type, bit size, panel offset/size/step,
# bit offset, display name(s) and a value->label mapping.
SettingsCommand = collections.namedtuple(
    'SettingsCommand',
    ['show', 'count', 'type', 'size', 'poff', 'psize', 'pstep', 'pbitoff',
     'name', 'values'])
DecodePanelSettings = {
# USER SETTINGS
"usePhoneNrs" : SettingsCommand( False, 4, "PHONE", 64, 310, 64, 8, -1, ["1st Private Tel. No.","2nd Private Tel. No.","3rd Private Tel. No.","4th Private Tel. No."], {} ), # 310, 318, 326, 334
"usrVoice" : SettingsCommand( True, 1, "BYTE", 8, 763, 8, 0, -1, "Set Voice Option", { '0':"Disable Voice", '1':"Enable Voice"} ),
"usrArmOption" : SettingsCommand( True, 1, "BYTE", 8, 280, 1, 0, 5, "Auto Arm Option", { '1':"Enable", '0':"Disable"} ),
"usrArmTime" : SettingsCommand( True, 1, "TIME", 16, 765, 16, 0, -1, "Auto Arm Time", { }),
"usrSquawk" : SettingsCommand( True, 1, "BYTE", 8, 764, 8, 0, -1, "Squawk Option", { '0':"Disable", '1':"Low Level", '2':"Medium Level", '3':"High Level"}),
"usrTimeFormat" : SettingsCommand( True, 1, "BYTE", 8, 281, 1, 0, 1, "Time Format", { '0':"USA - 12H", '1':"Europe - 24H"}),
"usrDateFormat" : SettingsCommand( True, 1, "BYTE", 8, 281, 1, 0, 2, "Date Format", { '0':"USA MM/DD/YYYY", '1':"Europe DD/MM/YYYY"}),
# PANEL DEFINITION
"entryDelays" : SettingsCommand( True, 2, "BYTE", 8, 257, 8, 1, 2, ["Entry Delay 1","Entry Delay 2"], {'0':"None", '15':"15 Seconds", '30':"30 Seconds", '45':"45 Seconds", '60':"1 Minute", '180':"3 Minutes", '240':"4 Minutes"}), # 257, 258
"exitDelay" : SettingsCommand( True, 1, "BYTE", 8, 259, 8, 0, -1, "Exit Delay", { '30':"30 Seconds", '60':"60 Seconds", '90':"90 Seconds", '120':"2 Minutes", '180':"3 Minutes", '240':"4 Minutes"}),
"bellTime" : SettingsCommand( True, 1, "BYTE", 8, 260, 8, 0, -1, "Bell Time", { '1':"1 Minute", '3':"3 Minutes", '4':"4 Minutes", '8':"8 Minutes", '10':"10 Minutes", '15':"15 Minutes", '20':"20 Minutes"}),
"abortTime" : SettingsCommand( True, 1, "BYTE", 8, 267, 8, 0, -1, "Abort Time", { '0':"None", '15':"15 Seconds", '30':"30 Seconds", '45':"45 Seconds", '60':"1 Minute", '120':"2 Minutes", '180':"3 Minutes", '240':"4 Minutes"} ),
"cancelTime" : SettingsCommand( True, 1, "BYTE", 8, 266, 8, 0, -1, "Alarm Cancel Time", { '1':"1 Minute", '5':"5 Minutes", '15':"15 Minutes", '60':"60 Minutes", '240':"4 Hours", '0':"Inactive"}),
"quickArm" : SettingsCommand( True, 1, "BYTE", 8, 283, 1, 0, 3, "Quick Arm", { '1':"On", '0':"Off"} ),
"bypass" : SettingsCommand( True, 1, "BYTE", 8, 284, 2, 0, 6, "Bypass", { '2':"Manual Bypass", '0':"No Bypass", '1':"Force Arm"} ),
"exitMode" : SettingsCommand( True, 1, "BYTE", 8, 282, 2, 0, 6, "Exit Mode", { '1':"Restart Exit", '2':"Off by Door", '0':"Normal"} ),
"piezoBeeps" : SettingsCommand( True, 1, "BYTE", 8, 261, 8, 0, -1, "Piezo Beeps", { '2':"Enable", '1':"Off when Home", '0':"Disable"} ),
"troubleBeeps" : SettingsCommand( True, 1, "BYTE", 8, 284, 2, 0, 1, "Trouble Beeps", { '3':"Enable", '1':"Off at Night", '0':"Disable"} ),
"panicAlarm" : SettingsCommand( True, 1, "BYTE", 8, 282, 2, 0, 4, "Panic Alarm", { '1':"Silent Panic", '2':"Audible Panic", '0':"Disable Panic"} ),
"swingerStop" : SettingsCommand( True, 1, "BYTE", 8, 262, 8, 0, -1, "Swinger Stop", { '1':"After 1 Time", '2':"After 2 Times", '3':"After 3 Times", '0':"No Shutdown"} ),
"crossZoning" : SettingsCommand( True, 1, "BYTE", 8, 284, 1, 0, 0, "Cross Zoning", { '1':"On", '0':"Off"} ),
"supervision" : SettingsCommand( True, 1, "BYTE", 8, 264, 8, 0, -1, "Supevision Interval", { '1':"1 Hour", '2':"2 Hours", '4':"4 Hours", '8':"8 Hours", '12':"12 Hours", '0':"Disable"} ),
"notReady" : SettingsCommand( True, 1, "BYTE", 8, 281, 1, 0, 4, "Not Ready", { '0':"Normal", '1':"In Supervision"} ),
"fobAux" : SettingsCommand( True, 2, "BYTE", 8, 263, 8, 14, -1, ["Auxiliary Keyfob Button function 1","Auxiliary Keyfob Button function 2"], { '1':"System Status", '2':"Instant Arm", '3':"Cancel Exit Delay", '4':"PGM/X-10"} ), # 263, 277
"jamDetect" : SettingsCommand( True, 1, "BYTE", 8, 256, 8, 0, -1, "Jamming Detection", { '1':"UL 20/20", '2':"EN 30/60", '3':"Class 6", '4':"Other", '0':"Disable"} ),
"latchKey" : SettingsCommand( True, 1, "BYTE", 8, 283, 1, 0, 7, "Latchkey Arming", { '1':"On", '0':"Off"} ),
"noActivity" : SettingsCommand( True, 1, "BYTE", 8, 265, 8, 0, -1, "No Activity Time", { '3':"3 Hours", '6':"6 Hours",'12':"12 Hours", '24':"24 Hours", '48':"48 Hours", '72':"72 Hours", '0':"Disable"} ),
"backLight" : SettingsCommand( True, 1, "BYTE", 8, 283, 1, 0, 5, "Back Light Time", { '1':"Allways On", '0':"Off After 10 Seconds"} ),
"duress" : SettingsCommand( True, 1, "CODE", 16, 273, 16, 0, -1, "Duress", { } ),
"piezoSiren" : SettingsCommand( True, 1, "BYTE", 8, 284, 1, 0, 5, "Piezo Siren", { '1':"On", '0':"Off"} ),
"resetOption" : SettingsCommand( True, 1, "BYTE", 8, 270, 8, 0, -1, "Reset Option", { '1':"Engineer Reset", '0':"User Reset"} ),
"tamperOption" : SettingsCommand( True, 1, "BYTE", 8, 280, 1, 0, 1, "Tamper Option", { '1':"On", '0':"Off"} ),
"sirenOnLine" : SettingsCommand( True, 1, "BYTE", 8, 282, 1, 0, 1, "Siren On Line", { '1':"Enable on Fail", '0':"Disable on Fail"} ),
"memoryPrompt" : SettingsCommand( True, 1, "BYTE", 8, 281, 1, 0, 0, "Memory Prompt", { '1':"Enable", '0':"Disable" } ),
"disarmOption" : SettingsCommand( True, 1, "BYTE", 8, 281, 2, 0, 6, "Disarm Option", { '0':"Any Time", '1':"On Entry All", '2':"On Entry Wireless", '3':"Entry + Away KP"} ),
"bellReport" : SettingsCommand( True, 1, "BYTE", 8, 283, 1, 0, 0, "Bell Report Option", { '1':"EN Standard", '0':"Others"} ),
"lowBattery" : SettingsCommand( True, 1, "BYTE", 8, 281, 1, 0, 3, "Low Battery Acknowledge", { '1':"On", '0':"Off"} ),
"screenSaver" : SettingsCommand( True, 1, "BYTE", 8, 269, 8, 0, -1, "Screen Saver", { '2':"Reset By Key", '1':"Reset By Code", '0':"Off"} ),
"confirmAlarm" : SettingsCommand( True, 1, "BYTE", 8, 268, 8, 0, -1, "Confirm Alarm Timer", { '0':"None", '30':"30 Minutes", '45':"45 Minutes", '60':"60 Minutes", '90':"90 Minutes"} ),
"acFailure" : SettingsCommand( True, 1, "BYTE", 8, 275, 8, 0, -1, "AC Failure Report", { '0':"None", '5':"5 Minutes", '30':"30 Minutes", '60':"60 Minutes", '180':"180 Minutes"} ),
"userPermit" : SettingsCommand( True, 1, "BYTE", 8, 276, 8, 0, -1, "User Permit", { '1':"Enable", '0':"Disable"} ),
# COMMUNICATION SETTINGS
"autotestTime" : SettingsCommand( True, 1, "TIME", 16, 367, 16, 0, -1, "Autotest Time", {} ),
"autotestCycle" : SettingsCommand( True, 1, "BYTE", 8, 369, 8, 0, -1, "Autotest Cycle", { '1':"1 Day", '4':"5 Days", '2':"7 Days", '3':"30 Days", '0':"Disable"} ),
"areaCode" : SettingsCommand( False, 1, "CODE", 24, 371, 24, 0, -1, "Area Code", {} ),
"outAccessNr" : SettingsCommand( False, 1, "CODE", 8, 374, 8, 0, -1, "Out Access Number", {} ),
"centralStation" : SettingsCommand( True, 2, "PHONE", 64, 288, 64, 11, -1, ["1st Central Station (CNTR) Tel. No.", "2nd Central Station (CNTR) Tel. No."], {} ), | |
# exploretransform/_exploretransform.py
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
import plotnine as pn
from scipy.stats import skew
from pandas.api.types import CategoricalDtype
from sklearn.base import BaseEstimator, TransformerMixin
from minepy import MINE
from scipy import stats
from dcor import distance_correlation
def nested(obj, retloc = False):
    '''
    ----------
    Parameters
    ----------
    obj: a list, series, or dataframe
    retloc: True or False

    Returns
    -------
    retloc = True
        Returns locations of nested objects: For dataframes, it returns
        (row, column) tuples; for other objects it returns a list of indices
    retloc = False
        Returns True if any nested objects reside in passed object, False otherwise

    Example
    -------
    a = pd.DataFrame({'first' : [1, 2, 3, (1,2,3), 4, 5, 6],
                      'second': [2, 4, 5, [1,3,4], 6, 7, 8]}
                     , columns = ['first', 'second'])

    nested(a, retloc = True)
    [(3, 0), (3, 1)]

    nested(a)
    True
    ----------
    '''
    # object types this function accepts
    otypes = (list, pd.core.series.Series, pd.core.frame.DataFrame)

    if not isinstance(obj, otypes):
        return "Function only accepts: List, Series, or Dataframe"

    # types that count as "nested" when found inside obj
    ntypes = (list, tuple, set, np.ndarray,
              pd.core.indexes.base.Index,
              pd.core.series.Series,
              pd.core.frame.DataFrame)

    # store locations of nested items
    locs = list()

    if isinstance(obj, pd.core.frame.DataFrame):
        for row in range(len(obj)):
            for col in range(len(obj.columns)):
                if isinstance(obj.iloc[row, col], ntypes):
                    locs.append((row, col))
    else:
        # enumerate iterates values positionally, so a Series with a
        # non-default index works too (label-based obj[i] would fail there)
        for i, item in enumerate(obj):
            if isinstance(item, ntypes):
                locs.append(i)

    return locs if retloc else len(locs) > 0
def loadboston():
    '''
    ----------
    Parameters
    ----------
    None

    Returns
    -------
    Boston corrected data objects:
        1. df X and y dataframe
        2. X predictors dataframe
        3. y target series
    -------
    '''
    # Fetch the corrected Boston housing table (network access required).
    source = 'https://raw.githubusercontent.com/bxp151/exploretransform/master/data/boston_corrected.txt'
    df = pd.read_table(source, skiprows= 9)
    df.columns = [c.lower() for c in df.columns]
    df = df.drop(['obs.', 'town#', 'medv', 'tract'], axis=1)

    # chas is a plain (unordered) category
    df['chas'] = df['chas'].astype('category')

    # rad becomes an ordered category spanning its observed integer range
    lo, hi = df['rad'].min(), df['rad'].max()
    rad_cat = CategoricalDtype(categories=list(range(lo, hi + 1)), ordered=True)
    df['rad'] = df['rad'].astype(rad_cat)

    x = df.drop('cmedv', axis=1)
    y = df['cmedv']
    return df, x, y
def explore(X):
    '''
    ----------
    Parameters
    ----------
    X: dataframe to analyze

    Returns
    -------
    Dataframe with statistics for each variable:
    variable    name of column
    obs         number of observations
    q_zer       number of zeros
    p_zer       percent zeros
    q_na        number of missing
    p_na        percent missing
    q_inf       quantity of infinity
    p_inf       percent infinity
    dtype       Python dtype

    Example
    -------
    import exploretransform as et
    df, X, y = et.loadboston()
    et.explore(df.iloc[:,0:5])
    ----------
    '''
    # Input checks: only flat (non-nested) dataframes are supported.
    if not isinstance(X, pd.core.frame.DataFrame):
        # typo fixed: was "accetps"
        return "Function only accepts dataframes"
    if nested(X):
        return "Please collapse any nested values in your dataframe"

    # counts zeros for numeric dtype and returns zero for others
    def cntzero(series):
        return sum(series == 0) if is_numeric_dtype(series) else 0

    # counts inf values for numeric dtype and returns zero for others
    def cntinf(series):
        return sum(np.isinf(series)) if is_numeric_dtype(series) else 0

    n = len(X)
    df = pd.DataFrame({'variable': X.columns})
    df['obs'] = n
    df['q_zer'] = X.apply(cntzero, axis=0).values
    df['p_zer'] = round(df['q_zer'] / n * 100, 2)
    df['q_na'] = X.isna().sum().values
    df['p_na'] = round(df['q_na'] / n * 100, 2)
    df['q_inf'] = X.apply(cntinf, axis=0).values
    df['p_inf'] = round(df['q_inf'] / n * 100, 2)
    df['dtype'] = X.dtypes.to_frame('dtypes').reset_index()['dtypes']
    return df
def peek(X):
    '''
    ----------
    Parameters
    ----------
    X: dataframe to peek into

    Returns
    -------
    Columns based on passed dataframe:
    variable    name of variable
    dtype       Python dtype
    lvls        unique values of variable
    obs         number of observations
    head        first five observations

    Example
    -------
    import exploretransform as et
    df, X, y = et.loadboston()
    et.peek(df.iloc[:,0:5])
    ----------
    '''
    # Input checks: only flat (non-nested) dataframes are supported.
    if not isinstance(X, pd.core.frame.DataFrame):
        # typo fixed: was "accetps"
        return "Function only accepts dataframes"
    if nested(X):
        return "Please collapse any nested values in your dataframe"

    g = pd.DataFrame({'variable': X.columns,
                      'dtype': X.dtypes.to_frame('dtypes').reset_index()['dtypes']},
                     index=range(len(X.columns)))
    g['lvls'] = X.nunique().values
    g['obs'] = len(X)
    g['head'] = ''

    # First five items of each variable: apply head per column, then
    # transpose so each row of 'heads' lines up with one variable.
    heads = X.apply(pd.DataFrame.head, axis=0).T.values
    for i in range(len(heads)):
        g.at[i, 'head'] = heads[i]
    return g
def plotfreq(freqdf):
    '''
    ----------
    Parameters
    ----------
    freqdf      dataframe generated by freq()

    Returns
    -------
    Bar chart with frequencies & percentages in descending order
    (None after printing a message when the input is unusable)

    Example
    -------
    import exploretransform as et
    df, X, y = et.loadboston()
    et.plotfreq(et.freq(X['town']))

    Warning
    -------
    This function will likely not plot more than 100 unique levels properly.
    ----------
    '''
    # Input checks: must look like freq() output and stay plottable.
    if not isinstance(freqdf, pd.core.frame.DataFrame):
        # typo fixed: was "accetps"
        return print("\nFunction only accepts dataframes\n")
    if len(freqdf.columns) != 4:
        return print("\nInput must be a dataframe generated by freq()\n")
    if sum(freqdf.columns[1:4] == ['freq', 'perc', 'cump']) != 3:
        return print("\nInput must be a dataframe generated by freq()\n")
    if len(freqdf) > 100:
        return print("\nUnable to plot more than 100 items")

    # bar label: "count [ percent% ]"
    lbl = freqdf['freq'].astype(str).str.cat(
        '[ ' + freqdf['perc'].astype(str) + '%' + ' ]', sep=' ')

    # order bars by frequency
    aesx = 'reorder(' + freqdf.columns[0] + ', freq)'

    plot = (
        pn.ggplot(freqdf) +
        pn.aes(x=aesx,
               y='freq',
               fill='freq',
               label=lbl) +
        pn.geom_bar(stat='identity') +
        pn.coord_flip() +
        pn.theme(axis_text_y=pn.element_text(size=6, weight='bold'),
                 legend_position='none') +
        pn.labs(x=freqdf.columns[0], y="Freq") +
        pn.scale_fill_gradient2(mid='bisque', high='blue') +
        pn.geom_text(size=6,
                     nudge_y=.7)
    )
    return plot
def freq(srs):
    '''
    ----------
    Parameters
    ----------
    srs: series to analyze
    Returns
    -------
    Dataframe with the following columns:
    <name> The unique values of the series
    freq Count of each level
    perc Percent each level contributes
    cump Cumulative percent
    Example
    -------
    import exploretransform as et
    df, X, y = et.loadboston()
    et.freq(X['town'])
                  town  freq  perc    cump
    0        Cambridge    30  5.93    5.93
    1             Lynn    22  4.35   14.82
    ..             ...   ...   ...     ...
    91         Lincoln     1  0.20  100.00
    ----------
    '''
    # Input check: return a message string (module convention, not raising).
    # FIX: corrected the "accetps" typo in the user-facing message.
    if not isinstance(srs, pd.Series):
        return "Function only accepts series"
    # Levels sorted by descending count via value_counts().
    cnts = srs.value_counts()
    # Percent share and cumulative percent, rounded to 2 decimals.
    perc = round(cnts / cnts.sum() * 100, 2)
    cump = round(100 * (cnts.cumsum() / cnts.sum()), 2)
    freqdf = pd.DataFrame(data=dict(var=cnts.keys(),
                                    freq=cnts,
                                    perc=perc,
                                    cump=cump))
    # First column takes the series' name.
    freqdf.rename(columns={'var': srs.name}, inplace=True)
    freqdf = freqdf.reset_index(drop=True)
    return freqdf
def corrtable(X, y = None, cut = 0.9, methodx = 'spearman', methody = None, full = False):
'''
----------
Parameters
----------
X predictors dataframe
y target (unused in exploretransform v 1.0.0)
cut correlation threshold
full
True Returns the full corrtable with drop column
False (default) Returns without the drop column
methodx used to calculate correlations amount predictors
methody* used to calculate correlations between predictors & target
*(unused in exploretransform v 1.0.0)
pearson standard correlation coefficient
kendall Kendall Tau | |
cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
data = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
cv.Split(input[0],data,blank,None,None)
min, max, pt1, pt2 = cv.MinMaxLoc(data)
denom = max-min
if(denom == 0):
denom = 1
cv.Scale(data, data, 1.0/(denom), 1.0*(-min)/(denom))
cv.Mul(data,data,data,255.0)
cv.Convert(data,result)
retVal = Image(result)
else: # DO RGB separately
results = []
data = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
for i in range(0,len(input)):
cv.DFT(input[i], input[i], cv.CV_DXT_INV_SCALE)
result = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
cv.Split( input[i],data,blank,None,None)
min, max, pt1, pt2 = cv.MinMaxLoc(data)
denom = max-min
if(denom == 0):
denom = 1
cv.Scale(data, data, 1.0/(denom), 1.0*(-min)/(denom))
cv.Mul(data,data,data,255.0) # this may not be right
cv.Convert(data,result)
results.append(result)
retVal = cv.CreateImage((w,h),cv.IPL_DEPTH_8U,3)
cv.Merge(results[0],results[1],results[2],None,retVal)
retVal = Image(retVal)
del input
return retVal
def InverseDFT(self, raw_dft_image):
    """
    **SUMMARY**
    This method provides a way of performing an inverse discrete Fourier transform
    on a real/imaginary image pair and obtaining the result as a SimpleCV image. This
    method is helpful if you wish to perform custom filter development.
    **PARAMETERS**
    * *raw_dft_image* - A list object with either one or three IPL images. Each image should
    have a 64f depth and contain two channels (the real and the imaginary).
    **RETURNS**
    A simpleCV image.
    **EXAMPLE**
    Note that this is an example, I don't recommend doing this unless you know what
    you are doing.
    >>> raw = img.getRawDFT()
    >>> cv.SomeOperation(raw)
    >>> result = img.InverseDFT(raw)
    >>> result.show()
    **SEE ALSO**
    :py:meth:`rawDFTImage`
    :py:meth:`getDFTLogMagnitude`
    :py:meth:`applyDFTFilter`
    :py:meth:`highPassFilter`
    :py:meth:`lowPassFilter`
    :py:meth:`bandPassFilter`
    :py:meth:`InverseDFT`
    :py:meth:`applyButterworthFilter`
    :py:meth:`applyGaussianFilter`
    :py:meth:`applyUnsharpMask`
    """
    w = raw_dft_image[0].width
    h = raw_dft_image[0].height

    def _rescale(src64f, dst8u):
        # Min/max-normalize the (real) channel into [0,255] and convert
        # to 8-bit. A flat channel (max == min) maps to zero.
        lo, hi, pt1, pt2 = cv.MinMaxLoc(src64f)
        denom = hi - lo
        if denom == 0:
            denom = 1
        cv.Scale(src64f, src64f, 1.0 / denom, 1.0 * (-lo) / denom)
        cv.Mul(src64f, src64f, src64f, 255.0)
        cv.Convert(src64f, dst8u)

    # Work on copies so cv.DFT (which runs in place) never clobbers the
    # caller's images.
    dft_input = []
    if len(raw_dft_image) == 1:
        gs = cv.CreateImage((w, h), cv.IPL_DEPTH_64F, 2)
        # BUGFIX: copy the image that was passed in, not self._DFT[0], so
        # the method actually operates on raw_dft_image as documented.
        cv.Copy(raw_dft_image[0], gs)
        dft_input.append(gs)
    else:
        for img in raw_dft_image:
            temp = cv.CreateImage((w, h), cv.IPL_DEPTH_64F, 2)
            cv.Copy(img, temp)
            # BUGFIX: append the copy; appending the original `img`
            # defeated the copy and let cv.DFT mutate the caller's data.
            dft_input.append(temp)

    if len(dft_input) == 1:
        # Grayscale: one inverse DFT, then normalize the real channel.
        cv.DFT(dft_input[0], dft_input[0], cv.CV_DXT_INV_SCALE)
        result = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 1)
        data = cv.CreateImage((w, h), cv.IPL_DEPTH_64F, 1)
        blank = cv.CreateImage((w, h), cv.IPL_DEPTH_64F, 1)
        cv.Split(dft_input[0], data, blank, None, None)
        _rescale(data, result)
        retVal = Image(result)
    else:
        # RGB: process each channel independently, then merge.
        results = []
        data = cv.CreateImage((w, h), cv.IPL_DEPTH_64F, 1)
        blank = cv.CreateImage((w, h), cv.IPL_DEPTH_64F, 1)
        for i in range(len(dft_input)):
            cv.DFT(dft_input[i], dft_input[i], cv.CV_DXT_INV_SCALE)
            result = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 1)
            cv.Split(dft_input[i], data, blank, None, None)
            _rescale(data, result)
            results.append(result)
        retVal = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 3)
        cv.Merge(results[0], results[1], results[2], None, retVal)
        retVal = Image(retVal)
    return retVal
def applyButterworthFilter(self, dia=400, order=2, highpass=False, grayscale=False):
    """
    **SUMMARY**
    Build a 64x64 Butterworth filter kernel, resize it to this image's
    size, and apply it in the frequency domain via applyDFTFilter.
    Returns the filtered image.
    **PARAMETERS**
    * *dia* - int Diameter of Butterworth low pass filter
    * *order* - int Order of butterworth lowpass filter
    * *highpass*: BOOL True: highpass filter, False: lowpass filter
    * *grayscale*: BOOL
    **EXAMPLE**
    >>> im = Image("lenna")
    >>> img = im.applyButterworthFilter(dia=400, order=2, highpass=True, grayscale=False)
    **SEE ALSO**
    :py:meth:`rawDFTImage`
    :py:meth:`getDFTLogMagnitude`
    :py:meth:`applyDFTFilter`
    :py:meth:`highPassFilter`
    :py:meth:`lowPassFilter`
    :py:meth:`bandPassFilter`
    :py:meth:`InverseDFT`
    :py:meth:`applyGaussianFilter`
    :py:meth:`applyUnsharpMask`
    """
    width, height = self.size()
    kernel = cv.CreateImage((64, 64), cv.IPL_DEPTH_8U, 1)
    # Rescale the requested diameter into the 64x64 kernel's coordinates.
    dia = int(dia / ((width / 64.0 + height / 64.0) / 2.0))
    for row in range(64):
        for col in range(64):
            # Radial distance from the kernel center (32, 32).
            dist = sqrt((col - 32) ** 2 + (row - 32) ** 2)
            lowpass_val = 255 / (1 + (dist / dia) ** (order * 2))
            kernel[row, col] = 255 - lowpass_val if highpass else lowpass_val
    filt = Image(kernel)
    # Stretch the kernel to full image size and apply it in the DFT domain.
    return self.applyDFTFilter(filt.resize(width, height), grayscale)
def applyGaussianFilter(self, dia=400, highpass=False, grayscale=False):
    """
    **SUMMARY**
    Build a 64x64 Gaussian filter kernel, resize it to this image's size,
    and apply it in the frequency domain via applyDFTFilter.
    Returns the filtered image.
    **PARAMETERS**
    * *dia* - int - diameter of Gaussian filter
    * *highpass*: BOOL True: highpass filter False: lowpass filter
    * *grayscale*: BOOL
    **EXAMPLE**
    >>> im = Image("lenna")
    >>> img = im.applyGaussianFilter(dia=400, highpass=True, grayscale=False)
    **SEE ALSO**
    :py:meth:`rawDFTImage`
    :py:meth:`getDFTLogMagnitude`
    :py:meth:`applyDFTFilter`
    :py:meth:`highPassFilter`
    :py:meth:`lowPassFilter`
    :py:meth:`bandPassFilter`
    :py:meth:`InverseDFT`
    :py:meth:`applyButterworthFilter`
    :py:meth:`applyUnsharpMask`
    """
    width, height = self.size()
    kernel = cv.CreateImage((64, 64), cv.IPL_DEPTH_8U, 1)
    # Rescale the requested diameter into the 64x64 kernel's coordinates.
    dia = int(dia / ((width / 64.0 + height / 64.0) / 2.0))
    for row in range(64):
        for col in range(64):
            # Radial distance from the kernel center (32, 32).
            dist = sqrt((col - 32) ** 2 + (row - 32) ** 2)
            lowpass_val = 255.0 * math.exp(-(dist ** 2) / ((dia ** 2) * 2))
            kernel[row, col] = 255 - lowpass_val if highpass else lowpass_val
    filt = Image(kernel)
    # Stretch the kernel to full image size and apply it in the DFT domain.
    return self.applyDFTFilter(filt.resize(width, height), grayscale)
def applyUnsharpMask(self,boost=1,dia=400,grayscale=False):
"""
**SUMMARY**
This method applies unsharp mask or highboost filtering
on image depending upon the boost value provided.
DFT is applied on image using gaussian lowpass filter.
A mask is created subtracting the DFT image from the original
iamge. And then mask is added in the image to sharpen it.
unsharp masking => image + mask
highboost filtering => image + (boost)*mask
**PARAMETERS**
* *boost* - int boost = 1 => unsharp masking, boost > 1 => highboost filtering
* *dia* - int Diameter of Gaussian low pass filter
* *grayscale* - BOOL
**EXAMPLE**
Gaussian Filters:
>>> im = Image("lenna")
>>> img = applyUnsharpMask(im,2,grayscale=False) #highboost filtering
output image: http://i.imgur.com/A1pZf.png
>>> img = applyUnsharpMask(im,1,grayscale=False) #unsharp masking
output image: http://i.imgur.com/smCdL.png
>>> im = Image("grayscale_lenn.png") #take image from here: http://i.imgur.com/O0gZn.png
>>> img = applyUnsharpMask(im,2,grayscale=True) #highboost filtering
output image: http://i.imgur.com/VtGzl.png
>>> img = applyUnsharpMask(im,1,grayscale=True) #unsharp masking
output image: http://i.imgur.com/bywny.png
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
if boost < 0:
print "boost >= 1"
return None
lpIm = self.applyGaussianFilter(dia=dia,grayscale=grayscale,highpass=False)
im = Image(self.getBitmap())
mask = im - lpIm
img = im
for i in range(boost):
img = img + mask
return img
def listHaarFeatures(self):
'''
This is used to list the built in features available for HaarCascade feature
detection. Just run this function as:
>>> img.listHaarFeatures()
Then use one of the file names returned as the input to the findHaarFeature()
function. So you should get a list, more than likely you will see face.xml,
to use it then just
>>> img.findHaarFeatures('face.xml')
'''
features_directory = os.path.join(LAUNCH_PATH, 'Features','HaarCascades')
features = os.listdir(features_directory)
print features
def _CopyAvg(self, src, dst, roi, levels, levels_f, mode):
    '''
    Take the value in an ROI, calculate the average / peak hue
    and then set the output image roi to the value.
    roi is (x, y, width, height); levels/levels_f optionally quantize the
    average color; mode truthy selects peak-hue fill instead of average.
    '''
    if mode:  # fill the ROI with the area's peak hue
        h = src[roi[0]:roi[0] + roi[2], roi[1]:roi[1] + roi[3]].hueHistogram()
        myHue = np.argmax(h)
        # BUGFIX: this tuple was previously bound to 'C' but used as 'c',
        # which raised NameError whenever mode was truthy.
        color = (float(myHue), float(255), float(255), float(0))
        cv.SetImageROI(dst, roi)
        cv.AddS(dst, color, dst)
        cv.ResetImageROI(dst)
    else:  # fill the ROI with the area's average color, optionally quantized
        cv.SetImageROI(src.getBitmap(), roi)
        cv.SetImageROI(dst, roi)
        avg = cv.Avg(src.getBitmap())
        avg = (float(avg[0]), float(avg[1]), float(avg[2]), 0)
        if levels is not None:
            # Snap each channel down to its level bucket, rescaled by levels_f.
            avg = (int(avg[0] / levels) * levels_f,
                   int(avg[1] / levels) * levels_f,
                   int(avg[2] / levels) * levels_f, 0)
        cv.AddS(dst, avg, dst)
        cv.ResetImageROI(src.getBitmap())
        cv.ResetImageROI(dst)
def pixelize(self, block_size = 10, region = None, levels=None, doHue=False):
"""
**SUMMARY**
Pixelation blur, like the kind used to hide naughty bits on your favorite tv show.
**PARAMETERS**
* *block_size* - the blur block size in pixels, an integer is an square blur, a tuple is rectangular.
* *region* - do the blur in a region in format (x_position,y_position,width,height)
* *levels* - the number of levels per color channel. This makes the image look like an 8-bit video game.
* *doHue* - If this value is true we calculate the peak hue for the area, not the
average color for the area.
**RETURNS**
Returns the image with the pixelation blur applied.
**EXAMPLE**
>>> img = Image("lenna")
>>> result = img.pixelize( 16, (200,180,250,250), levels=4)
>>> img.show()
"""
if( isinstance(block_size, int) ):
block_size = (block_size,block_size)
retVal = self.getEmpty()
levels_f = 0.00
if( levels is not None ):
levels = 255/int(levels)
if(levels <= 1 ):
| |
<gh_stars>1-10
import os
import datetime
import time
import matplotlib.pyplot as plt
import numpy as np
import logging
import collections
import tensorflow as tf
import pandas as pd
from keras.utils import to_categorical
from tensorflow import keras
from tensorflow.keras import layers
# Pin TensorFlow to a single physical GPU (index 3 in PCI bus order).
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
# fix for memory problem: enable memory growth so TF allocates GPU memory
# on demand instead of grabbing it all up front.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        # Currently, memory growth needs to be the same across GPUs
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Memory growth must be set before GPUs have been initialized
        print(e)
def read_words(filename):
    """Return the file's whitespace-separated tokens, with each newline
    replaced by an '<eos>' token."""
    with tf.io.gfile.GFile(filename, "r") as handle:
        text = handle.read()
    return text.replace("\n", "<eos>").split()
def build_vocab(filename):
    """Map each distinct token in `filename` to an integer id.

    Ids are assigned by descending frequency (alphabetical within ties),
    so the most common token gets id 0.
    """
    tokens = read_words(filename)
    counts = collections.Counter(tokens)
    # Most frequent first; break count ties alphabetically.
    ordered = sorted(counts.items(), key=lambda item: (-item[1], item[0]))
    return {word: idx for idx, (word, _) in enumerate(ordered)}
def file_to_word_ids(filename, word_to_id):
    """Convert the file to a list of token ids, dropping out-of-vocabulary tokens."""
    ids = []
    for token in read_words(filename):
        if token in word_to_id:
            ids.append(word_to_id[token])
    return ids
def load_data():
    """Load the PTB splits as token-id lists plus vocabulary info.

    Reads ptb.{train,valid,test}.txt from the module-level `data_path`.
    Returns (train_data, valid_data, test_data, vocabulary_size,
    id-to-word dictionary).
    """
    train_path = os.path.join(data_path, "ptb.train.txt")
    valid_path = os.path.join(data_path, "ptb.valid.txt")
    test_path = os.path.join(data_path, "ptb.test.txt")
    # The vocabulary is built from the training split only; the other
    # splits are mapped through it (OOV tokens are dropped).
    word_to_id = build_vocab(train_path)
    train_data = file_to_word_ids(train_path, word_to_id)
    valid_data = file_to_word_ids(valid_path, word_to_id)
    test_data = file_to_word_ids(test_path, word_to_id)
    vocabulary = len(word_to_id)
    reversed_dictionary = {idx: word for word, idx in word_to_id.items()}
    print("======= Examples =======")
    print("Train data: ")
    print(train_data[:5])
    print("Vocabulary: ")
    print(vocabulary)
    print("Reversed dictionary: " + " ".join([reversed_dictionary[x] for x in train_data[:10]]))
    print("========================")
    return train_data, valid_data, test_data, vocabulary, reversed_dictionary
class KerasBatchGenerator(object):
    """Sequential batch generator for language-model training.

    Yields x of shape (batch_size, num_steps) holding token ids and y of
    shape (batch_size, num_steps, vocabulary) holding the same sequence
    shifted one step ahead, one-hot encoded.

    The three generator methods previously duplicated the same traversal
    logic; it now lives in a single private helper.
    """

    def __init__(self, data, num_steps, batch_size, vocabulary, skip_step=5):
        self.data = data                # flat list of token ids
        self.num_steps = num_steps      # sequence length per sample
        self.batch_size = batch_size
        self.vocabulary = vocabulary    # number of distinct token ids
        # this will track the progress of the batches sequentially through the
        # data set - once the data reaches the end of the data set it will reset
        # back to zero
        self.current_idx = 0
        # skip_step is the number of words which will be skipped before the next
        # batch is skimmed from the data set
        self.skip_step = skip_step

    def _fill_row(self, x, y, i):
        """Fill row i of x (inputs) and y (one-hot, shifted-by-one targets),
        advancing current_idx and wrapping when the window would run off
        the end of the data."""
        if self.current_idx + self.num_steps >= len(self.data):
            # reset the index back to the start of the data set
            self.current_idx = 0
        x[i, :] = self.data[self.current_idx:self.current_idx + self.num_steps]
        temp_y = self.data[self.current_idx + 1:self.current_idx + self.num_steps + 1]
        # One-hot encode via identity-matrix row lookup (equivalent to
        # keras.utils.to_categorical, without the keras dependency here).
        y[i, :, :] = np.eye(self.vocabulary)[temp_y]
        self.current_idx += self.skip_step

    def generate(self):
        """Yield (x, y) batches forever."""
        x = np.zeros((self.batch_size, self.num_steps))
        y = np.zeros((self.batch_size, self.num_steps, self.vocabulary))
        while True:
            for i in range(self.batch_size):
                self._fill_row(x, y, i)
            yield x, y

    def generateX(self):
        """Same traversal as generate(), but yields only the inputs."""
        x = np.zeros((self.batch_size, self.num_steps))
        y = np.zeros((self.batch_size, self.num_steps, self.vocabulary))
        while True:
            for i in range(self.batch_size):
                self._fill_row(x, y, i)
            yield x

    def generateY(self):
        """Same traversal as generate(), but yields only the one-hot targets."""
        x = np.zeros((self.batch_size, self.num_steps))
        y = np.zeros((self.batch_size, self.num_steps, self.vocabulary))
        while True:
            for i in range(self.batch_size):
                self._fill_row(x, y, i)
            yield y
def model_create(vocab_size, encode_dim, layer_dim, batch, seq, stateful, n_layers):
    """Build the language model: embedding -> stacked LSTMs -> decoder -> softmax.

    Input is (batch, seq) integer token ids; output is (batch, seq, vocab_size)
    next-token probabilities. A fixed batch_input_shape is required so
    stateful LSTMs can carry state across batches.
    """
    init = keras.initializers.random_uniform(-0.05, 0.05)
    net = keras.Sequential()
    # Embedding layer: vocabulary -> encode_dim vectors.
    net.add(layers.Embedding(input_dim=vocab_size, output_dim=encode_dim,
                             batch_input_shape=(batch, seq),
                             embeddings_initializer=init))
    # Stacked LSTM layers, each returning the full sequence.
    for _ in range(n_layers):
        net.add(layers.LSTM(layer_dim, return_sequences=True, stateful=stateful,
                            kernel_initializer=init, recurrent_initializer=init))
    # 'Decoding' layer back to the embedding dimension — see
    # https://arxiv.org/pdf/1708.02182v1.pdf 4.5 (independent embedding/hidden size).
    net.add(layers.Dense(encode_dim, activation='linear',
                         kernel_initializer=init))
    # Output distribution over the vocabulary.
    # TODO: try trainable=False with weights copied from the Embedding layer:
    # https://arxiv.org/pdf/1708.02182v1.pdf 4.4 (weight tying).
    net.add(layers.Dense(vocab_size, activation='softmax',
                         kernel_initializer=init))
    return net
def perplexity(y_true, y_pred):
    """
    The perplexity metric. Why isn't this part of Keras yet?!
    https://stackoverflow.com/questions/41881308/how-to-calculate-perplexity-of-rnn-in-tensorflow
    https://github.com/keras-team/keras/issues/8267
    """
    # cross_entropy = keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
    # perplexity = np.exp(np.mean(cross_entropy))
    # NOTE(review): SparseCategoricalCrossentropy expects integer class labels,
    # but the batch generators in this file yield one-hot targets — confirm the
    # caller passes integer ids, or this should be CategoricalCrossentropy.
    cce = keras.losses.SparseCategoricalCrossentropy()
    # Perplexity = exp(mean cross-entropy). np.exp on the returned loss relies
    # on TF2 eager tensors supporting the numpy protocol.
    perplexity = np.exp(cce(y_true, y_pred))
    return perplexity
if __name__ == "__main__":
start_time = time.time()
logging.basicConfig(level=logging.INFO)
logging.info('date {}'.format(datetime.datetime.now()))
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
os.mkdir("logs/" + timestamp)
log_dir = "logs/" + timestamp
# *******************************************************************************
# -------------- --- hyper parameters -----------------------------------------*
# *******************************************************************************
data_path = '../Language_Modeling_Generator/data'
epochs = 39
batch_size = 10 # recommendation for validation - 1
seq_len = 35 # length of input, and output
# shift = seq_len + 1 # shift = seq_len + 1 for stateful=True
layers_num = 1
hidden_layer_dim = 200
encoder_dim = 200
# optimizer:
clip_norm = 5
# lr_scheduler:
initial_learning_rate = 1.
decay_epoch = 6
decay = 0.833 # was 0.96
# dropout = 0 TODO: variational dropout
with open(log_dir + "/log.txt", "w") as file:
file.write("NLP - Log " + timestamp +
"\n non-regularized LSTM: \n" +
">> Epochs: " + str(epochs) + "\n" +
">> Batch Size: " + str(batch_size) + "\n" +
">> Sequence Length: " + str(seq_len) + "\n" +
">> Encoder Dimension: " + str(encoder_dim) + "\n" +
">> Hidden Layer (1st LSTM): " + str(hidden_layer_dim) + "\n" +
">> # LSTM layers: " + str(layers_num) + "\n" +
">> Initial Learning Rate: " + str(initial_learning_rate) + "\n" +
">> Learning Rate # Epoch Decay: " + str(decay_epoch) + "\n" +
">> Learning Rate Decay: x" + str(decay) + "\n" +
"\n\n")
# *******************************************************************************
# ---------------- data: Python lists of strings ------------------------------*
# *******************************************************************************
train_data, valid_data, test_data, vocabulary, reversed_dictionary = load_data()
# generate data - (batch_size x seq_len x vocabulary) is actually 1 batch input
train_data_generator = KerasBatchGenerator(train_data, seq_len, batch_size, vocabulary,
skip_step=seq_len)
valid_data_generator = KerasBatchGenerator(valid_data, seq_len, batch_size, vocabulary,
skip_step=seq_len)
test_data_generator = KerasBatchGenerator(test_data, seq_len, batch_size, vocabulary,
skip_step=seq_len)
train_steps_4epoch = len(train_data) // (batch_size * seq_len)
valid_steps_4epoch = len(valid_data) // (batch_size * seq_len)
test_steps_4epoch = len(test_data) // (batch_size * seq_len)
# *******************************************************************************
# ----------------- configure TF graph -----------------------------------------*
# *******************************************************************************
# Batch size is 1, the batch_size is taking in count in the data itself because we want to use the stateful
model = model_create(vocab_size=vocabulary, encode_dim=encoder_dim, layer_dim=hidden_layer_dim,
n_layers=layers_num, batch=batch_size, seq=seq_len, stateful=True)
model_test = model_create(vocab_size=vocabulary, encode_dim=encoder_dim, layer_dim=hidden_layer_dim,
n_layers=layers_num, batch=batch_size, seq=seq_len, stateful=True)
with open(log_dir + "/log.txt", "a") as file:
model.summary(print_fn=lambda x: file.write(x + '\n'))
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=(train_steps_4epoch * decay_epoch), # decay after 6 epochs (was 100000)
decay_rate=decay,
staircase=True)
_loss = tf.keras.losses.CategoricalCrossentropy()
_optimizer = tf.keras.optimizers.SGD(learning_rate=lr_schedule, clipnorm=clip_norm)
model.compile(
loss=_loss,
optimizer=_optimizer,
metrics=['categorical_crossentropy'] # TODO: ask about perplexity metric
)
# *******************************************************************************
# ------------------------- initialize ----------------------------------------*
# *******************************************************************************
loss_train = []
loss_valid = []
perplexity_train = []
perplexity_valid = []
raw_data = {'time': [],
'epoch': [],
'train loss': [],
'valid loss': [],
'train perplexity': [],
'valid perplexity': []}
columns = ['time', 'epoch', 'train loss', 'valid loss', 'train perplexity', 'valid perplexity']
# *******************************************************************************
# ---------------------- train and evaluate -----------------------------------*
# *******************************************************************************
# checkpoint = keras.callbacks.ModelCheckpoint('logs/' + timestamp + '/checkpoint',
# monitor='val_loss', save_best_only=True, mode='min') # TODO: uncomment
for epoch in range(epochs):
print("\nEPOCH " + str(epoch + 1) + "/" + str(epochs))
# train fit:
hist = model.fit(train_data_generator.generate(), steps_per_epoch=train_steps_4epoch, epochs=1,
validation_data=valid_data_generator.generate(), validation_steps=valid_steps_4epoch,
shuffle=False)
# TODO: UNCOMMENT - , callbacks=[checkpoint])
model.reset_states()
loss_train += hist.history['loss']
loss_valid += hist.history['val_loss']
perplexity_train += [np.exp(loss_train[-1])]
# valid prediction:
model_test.set_weights(model.get_weights())
pred = model.predict(valid_data_generator.generate())
perplex = perplexity(y_pred=pred, y_true=valid_data_generator.generateY())
perplexity_valid += [perplex]
raw_data['time'] += [str(datetime.timedelta(seconds=round(time.time() - start_time)))]
raw_data['epoch'] += [epoch + 1]
raw_data['train loss'] += [loss_train[-1]]
raw_data['valid loss'] += [loss_valid[-1]]
raw_data['train perplexity'] += [perplexity_train[-1]]
raw_data['valid perplexity'] += [perplexity_valid[-1].astype(float)]
df = pd.DataFrame(raw_data, columns=columns)
df.to_csv(log_dir + '/fit_' + timestamp + '.csv')
model.save(log_dir + '/model')
# *******************************************************************************
# ----------------------- plot ------------------------------------------------*
# *******************************************************************************
plt.rcParams['axes.facecolor'] = 'floralwhite'
plt.plot(range(epochs), perplexity_train, linewidth=1, color='blue', label='training')
plt.plot(range(epochs), perplexity_valid, linewidth=1, color='red', label='validation')
plt.grid(True, which='both', axis='both')
plt.title('Penn Treebank Corpus')
plt.xlabel('Epochs')
plt.ylabel('Perplexity')
plt.legend()
plt.savefig(log_dir + "/graph.png")
plt.show()
plt.clf()
m = min(10, epochs // 2)
plt.rcParams['axes.facecolor'] = 'floralwhite'
plt.plot(range(epochs)[m:], perplexity_train[m:], linewidth=1, color='blue', label='training')
plt.plot(range(epochs)[m:], perplexity_valid[m:], linewidth=1, color='red', label='validation')
plt.grid(True, which='both', | |
lr})
optimizer = optim.SGD(param_list, lr=lr, momentum=momentum, weight_decay=w_decay)
return optimizer
def train(model, criterion, optimizer, pos_feats, neg_feats, maxiter, in_layer='fc4'):
    """Fine-tune `model` for `maxiter` steps on positive/negative features.

    Each step draws `batch_pos` positive features and `batch_neg` negatives;
    negatives are hard-mined from a larger candidate pool of `batch_neg_cand`
    by keeping the candidates the current model scores highest as positive.
    Batch sizes, grad-clip value and logging flag come from the global `opts`
    dict; `criterion` combines (pos_score, neg_score) into a scalar loss.
    Updates `model` in place; returns None.
    """
    # Fixed seeds for reproducible index sampling.
    np.random.seed(123)
    torch.manual_seed(456)
    torch.cuda.manual_seed(789)
    model.train()
    batch_pos = opts['batch_pos']
    batch_neg = opts['batch_neg']
    batch_test = opts['batch_test']  # chunk size when scoring negative candidates
    batch_neg_cand = max(opts['batch_neg_cand'], batch_neg)
    # Pre-shuffled index streams, extended until every iteration has a
    # full batch of indices to consume.
    pos_idx = np.random.permutation(pos_feats.size(0))
    neg_idx = np.random.permutation(neg_feats.size(0))
    while (len(pos_idx) < batch_pos * maxiter):
        pos_idx = np.concatenate([pos_idx, np.random.permutation(pos_feats.size(0))])
    while (len(neg_idx) < batch_neg_cand * maxiter):
        neg_idx = np.concatenate([neg_idx, np.random.permutation(neg_feats.size(0))])
    pos_pointer = 0
    neg_pointer = 0
    for iter in range(maxiter):  # NOTE(review): shadows the builtin `iter`
        # select pos idx
        pos_next = pos_pointer + batch_pos
        pos_cur_idx = pos_idx[pos_pointer:pos_next]
        pos_cur_idx = pos_feats.new(pos_cur_idx).long()
        pos_pointer = pos_next
        # select neg idx (a candidate pool, larger than the final batch)
        neg_next = neg_pointer + batch_neg_cand
        neg_cur_idx = neg_idx[neg_pointer:neg_next]
        neg_cur_idx = neg_feats.new(neg_cur_idx).long()
        neg_pointer = neg_next
        # create batch
        batch_pos_feats = Variable(pos_feats.index_select(0, pos_cur_idx))
        batch_neg_feats = Variable(neg_feats.index_select(0, neg_cur_idx))
        # hard negative mining: score candidates in eval mode (in chunks of
        # batch_test) and keep the batch_neg highest-scoring ones.
        if batch_neg_cand > batch_neg:
            model.eval()
            for start in range(0, batch_neg_cand, batch_test):
                end = min(start + batch_test, batch_neg_cand)
                score = model(batch_neg_feats[start:end], in_layer=in_layer)
                if start == 0:
                    neg_cand_score = score.data[:, 1].clone()
                else:
                    neg_cand_score = torch.cat((neg_cand_score, score.data[:, 1].clone()), 0)
            _, top_idx = neg_cand_score.topk(batch_neg)
            batch_neg_feats = batch_neg_feats.index_select(0, Variable(top_idx))
            model.train()
        # forward
        pos_score = model(batch_pos_feats, in_layer=in_layer)
        neg_score = model(batch_neg_feats, in_layer=in_layer)
        # optimize: clip gradients by norm before stepping
        loss = criterion(pos_score, neg_score)
        model.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm(model.parameters(), opts['grad_clip'])
        optimizer.step()
        if opts['show_train']:
            print "Iter %d, Loss %.10f" % (iter, loss.data[0])
def run_ACT(img_list, init_bbox, gt=None, savefig_dir='', display=False):
    """Track the object defined by `init_bbox` through `img_list`.

    Combines an actor network (direct bbox regression per frame) with an
    MDNet-style classifier fallback (candidate sampling + scoring) and online
    model updates on collected positive/negative samples.

    Args:
        img_list: list of image file paths, first frame is the init frame.
        init_bbox: [x, y, w, h] of the target in the first frame.
        gt: optional ground-truth boxes, one row per frame (display only).
        savefig_dir: directory for saved figures (only used if savefig is on).
        display: show a live matplotlib view of the tracking.

    Returns:
        (result, result_bb, fps): raw boxes, regressed boxes, frames/sec.
    """
    # Init bbox
    np.random.seed(123)
    torch.manual_seed(456)
    torch.cuda.manual_seed(789)
    rate = init_bbox[2] / init_bbox[3]  # initial aspect ratio w/h
    target_bbox = np.array(init_bbox)
    result = np.zeros((len(img_list), 4))
    result_bb = np.zeros((len(img_list), 4))
    result[0] = target_bbox
    result_bb[0] = target_bbox
    success = 1
    # Init model
    model = MDNet(opts['model_path'])
    actor = Actor(opts['actor_path'])
    if opts['use_gpu']:
        model = model.cuda()
        actor = actor.cuda()
    model.set_learnable_params(opts['ft_layers'])
    # Init criterion and optimizer
    criterion = BinaryLoss()
    init_optimizer = set_optimizer(model, opts['lr_init'])
    update_optimizer = set_optimizer(model, opts['lr_update'])
    image = Image.open(img_list[0]).convert('RGB')
    # Train bbox regressor
    bbreg_examples = gen_samples(SampleGenerator('uniform', image.size, 0.3, 1.5, 1.1),
                                 target_bbox, opts['n_bbreg'], opts['overlap_bbreg'], opts['scale_bbreg'])
    bbreg_feats = forward_samples(model, image, bbreg_examples)
    bbreg = BBRegressor(image.size)
    bbreg.train(bbreg_feats, bbreg_examples, target_bbox)
    # Draw pos/neg samples around the init box (negatives: near + whole-image)
    pos_examples = gen_samples(SampleGenerator('gaussian', image.size, 0.1, 1.2),
                               target_bbox, opts['n_pos_init'], opts['overlap_pos_init'])
    neg_examples = np.concatenate([
        gen_samples(SampleGenerator('uniform', image.size, 1, 2, 1.1),
                    target_bbox, opts['n_neg_init'] // 2, opts['overlap_neg_init']),
        gen_samples(SampleGenerator('whole', image.size, 0, 1.2, 1.1),
                    target_bbox, opts['n_neg_init'] // 2, opts['overlap_neg_init'])])
    neg_examples = np.random.permutation(neg_examples)
    # Extract pos/neg features
    pos_feats = forward_samples(model, image, pos_examples)
    neg_feats = forward_samples(model, image, neg_examples)
    feat_dim = pos_feats.size(-1)
    # Initial training
    train(model, criterion, init_optimizer, pos_feats, neg_feats, opts['maxiter_init'])
    deta_flag, out_flag_first = init_actor(actor, image, target_bbox)
    # Init sample generators
    init_generator = SampleGenerator('gaussian', image.size, opts['trans_f'], 1, valid=False)
    sample_generator = SampleGenerator('gaussian', image.size, opts['trans_f'], opts['scale_f'], valid=False)
    pos_generator = SampleGenerator('gaussian', image.size, 0.1, 1.2)
    neg_generator = SampleGenerator('uniform', image.size, 1.5, 1.2)
    # Init pos/neg features for update
    pos_feats_all = [pos_feats[:opts['n_pos_update']]]
    neg_feats_all = [neg_feats[:opts['n_neg_update']]]
    data_frame = [0]  # frame indices whose features are cached in *_feats_all
    pos_score = forward_samples(model, image, np.array(init_bbox).reshape([1, 4]), out_layer='fc6')
    # Rolling windows (max 2 * update_lenth entries) of recently tracked
    # frames used to mine update samples when the tracker starts failing.
    img_learn = [image]
    pos_learn = [init_bbox]
    score_pos = [pos_score.cpu().numpy()[0][1]]
    frame_learn = [0]
    pf_frame = []  # frame indices where the actor prediction was rejected
    update_lenth = 10
    spf_total = 0
    # Display
    savefig = 0
    if display or savefig:
        dpi = 80.0
        figsize = (image.size[0] / dpi, image.size[1] / dpi)
        fig = plt.figure(frameon=False, figsize=figsize, dpi=dpi)
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        fig.add_axes(ax)
        im = ax.imshow(image)
        if gt is not None:
            gt_rect = plt.Rectangle(tuple(gt[0, :2]), gt[0, 2], gt[0, 3],
                                    linewidth=3, edgecolor="#00ff00", zorder=1, fill=False)
            ax.add_patch(gt_rect)
        rect = plt.Rectangle(tuple(result_bb[0, :2]), result_bb[0, 2], result_bb[0, 3],
                             linewidth=3, edgecolor="#ff0000", zorder=1, fill=False)
        ax.add_patch(rect)
        if display:
            plt.pause(.01)
            plt.draw()
        if savefig:
            fig.savefig(os.path.join(savefig_dir, '0000.jpg'), dpi=dpi)
    detetion = 0  # count of frames handled by the sampling fallback
    # Laplacian variance of the blurred crop — a sharpness/blur measure.
    imageVar_first = cv2.Laplacian(crop_image_blur(np.array(image), target_bbox), cv2.CV_64F).var()
    # Main loop
    for i in range(1, len(img_list)):
        tic = time.time()
        # Load image
        image = Image.open(img_list[i]).convert('RGB')
        # Only re-measure blur if the first frame was sharp; otherwise pin it.
        if imageVar_first > 200:
            imageVar = cv2.Laplacian(crop_image_blur(np.array(image), target_bbox), cv2.CV_64F).var()
        else:
            imageVar = 200
        # Estimate target bbox with the actor (local + global crops).
        img_g, img_l, out_flag = getbatch_actor(np.array(image), np.array(target_bbox).reshape([1, 4]))
        deta_pos = actor(img_l, img_g)
        deta_pos = deta_pos.data.clone().cpu().numpy()
        # NOTE(review): deta_pos[:, 2] appears to be the predicted scale
        # change (consumed by move_crop) — zeroed whenever it looks
        # unreliable; confirm against move_crop's contract. Scalar comparison
        # works because the batch holds a single row.
        if deta_pos[:, 2] > 0.05 or deta_pos[:, 2] < -0.05:
            deta_pos[:, 2] = 0
        if deta_flag or (out_flag and not out_flag_first):
            deta_pos[:, 2] = 0
        if len(pf_frame) and i == (pf_frame[-1] + 1):
            deta_pos[:, 2] = 0
        pos_ = np.round(move_crop(target_bbox, deta_pos, (image.size[1], image.size[0]), rate))
        # Verify the actor's box with the classifier (fc6 scores).
        r = forward_samples(model, image, np.array(pos_).reshape([1, 4]), out_layer='fc6')
        r = r.cpu().numpy()
        if r[0][1] > 0 and imageVar > 100:
            # Actor prediction accepted.
            target_bbox = pos_
            target_score = r[0][1]
            bbreg_bbox = pos_
            success = 1
            if not out_flag:
                fin_score = r[0][1]
                img_learn.append(image)
                pos_learn.append(target_bbox)
                score_pos.append(fin_score)
                frame_learn.append(i)
                while len(img_learn) > update_lenth * 2:
                    del img_learn[0]
                    del pos_learn[0]
                    del score_pos[0]
                    del frame_learn[0]
            result[i] = target_bbox
            result_bb[i] = bbreg_bbox
        else:
            # Actor rejected: fall back to sampling-based redetection.
            detetion += 1
            if len(pf_frame) == 0:
                pf_frame = [i]
            else:
                pf_frame.append(i)
            # Online update: harvest features from the recent reliable frames
            # (cache per-frame features in *_feats_all, keyed by data_frame).
            if (len(frame_learn) == update_lenth * 2 and data_frame[-1] not in frame_learn) or data_frame[-1] == 0:
                for num in range(max(0, img_learn.__len__() - update_lenth), img_learn.__len__()):
                    if frame_learn[num] not in data_frame:
                        gt_ = pos_learn[num]
                        image_ = img_learn[num]
                        pos_examples = np.round(gen_samples(pos_generator, gt_,
                                                            opts['n_pos_update'],
                                                            opts['overlap_pos_update']))
                        neg_examples = np.round(gen_samples(neg_generator, gt_,
                                                            opts['n_neg_update'],
                                                            opts['overlap_neg_update']))
                        pos_feats_ = forward_samples(model, image_, pos_examples)
                        neg_feats_ = forward_samples(model, image_, neg_examples)
                        pos_feats_all.append(pos_feats_)
                        neg_feats_all.append(neg_feats_)
                        data_frame.append(frame_learn[num])
                        if len(pos_feats_all) > 10:
                            del pos_feats_all[0]
                            del neg_feats_all[0]
                            del data_frame[0]
                    else:
                        # Features for this frame already cached — reuse them.
                        pos_feats_ = pos_feats_all[data_frame.index(frame_learn[num])]
                        neg_feats_ = neg_feats_all[data_frame.index(frame_learn[num])]
                    if num == max(0, img_learn.__len__() - update_lenth):
                        pos_feats = pos_feats_
                        neg_feats = neg_feats_
                    else:
                        pos_feats = torch.cat([pos_feats, pos_feats_], 0)
                        neg_feats = torch.cat([neg_feats, neg_feats_], 0)
                train(model, criterion, update_optimizer, pos_feats, neg_feats, opts['maxiter_update'])
            # Widen the search radius after a failure.
            if success:
                sample_generator.set_trans_f(opts['trans_f'])
            else:
                sample_generator.set_trans_f(opts['trans_f_expand'])
            if imageVar < 100:
                samples = gen_samples(init_generator, target_bbox, opts['n_samples'])
            else:
                samples = gen_samples(sample_generator, target_bbox, opts['n_samples'])
            # Early frames / out-of-view / extreme size drift: also sample
            # around a box restored to the initial size.
            if i < 20 or out_flag or ((init_bbox[2] * init_bbox[3]) > 1000 and (target_bbox[2] * target_bbox[3] / (init_bbox[2] * init_bbox[3]) > 2.5 or target_bbox[2] * target_bbox[3] / (init_bbox[2] * init_bbox[3]) < 0.4)):
                sample_generator.set_trans_f(opts['trans_f_expand'])
                samples_ = np.round(gen_samples(sample_generator, np.hstack([target_bbox[0:2] + target_bbox[2:4] / 2 - init_bbox[2:4] / 2, init_bbox[2:4]]), opts['n_samples']))
                samples = np.vstack([samples, samples_])
            # Score candidates and average the top 5.
            sample_scores = forward_samples(model, image, samples, out_layer='fc6')
            top_scores, top_idx = sample_scores[:, 1].topk(5)
            top_idx = top_idx.cpu().numpy()
            target_score = top_scores.mean()
            target_bbox = samples[top_idx].mean(axis=0)
            success = target_score > opts['success_thr']
            # Bbox regression
            if success:
                bbreg_samples = samples[top_idx]
                bbreg_feats = forward_samples(model, image, bbreg_samples)
                bbreg_samples = bbreg.predict(bbreg_feats, bbreg_samples)
                bbreg_bbox = bbreg_samples.mean(axis=0)
                img_learn.append(image)
                pos_learn.append(target_bbox)
                score_pos.append(target_score)
                frame_learn.append(i)
                while len(img_learn) > 2 * update_lenth:
                    del img_learn[0]
                    del pos_learn[0]
                    del score_pos[0]
                    del frame_learn[0]
            else:
                bbreg_bbox = target_bbox
            # Copy previous result at failure
            if not success:
                target_bbox = result[i - 1]
                bbreg_bbox = result_bb[i - 1]
            # Save result
            result[i] = target_bbox
            result_bb[i] = bbreg_bbox
        spf = time.time() - tic
        spf_total += spf
        # Display
        if display or savefig:
            im.set_data(image)
            if gt is not None:
                gt_rect.set_xy(gt[i, :2])
                gt_rect.set_width(gt[i, 2])
                gt_rect.set_height(gt[i, 3])
            rect.set_xy(result_bb[i, :2])
            rect.set_width(result_bb[i, 2])
            rect.set_height(result_bb[i, 3])
            if display:
                plt.pause(.01)
                plt.draw()
            if savefig:
                fig.savefig(os.path.join(savefig_dir, '%04d.jpg' % (i)), dpi=dpi)
        if display:
            if gt is None:
                print "Frame %d/%d, Score %.3f, Time %.3f" % \
                    (i, len(img_list), target_score, spf)
            else:
                if opts['show_train']:
                    print "Frame %d/%d, Overlap %.3f, Score %.3f, Time %.3f, box (%d,%d,%d,%d), var %d" % \
                        (i, len(img_list), overlap_ratio(gt[i], result_bb[i])[0], target_score, spf, target_bbox[0],
                         target_bbox[1], target_bbox[2], target_bbox[3], imageVar)
    fps = len(img_list) / spf_total
    return result, result_bb, fps
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--seq', default='DragonBaby', help='input seq')
parser.add_argument('-j', '--json', default='cfg.josn', help='input json')
parser.add_argument('-f', '--savefig', action='store_true')
parser.add_argument('-d', '--display', action='store_true')
args = parser.parse_args()
assert (args.seq != '' or args.json != '')
img_path = '../dataset'
savefig_dir = None
video = 'Car4'
display = 01
if video == 'all':
opts['show_train'] = 0
dataset_folder = os.path.join(img_path)
videos_list = [v for v in os.listdir(dataset_folder)]
videos_list.sort()
nv = np.size(videos_list)
speed_all = np.zeros(nv)
precisions_all = np.zeros(nv)
precisions_auc_all = np.zeros(nv)
ious_all = np.zeros(nv)
for i in range(nv):
gt, img_list, _, _ = _init_video(img_path, videos_list[i])
ground_th = np.zeros([gt.shape[0], 4])
for video_num in range(gt.shape[0]):
ground_th[video_num] = region_to_bbox(gt[video_num], False)
bboxes, result_bb, fps = run_ACT(img_list, gt[0], gt=gt, savefig_dir=savefig_dir, display=0)
_, precision, precision_auc, iou = _compile_results(gt, result_bb, 20)
speed_all[i] = fps
precisions_all[i] = precision
precisions_auc_all[i] = precision_auc
ious_all[i] = iou
print str(i) + ' -- ' + videos_list[i] + \
' -- Precision: ' + "%.2f" % precisions_all[i] + \
' -- IOU: ' + "%.2f" % ious_all[i] + \
' -- Speed: ' + "%.2f" % speed_all[i] + ' --'
mean_precision = np.mean(precisions_all)
mean_precision_auc = np.mean(precisions_auc_all)
mean_iou = np.mean(ious_all)
mean_speed = np.mean(speed_all)
print '-- Overall stats (averaged per frame) on ' + str(nv)
print ' -- Precision ' + "(20 px)" + ': ' + "%.2f" % mean_precision + \
' -- IOU: ' + "%.2f" % mean_iou + \
' | |
<reponame>erialcuw/stretchableCircuit<filename>stretchableCircuit.py
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 26 09:06:14 2021
@author: <NAME>
Perform calculations of the strain-dependent behavior of elastic transistors and simple circuits.
Classes:
transistor
inverter
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['font.family'] = 'Arial'
plt.rcParams['font.size'] = 18
plt.rcParams['axes.linewidth'] = 2
class transistor():
    """
    Define a stretchable transistor object.
    Instance Variables:
        flavor (string): Flavor of transistor: 'TFT', 'EDLT', or 'OECT'
        Ttype (string): Type of transistor: 'p-type' or 'n-type'
        W (float): Width of the semiconducting channel
            Arbitrary dimensional units, as long as it matches L
        L (float): Length of the semiconducting channel
            Arbitrary dimensional units, as long as it matches W
        d (float): Thickness of the semiconducting layer; only relevant for OECTs [m]
        C (float): For TFT and EDLT this is gate capacitance C_G [F/m^2]
            For OECT this is volumetric capacitance [C*, F/m^3]
        mu (float): Mobility [m^2/Vs]
        V_T (float): Threshold voltage [V]
        V_DD (float): Supply voltage [V]
        V_resolution (int): Resolution of the voltage sweep
        deformMode (string): Deformation axis options:
            (1) uniaxial along channel length 'uniaxial-L'
            (2) uniaxial along channel width 'uniaxial-W'
            (3) biaxial 'biaxial-WL'
        er (List[float]): List of extension ratios over which to calculate
            Note: This must include the undeformed state (er = 1)
    Functions:
        calculateStrainDependence(self)
        calculateI_SD(self)
        plotIVvsDeformation(self, er_plot)
        calculateRelativeI_SD(self)
        plotRelativeI_SD(self)
    """

    def __init__(self, flavor, Ttype, W, L, d, C, mu, V_T, V_DD, V_resolution, deformMode, er):
        # NOTE(review): invalid options are only reported via print(); the
        # object is still constructed with the bad value.
        if flavor != 'TFT' and flavor != 'EDLT' and flavor != 'OECT':
            print('Not a valid flavor of transistor (options: TFT, EDLT, or OECT).')
        self.flavor = flavor
        if Ttype != 'n' and Ttype != 'p':
            print('Not a valid type of transistor (options: n or p).')
        self.Ttype = Ttype
        self.W = W
        self.L = L
        self.d = d
        self.C = C
        self.mu = mu
        self.V_T = V_T
        self.V_DD = V_DD
        self.V_resolution = V_resolution
        self.V_range = np.linspace(0, V_DD, V_resolution)
        # n-type sweeps 0..V_DD; p-type sweeps -V_DD..0. I_SD_maxidx is the
        # V_G index of the strongest on-state (largest |V_G|): last index for
        # n-type, index 0 for p-type.
        if self.Ttype == 'n':
            self.V_G = self.V_range
            self.V_SD = self.V_range
            self.I_SD_maxidx = len(self.V_G) - 1
        elif self.Ttype == 'p':
            self.V_G = self.V_range - V_DD
            self.V_SD = self.V_range - V_DD
            self.I_SD_maxidx = 0
        # Index of the V_SD sample closest to |V_DD| (deep saturation point).
        self.V_SD_satidx = np.abs(np.abs(self.V_SD) - np.abs(self.V_DD)).argmin()
        if deformMode != 'uniaxial-L' and deformMode != 'uniaxial-W' and deformMode != 'biaxial-WL':
            print('Not a valid deformation mode (options: uniaxial-L, uniaxial-W, biaxial-WL).')
        self.deformMode = deformMode
        if 1 not in er:
            print('The range of extension ratios to be modeled must include 1 (the undeformed state).')
        self.er = np.asarray(er)
        # np.where returns a tuple; indexing I_SD with it yields a size-1 array.
        self.er_1_idx = np.where(self.er == 1)
        # Define the extension ratios in all three dimensions based on deformation mode.
        # Uniaxial: transverse dimensions contract as er**(-1/2), so
        # erL * erW * ert = 1 (volume-conserving deformation).
        if self.deformMode == 'uniaxial-L':
            self.erL = self.er
            self.erW = 1 / (self.erL ** (1 / 2))
            self.ert = 1 / (self.erL ** (1 / 2))
        elif self.deformMode == 'uniaxial-W':
            self.erW = self.er
            self.erL = 1 / (self.erW ** (1 / 2))
            self.ert = 1 / (self.erW ** (1 / 2))
        elif self.deformMode == 'biaxial-WL':
            # Biaxial: thickness carries all the contraction, ert = 1/(erL*erW),
            # tabulated on a (W, L) grid.
            self.erL = self.er
            self.erW = self.er
            self.ert = np.zeros((len(self.erW), len(self.erL)))
            for i in range(len(self.erW)):
                for j in range(len(self.erL)):
                    self.ert[i, j] = 1 / (self.erL[j] * self.erW[i])

    def calculateStrainDependence(self):
        """Define the constant beta, the strain-dependent V_T, and the
        strain-dependent C based on transistor type.

        TFT: V_T scales with dielectric thickness (V_T * ert) and C inversely
        (C / ert). EDLT and OECT: V_T and C are taken as strain-independent
        (broadcast to the shape of ert). geo_er is the strain-dependent
        geometric prefactor (W/L for TFT/EDLT, (W/L)*d for OECT).
        """
        if self.flavor == 'TFT':
            self.beta = (self.W / self.L) * self.mu * self.C
            self.V_T_er = self.V_T * self.ert
            self.C_er = self.C * (self.ert ** (-1))
            if self.deformMode == 'uniaxial-L' or self.deformMode == 'uniaxial-W':
                self.geo_er = (self.W * self.erW) / (self.L * self.erL)
            elif self.deformMode == 'biaxial-WL':
                self.geo_er = np.zeros((len(self.erW), len(self.erL)))
                for i in range(len(self.erW)):
                    for j in range(len(self.erL)):
                        self.geo_er[i, j] = (self.W * self.erW[i]) / (self.L * self.erL[j])
        elif self.flavor == 'EDLT':
            self.beta = (self.W / self.L) * self.mu * self.C
            self.V_T_er = self.V_T * np.ones(np.shape(self.ert))
            self.C_er = self.C * np.ones(np.shape(self.ert))
            if self.deformMode == 'uniaxial-L' or self.deformMode == 'uniaxial-W':
                self.geo_er = (self.W * self.erW) / (self.L * self.erL)
            elif self.deformMode == 'biaxial-WL':
                self.geo_er = np.zeros((len(self.erW), len(self.erL)))
                for i in range(len(self.erW)):
                    for j in range(len(self.erL)):
                        self.geo_er[i, j] = (self.W * self.erW[i]) / (self.L * self.erL[j])
        elif self.flavor == 'OECT':
            # OECT uses volumetric capacitance, so the channel thickness d
            # enters both beta and the geometric factor.
            self.beta = (self.W / self.L) * self.d * self.mu * self.C
            self.V_T_er = self.V_T * np.ones(np.shape(self.ert))
            self.C_er = self.C * np.ones(np.shape(self.ert))
            if self.deformMode == 'uniaxial-L' or self.deformMode == 'uniaxial-W':
                self.geo_er = ((self.W * self.erW) / (self.L * self.erL)) * (self.d * self.ert)
            elif self.deformMode == 'biaxial-WL':
                self.geo_er = np.zeros((len(self.erW), len(self.erL)))
                for i in range(len(self.erW)):
                    for j in range(len(self.erL)):
                        self.geo_er[i, j] = ((self.W * self.erW[i]) / (self.L * self.erL[j])) * (self.d * self.ert[i, j])

    def calculateI_SD(self):
        """Calculate the strain-dependent source-drain current I_SD.

        Square-law model with three regimes per (V_SD, V_G, strain) point:
        off (|V_G| < |V_T|), linear, and saturation. Result shape:
        (V_SD, V_G, er) for uniaxial modes, (V_SD, V_G, erW, erL) for biaxial.
        """
        if self.deformMode == 'uniaxial-L' or self.deformMode == 'uniaxial-W':
            self.I_SD = np.zeros((np.size(self.V_SD), np.size(self.V_G), np.size(self.erL)))
            for i in range(np.size(self.erL)):
                for j in range(np.size(self.V_G)):
                    for k in range(np.size(self.V_SD)):
                        if abs(self.V_G[j]) < abs(self.V_T_er[i]):
                            self.I_SD[k, j, i] = 0
                        elif abs(self.V_SD[k]) < abs(self.V_G[j] - self.V_T_er[i]):  # linear regime
                            self.I_SD[k, j, i] = self.geo_er[i] * self.C_er[i] * self.mu * (((self.V_G[j] - self.V_T_er[i]) * self.V_SD[k]) - ((self.V_SD[k] ** 2) / 2))
                        elif abs(self.V_SD[k]) >= abs(self.V_G[j] - self.V_T_er[i]):  # saturation regime
                            self.I_SD[k, j, i] = self.geo_er[i] * self.C_er[i] * self.mu * (((self.V_G[j] - self.V_T_er[i]) ** 2) / 2)
        elif self.deformMode == 'biaxial-WL':
            self.I_SD = np.zeros((np.size(self.V_SD), np.size(self.V_G), np.size(self.erW), np.size(self.erL)))
            for h in range(np.size(self.erL)):
                for i in range(np.size(self.erW)):
                    for j in range(np.size(self.V_G)):
                        for k in range(np.size(self.V_SD)):
                            if abs(self.V_G[j]) < abs(self.V_T_er[i, h]):
                                self.I_SD[k, j, i, h] = 0
                            elif abs(self.V_SD[k]) < abs(self.V_G[j] - self.V_T_er[i, h]):  # linear regime
                                self.I_SD[k, j, i, h] = self.geo_er[i, h] * self.C_er[i, h] * self.mu * (((self.V_G[j] - self.V_T_er[i, h]) * self.V_SD[k]) - ((self.V_SD[k] ** 2) / 2))
                            elif abs(self.V_SD[k]) >= abs(self.V_G[j] - self.V_T_er[i, h]):  # saturation regime
                                self.I_SD[k, j, i, h] = self.geo_er[i, h] * self.C_er[i, h] * self.mu * (((self.V_G[j] - self.V_T_er[i, h]) ** 2) / 2)

    def plotIVvsDeformation(self, er_plot):
        """Plot a series of I-V for a single transistor vs. deformation.

        Output curves are taken at the strongest on-state gate voltage
        (I_SD_maxidx). Only uniaxial modes are supported.
        Inputs:
            er_plot (List[float]): list of the extension ratios to plot.
                Note: These values should be contained in your extension ratio sweep.
        """
        if self.deformMode == 'uniaxial-L' or self.deformMode == 'uniaxial-W':
            # Map each requested extension ratio to the nearest swept value.
            idx_er = np.zeros((np.size(er_plot)))
            for i in range((np.size(er_plot))):
                idx_er[i] = (np.abs(self.er - er_plot[i])).argmin()
            fig = plt.figure(figsize=(5, 5))
            ax = fig.add_axes([0, 0, 1, 1])
            ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
            ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
            ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
            ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
            ax.set_xlabel('$V_\mathrm{SD}$ (V)', labelpad=10)
            ax.set_ylabel('$I_\mathrm{SD}$ (A)', labelpad=10)
            cm = mpl.cm.get_cmap('viridis')
            for i in range((np.size(er_plot, 0))):
                ax.plot(self.V_SD, self.I_SD[:, self.I_SD_maxidx, int(idx_er[i])],
                        linewidth=2,
                        color=cm(1. * i / np.size(er_plot)),
                        label=('$\lambda$ = ' + str(er_plot[i])))
            plt.legend(bbox_to_anchor=(1.02, 1.0), loc='upper left')
            plt.show()
            #plt.savefig('Final_Plot.png', dpi=300, transparent=False, bbox_inches='tight')
        else:
            print('Currently unsupported.')

    def calculateRelativeI_SD(self):
        """Calculate the relative source-drain current (I_SD/I_SD(er=1)) in the saturation regime.
        The value of V_G that gives the highest magnitude I_SD is used.
        """
        if self.deformMode == 'uniaxial-L' or self.deformMode == 'uniaxial-W':
            self.I_SDrel = np.zeros(np.size(self.erL))
            # er_1_idx is a np.where tuple, so this is a size-1 fancy-indexed array.
            self.I_SD_undeformed = self.I_SD[self.V_SD_satidx, self.I_SD_maxidx, self.er_1_idx]
            for i in range(np.size(self.erL)):
                self.I_SDrel[i] = self.I_SD[self.V_SD_satidx, self.I_SD_maxidx, i] / self.I_SD_undeformed
        elif self.deformMode == 'biaxial-WL':
            self.I_SDrel = np.zeros((np.size(self.erW), np.size(self.erL)))
            self.I_SD_undeformed = self.I_SD[self.V_SD_satidx, self.I_SD_maxidx, self.er_1_idx, self.er_1_idx]
            for i in range(np.size(self.erL)):
                for j in range(np.size(self.erW)):
                    self.I_SDrel[j, i] = self.I_SD[self.V_SD_satidx, self.I_SD_maxidx, j, i] / self.I_SD_undeformed

    def plotRelativeI_SD(self):
        """Plot relative source-drain current in the saturation regime vs. extension ratio.

        Uniaxial: line plot vs. lambda. Biaxial: heat map over (lambda_L, lambda_W).
        """
        if self.deformMode == 'uniaxial-L' or self.deformMode == 'uniaxial-W':
            fig = plt.figure(figsize=(5, 5))
            ax = fig.add_axes([0, 0, 1, 1])
            ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
            ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
            ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
            ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
            ax.set_xlabel('Extension Ratio, $\lambda$', labelpad=10)
            ax.set_ylabel('Relative Current, $I_\mathrm{SD}/I_\mathrm{SD}^\mathrm{initial}$', labelpad=10)
            ax.plot(self.er, self.I_SDrel, linewidth=2)
            plt.show()
        elif self.deformMode == 'biaxial-WL':
            fig = plt.figure(figsize=(5, 5))
            ax = fig.add_axes([0, 0, 1, 1])
            ax.set_xlabel('$\lambda_L$', labelpad=10)
            ax.set_ylabel('$\lambda_W$', labelpad=10)
            im = ax.imshow(self.I_SDrel, interpolation='none', cmap=mpl.cm.get_cmap('viridis'),
                           origin='lower', extent=[np.min(self.er), np.max(self.er), np.min(self.er), np.max(self.er)])
            fig.colorbar(im, ax=ax)
            plt.show()
class inverter():
"""
Define a stretchable inverter object.
Instance Variables:
ntype (transistor): The n-type transistor in the inverter
ptype (transistor): The p-type transistor in the inverter
Functions:
buildVTC(self)
plotLoadCurves(self, V_in_LCplot, er_LCplot)
plotLoadCurves_alternative(self, V_in_LCplot, er_LCplot)
plotVTC(self, er_plot)
plotVTCeye(self, er_plot)
"""
def __init__(self, ntype, ptype):
    """Pair an n-type and a p-type transistor into an inverter.

    Args:
        ntype (transistor): the n-type (pull-down) transistor.
        ptype (transistor): the p-type (pull-up) transistor.
    """
    self.ntype = ntype
    self.ptype = ptype
    # NOTE(review): mismatched pairs are only reported via print(), not
    # rejected — the inverter is still constructed.
    if np.array_equal(ntype.er, ptype.er) == False:
        print('The n-type and p-type transistors must have identical arrays of extension ratios (er).')
    if ntype.V_DD != ptype.V_DD:
        print('The n-type and p-type transistors must have identical supply voltages (V_DD).')
def buildVTC(self):
    """
    Build the voltage transfer curve (VTC) by finding where the n- and p-type load curves cross.
    The accuracy of this calculation depends on your voltage scan resolution, V_resolution.

    For each input voltage (V_G index j) and each strain point, scans V_SD
    from the bottom and records the first sample where the n-type current
    meets or exceeds the p-type current; that V_SD is taken as V_out.
    Fills self.V_out_cross with shape (V_G, er) for uniaxial modes and
    (V_G, erW, erL) for biaxial mode.
    """
    if (self.ntype.deformMode == 'uniaxial-L' or self.ntype.deformMode == 'uniaxial-W') and (self.ptype.deformMode == 'uniaxial-L' or self.ptype.deformMode == 'uniaxial-W'):
        self.V_out_cross = np.zeros((np.size(self.ntype.V_G, 0), np.size(self.ntype.er, 0)))
        for i in range(np.size(self.ntype.er)):
            for j in range(np.size(self.ntype.V_G)):
                for k in range(np.size(self.ntype.V_SD)):
                    if self.ntype.I_SD[k, j, i] >= self.ptype.I_SD[k, j, i]:
                        # First crossing found — stop scanning this V_G.
                        self.V_out_cross[j, i] = self.ntype.V_SD[k]
                        break
    elif (self.ntype.deformMode == 'biaxial-WL') and (self.ptype.deformMode == 'biaxial-WL'):
        self.V_out_cross = np.zeros((np.size(self.ntype.V_G), np.size(self.ntype.er), np.size(self.ntype.er)))
        for h in range(np.size(self.ntype.er)):
            for i in range(np.size(self.ntype.er)):
                for j in range(np.size(self.ntype.V_G)):
                    for k in range(np.size(self.ntype.V_SD, 0)):
                        if self.ntype.I_SD[k, j, i, h] >= self.ptype.I_SD[k, j, i, h]:
                            self.V_out_cross[j, i, h] = self.ntype.V_SD[k]
                            break
    else:
        print('This deformation scenario is unsupported.')
def plotLoadCurves(self, V_in_LCplot, er_LCplot):
"""Plot a series of load curves.
Inputs:
V_in_LCplot (List[float]): list of the input voltages to plot
Note: These values should be contained in your voltage sweep.
"""
if (self.ntype.deformMode == 'uniaxial-L' or self.ntype.deformMode == 'uniaxial-W') and (self.ptype.deformMode == 'uniaxial-L' or self.ptype.deformMode == 'uniaxial-W'):
idx_er = (np.abs(self.ntype.er - er_LCplot)).argmin()
idx_n = np.zeros((np.size(V_in_LCplot)))
idx_p = np.zeros((np.size(V_in_LCplot)))
for i in range((np.size(V_in_LCplot))):
idx_n[i] = (np.abs(self.ntype.V_G - V_in_LCplot[i])).argmin()
for i in range((np.size(V_in_LCplot))):
idx_p[i] = (np.abs(self.ptype.V_G - (V_in_LCplot[i]-self.ptype.V_DD))).argmin()
fig = plt.figure(figsize=(5, 5))
ax = fig.add_axes([0, 0, 1, 1])
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
ax.set_xlim(0, self.ntype.V_DD)
ax.set_ylim(0, 1.05*np.max(self.ntype.I_SD[:,:,idx_er]))
ax.set_xlabel('$V_\mathrm{SD}$ (V)', labelpad=10)
ax.set_ylabel('$I_\mathrm{SD}$ (A)', labelpad=10)
cm = mpl.cm.get_cmap('viridis')
for i in range((np.size(V_in_LCplot))):
ax.plot(self.ntype.V_SD, self.ntype.I_SD[:,int(idx_n[i]),idx_er],
linewidth=2, color=cm(1.*i/np.size(V_in_LCplot)),
label=('$V_\mathrm{in}$ = '+str(V_in_LCplot[i])+' V'))
ax.plot(self.ntype.V_SD, self.ptype.I_SD[:,int(idx_p[i]),idx_er],
linewidth=2, | |
<filename>kipoi/cli/main.py<gh_stars>0
"""Main CLI commands
"""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import json
import sys
import os
from kipoi_utils import parse_json_file_str_or_arglist, cd, makedir_exist_ok, compare_numpy_dict
import kipoi # for .config module
from kipoi.cli.parser_utils import add_model, add_source, add_dataloader, add_dataloader_main, file_exists, dir_exists
from kipoi.sources import list_subcomponents
from ..data import numpy_collate_concat
# import h5py
# import six
import numpy as np
import pandas as pd
from tqdm import tqdm
from collections import OrderedDict
import logging
from kipoi import writers
import ast
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def prepare_batch(dl_batch, pred_batch,
                  keep_inputs=False):
    """Attach model predictions to a dataloader batch.

    Stores ``pred_batch`` under the ``"preds"`` key of ``dl_batch`` (the dict
    is mutated in place) and, unless ``keep_inputs`` is true, removes the
    ``"inputs"`` and ``"targets"`` entries if present.

    Returns:
        The same (mutated) ``dl_batch`` dict.
    """
    dl_batch["preds"] = pred_batch
    if keep_inputs:
        return dl_batch
    for key in ("inputs", "targets"):
        dl_batch.pop(key, None)
    return dl_batch
def cli_test(command, raw_args):
    """Runs test on the model

    Loads the model from a directory source, runs prediction on its bundled
    example files, and (unless --skip-expect) compares the predictions
    batch-by-batch against an expected-predictions HDF5 file, exiting with
    status 1 on the first mismatch.
    """
    assert command == "test"
    # setup the arg-parsing
    parser = argparse.ArgumentParser('kipoi {}'.format(command),
                                     description='script to test model zoo submissions. Example usage:\n'
                                     '`kipoi test model/directory`, where `model/directory` is the '
                                     'path to a directory containing a model.yaml file.')
    add_model(parser, source="dir")
    parser.add_argument('--batch_size', type=int, default=32,
                        help='Batch size to use in prediction')
    parser.add_argument("-o", "--output", default=None, required=False,
                        help="Output hdf5 file")
    parser.add_argument("-s", "--skip-expect", action='store_true',
                        help="Skip validating the expected predictions if test.expect field is specified under model.yaml")
    parser.add_argument("-e", "--expect", default=None,
                        help="File path to the hdf5 file of predictions produced by kipoi test -o file.h5 "
                        "or kipoi predict -o file.h5 --keep_inputs. Overrides test.expect in model.yaml")
    args = parser.parse_args(raw_args)
    # --------------------------------------------
    mh = kipoi.get_model(args.model, args.source)

    if not mh._sufficient_deps(mh.dependencies):
        # model requirements should be installed
        logger.warning("Required package '{0}' for model type: {1} is not listed in the dependencies".
                       format(mh.MODEL_PACKAGE, mh.type))

    # Load the test files from model source
    mh.pipeline.predict_example(batch_size=args.batch_size, output_file=args.output)

    # Expected-prediction validation only runs when an expectation exists,
    # it was not skipped, and no output file was requested.
    if (mh.test.expect is not None or args.expect is not None) \
            and not args.skip_expect and args.output is None:
        if args.expect is not None:
            # `expect` specified from the CLI
            expect = args.expect
        else:
            # `expect` taken from model.yaml
            if isinstance(mh.test.expect, kipoi.specs.RemoteFile):
                # download the file
                output_dir = kipoi.get_source(args.source).get_model_download_dir(args.model)
                makedir_exist_ok(output_dir)
                mh.test.expect = mh.test.expect.get_file(os.path.join(output_dir, 'test.expect.h5'))
            expect = mh.test.expect
        logger.info('Testing if the predictions match the expected ones in the file: {}'.format(expect))
        logger.info('Desired precision (number of matching decimal places): {}'.format(mh.test.precision_decimal))

        # iteratively load the expected file
        expected = kipoi.readers.HDF5Reader(expect)
        expected.open()
        it = expected.batch_iter(batch_size=args.batch_size)
        for i, batch in enumerate(tqdm(it, total=len(expected) // args.batch_size)):
            if i == 0 and ('inputs' not in batch or 'preds' not in batch):
                raise ValueError("test.expect file requires 'inputs' and 'preds' "
                                 "to be specified. Available keys: {}".format(list(expected)))
            pred_batch = mh.predict_on_batch(batch['inputs'])
            # compare to the predictions
            # import ipdb
            # ipdb.set_trace()
            try:
                compare_numpy_dict(pred_batch, batch['preds'], exact=False, decimal=mh.test.precision_decimal)
            except Exception as e:
                logger.error("Model predictions don't match the expected predictions."
                             "expected: {}\nobserved: {}. Exception: {}".format(batch['preds'], pred_batch, e))
                # close the reader before aborting the process
                expected.close()
                sys.exit(1)
        expected.close()
        logger.info('All predictions match')
    logger.info('Successfully ran test_predict')
def cli_get_example(command, raw_args):
    """Downloads the example files to the desired directory

    Resolves the model's default dataloader, downloads its example files to
    --output, and prints the dataloader kwargs (as JSON) needed to use them.
    """
    assert command == "get-example"
    # setup the arg-parsing
    parser = argparse.ArgumentParser('kipoi {}'.format(command),
                                     description='Get example files')
    add_model(parser, source="kipoi")
    parser.add_argument("-o", "--output", default="example", required=False,
                        help="Output directory where to store the examples. Default: 'example'")
    args = parser.parse_args(raw_args)
    # --------------------------------------------
    md = kipoi.get_model_descr(args.model, args.source)
    src = kipoi.get_source(args.source)

    # load the default dataloader
    if isinstance(md.default_dataloader, kipoi.specs.DataLoaderImport):
        # imported dataloaders must be resolved from within the model dir
        with cd(src.get_model_dir(args.model)):
            dl_descr = md.default_dataloader.get()
    else:
        # load from directory
        # attach the default dataloader already to the model
        dl_descr = kipoi.get_dataloader_descr(os.path.join(args.model, md.default_dataloader),
                                              source=args.source)

    kwargs = dl_descr.download_example(output_dir=args.output, dry_run=False)

    logger.info("Example files downloaded to: {}".format(args.output))
    logger.info("use the following dataloader kwargs:")
    print(json.dumps(kwargs))
def cli_preproc(command, raw_args):
    """Preprocess:
    - Run the dataloader and store the results to a (hdf5) file

    Iterates the dataloader in batches and writes each batch to the --output
    HDF5 file; the first batch is validated against the dataloader's declared
    output schema (a mismatch only logs a warning).
    """
    assert command == "preproc"
    parser = argparse.ArgumentParser('kipoi {}'.format(command),
                                     description='Run the dataloader and save the output to an hdf5 file.')
    add_dataloader_main(parser, with_args=True)
    parser.add_argument('--batch_size', type=int, default=32,
                        help='Batch size to use in data loading')
    parser.add_argument("-n", "--num_workers", type=int, default=0,
                        help="Number of parallel workers for loading the dataset")
    parser.add_argument("-o", "--output", required=True,
                        help="Output hdf5 file")
    args = parser.parse_args(raw_args)
    dataloader_kwargs = parse_json_file_str_or_arglist(args.dataloader_args, parser)

    dir_exists(os.path.dirname(args.output), logger)
    # --------------------------------------------
    Dataloader = kipoi.get_dataloader_factory(args.dataloader, args.source)
    dataloader_kwargs = kipoi.pipeline.validate_kwargs(Dataloader, dataloader_kwargs)
    dataloader = Dataloader(**dataloader_kwargs)

    it = dataloader.batch_iter(batch_size=args.batch_size, num_workers=args.num_workers)
    logger.info("Writing to the hdf5 file: {0}".format(args.output))
    writer = writers.HDF5BatchWriter(file_path=args.output)

    for i, batch in enumerate(tqdm(it)):
        # check that the first batch was indeed correct
        if i == 0 and not Dataloader.get_output_schema().compatible_with_batch(batch):
            logger.warning("First batch of data is not compatible with the dataloader schema.")
        writer.batch_write(batch)
    writer.close()
    logger.info("Done!")
def cli_predict(command, raw_args):
    """CLI interface to predict

    Loads the model and its dataloader, iterates over batches, runs
    `predict_on_batch` (or `predict_activation_on_batch` when --layer is
    given) and streams predictions to the requested output file(s). With
    --singularity, re-executes the command inside the appropriate container
    instead.
    """
    assert command == "predict"
    parser = argparse.ArgumentParser('kipoi {}'.format(command),
                                     description='Run the model prediction.')
    add_model(parser)
    add_dataloader(parser, with_args=True)
    parser.add_argument('--batch_size', type=int, default=32,
                        help='Batch size to use in prediction')
    parser.add_argument("-n", "--num_workers", type=int, default=0,
                        help="Number of parallel workers for loading the dataset")
    parser.add_argument("-k", "--keep_inputs", action='store_true',
                        help="Keep the inputs in the output file. ")
    parser.add_argument("-l", "--layer",
                        help="Which output layer to use to make the predictions. If specified," +
                        "`model.predict_activation_on_batch` will be invoked instead of `model.predict_on_batch`")
    parser.add_argument("--singularity", action='store_true',
                        help="Run `kipoi predict` in the appropriate singularity container. "
                             "Containters will get downloaded to ~/.kipoi/envs/ or to "
                             "$SINGULARITY_CACHEDIR if set")
    parser.add_argument('-o', '--output', required=True, nargs="+",
                        help="Output files. File format is inferred from the file path ending. Available file formats are: " +
                        ", ".join(["." + k for k in writers.FILE_SUFFIX_MAP]))
    args = parser.parse_args(raw_args)
    dataloader_kwargs = parse_json_file_str_or_arglist(args.dataloader_args, parser)

    # setup the files: validate every output suffix up front
    if not isinstance(args.output, list):
        args.output = [args.output]
    for o in args.output:
        ending = o.split('.')[-1]
        if ending not in writers.FILE_SUFFIX_MAP:
            logger.error("File ending: {0} for file {1} not from {2}".
                         format(ending, o, writers.FILE_SUFFIX_MAP))
            sys.exit(1)
        dir_exists(os.path.dirname(o), logger)

    # singularity_command
    if args.singularity:
        from kipoi.cli.singularity import singularity_command
        logger.info("Running kipoi predict in the singularity container")
        # Drop the singularity flag
        raw_args = [x for x in raw_args if x != '--singularity']
        singularity_command(['kipoi', command] + raw_args,
                            args.model,
                            dataloader_kwargs,
                            output_files=args.output,
                            source=args.source,
                            dry_run=False)
        return None
    # --------------------------------------------
    # load model & dataloader
    model = kipoi.get_model(args.model, args.source)

    if args.dataloader is not None:
        Dl = kipoi.get_dataloader_factory(args.dataloader, args.dataloader_source)
    else:
        Dl = model.default_dataloader

    dataloader_kwargs = kipoi.pipeline.validate_kwargs(Dl, dataloader_kwargs)
    dl = Dl(**dataloader_kwargs)

    # setup batching
    it = dl.batch_iter(batch_size=args.batch_size,
                       num_workers=args.num_workers)

    # Setup the writers
    use_writers = []
    for output in args.output:
        writer = writers.get_writer(output, metadata_schema=dl.get_output_schema().metadata)
        if writer is None:
            # BUG FIX: this branch previously logged the stale `ending`
            # variable left over from the validation loop above and called
            # sys.exit() (exit status 0). Report the offending output file
            # and exit non-zero, consistent with the validation loop.
            logger.error("Unknown file format: {0}".format(output))
            sys.exit(1)
        else:
            use_writers.append(writer)
    output_writers = writers.MultipleBatchWriter(use_writers)

    # Loop through the data, make predictions, save the output
    for i, batch in enumerate(tqdm(it)):
        # validate the data schema in the first iteration
        if i == 0 and not Dl.get_output_schema().compatible_with_batch(batch):
            logger.warning("First batch of data is not compatible with the dataloader schema.")

        # make the prediction
        if args.layer is None:
            pred_batch = model.predict_on_batch(batch['inputs'])
        else:
            pred_batch = model.predict_activation_on_batch(batch['inputs'], layer=args.layer)

        # write out the predictions, metadata (, inputs, targets)
        output_batch = prepare_batch(batch, pred_batch, keep_inputs=args.keep_inputs)
        output_writers.batch_write(output_batch)
    output_writers.close()
    logger.info('Done! Predictions stored in {0}'.format(",".join(args.output)))
def cli_pull(command, raw_args):
    """Download the directory associated with a model (or every model in
    a model group)."""
    assert command == "pull"
    parser = argparse.ArgumentParser('kipoi {}'.format(command),
                                     description="Downloads the directory" +
                                     " associated with the model.")
    parser.add_argument('model', help='Model name. '
                        '<model> can also refer to a model-group - e.g. if you '
                        'specify MaxEntScan then the dependencies\n'
                        'for MaxEntScan/5prime and MaxEntScan/3prime will be installed')
    add_source(parser)
    args = parser.parse_args(raw_args)
    source_obj = kipoi.config.get_source(args.source)
    # A group name may expand to several concrete models; pull each of them.
    matching_models = list_subcomponents(args.model, args.source, which='model')
    n_found = len(matching_models)
    if n_found == 0:
        logger.error("Model {0} not found in source {1}".format(args.model, args.source))
        sys.exit(1)
    if n_found > 1:
        logger.info("Found {0} models under the model name: {1}. Pulling all of them".
                    format(n_found, args.model))
    for model_name in matching_models:
        source_obj.pull_model(model_name)
def cli_init(command, raw_args, **kwargs):
    """Initialize a new model directory from the cookiecutter template."""
    assert command == "init"
    logger.info("Initializing a new Kipoi model")
    # Interactive intro printed before cookiecutter starts asking questions
    intro_lines = (
        "\nPlease answer the questions below. Defaults are shown in square brackets.\n",
        "You might find the following links useful: ",
        "- getting started: http://www.kipoi.org/docs/contributing/01_Getting_started/",
        "- model_type: http://www.kipoi.org/docs/contributing/02_Writing_model.yaml/#type",
        "- dataloader_type: http://www.kipoi.org/docs/contributing/04_Writing_dataloader.py/#dataloader-types",
        "--------------------------------------------\n",
    )
    for line in intro_lines:
        print(line)
    from cookiecutter.main import cookiecutter
    from cookiecutter.exceptions import FailedHookException
    # Locate the bundled template relative to this source file
    import inspect
    filename = inspect.getframeinfo(inspect.currentframe()).filename
    this_dir = os.path.dirname(os.path.abspath(filename))
    template_path = os.path.join(this_dir, "../model_template/")
    # remove stale pyc in the template directory
    # bug in cookiecutter: https://github.com/audreyr/cookiecutter/pull/1037
    stale_pyc = os.path.join(template_path, "hooks", "pre_gen_project.pyc")
    if os.path.exists(stale_pyc):
        os.remove(stale_pyc)
    # Create the project from the template
    try:
        out_dir = cookiecutter(template_path, **kwargs)
    except FailedHookException:
        # pre_gen_project.py detected an error in the configuration
        logger.error("Failed to initialize the model")
        sys.exit(1)
    print("--------------------------------------------")
    logger.info("Done!\nCreated the following folder into the current working directory: {0}".format(os.path.basename(out_dir)))
def cli_info(command, raw_args):
"""CLI interface to predict
"""
assert command == "info"
parser = argparse.ArgumentParser('kipoi {}'.format(command),
description="Prints dataloader" +
" keyword arguments.")
add_model(parser)
add_dataloader(parser, with_args=False)
args = parser.parse_args(raw_args)
# --------------------------------------------
# load model & dataloader
md = kipoi.get_model_descr(args.model, args.source)
src = kipoi.get_source(args.source)
# load the default dataloader
if isinstance(md.default_dataloader, kipoi.specs.DataLoaderImport):
with cd(src.get_model_dir(args.model)):
dl_descr = md.default_dataloader.get()
else:
| |
<reponame>jbeilstenedmands/cctbx_project
from __future__ import division
from xfel.ui.db import db_proxy
# All job states the scheduler (LSF-style status strings) or this module may
# report. "DELETED", "SUBMIT_FAIL" and "SUBMITTED" are set by this module
# rather than the scheduler. Fixed: the list previously contained "DONE" twice.
known_job_statuses = ["DONE", "ERR", "PEND", "RUN", "SUSP", "PSUSP", "SSUSP", "UNKWN", "EXIT", "ZOMBI", "DELETED", "SUBMIT_FAIL", "SUBMITTED"]
# States from which a job can no longer progress (eligible for deletion).
finished_job_statuses = ["DONE", "EXIT", "DELETED", "UNKWN", "ERR", "SUBMIT_FAIL"]
class Job(db_proxy):
  """One row of the <experiment_tag>_job table: a processing job for a
  specific (trial, rungroup, run) combination.

  Note: this file is Python 2 (print statements)."""
  def __init__(self, app, job_id = None, **kwargs):
    db_proxy.__init__(self, app, "%s_job" % app.params.experiment_tag, id = job_id, **kwargs)
    self.job_id = self.id  # alias kept for callers that expect .job_id

  def get_log_path(self):
    """Return the path of this job's stdout log file under the run folder."""
    from xfel.ui.db import get_run_path
    import os
    run_path = str(get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run))
    return os.path.join(run_path, "stdout", "log.out")

  def delete(self, output_only=False):
    """Delete this job's output folder and purge its results from the db.

    Only jobs in a finished state are deleted; already-DELETED jobs are a
    no-op. Rows are removed child-first across the linked tables so foreign
    keys are never dangling. All table names are built from the experiment
    tag; the interpolated values are integer ids from this object, not user
    input.

    NOTE(review): the output_only parameter is currently unused here --
    confirm whether folder-only deletion was intended to skip the db purge.
    """
    from xfel.ui.db import get_run_path
    import os, shutil
    if self.status not in finished_job_statuses:
      print "Job is not finished (status = %s)"%self.status
      return
    if self.status == "DELETED":
      return
    # Remove the on-disk results first
    job_folder = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run)
    if os.path.exists(job_folder):
      print "Deleting job folder for job", self.id
      shutil.rmtree(job_folder)
    else:
      print "Cannot find job folder (%s)"%job_folder
    # Have to be careful to delete from the tables in the right order
    # (children before parents).
    tag = self.app.params.experiment_tag
    def delete_and_commit(query):
      # Run a DELETE and report how many rows it removed
      cursor = self.app.execute_query(query, commit=True)
      print "(%d)"%cursor.rowcount
    # 1) cell_bin rows reachable from this job's events
    print "Deleting cell_bin entries",
    query = """DELETE cell_bin FROM `%s_cell_bin` cell_bin
               JOIN `%s_crystal` crystal ON crystal.id = cell_bin.crystal_id
               JOIN `%s_experiment` expr ON expr.crystal_id = crystal.id
               JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
               JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
               JOIN `%s_event` evt ON evt.id = ie_e.event_id
               WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
      tag, tag, tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
    delete_and_commit(query)
    # 2) Collect ids of crystal/beam/detector rows BEFORE deleting the
    #    experiment rows that link to them (they'd be unreachable after).
    ids = {}
    for item in "crystal", "beam", "detector":
      print "Listing %s ids"%item,
      query = """SELECT %s.id FROM `%s_%s` %s
                 JOIN `%s_experiment` expr ON expr.%s_id = %s.id
                 JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
                 JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
                 JOIN `%s_event` evt ON evt.id = ie_e.event_id
                 WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
        item, tag, item, item, tag, item, item, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
      cursor = self.app.execute_query(query)
      item_ids = ["%d"%i[0] for i in cursor.fetchall()]
      print "(%d)"%len(item_ids)
      ids[item] = ",".join(item_ids)
    # 3) For trials without isoforms, bin/cell rows are per-job and must be
    #    collected now as well (only rows with NULL trial_id).
    if len(self.trial.isoforms) == 0:
      print "Listing bin entries",
      query = """SELECT bin.id FROM `%s_bin` bin
                 JOIN `%s_cell` cell ON bin.cell_id = cell.id
                 JOIN `%s_crystal` crystal ON crystal.cell_id = cell.id
                 JOIN `%s_experiment` expr ON expr.crystal_id = crystal.id
                 JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
                 JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
                 JOIN `%s_event` evt ON evt.id = ie_e.event_id
                 WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d
                 AND cell.trial_id is NULL""" % (
        tag, tag, tag, tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
      cursor = self.app.execute_query(query)
      item_ids = ["%d"%i[0] for i in cursor.fetchall()]
      print "(%d)"%len(item_ids)
      bin_ids = ",".join(item_ids)
      print "Listing cell entries",
      query = """SELECT cell.id FROM `%s_cell` cell
                 JOIN `%s_crystal` crystal ON crystal.cell_id = cell.id
                 JOIN `%s_experiment` expr ON expr.crystal_id = crystal.id
                 JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
                 JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
                 JOIN `%s_event` evt ON evt.id = ie_e.event_id
                 WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d
                 AND cell.trial_id IS NULL""" % (
        tag, tag, tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
      cursor = self.app.execute_query(query)
      item_ids = ["%d"%i[0] for i in cursor.fetchall()]
      print "(%d)"%len(item_ids)
      cell_ids = ",".join(item_ids)
    # 4) Now the experiment rows themselves
    print "Deleting experiment entries",
    query = """DELETE expr FROM `%s_experiment` expr
               JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
               JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
               JOIN `%s_event` evt ON evt.id = ie_e.event_id
               WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
      tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
    delete_and_commit(query)
    # 5) The crystal/beam/detector rows collected in step 2
    for item in "crystal", "beam", "detector":
      if len(ids[item]) > 0:
        print "Deleting %s entries"%item,
        query = """DELETE %s FROM `%s_%s` %s
                   WHERE %s.id IN (%s)""" % (
          item, tag, item, item, item, ids[item])
        delete_and_commit(query)
    # 6) The bin/cell rows collected in step 3 (no-isoform trials only)
    if len(self.trial.isoforms) == 0 and len(bin_ids) > 0:
      print "Deleting bin entries",
      query = """DELETE bin FROM `%s_bin` bin
                 WHERE bin.id IN (%s)""" % (
        tag, bin_ids)
      delete_and_commit(query)
    if len(self.trial.isoforms) == 0 and len(cell_ids) > 0:
      print "Deleting cell entries",
      query = """DELETE cell FROM `%s_cell` cell
                 WHERE cell.id IN (%s)""" % (
        tag, cell_ids)
      delete_and_commit(query)
    # 7) Imagesets: collect ids first, then drop the link table, then the rows
    print "Listing imageset entries",
    query = """SELECT imgset.id FROM `%s_imageset` imgset
               JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
               JOIN `%s_event` evt ON evt.id = ie_e.event_id
               WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
      tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
    cursor = self.app.execute_query(query)
    item_ids = ["%d"%i[0] for i in cursor.fetchall()]
    print "(%d)"%len(item_ids)
    imageset_ids = ",".join(item_ids)
    print "Deleting imageset_event entries",
    query = """DELETE is_e FROM `%s_imageset_event` is_e
               JOIN `%s_event` evt ON evt.id = is_e.event_id
               WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
      tag, tag, self.run.id, self.trial.id, self.rungroup.id)
    delete_and_commit(query)
    if len(imageset_ids) > 0:
      print "Deleting imageset entries",
      query = """DELETE imgset FROM `%s_imageset` imgset
                 WHERE imgset.id IN (%s)""" % (
        tag, imageset_ids)
      delete_and_commit(query)
    # 8) Finally the event rows themselves
    print "Deleting event entries",
    query = """DELETE evt FROM `%s_event` evt
               WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
      tag, self.run.id, self.trial.id, self.rungroup.id)
    delete_and_commit(query)
    # Mark the job itself deleted (the row stays until remove_from_db)
    self.status = "DELETED"

  def remove_from_db(self):
    """Remove the job row itself; only valid after delete() has run."""
    assert self.status == "DELETED"
    print "Removing job %d from the db"%self.id,
    tag = self.app.params.experiment_tag
    query = """DELETE job FROM `%s_job` job
               WHERE job.id = %d""" % (
      tag, self.id)
    cursor = self.app.execute_query(query, commit=True)
    print "(%d)"%cursor.rowcount
# Support classes and functions for job submission
class _job(object):
"""Used to represent a job that may not have been submitted into the cluseter or database yet"""
def __init__(self, trial, rungroup, run):
self.trial = trial
self.rungroup = rungroup
self.run = run
def __eq__(self, other):
return self.trial.id == other.trial_id and \
self.rungroup.id == other.rungroup_id and \
self.run.id == other.run_id
def submit_all_jobs(app):
  """Submit one job per (active trial, rungroup, run) combination that does
  not already have a submitted job in the database.

  Jobs are recorded as SUBMITTED before the actual cluster submission; on a
  submission failure the status is set to SUBMIT_FAIL and the exception is
  re-raised. (Python 2 file.)"""
  runs = app.get_all_runs()
  submitted_jobs = app.get_all_jobs()
  trials = app.get_all_trials(only_active = True)
  # Build the full list of jobs that should exist
  needed_jobs = []
  for trial in trials:
    for rungroup in trial.rungroups:
      assert rungroup.active
      rg_start = app.get_run(run_number=rungroup.startrun)
      if rungroup.endrun is None:
        # open ended run group: everything from startrun onwards
        rg_runs = [r for r in runs if r.run >= rg_start.run]
      else:
        # closed run group: runs within [startrun, endrun]
        rg_end = app.get_run(run_number=rungroup.endrun)
        rg_runs = [r for r in runs if r.run >= rg_start.run and r.run <= rg_end.run]
      for run in rg_runs:
        needed_jobs.append(_job(trial, rungroup, run))
  all_jobs = [j for j in submitted_jobs] # shallow copy
  for job in needed_jobs:
    # _job.__eq__ compares against the db rows' trial_id/rungroup_id/run_id
    if job in submitted_jobs:
      continue
    print "Submitting job: trial %d, rungroup %d, run %d"%(job.trial.trial, job.rungroup.id, job.run.run)
    # Record the job first so a failed submission still leaves a row
    j = app.create_job(trial_id = job.trial.id,
                       rungroup_id = job.rungroup.id,
                       run_id = job.run.id,
                       status = "SUBMITTED")
    all_jobs.append(j)
    try:
      j.submission_id = submit_job(app, job)
    except Exception as e:
      print "Couldn't submit job:", str(e)
      j.status = "SUBMIT_FAIL"
      raise  # abort the whole sweep; remaining jobs stay unsubmitted
def submit_job(app, job):
import os, libtbx.load_env
from xfel.ui import settings_dir
configs_dir = os.path.join(settings_dir, "cfgs")
if not os.path.exists(configs_dir):
os.makedirs(configs_dir)
target_phil_path = os.path.join(configs_dir, "%s_%s_r%04d_t%03d_rg%03d_params.phil"%
(app.params.experiment, app.params.experiment_tag, job.run.run, job.trial.trial, job.rungroup.id))
dispatcher = app.params.dispatcher
phil_str = job.trial.target_phil_str
if job.rungroup.extra_phil_str is not None:
phil_str += "\n" + job.rungroup.extra_phil_str
from xfel.ui import known_dials_dispatchers
if dispatcher in known_dials_dispatchers:
import importlib
orig_phil_scope = importlib.import_module(known_dials_dispatchers[dispatcher]).phil_scope
from iotbx.phil import parse
if job.rungroup.two_theta_low is not None or job.rungroup.two_theta_high is not None:
override_str = """
radial_average {
enable = True
show_plots = False
verbose = False
output_bins = False
}
"""
phil_scope = orig_phil_scope.fetch(parse(override_str))
else:
phil_scope = orig_phil_scope
trial_params = phil_scope.fetch(parse(phil_str)).extract()
image_format = job.rungroup.format
assert image_format in ['cbf', 'pickle']
if image_format == 'cbf':
if "rayonix" in job.rungroup.detector_address.lower():
mode = "rayonix"
elif "cspad" in job.rungroup.detector_address.lower():
mode = "cspad"
elif "jungfrau" in job.rungroup.detector_address.lower():
mode = "jungfrau"
else:
assert False, "Couldn't figure out what kind of detector is specified by address %s"%job.rungroup.detector_address
if dispatcher == 'cctbx.xfel.xtc_process':
trial_params.format.file_format = image_format
trial_params.format.cbf.mode = mode
elif dispatcher == "cxi.xtc_process":
image_format = 'pickle'
else:
raise RuntimeError("Unknown dispatcher: %s"%dispatcher)
if job.rungroup.calib_dir is not None or job.rungroup.config_str is not None or dispatcher == 'cxi.xtc_process' or image_format == 'pickle':
config_path = os.path.join(configs_dir, "%s_%s_r%04d_t%03d_rg%03d.cfg"%
(app.params.experiment, app.params.experiment_tag, job.run.run, job.trial.trial, job.rungroup.id))
else:
config_path = None
# Dictionary for formating the submit phil and, if used, the labelit cfg file
d = dict(
# Generally for the LABELIT backend or image pickles
address = job.rungroup.detector_address,
default_calib_dir = libtbx.env.find_in_repositories("xfel/metrology/CSPad/run4/CxiDs1.0_Cspad.0"),
dark_avg_path = job.rungroup.dark_avg_path,
dark_stddev_path = job.rungroup.dark_stddev_path,
untrusted_pixel_mask_path = job.rungroup.untrusted_pixel_mask_path,
detz_parameter = job.rungroup.detz_parameter,
gain_map_path = job.rungroup.gain_map_path,
gain_mask_level = job.rungroup.gain_mask_level,
beamx = job.rungroup.beamx,
beamy = job.rungroup.beamy,
| |
<reponame>cutz-j/Python_photoshop<gh_stars>0
from tkinter import *
import os
import os.path
import math
from tkinter.filedialog import *
from tkinter.simpledialog import *
import operator
import numpy as np
import struct
import threading
from wand.image import *
from wand.color import Color
from wand.drawing import Drawing
import sqlite3
import pymysql
import csv
import xlrd
from xlsxwriter import Workbook
import xlsxwriter
import matplotlib.pyplot as plt
import glob
import json
import tensorflow as tf
import pandas as pd
## Function definitions
def loadImage(fname):
    """Load a square 8-bit grayscale RAW file into the global inImage array.

    The side length is inferred from the file size (sqrt of the byte count),
    matching the original behaviour for square RAW images.

    Fixes vs. previous version: the file is read in one pass and decoded
    with numpy instead of the original ord(fp.read(1)) per-pixel loop, and
    the file handle is managed with a `with` block.
    """
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH, photo, VIEW_X, VIEW_Y
    fsize = os.path.getsize(fname)
    inH = inW = int(math.sqrt(fsize))
    with open(fname, 'rb') as fp:
        raw = fp.read(inH * inW)
    # uint8 values equal ord() of each byte; astype makes a writable int32 copy
    inImage = np.frombuffer(raw, dtype=np.uint8).astype(np.int32).reshape(inH, inW)
def openFile():
    """Ask the user for an image file and load it (RAW or general format)."""
    global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH, photo, gif, VIEW_X, VIEW_Y
    filename = askopenfilename(parent=window,
                               filetypes=(("그림파일", "*.raw;*.gif;*.jpg;*.png;*.tif;*.bmp"), ("모든파일", "*.*")))
    # Drop any stale preview label left over from a comparison view
    if pLabel is not None:
        pLabel.destroy()
    gif = filename[-3:] != "raw"
    if gif:
        # Non-RAW formats go through the colour (wand) pipeline
        loadImage_gif(filename)
        equal_gif()
    else:
        loadImage(filename)   # file -> input buffer
        equal()               # input buffer -> output buffer + redraw
def display_geniune():
    """Prompt for a viewing resolution and redraw the image at that size."""
    global VIEW_X, VIEW_Y
    chosen = askinteger('출력 비율을 정하세요. ', '64, 128, 256, 512, 1024..', minvalue=64, maxvalue=4096)
    VIEW_X = VIEW_Y = chosen
    display_first()
def display():
    """Redraw the output image on a fresh canvas, downsampled to the
    current VIEW_X x VIEW_Y size. Pixel painting runs on a worker thread
    so the UI stays responsive."""
    global window, canvas, PLabel, paper, filename, inImage, outImage, inW, inH, outW, outH, photo, paper_copy
    # NOTE(review): 'PLabel' above looks like a typo for 'pLabel'; pLabel is
    # only read (destroy) here so behaviour is unaffected -- confirm intent.
    global VIEW_X, VIEW_Y
    if gif == True:
        display_gif()
        return
    # Tear down the previous canvas/preview label before rebuilding
    if canvas != None:
        canvas.destroy()
    if pLabel != None:
        pLabel.destroy()
    # Never upscale: clamp the view to the image size
    if VIEW_X >= outW or VIEW_Y >= outH:
        VIEW_X = outW
        VIEW_Y = outH
    step = int(outW / VIEW_X)  # sample every step-th pixel when downscaling
    window.geometry(str(VIEW_X * 2) + 'x' + str(VIEW_Y * 2))
    canvas = Canvas(window, width=VIEW_X, height=VIEW_Y)
    paper = PhotoImage(width=VIEW_X, height=VIEW_Y)
    canvas.create_image((VIEW_X/2, VIEW_Y/2), image=paper, state='normal')
    def putPixel() :
        # Paint each sampled grayscale value as an '#rrggbb' pixel
        for i in range(0, outH, step) :
            for k in range(0, outW, step) :
                data = outImage[i][k]
                paper.put('#%02x%02x%02x' % (data, data, data), (int(k/step), int(i/step)))
    # Paint asynchronously so large images don't block the event loop
    threading.Thread(target=putPixel).start()
    canvas.pack(expand=1, anchor=CENTER)
    status.configure(text="이미지 정보: " + str(outW) + " X " + str(outH) + " / 출력 정보: " + str(VIEW_X) + " X " + str(VIEW_Y))
    status.pack()
def display_first():
    """Like display(), but also snapshots the rendered image into
    paper_copy so later edits can be compared against the original
    (see display_copy / rollback)."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH, photo, paper_copy
    global VIEW_X, VIEW_Y
    if gif == True:
        display_first_gif()
        return
    if canvas != None :
        canvas.destroy()
    # Never upscale: clamp the view to the image size
    if VIEW_X >= outW or VIEW_Y >= outH:
        VIEW_X = outW
        VIEW_Y = outH
    step = int(outW / VIEW_X)  # sample every step-th pixel when downscaling
    window.geometry(str(VIEW_X * 2) + 'x' + str(VIEW_Y * 2))
    canvas = Canvas(window, width=VIEW_X, height=VIEW_Y)
    paper = PhotoImage(width=VIEW_X, height=VIEW_Y)
    paper_copy = paper.copy()  # second PhotoImage kept as the "before" image
    canvas.create_image((VIEW_X/2, VIEW_Y/2), image=paper, state='normal')
    def putPixel() :
        # Paint both the live image and the snapshot pixel by pixel
        for i in range(0, outH, step) :
            for k in range(0, outW, step) :
                data = outImage[i][k]
                paper.put('#%02x%02x%02x' % (data, data, data), (int(k/step), int(i/step)))
                paper_copy.put('#%02x%02x%02x' % (data, data, data), (int(k/step), int(i/step)))
    threading.Thread(target=putPixel).start()
    canvas.pack(expand=1, anchor=CENTER)
    status.configure(text="이미지 정보: " + str(outW) + " X " + str(outH) + " / 출력 정보: " + str(VIEW_X) + " X " + str(VIEW_Y))
    status.pack()
def display_copy():
    """Show the current image (right) side by side with the paper_copy
    snapshot taken by display_first (left)."""
    global window, canvas, pLabel, paper, filename, inImage, outImage, inW, inH, outW, outH, photo, paper_copy
    global VIEW_X, VIEW_Y
    if gif == True:
        display_copy_gif()
        return
    if canvas != None :
        canvas.destroy()
    # Never upscale: clamp the view to the image size
    if VIEW_X >= outW or VIEW_Y >= outH:
        VIEW_X = outW
        VIEW_Y = outH
    window.geometry(str(VIEW_X*2) + 'x' + str(VIEW_Y * 2))
    canvas = Canvas(window, width=VIEW_X, height=VIEW_Y)
    canvas.create_image((VIEW_X/2, VIEW_Y/2), image=paper, state='normal')
    canvas.pack(side=RIGHT)
    # The snapshot goes into a Label on the left
    photo = PhotoImage()
    pLabel = Label(window, image=photo)
    pLabel.pack(side=LEFT)
    pLabel.configure(image=paper_copy)
def rollback():
    """Discard all edits by reloading the original file from disk."""
    global window, canvas, paper, PLabel, filename, inImage, outImage, inW, inH, outW, outH, photo, paper_copy
    if gif == True:
        rollback_gif()
        return
    if pLabel is not None:
        pLabel.destroy()
    loadImage(filename)
    equal()
def equal():
    """Copy the input buffer to the output buffer unchanged and redraw.

    Fixes vs. previous version: removes a dead `np.array(inImage)` statement,
    replaces the O(n^2) element-by-element Python copy with a vectorized
    numpy copy, and allocates the output with the correct [inH, inW] shape
    (the original allocated [inW, inH], transposed -- harmless only because
    the RAW images here are square).
    """
    if gif == True:
        equal_gif()
        return
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH, photo
    outW = inW
    outH = inH
    outImage = np.array(inImage, dtype=np.int32)  # independent copy, shape (inH, inW)
    display_first()
def saveFile():
    """Save the current (3-channel) output image to a file chosen by the user.

    Fixes vs. previous version: returns quietly if the save dialog is
    cancelled (asksaveasfile returns None, which previously crashed), closes
    the dialog's file handle before wand writes to the path, and builds the
    '#rrggbb' colour with one format expression instead of manual hex
    assembly.
    """
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    saveFp = asksaveasfile(parent=window, mode='w', defaultextension='.png'
                  , filetypes=(("그림파일", "*.gif;*.jpg;*.png;*.tif;*.bmp"), ("모든파일", "*.*")))
    if saveFp is None:
        return  # user cancelled the dialog
    savePath = saveFp.name
    saveFp.close()  # wand writes to the path itself; release the handle
    draw = Drawing()  # blank drawing to paint pixels onto
    # NOTE(review): outImage is indexed [i][j][channel] here, i.e. this path
    # assumes the 3-channel (colour/gif) buffer, unlike the 2-D grayscale
    # buffer used elsewhere in this file -- confirm against the gif pipeline.
    for i in range(outW):
        for j in range(outH):
            dataR = outImage[i][j][0]
            dataG = outImage[i][j][1]
            dataB = outImage[i][j][2]
            draw.fill_color = Color('#%02x%02x%02x' % (dataR, dataG, dataB))
            draw.color(j, i, 'replace')
    with Image(filename=filename) as img:
        draw(img)
        img.save(filename=savePath)
    print("SAVE OK")
def exitFile():
    """Placeholder exit handler; not implemented yet."""
    global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
    pass
def addImage(num):
    """Apply the pixel-arithmetic operation selected by `num` (1-13) to the
    grayscale input image and display the result.

    Most operations prompt for a constant via a dialog; every result is
    clamped to the displayable 0..255 range."""
    if gif == True:
        addImage_gif(num)  # colour images use the RGB variant
        return
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    outW = inW
    outH = inH
    outImage = np.zeros([inH, inW], dtype=np.int32)
    if num == 1:  # brighten: add a constant
        brt = askinteger('밝게하기', '밝게할 값', minvalue=1, maxvalue=255)
        for i in range(inH) :
            for k in range(inW) :
                if inImage[i][k] + brt > 255:
                    outImage[i][k] = 255
                else:
                    outImage[i][k] = inImage[i][k] + brt
    elif num == 2:  # darken: subtract a constant
        brt = askinteger('어둡게하기', '어둡게할 값', minvalue=1, maxvalue=255)
        for i in range(inH) :
            for k in range(inW) :
                if inImage[i][k] - brt < 0:
                    outImage[i][k] = 0
                else:
                    outImage[i][k] = inImage[i][k] - brt
    elif num == 3:  # brighten: multiply by a constant
        brt = askinteger('밝게하기', '곱할 값', minvalue=1, maxvalue=10)
        for i in range(inH) :
            for k in range(inW) :
                if inImage[i][k] * brt > 255:
                    outImage[i][k] = 255
                else:
                    outImage[i][k] = inImage[i][k] * brt
    elif num == 4:  # darken: integer-divide by a constant
        brt = askinteger('어둡게하기', '나눌 값', minvalue=1, maxvalue=255)
        for i in range(inH) :
            for k in range(inW) :
                outImage[i][k] = int(inImage[i][k] / brt)
    elif num == 5: # AND with a constant mask
        brt = askinteger('AND', '상수', minvalue=1, maxvalue=255)
        for i in range(inH) :
            for k in range(inW) :
                # clamp kept for symmetry; 8-bit AND cannot leave 0..255
                if inImage[i][k] & brt > 255:
                    outImage[i][k] = 255
                elif inImage[i][k] & brt < 0:
                    outImage[i][k] = 0
                else:
                    outImage[i][k] = inImage[i][k] & brt
    elif num == 6: # OR with a constant mask
        brt = askinteger('OR', '상수', minvalue=1, maxvalue=255)
        for i in range(inH) :
            for k in range(inW) :
                if inImage[i][k] | brt > 255:
                    outImage[i][k] = 255
                elif inImage[i][k] | brt < 0:
                    outImage[i][k] = 0
                else:
                    outImage[i][k] = inImage[i][k] | brt
    elif num == 7: # XOR with a constant mask
        brt = askinteger('OR', '상수', minvalue=1, maxvalue=255)
        for i in range(inH) :
            for k in range(inW) :
                outImage[i][k] = inImage[i][k] ^ brt
                if outImage[i][k] > 255: outImage[i][k] = 255
                elif outImage[i][k] < 0: outImage[i][k] = 0
    elif num == 8: # invert (photographic negative)
        for i in range(inH) :
            for k in range(inW) :
                outImage[i][k] = 255 - inImage[i][k]
    elif num == 9: # gamma correction (scale by 1/gamma)
        brt = askfloat('감마', '소수', minvalue=0, maxvalue=255)
        for i in range(inH) :
            for k in range(inW) :
                outImage[i][k] = int(inImage[i][k] * (1/brt))
                if outImage[i][k] > 255: outImage[i][k] = 255
                elif outImage[i][k] < 0: outImage[i][k] = 0
    elif num == 10: # parabola (cap): emphasizes mid tones
        for i in range(inH) :
            for k in range(inW) :
                outImage[i][k] = int(-255 * ((inImage[i][k] / 127 - 1) ** 2) + 255)
                if outImage[i][k] > 255: outImage[i][k] = 255
                elif outImage[i][k] < 0: outImage[i][k] = 0
    elif num == 11: # parabola (cup): emphasizes extremes
        for i in range(inH) :
            for k in range(inW) :
                outImage[i][k] = int(255 * ((inImage[i][k] / 127 - 1) ** 2))
                if outImage[i][k] > 255: outImage[i][k] = 255
                elif outImage[i][k] < 0: outImage[i][k] = 0
    elif num == 12: # binary threshold
        brt = askinteger('임계치', '정수(1~255)', minvalue=0, maxvalue=255)
        for i in range(inH) :
            for k in range(inW) :
                if inImage[i][k] > brt: outImage[i][k] = 255
                elif inImage[i][k] <= brt: outImage[i][k] = 0
    elif num == 13: # highlight a value range (pixels outside stay 0)
        brt1 = askinteger('첫 번째 범위 수', '정수(1~255)', minvalue=0, maxvalue=255)
        brt2 = askinteger('두 번째 범위 수', '정수(1~255)', minvalue=0, maxvalue=255)
        for i in range(inH) :
            for k in range(inW) :
                if inImage[i][k] > brt1 and inImage[i][k] < brt2:
                    outImage[i][k] = 255
    display()
def a_average(num): # 입력 // 출력 평균
if gif == True:
a_average_gif(num)
return
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH, photo
sumList1, sumList2 = [], []
if num == 2:
brt = askinteger('절사 수치', | |
import contextlib
import itertools
import matplotlib.pyplot as plt
import numpy as np
import os.path
import pandas as pd
import seaborn as sb
import sys
from . import classifiers
from . import piquant_options as po
from . import resource_usage as ru
from . import statistics
from . import tpms as t
RESOURCE_USAGE_DIR = "resource_usage_graphs"
# Don't embed characters as paths when outputting SVG - assume fonts are
# installed on machine where SVG will be viewed (see
# http://matplotlib.org/users/customizing.html)
plt.rcParams['svg.fonttype'] = 'none'
# matplotlib parameters appropriate for poster output
# plt.rcParams['font.size'] = 16.0
# plt.rcParams['axes.labelsize'] = 'medium'
# plt.rcParams['xtick.labelsize'] = 'x-small'
# plt.rcParams['ytick.labelsize'] = 'x-small'
# plt.rcParams['legend.fontsize'] = 'small'
class _GroupedPlotInfo(object):
def __init__(self, group_mqr_option, fixed_mqr_option_info):
self.group_mqr_option = group_mqr_option
self.fixed_mqr_option_info = fixed_mqr_option_info
self.title = None
def get_filename_parts(
self, base_name, plotted, versus=None, ascending=None):
name_elements = [base_name, plotted]
if versus:
name_elements += ["vs", versus]
name_elements += ["per", self.group_mqr_option.title.lower()]
if ascending is not None:
name_elements.append("asc" if ascending else "desc")
name_elements += self.fixed_mqr_option_info
if ascending is not None:
name_elements.append("distribution")
return name_elements
def set_plot_title(self, plotted, versus=None):
title_elements = [plotted]
if versus:
title_elements += ["vs", _decapitalized(versus)]
title_elements += ["per", self.group_mqr_option.title.lower()]
title = " ".join(title_elements)
if len(self.fixed_mqr_option_info) > 0:
title += ": " + ", ".join(self.fixed_mqr_option_info)
self.title = title
@contextlib.contextmanager
def _saving_new_plot(fformat, file_name_elements):
    """Open a fresh matplotlib figure, yield, then save and close it.

    The figure is written even if the body raises, to a file named by the
    underscore-joined (space-sanitised) elements plus the format suffix.
    """
    plt.figure()
    try:
        yield
    finally:
        stem = "_".join(str(part) for part in file_name_elements)
        stem = stem.replace(' ', '_')
        plt.savefig("{0}.{1}".format(stem, fformat), format=fformat)
        plt.close()
def _capitalized(text):
return text[:1].upper() + text[1:]
def _decapitalized(text):
return text[:1].lower() + text[1:]
def _get_distribution_plot_ylabel(ascending):
return "Percentage of isoforms " + \
("less" if ascending else "greater") + " than threshold"
def _set_distribution_plot_bounds(xmin, xmax, ymin=None, ymax=None):
    """Set axis limits for a distribution plot; y is fixed at 0-100 (+-2.5)."""
    # ymin/ymax are accepted only for interface compatibility
    del ymin
    del ymax
    pad = (xmax - xmin) / 40.0
    plt.xlim(xmin=xmin - pad, xmax=xmax + pad)
    plt.ylim(ymin=-2.5, ymax=102.5)
def _get_plot_bounds_setter(statistic):
    """Return a bounds-setting callback tailored to *statistic*.

    The callback pads the x limits by 2% and clamps the y limits to the
    statistic's declared range when one is provided.
    """
    def _set_statistic_plot_bounds(xmin, xmax, ymin, ymax):
        pad = 2 * (xmax - xmin) / 100.0
        plt.xlim(xmin=xmin - pad, xmax=xmax + pad)
        stat_range = statistic.stat_range((ymin, ymax))
        if stat_range is None:
            return
        lower, upper = stat_range
        if lower is not None:
            plt.ylim(ymin=lower)
        if upper is not None:
            plt.ylim(ymax=upper)
    return _set_statistic_plot_bounds
def _set_ticks_for_classifier_plot(locations, classifier):
    """Label the x ticks at *locations* with the classifier's value labels."""
    labels = classifier.get_value_labels(len(locations))
    plt.xticks(locations, labels)
def _get_group_mqr_option_values(stats_df, group_mqr_option):
group_mqr_option_vals = \
stats_df[group_mqr_option.name].value_counts().index.tolist()
group_mqr_option_vals.sort()
return group_mqr_option_vals
def _plot_grouped_statistic(
        stats_df, plot_info, xcol, ycol, xlabel, ylabel,
        plot_bounds_setter):
    """Draw one line per grouping-option value and decorate the axes.

    Plots ycol against xcol for each distinct value of the grouping option,
    applies plot_bounds_setter with the overall data extents, and sets the
    axis labels, legend and title. Returns the (ymin, ymax) extents.

    Fix vs. previous version: DataFrame.sort(columns=...) was removed from
    pandas (0.20); sort_values() is the replacement. Assigning the sorted
    copy also avoids an in-place mutation of a filtered slice
    (SettingWithCopy).
    """
    group_mqr_option_vals = _get_group_mqr_option_values(
        stats_df, plot_info.group_mqr_option)
    xmin = ymin = sys.maxsize
    xmax = ymax = -sys.maxsize - 1
    for group_mqr_option_value in group_mqr_option_vals:
        group_stats = stats_df[
            stats_df[plot_info.group_mqr_option.name] == group_mqr_option_value]
        group_stats = group_stats.sort_values(by=xcol, axis=0)
        xvals = group_stats[xcol]
        yvals = group_stats[ycol]
        plt.plot(xvals, yvals, '-o',
                 label=plot_info.group_mqr_option.get_value_name(
                     group_mqr_option_value))
        # Track the overall extents across all groups for the bounds setter
        ymin = min(ymin, yvals.min())
        ymax = max(ymax, yvals.max())
        xmin = min(xmin, xvals.min())
        xmax = max(xmax, xvals.max())
    plot_bounds_setter(xmin, xmax, ymin, ymax)
    plt.xlabel(_capitalized(xlabel))
    plt.ylabel(_capitalized(ylabel))
    plt.legend(title=plot_info.group_mqr_option.title, loc=4)
    plt.suptitle(plot_info.title)
    return (ymin, ymax)
def _plot_grouped_stat_vs_mqr_opt(
        fformat, stats, base_name, statistic, group_mqr_option,
        varying_mqr_option, fixed_mqr_option_values):
    """Plot a statistic against a varying option, one line per group value."""
    fixed_info = po.get_value_names(fixed_mqr_option_values)
    info = _GroupedPlotInfo(group_mqr_option, fixed_info)
    xcol = varying_mqr_option.name
    filename_parts = info.get_filename_parts(
        base_name, statistic.name, versus=xcol)
    with _saving_new_plot(fformat, filename_parts):
        info.set_plot_title(statistic.title, versus=varying_mqr_option.title)
        _plot_grouped_statistic(
            stats, info, xcol, statistic.name,
            varying_mqr_option.get_axis_label(), statistic.get_axis_label(),
            _get_plot_bounds_setter(statistic))
def _plot_grouped_stat_vs_clsfr(
        fformat, stats, base_name, statistic, group_mqr_option,
        classifier, fixed_mqr_option_values):
    """Plot a statistic against a classifier column, one line per group value."""
    xcol = classifier.get_column_name()
    fixed_info = po.get_value_names(fixed_mqr_option_values)
    info = _GroupedPlotInfo(group_mqr_option, fixed_info)
    filename_parts = info.get_filename_parts(
        base_name, statistic.name, versus=xcol)
    with _saving_new_plot(fformat, filename_parts):
        info.set_plot_title(statistic.title, versus=classifier.get_plot_title())
        _plot_grouped_statistic(
            stats, info, xcol, statistic.name,
            classifier.get_axis_label(), statistic.get_axis_label(),
            _get_plot_bounds_setter(statistic))
        # One integer tick per classifier value across the observed range
        lo = stats[xcol].min()
        hi = stats[xcol].max()
        _set_ticks_for_classifier_plot(np.arange(lo, hi + 1), classifier)
def _plot_grouped_cumulative_dist(
        fformat, stats, base_name, group_mqr_option,
        classifier, ascending, fixed_mqr_option_values):
    """Plot the cumulative true-positive distribution per group value."""
    xcol = classifier.get_column_name()
    fixed_info = po.get_value_names(fixed_mqr_option_values)
    info = _GroupedPlotInfo(group_mqr_option, fixed_info)
    filename_parts = info.get_filename_parts(
        base_name, xcol, ascending=ascending)
    with _saving_new_plot(fformat, filename_parts):
        info.set_plot_title(_capitalized(xcol) + " threshold")
        _plot_grouped_statistic(
            stats, info, xcol, t.TRUE_POSITIVE_PERCENTAGE,
            xcol, _get_distribution_plot_ylabel(ascending),
            _set_distribution_plot_bounds)
def _draw_prequant_time_usage_graph(fformat, graph_file_basename, usage_data):
    """Draw a grouped bar chart of time usage per prequantification method.

    Args:
        fformat: Output image format, passed through to _saving_new_plot.
        graph_file_basename: Base path for the saved plot file.
        usage_data: DataFrame with one row per quantification method, a
            "quant_method" column, and one column per time usage statistic.
    """
    with _saving_new_plot(fformat, [graph_file_basename, "time_usage"]):
        n_groups = len(usage_data.index)
        index = np.arange(n_groups)
        time_usage_stats = ru.get_time_usage_statistics()
        gap_width = 0.1
        # Split the non-gap width of each group evenly between statistics.
        bar_width = (1 - gap_width) / len(time_usage_stats)
        dummy, axes = plt.subplots()
        # NOTE(review): private matplotlib attribute; renamed in newer
        # matplotlib releases - confirm against the pinned version.
        color_cycle = axes._get_lines.color_cycle
        for i, usage_stat in enumerate(time_usage_stats):
            # next() works on both Python 2 and 3, unlike the Python-2-only
            # color_cycle.next() call this replaces.
            plt.bar(index + i * bar_width,
                    usage_data[usage_stat.name].values,
                    bar_width, color=next(color_cycle),
                    label=_capitalized(usage_stat.name.replace('-', ' ')))
        plt.xlabel('Quantification method')
        plt.ylabel('Log10 total time (s)')
        plt.title('Time taken for prequantification')
        plt.xticks(index + ((1 - gap_width) / 2),
                   usage_data["quant_method"].values)
        # Shrink the axes to leave room for a legend outside the plot area.
        box = axes.get_position()
        axes.set_position([box.x0, box.y0, box.width * 0.9, box.height])
        axes.legend(loc=6, bbox_to_anchor=(1, 0.5))
def _draw_prequant_mem_usage_graph(fformat, graph_file_basename, usage_data):
    """Draw a bar chart of maximum resident memory per prequant method.

    Args:
        fformat: Output image format, passed through to _saving_new_plot.
        graph_file_basename: Base path for the saved plot file.
        usage_data: DataFrame with one row per quantification method, a
            "quant_method" column, and the memory usage statistic column.
    """
    with _saving_new_plot(fformat, [graph_file_basename, "memory_usage"]):
        n_groups = len(usage_data.index)
        index = np.arange(n_groups)
        mem_usage_stat = ru.get_memory_usage_statistics()[0]
        bar_width = 0.9
        dummy, axes = plt.subplots()
        # NOTE(review): private matplotlib attribute; renamed in newer
        # matplotlib releases - confirm against the pinned version.
        color_cycle = axes._get_lines.color_cycle
        # next() works on both Python 2 and 3, unlike the Python-2-only
        # color_cycle.next() call this replaces.
        plt.bar(index,
                usage_data[mem_usage_stat.name].values,
                bar_width, color=next(color_cycle))
        plt.xlabel('Quantification method')
        plt.ylabel('Resident memory (Gb)')
        plt.title('Maximum resident memory during prequantification')
        plt.xticks(index + bar_width / 2,
                   usage_data["quant_method"].values)
def log_tpm_scatter_plot(
        fformat, tpms, base_name, tpm_label, not_present_cutoff):
    """Scatter-plot log10 calculated vs. log10 real TPMs and save the figure."""
    with _saving_new_plot(fformat, [base_name, tpm_label, "log10 scatter"]):
        real_values = tpms[t.LOG10_REAL_TPM].values
        calculated_values = tpms[t.LOG10_CALCULATED_TPM].values
        plt.scatter(real_values, calculated_values,
                    c="lightblue", alpha=0.4)
        plt.suptitle("Scatter plot of log calculated vs real TPMs: " +
                     tpm_label)
        plt.xlabel("Log10 real TPM")
        plt.ylabel("Log10 calculated TPM")
        # Clip both axes just below the "not present" cutoff.
        lower_bound = np.log10(not_present_cutoff) - 0.2
        plt.xlim(xmin=lower_bound)
        plt.ylim(ymin=lower_bound)
def log_ratio_boxplot(
        fformat, tpms, base_name, tpm_label, classifier, threshold):
    """Box-plot log ratios of calculated/real TPMs, stratified by classifier.

    Classifier groups with at most `threshold` members are dropped before
    plotting.
    """
    grouping_column = classifier.get_column_name()
    large_groups = tpms.groupby(grouping_column).filter(
        lambda group: len(group[t.REAL_TPM]) > threshold)
    with _saving_new_plot(
            fformat, [base_name, grouping_column, tpm_label, "boxplot"]):
        sb.boxplot(large_groups[t.LOG10_RATIO],
                   groupby=large_groups[grouping_column],
                   sym='', color='lightblue')
        plt.suptitle("Log ratios of calculated to real TPMs: " + tpm_label)
        plt.xlabel(_capitalized(grouping_column))
        plt.ylabel("Log ratio (calculated/real TPM)")
        _set_ticks_for_classifier_plot(plt.xticks()[0], classifier)
def plot_statistic_vs_classifier(
        fformat, stats, base_name, statistic, classifier, threshold):
    """Plot one statistic against classifier values, dropping sparse rows.

    Rows whose true-positive TPM count is at most `threshold` are excluded.
    """
    filtered = stats[stats[statistics.TP_NUM_TPMS] > threshold]
    clsfr_col = classifier.get_column_name()
    with _saving_new_plot(
            fformat, [base_name, statistic.name, "vs", clsfr_col]):
        xvals = filtered[clsfr_col]
        yvals = filtered[statistic.name]
        min_xval, max_xval = xvals.min(), xvals.max()
        plt.plot(xvals, yvals, '-o')
        _get_plot_bounds_setter(statistic)(
            min_xval, max_xval, yvals.min(), yvals.max())
        plt.xlabel(_capitalized(classifier.get_axis_label()))
        plt.ylabel(statistic.title)
        plt.suptitle(statistic.title + " vs " + _decapitalized(clsfr_col))
        _set_ticks_for_classifier_plot(
            np.arange(min_xval, max_xval + 1), classifier)
def plot_transcript_cumul_dist(
        fformat, tpms, base_name, tpm_label, classifier, ascending):
    """Plot the cumulative distribution of TPMs vs. a classifier threshold."""
    clsfr_col = classifier.get_column_name()
    direction = "asc" if ascending else "desc"
    with _saving_new_plot(
            fformat,
            [base_name, clsfr_col, tpm_label, direction, "distribution"]):
        xvals, yvals = t.get_distribution(tpms, classifier, ascending)
        plt.plot(xvals, yvals, '-o')
        _set_distribution_plot_bounds(xvals[0], xvals[-1])
        plt.xlabel(_capitalized(clsfr_col))
        plt.ylabel(_get_distribution_plot_ylabel(ascending))
        plt.suptitle(_capitalized(clsfr_col) + " threshold: " + tpm_label)
# Making plots over multiple sets of sequencing and quantification run options
def _get_plot_subdir(parent_dir, *sub_dir_name_elems):
    """Return (creating it if necessary) a plot sub-directory.

    The sub-directory name is the given elements joined with underscores,
    with any spaces also replaced by underscores.

    Args:
        parent_dir: Directory in which the sub-directory lives.
        *sub_dir_name_elems: String parts of the sub-directory name.

    Returns:
        The full path of the (now existing) sub-directory.
    """
    sub_dir_name = "_".join(sub_dir_name_elems).replace(' ', '_')
    sub_dir = os.path.join(parent_dir, sub_dir_name)
    try:
        # Attempt creation unconditionally instead of exists()-then-mkdir,
        # which races when several drawers create the same directory.
        os.mkdir(sub_dir)
    except OSError:
        # Already exists: fine. Anything else (e.g. missing parent): re-raise.
        if not os.path.isdir(sub_dir):
            raise
    return sub_dir
def stats_graphs_vs_num_opt_drawer(
        plot_dir, fformat, grp_option, num_option, stats, plot_file_prefix):
    """Return a drawer callback that plots each statistic against a
    numerical option, for one set of fixed option values."""
    num_opt_dir = _get_plot_subdir(plot_dir, "by", num_option.name)
    def draw(df, fixed_option_values):
        for statistic in stats:
            target_dir = _get_plot_subdir(num_opt_dir, statistic.name)
            basename = os.path.join(target_dir, plot_file_prefix)
            _plot_grouped_stat_vs_mqr_opt(
                fformat, df, basename, statistic, grp_option,
                num_option, fixed_option_values)
    return draw
def _draw_stats_graphs(
        fformat, stats_dir, sub_dir, data_frame, opt_vals_set,
        stats, plot_file_prefix):
    """Draw statistic graphs for every pairing of a grouping option with a
    numerical option, across all sets of fixed option values."""
    main_plot_dir = _get_plot_subdir(stats_dir, sub_dir)
    for option in opt_vals_set.get_non_degenerate_options():
        numerical_options = opt_vals_set.get_non_degenerate_options(
            numeric_only=True, opts_to_remove=option)
        if len(numerical_options) == 0:
            continue
        option_plot_dir = _get_plot_subdir(main_plot_dir, "per", option.name)
        for num_opt in numerical_options:
            drawer = stats_graphs_vs_num_opt_drawer(
                option_plot_dir, fformat, option, num_opt,
                stats, plot_file_prefix)
            opt_vals_set.exec_for_fixed_option_values_sets(
                drawer, [option, num_opt], data_frame)
def draw_quant_res_usage_graphs(
        fformat, stats_dir, usage_data, opt_vals_set):
    """Draw resource-usage graphs for the quantification runs."""
    usage_stats = ru.get_resource_usage_statistics()
    _draw_stats_graphs(
        fformat, stats_dir, RESOURCE_USAGE_DIR, usage_data, opt_vals_set,
        usage_stats, statistics.OVERALL_STATS_PREFIX)
def draw_prequant_res_usage_graphs(fformat, stats_dir, usage_data):
    """Draw time and memory usage graphs for the prequantification step."""
    plot_dir = _get_plot_subdir(stats_dir, RESOURCE_USAGE_DIR)
    basename = os.path.join(plot_dir, "prequant")
    _draw_prequant_time_usage_graph(fformat, basename, usage_data)
    _draw_prequant_mem_usage_graph(fformat, basename, usage_data)
def draw_overall_stats_graphs(
        fformat, stats_dir, overall_stats, opt_vals_set, tpm_level):
    """Draw graphs derived from statistics calculated for the whole set of
    TPMs - e.g. the Spearman correlation of calculated and real TPMs graphed
    as read-depth varies, for each quantification method, in the case of
    paired-end reads with errors and bias.
    """
    sub_dir = "overall_{l}_stats_graphs".format(l=tpm_level)
    _draw_stats_graphs(
        fformat, stats_dir, sub_dir, overall_stats, opt_vals_set,
        statistics.get_graphable_statistics(), "usage")
def grouped_stats_graph_drawer(
        plot_dir, fformat, grp_option, clsfr, num_tpms_filter):
    """Return a drawer callback that plots every graphable statistic against
    a classifier, restricted to rows passing num_tpms_filter."""
    option_stats_dir = _get_plot_subdir(plot_dir, "per", grp_option.name)
    def draw(df, fixed_option_values):
        for statistic in statistics.get_graphable_statistics():
            stat_dir = _get_plot_subdir(option_stats_dir, statistic.name)
            basename = os.path.join(stat_dir, "grouped")
            filtered = df[num_tpms_filter(df)]
            _plot_grouped_stat_vs_clsfr(
                fformat, filtered, basename,
                statistic, grp_option, clsfr, fixed_option_values)
    return draw
def draw_grouped_stats_graphs(fformat, stats_dir, opt_vals_set, threshold):
    """Draw graphs derived from statistics calculated on groups of TPMs that
    have been stratified into sets based on some classifier of transcripts -
    e.g. the median percentage error of calculated vs real TPMs graphed as
    the percentage of unique sequence per-transcript varies, for single and
    paired-end reads, in the case of reads with errors and bias, and a
    particular quantification method.
    """
    grouped_stats_dir = _get_plot_subdir(stats_dir, "grouped_stats_graphs")
    def num_tpms_filter(df):
        return df[statistics.TP_NUM_TPMS] > threshold
    grouping_classifiers = [
        c for c in classifiers.get_classifiers()
        if c.produces_grouped_stats()]
    for clsfr in grouping_classifiers:
        stats_file = statistics.get_stats_file(
            stats_dir, statistics.OVERALL_STATS_PREFIX, t.TRANSCRIPT, clsfr)
        clsfr_stats = pd.read_csv(stats_file)
        clsfr_dir = _get_plot_subdir(
            grouped_stats_dir, "grouped_by", clsfr.get_column_name())
        for option in opt_vals_set.get_non_degenerate_options():
            opt_vals_set.exec_for_fixed_option_values_sets(
                grouped_stats_graph_drawer(
                    clsfr_dir, fformat, option, clsfr, num_tpms_filter),
                option, clsfr_stats)
def distribution_stats_graph_drawer(
        plot_dir, fformat, grp_option, clsfr, asc):
    """Return a drawer callback plotting grouped cumulative distributions."""
    option_stats_dir = _get_plot_subdir(plot_dir, "per", grp_option.name)
    graph_file_basename = os.path.join(option_stats_dir, "distribution")
    def draw(df, fixed_option_values):
        _plot_grouped_cumulative_dist(
            fformat, df, graph_file_basename, grp_option,
            clsfr, asc, fixed_option_values)
    return draw
def draw_distribution_graphs(fformat, stats_dir, opt_vals_set):
# Draw distributions illustrating the percentage of TPMs above or below
# some threshold as that threshold changes. e.g. the percentage of TPMs
# whose absolute percentage error | |
# Repository: davidalvarezdlt/master_thesis
import math
import random
import cv2
import numpy as np
import skimage.metrics
import skimage.transform
import torch
import torch.nn.functional as F
class FlowsUtils:
    """Utilities class containing flow-related methods.

    Flows are stored as absolute sampling grids in the convention of
    ``torch.nn.functional.grid_sample``: values in [-1, 1] addressing the
    source image.
    """

    @staticmethod
    def flow_abs_to_relative(flow):
        """Given a normalized flow between [-1, 1], returns the relative
        flow between [-2, 2].

        Args:
            flow: Tensor of size ``(B,F,H,W,2)`` containing absolute flows.

        Returns:
            Tensor of size ``(B,F,H,W,2)`` containing relative flows.
        """
        b, f, h, w, _ = flow.size()
        # Identity sampling grid: maps every pixel to itself. Subtracting it
        # converts "where to sample" into "how far to move".
        flow_pos_identity = F.affine_grid(
            torch.tensor([[1.0, 0, 0], [0, 1.0, 0]]).unsqueeze(0),
            [1, 1, h, w],
            align_corners=True,
        ).view(1, 1, h, w, 2)
        return flow - flow_pos_identity.repeat(b, f, 1, 1, 1)

    @staticmethod
    def flow_relative_to_abs(flow_rel):
        """Given a relative flow between [-2, 2], returns the absolute flow
        between [-1, 1].

        Args:
            flow_rel: Tensor of size ``(B,F,H,W,2)`` containing relative flows.

        Returns:
            Tensor of size ``(B,F,H,W,2)`` containing absolute flows.
        """
        b, f, h, w, _ = flow_rel.size()
        flow_pos_identity = F.affine_grid(
            torch.tensor([[1.0, 0, 0], [0, 1.0, 0]]).unsqueeze(0),
            [1, 1, h, w],
            align_corners=True,
        ).view(1, 1, h, w, 2)
        return flow_rel + flow_pos_identity.repeat(b, f, 1, 1, 1)

    @staticmethod
    def crop_flow(flow, crop_size, crop_position):
        """Cuts an absolute flow at the position ``crop_position``.

        Args:
            flow: Tensor of size ``(B,F,H,W,2)`` containing absolute flows.
            crop_size: Tuple containing the size of the cropped flow.
            crop_position: Tuple containing the position of the cropped flow.

        Returns:
            Tensor of size ``(B,F,H',W',2)`` containing the cropped flow.
        """
        b, f, h, w, _ = flow.size()
        flow_rel = FlowsUtils.flow_abs_to_relative(flow)
        flow_h_from, flow_h_to = crop_position[0], \
            crop_position[0] + crop_size[0]
        flow_w_from, flow_w_to = crop_position[1], \
            crop_position[1] + crop_size[1]
        flow_rel_cut = \
            flow_rel[:, :, flow_h_from: flow_h_to, flow_w_from: flow_w_to]
        # Relative displacements are fractions of the image size, so they
        # must be rescaled when the spatial extent changes. The in-place
        # scaling mutates only flow_rel (a fresh tensor), not the input.
        flow_rel_cut[:, :, :, :, 0] *= w / crop_size[1]
        flow_rel_cut[:, :, :, :, 1] *= h / crop_size[0]
        return FlowsUtils.flow_relative_to_abs(flow_rel_cut)

    @staticmethod
    def align_set(x, v, flow):
        """Aligns the images ``x`` and ``v`` using the flow given in ``flow``.

        Args:
            x: Tensor of size ``(B,C,F,H,W)`` containing masked background
                frames.
            v: Tensor of size ``(B,1,F,H,W)`` containing visibility maps.
            flow: Tensor of size ``(B,F,H,W,2)`` containing the flows.

        Returns:
            Tuple of two positions containing:
                - The masked background frames ``x`` aligned using ``flow``.
                - The visibility maps ``v`` aligned using ``flow``.
        """
        b, c, f, h, w = x.size()
        # Fold the frame axis into the batch axis so that grid_sample warps
        # every frame in one call. The channel count is now taken from c
        # (it was a hard-coded 3), so non-RGB inputs are supported too.
        x_aligned = F.grid_sample(
            x.transpose(1, 2).reshape(-1, c, h, w),
            flow.reshape(-1, h, w, 2),
            align_corners=True,
        ).reshape(b, -1, c, h, w).transpose(1, 2)
        # Nearest-neighbour sampling keeps the visibility map values intact.
        v_aligned = F.grid_sample(
            v.transpose(1, 2).reshape(-1, 1, h, w),
            flow.reshape(-1, h, w, 2),
            align_corners=True,
            mode='nearest',
        ).reshape(b, -1, 1, h, w).transpose(1, 2)
        return x_aligned, v_aligned

    @staticmethod
    def resize_flow(flow, size, mode='nearest'):
        """Resizes a flow to a new resolution given by ``size``.

        Args:
            flow: Tensor of size ``(B,F,H,W,2)`` containing the flow in the
                original resolution.
            size: Tuple containing the new height and width of the flow.
            mode: Mode used to resize the flow. Same format as in
                ``torch.nn.functional.interpolate()``.

        Returns:
            Tensor of size ``(B,F,h_new,w_new,2)`` containing the flow in the
            new resolution.
        """
        b, f, h, w, _ = flow.size()
        # interpolate() expects channels-first (N,C,H,W): move the 2-dim
        # flow components into the channel axis and back afterwards.
        flow_resized = F.interpolate(
            flow.reshape(b * f, h, w, 2).permute(0, 3, 1, 2), size, mode=mode
        )
        return flow_resized.reshape(b, f, 2, size[0], size[1]) \
            .permute(0, 1, 3, 4, 2)
class LossesUtils:
    """Utilities class containing loss-related methods."""

    # Sobel kernels (horizontal / vertical), one copy per RGB channel so
    # they can be applied with a grouped convolution.
    _GRAD_H = torch.tensor(
        [[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype=torch.float32
    ).unsqueeze(0).unsqueeze(0).repeat((3, 1, 1, 1))
    _GRAD_V = torch.tensor(
        [[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype=torch.float32
    ).unsqueeze(0).unsqueeze(0).repeat((3, 1, 1, 1))

    @staticmethod
    def masked_l1(y_hat, y, mask, batch_mask=None, reduction='mean', weight=1):
        """Computes the L1 loss of the image ``y_hat`` with respect to the
        ground truth ``y`` on those positions which are not masked by ``mask``.

        Args:
            y_hat: Tensor of size ``(B,C,H,W,*)`` containing the estimated
                image.
            y: Tensor of size ``(B,C,H,W,*)`` containing the ground-truth
                image.
            mask: Tensor of size ``(B,1,H,W,*)`` containing the mask.
            batch_mask: Tensor of the same size as ``--batch_size`` indicating
                if that element should be taken into account to compute the
                loss.
            reduction: Reduction mode applied to the loss.
            weight: Scaling factor applied to the loss.

        Returns:
            Masked L1 loss between ``y_hat`` and ``y``.
        """
        if batch_mask is not None:
            # With every batch element excluded there is nothing to compare.
            if not any(batch_mask):
                return torch.zeros(1).to(y_hat.device)
            y_hat = y_hat[batch_mask]
            y = y[batch_mask]
            mask = mask[batch_mask]
        loss = F.l1_loss(y_hat * mask, y * mask, reduction=reduction)
        # For 'sum', normalize by the number of unmasked positions (epsilon
        # guards an all-zero mask); other reductions are returned as-is.
        denominator = torch.sum(mask) + 1e-9 if reduction == 'sum' else 1
        return weight * loss / denominator

    @staticmethod
    def perceptual(y_hat, y, model_vgg, weight=1):
        """Computes the perceptual loss of the image ``y_hat`` with respect
        to the ground truth ``y``.

        Args:
            y_hat: Tensor of size ``(B,C,H,W)`` containing the estimated image.
            y: Tensor of size ``(B,C,H,W)`` containing the ground-truth image.
            model_vgg: Feature extractor returning a list of feature maps.
            weight: Scaling factor applied to the loss.

        Returns:
            Tuple containing the perceptual loss between ``y_hat`` and ``y``
            and the extracted feature maps of both images.
        """
        features_hat = model_vgg(y_hat.contiguous())
        features_gt = model_vgg(y.contiguous())
        loss_perceptual = sum(
            F.l1_loss(f_hat, f_gt)
            for f_hat, f_gt in zip(features_hat, features_gt)
        )
        return (
            loss_perceptual * weight / len(features_hat),
            features_hat,
            features_gt,
        )

    @staticmethod
    def grad(y_hat, y, reduction, weight=1):
        """Computes the gradient loss of the image ``y_hat`` with respect
        to the ground truth ``y``.

        Args:
            y_hat: Tensor of size ``(B,C,H,W)`` containing the estimated image.
            y: Tensor of size ``(B,C,H,W)`` containing the ground-truth image.
            reduction: Reduction mode applied to the loss.
            weight: Scaling factor applied to the loss.

        Returns:
            Gradient loss between ``y_hat`` and ``y``.
        """
        # Lazily move the Sobel kernels to the inputs' device; the class
        # attributes are rebound so the transfer happens only once.
        if y_hat.device != LossesUtils._GRAD_H.device:
            LossesUtils._GRAD_H = LossesUtils._GRAD_H.to(y_hat.device)
            LossesUtils._GRAD_V = LossesUtils._GRAD_V.to(y_hat.device)

        def _sobel_grads(image):
            # Horizontal and vertical responses, stacked on the channel axis.
            return torch.cat((
                F.conv2d(image, padding=1, weight=LossesUtils._GRAD_H,
                         groups=3),
                F.conv2d(image, padding=1, weight=LossesUtils._GRAD_V,
                         groups=3),
            ), dim=1)

        grads_hat = _sobel_grads(y_hat)
        grads_gt = _sobel_grads(y)
        full_mask = torch.ones_like(grads_hat).to(y_hat.device)
        return LossesUtils.masked_l1(
            grads_hat, grads_gt, full_mask, batch_mask=None,
            reduction=reduction, weight=weight,
        )
class MovementsUtils:
"""Utilities class containing movement-related methods.
Args:
max_displacement: Number indicating the maximum displacement applied to
an image.
max_scaling: Number indicating the maximum scaling applied to an image.
max_rotation: Number indicating the maximum rotation applied to an
image.
"""
    def __init__(self, max_displacement, max_scaling, max_rotation):
        # Bounds for the random affine transformations produced by
        # random_affine(); see the class docstring for their semantics.
        self.max_displacement = max_displacement
        self.max_scaling = max_scaling
        self.max_rotation = max_rotation
def random_affine(self):
"""Returns a random affine transformation matrix.
Returns:
Tensor of size ``(3,3)`` containing a randomly-generated affine
transformation matrix.
"""
tx, ty = np.random.randint(
low=-self.max_displacement, high=self.max_displacement, size=2
) if self.max_displacement > 0 else (0, 0)
sx, sy = np.random.uniform(
low=1 - self.max_scaling, high=1 + self.max_scaling, size=2
)
rot = np.random.uniform(low=-self.max_rotation, high=self.max_rotation)
affine_matrix = skimage.transform.AffineTransform(
translation=(tx, ty), scale=(sx, sy), rotation=rot
).params
return torch.from_numpy(affine_matrix).float()
    def simulate_movement(self, x, n, affine_matrices=None):
        """Simulates a moving sequence of ``n`` frames using ``x`` as
        starting point.

        Args:
            x: Tensor of size ``(C,H,W)`` containing the first frame.
            n: Number of frames of the sequence.
            affine_matrices: Tensor of size ``(n,3,3)`` containing the
                transformations to apply, or ``None`` to generate random ones.

        Returns:
            Tuple containing:
                - Tensor of size ``(C,F,H,W)`` with the moving sequence.
                - Tensor of size ``(F,H,W,2)`` with the inverse flows.
                - The affine matrices that were applied.
        """
        c, h, w = x.size()
        if affine_matrices is None:
            # Random per-step transformations with the identity inserted at
            # the centre, so the middle frame is the untransformed input.
            affine_matrices = [self.random_affine() for _ in range(n - 1)]
            affine_matrices = (
                affine_matrices[: n // 2]
                + [MovementsUtils.identity_affine()]
                + affine_matrices[n // 2:]
            )
        affine_matrices_inv = [
            MovementsUtils.affine_inverse(affine_mat) for affine_mat in
            affine_matrices
        ]
        affine_matrices_s, affine_matrices_inv = torch.stack(
            affine_matrices
        ), torch.stack(affine_matrices_inv)
        # Compose the per-step matrices into transformations relative to
        # the centre frame t = n // 2 (see stack_transformations).
        affine_matrices_s = MovementsUtils.stack_transformations(
            affine_matrices_s, t=n // 2
        )
        affine_matrices_inv = MovementsUtils.stack_transformations(
            affine_matrices_inv, t=n // 2
        )
        # affine2theta is defined elsewhere in this class; presumably it
        # converts a 3x3 pixel-space matrix into the 2x3 normalized theta
        # expected by F.affine_grid - TODO confirm.
        affine_matrices_theta = torch.stack(
            [MovementsUtils.affine2theta(ra, h, w) for ra in affine_matrices_s]
        )
        affine_matrices_inv_theta = torch.stack(
            [MovementsUtils.affine2theta(ra, h, w) for ra in
             affine_matrices_inv]
        )
        flow = F.affine_grid(
            affine_matrices_theta, [n, c, h, w], align_corners=True
        )
        flow_inv = F.affine_grid(
            affine_matrices_inv_theta, [n, c, h, w], align_corners=True
        )
        # Warp n copies of the input frame with the per-frame flows.
        y = F.grid_sample(
            x.unsqueeze(0).repeat(n, 1, 1, 1), flow, align_corners=True
        )
        return y.permute(1, 0, 2, 3), flow_inv, affine_matrices
@staticmethod
def identity_affine():
"""Returns the identity transformation matrix.
Returns:
Tensor of size ``(3,3)`` containing the identity transformation
matrix.
"""
affine_matrix = np.linalg.inv(skimage.transform.AffineTransform(
translation=(0, 0), scale=(1, 1), rotation=0
).params)
return torch.from_numpy(affine_matrix).float()
@staticmethod
def affine_inverse(affine):
"""Returns the inverse of a transformation matrix.
Args:
affine: Tensor of size ``(3,3)`` containing a transformation
matrix.
Returns:
Tensor of size ``(3,3)`` containing a the inverse transformation
matrix.
"""
return torch.from_numpy(np.linalg.inv(affine))
@staticmethod
def stack_transformations(affine_matrices, t):
"""Stacks a set of single transformations.
Given a set of ``F`` independent affine matrices and the target frame
at position ``t``, it computes the transformation required to move from
position ``t`` to ``[..., t-1, t-2, t+1, t+2, ...]``.
Args:
affine_matrices: Tensor of size ``(F,3,3)`` containing the
transformations to stack.
t: Index of the target frame.
Returns:
Tensor of size ``(F,3,3)`` | |
# GitHub stars: 1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from dices import *
#-------------------------------------------------------------------------
# Squads
metadict_squads = {}
#----
# Test entries
metadict_squads['характеристики правителей'] = {
# Too lazy to do these separately, so we roll them like this:
# 1 hero of 5+ lvl per 16 heroes of 1 lvl
# 1 hero of 7+ lvl per 62 heroes of 1 lvl
# 1 hero of 9+ lvl per 250 heroes of 1 lvl
# Der-Keto
# Cleric 1 lvl (city maatcarian-acolyte) sum:104 STR:16 DEX:18 CON:17 INT:17 WIS:19 CHA:17
#'Cleric 1 lvl (city maatcarian-acolyte)':250,
# Tinv:
# Wizard 2 lvl (city cat-weaver) sum:101 STR:14 DEX:19 CON:17 INT:19 WIS:16 CHA:16
#'Wizard 2 lvl (city cat-weaver)':250,
# Ri:
# Rogue 1 lvl (city cat-nyamo) sum:97 STR:12 DEX:20 CON:16 INT:17 WIS:15 CHA:17
#'Rogue 1 lvl (city cat-nyamo)':250,
# Akhen:
# Druid 1 lvl (otherworld terian-forester) sum:101 STR:12 DEX:19 CON:18 INT:18 WIS:19 CHA:15
#'Druid 1 lvl (otherworld terian-forester)':250,
# Kumar:
# Monk 1 lvl (city windsong-apprentice) sum:104 STR:17 DEX:19 CON:17 INT:16 WIS:18 CHA:17
#'Monk 1 lvl (city windsong-apprentice)':250,
# Karagos:
# Barbarian 1 lvl (thracian slayer-dogface) sum:108 STR:19 DEX:18 CON:19 INT:18 WIS:16 CHA:18
#'Barbarian 1 lvl (thracian slayer-dogface)':250,
# Nakiyami:
# Ranger 1 lvl (otherworld wanderer-scout) sum:101 STR:15 DEX:18 CON:15 INT:18 WIS:17 CHA:18
#'Ranger 1 lvl (otherworld wanderer-scout)':250,
# Crassius:
# Bard 1 lvl (otherworld singer-follower) sum:95 STR:15 DEX:18 CON:10 INT:16 WIS:17 CHA:19
#'Bard 1 lvl (otherworld singer-follower)':250,
# Runa:
# Paladin 1 lvl (city sentry-sefet) sum:100 STR:18 DEX:16 CON:17 INT:17 WIS:15 CHA:17
#'Paladin 1 lvl (city sentry-sefet)':250,
# Chara:
# Warlock 1 lvl (otherworld seeker-follower) sum:97 STR:15 DEX:18 CON:16 INT:15 WIS:15 CHA:18
# Ashera:
# Empyrean (CR 23) sum:151 STR:30 DEX:21 CON:30 INT:21 WIS:22 CHA:27
# Menon:
# Wizard 1 lvl (otherworld mage-disciple) sum:100 STR:16 DEX:17 CON:17 INT:19 WIS:17 CHA:14
#'Wizard 1 lvl (otherworld mage-disciple)':1000,
# Kiros:
# Fighter 1 lvl (legionary sentinel-battler) sum:103 STR:19 DEX:17 CON:18 INT:16 WIS:16 CHA:17
#'Fighter 1 lvl (legionary sentinel-battler)':2000,
# Black Flags of the East Indies:
# 1 hero of 5-6 lvl per 16 heroes of 1-2 lvl
# 1 hero of 7-8 lvl per 62 heroes of 1-2 lvl
# 1 hero of 9+ lvl per 250 heroes of 1 lvl
# Salif:
# Wizard 1 lvl (otherworld mage-disciple) sum:94 STR:13 DEX:17 CON:16 INT:19 WIS:14 CHA:15
#'Wizard 1 lvl (otherworld mage-disciple)':250,
# Namulis:
# Monk 1 lvl (city windsong-apprentice) sum:97 STR:16 DEX:19 CON:17 INT:14 WIS:19 CHA:12
#'Monk 1 lvl (city windsong-apprentice)':250,
# Zuahir:
# Monk 1 lvl (city windsong-apprentice) sum:97 STR:16 DEX:17 CON:17 INT:14 WIS:17 CHA:16
#'Monk 1 lvl (city windsong-apprentice)':250,
# <NAME>:
# Monk 1 lvl (city windsong-apprentice) sum:96 STR:17 DEX:19 CON:18 INT:12 WIS:18 CHA:12
#'Monk 1 lvl (city windsong-apprentice)':250,
# Gen<NAME>:
# Fighter 1 lvl (legionary slayer-rookie) sum:100 STR:19 DEX:18 CON:19 INT:15 WIS:12 CHA:17
#'Fighter 1 lvl (legionary slayer-rookie)':250,
}
metadict_squads['характеристики героев'] = {
# Too lazy to do these separately, so we roll them like this:
# 1 hero of 5+ lvl per 16 heroes of 1 lvl
# 1 hero of 7+ lvl per 62 heroes of 1 lvl
# 1 hero of 9+ lvl per 250 heroes of 1 lvl
# <NAME>:
# Bard 1 lvl (otherworld singer-follower) sum:85 STR:10 DEX:16 CON:15 INT:14 WIS:14 CHA:16
#'Bard 1 lvl (otherworld singer-follower)':32,
}
#----
# Test entries (siege equipment)
metadict_squads['10 onagers (siege)'] = {
# Onagers, catapults
'Warrior 2 lvl (siege engineer-apprentice) (onager-siege)':10,
'Warrior 4 lvl (siege engineer-master)':1,
}
metadict_squads['10 onagers (fire)'] = {
'Warrior 2 lvl (siege engineer-apprentice) (onager-fire)':10,
'Warrior 4 lvl (siege engineer-master)':1,
}
metadict_squads['Company-regular (осадные инженеры)'] = {
# Armed with two-handed picks, Greataxe stats
'Warrior 1 lvl (legionary infantry-siege)':80 + dice_throw('1d12'),
'Warrior 2 lvl (legionary infantry-siege-veteran)':10,
'Warrior 3 lvl (legionary infantry-siege-corporal)':3,
}
#----
# Test entries (decoys)
metadict_squads['Company-dummy (куклы)'] = {
# Just stuffed dummies on the ships.
'Dummy (AC 17)':100,
}
#----
# Test entries (heroes from Monsters_Manual)
metadict_squads['Single-hero (druid)'] = {
'Druid (CR 2)':1,
}
metadict_squads['Single-hero (mage)'] = {
'Mage (CR 6)':1,
}
metadict_squads['Single-hero (archmage)'] = {
'Archmage (CR 12)':1,
}
metadict_squads['Single-hero (dragon)'] = {
# Dragon
'Red Dragon, Young (CR 10)':1,
}
metadict_squads['Single-hero (storm giant)'] = {
'Storm Giant (CR 13)':1,
}
metadict_squads['Single-hero (empyrean)'] = {
'Empyrean (CR 23)':1,
}
# Regular fighters for test duels
#----
metadict_squads['Single-regular (infantry-lieutenant)'] = {
#'Warrior 5 lvl (legionary infantry-lieutenant)':1,
#'Warrior 5 lvl (thracian infantry-lieutenant)':1,
'Warrior 5 lvl (achean hoplite-lieutenant)':1,
#'Warrior 5 lvl (grenadier line-infantry-lieutenant)':1,
}
#----
# Test entries (squads from Monsters_Manual)
metadict_squads['Squad-animal-herd (horseclaws)'] = {
'Horseclaw':dice_throw('2d6'),
}
metadict_squads['Squad-elite (veterans)'] = {
'Veteran (CR 3)':10,
}
metadict_squads['Squad-elite (wights)'] = {
'Wight (CR 3)':10,
}
metadict_squads['Squad-elite (hill giants)'] = {
# Hill giants
'Hill Giant (CR 5)':6,
}
metadict_squads['Squad-elite (stone giants)'] = {
# Stone giants
'Stone Giant (CR 7)':6,
}
metadict_squads['Squad-elite (frost giants)'] = {
# Frost giants
'Frost Giant (CR 8)':6,
}
metadict_squads['Squad-elite (fire giants)'] = {
# Fire giants
'Fire Giant (CR 9)':6,
}
metadict_squads['Squad-elite (storm giants)'] = {
# Storm giants
'Storm Giant (CR 13)':6,
}
#----
# Test entries (companies from Monsters_Manual)
metadict_squads['Company-regular (sentinels)'] = {
# For comparative tests of squad strength.
# Considered fearless, fearless_AI
'Sentinel (CR 1/8)':100,
}
metadict_squads['Company-regular (tribe warriors)'] = {
'Tribe Warrior (CR 1/8)':100,
}
metadict_squads['Company-militia (zombies)'] = {
'Zombie (CR 1/4)':100,
}
metadict_squads['Company-militia (bandits)'] = {
'Bandit (CR 1/8)':100,
}
metadict_squads['Company-veteran (thugs)'] = {
'Thug (CR 1/2)':100,
}
metadict_squads['Company-militia (goblins)'] = {
'Goblin (CR 1/4)':80 + dice_throw('3d12'),
'Goblin Boss (CR 1)':3,
}
metadict_squads['Company-veteran (hobgoblins)'] = {
'Hobgoblin (CR 1/2)':80 + dice_throw('3d12'),
'Hobgoblin-captain (CR 3)':1,
}
metadict_squads['Company-veteran (orks)'] = {
'Ork (CR 1/2)':80 + dice_throw('3d12'),
'Orog (CR 2)':3,
'Ork war chief (CR 4)':1,
}
metadict_squads['Company-elite (bugbears)'] = {
'Bugbear (CR 1)':100,
}
#----
# Test entries (adventuring parties)
metadict_squads['Band-hero (party 1 lvl)'] = {
# TODO: they should be fearless.
# Test party for CR evaluation:
# Fighter, cleric, thief and mage:
# Fighter -- an archer with a fighting style
# Cleric -- "Inspiring Leader" and a commander, casts "Bless"
# Rogue -- with a knife and a crossbow
# Wizard -- with "Magic Missile"
'Cleric 1 lvl (war cleric)':1,
'Rogue 1 lvl (city cat-nyamo)':1,
'Wizard 1 lvl (otherworld mage-disciple)':1,
'Barbarian 1 lvl (thracian slayer-dogface)':1,
#'Fighter 1 lvl (ArbitraryNickname) (снайпер)':1,
}
#----
# Test entries (standard squads)
metadict_squads['Company-test (standard) (shortbow)'] = {
'Warrior 1 lvl (standard) (Shortbow)':100,
}
metadict_squads['Company-test (standard) (shortbow) (archery)'] = {
'Warrior 1 lvl (standard) (Shortbow) (archery)':100,
}
metadict_squads['Company-test (standard) (greataxes)'] = {
# Tests of standard squads for Vened.
'Warrior 1 lvl (standard) (Greataxe)':100,
}
metadict_squads['Company-test (standard) (disadvantage) (greataxes)'] = {
'Warrior 1 lvl (standard) (disadvantage) (Greataxe)':100,
}
metadict_squads['Company-test (standard) (bless + disadvantage) (greataxes)'] = {
'Warrior 1 lvl (standard) (bless + disadvantage) (Greataxe)':100,
}
metadict_squads['Company-test (standard) (battleaxes)'] = {
'Warrior 1 lvl (standard) (Battleaxe + Shield)':100,
}
metadict_squads['Company-test (standard) (Feat_Heavy_Armor_Master)'] = {
'Warrior 4 lvl (standard) (Feat_Heavy_Armor_Master)':100,
}
metadict_squads['Company-test (standard) (Feat_Polearm_Master)'] = {
'Warrior 4 lvl (standard) (Feat_Polearm_Master)':100,
}
metadict_squads['Company-test (standard) (Feat_Great_Weapon_Master)'] = {
'Warrior 4 lvl (standard) (Feat_Great_Weapon_Master)':100,
}
metadict_squads['Company-test (standard) (Feat_Sentinel)'] = {
'Warrior 4 lvl (standard) (Feat_Sentinel)':100,
}
metadict_squads['Company-test (standard) (Feat_Martial_Adept)'] = {
'Warrior 4 lvl (standard) (Feat_Martial_Adept)':100,
}
metadict_squads['Company-test (standard) (Feat_Magic_Initiate)'] = {
'Warrior 4 lvl (standard) (Feat_Magic_Initiate)':100,
}
#----
# Test squads (Black Flags of the East Indies)
metadict_squads['Company-veteran (pirate-infantry-grenadiers)'] = {
# Boarding party, deck crew.
'Warrior 2 lvl (grenadier line-infantry-veteran) (assault)':65,
'Warrior 4 lvl (grenadier line-infantry-sergeant) (assault)':3,
'Warrior 5 lvl (grenadier line-infantry-lieutenant) (assault)':3,
'Commoner 1 lvl (recruit)':100,
}
metadict_squads['Company-veteran (pirate-infantry-fusiliers)'] = {
# Artillery crew.
'Warrior 2 lvl (fusilier line-infantry-veteran)':65,
'Warrior 4 lvl (fusilier line-infantry-sergeant)':3,
'Warrior 5 lvl (fusilier line-infantry-lieutenant)':3,
'Commoner 1 lvl (recruit)':100,
}
#----
# Test squads (Black Flags of the East Indies)
metadict_squads['Band-regular (black-flags) (healers)'] = {
'Warrior 4 lvl (healer-sergeant)':3,
'Warrior 5 lvl (healer-lieutenant)':1,
'Commoner 1 lvl (recruit)':6,
}
metadict_squads['Band-regular (black-flags) (абордажники Эвери) (2)'] = {
'Warrior 4 lvl (абордажник Эвери)':2,
'Commoner 1 lvl (recruit)':6,
}
metadict_squads['Band-regular (black-flags) (абордажники Эвери) (4)'] = {
'Warrior 4 lvl (абордажник Эвери)':3,
'Warrior 5 lvl (лейтенант Эвери)':1,
'Commoner 1 lvl (recruit)':10,
}
metadict_squads['Band-regular (black-flags) (абордажники Эвери) (8)'] = {
'Warrior 4 lvl (абордажник Эвери)':6,
'Warrior 5 lvl (лейтенант Эвери)':2,
'Commoner 1 lvl (recruit)':20,
}
metadict_squads['Band-regular (black-flags) (assault, 8)'] = {
# Testing hero retinues.
'Warrior 3 lvl (grenadier line-infantry-corporal) (assault)':6,
'Warrior 4 lvl (grenadier line-infantry-sergeant) (assault)':1,
'Warrior 5 lvl (grenadier line-infantry-lieutenant) (assault)':1,
'Commoner 1 lvl (recruit)':8 * 3,
}
metadict_squads['Band-regular (black-flags) (assault, 10)'] = {
'Warrior 3 lvl (grenadier line-infantry-corporal) (assault)':9,
'Warrior 5 lvl (grenadier line-infantry-lieutenant) (assault)':1,
'Commoner 1 lvl (recruit)':10 * 3,
}
metadict_squads['Band-regular (black-flags) (snipers, 4)'] = {
'Warrior 4 lvl (fusilier line-infantry-sergeant) (sniper)':3,
'Warrior 5 lvl (fusilier line-infantry-lieutenant) (sniper)':1,
'Commoner 1 lvl (recruit)':4 | |
typing.Union[base.InputFile, base.String],
disable_notification: typing.Union[base.Boolean, None] = None,
reply_to_message_id: typing.Union[base.Integer, None] = None,
reply_markup: typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup,
types.ReplyKeyboardRemove,
types.ForceReply, None] = None) -> types.Message:
"""
Use this method to send .webp stickers.
Source: https://core.telegram.org/bots/api#sendsticker
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param sticker: Sticker to send.
:type sticker: :obj:`typing.Union[base.InputFile, base.String]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_to_message_id: If the message is a reply, ID of the original message
:type reply_to_message_id: :obj:`typing.Union[base.Integer, None]`
:param reply_markup: Additional interface options.
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
:return: On success, the sent Message is returned.
:rtype: :obj:`types.Message`
"""
reply_markup = prepare_arg(reply_markup)
payload = generate_payload(**locals(), exclude=['sticker'])
result = await self.send_file('sticker', api.Methods.SEND_STICKER, sticker, payload)
return types.Message(**result)
async def get_sticker_set(self, name: base.String) -> types.StickerSet:
"""
Use this method to get a sticker set.
Source: https://core.telegram.org/bots/api#getstickerset
:param name: Name of the sticker set
:type name: :obj:`base.String`
:return: On success, a StickerSet object is returned.
:rtype: :obj:`types.StickerSet`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.GET_STICKER_SET, payload)
return types.StickerSet(**result)
async def upload_sticker_file(self, user_id: base.Integer, png_sticker: base.InputFile) -> types.File:
"""
Use this method to upload a .png file with a sticker for later use in createNewStickerSet
and addStickerToSet methods (can be used multiple times).
Source: https://core.telegram.org/bots/api#uploadstickerfile
:param user_id: User identifier of sticker file owner
:type user_id: :obj:`base.Integer`
:param png_sticker: Png image with the sticker, must be up to 512 kilobytes in size,
dimensions must not exceed 512px, and either width or height must be exactly 512px.
:type png_sticker: :obj:`base.InputFile`
:return: Returns the uploaded File on success.
:rtype: :obj:`types.File`
"""
payload = generate_payload(**locals(), exclude=['png_sticker'])
result = await self.send_file('png_sticker', api.Methods.UPLOAD_STICKER_FILE, png_sticker, payload)
return types.File(**result)
async def create_new_sticker_set(self, user_id: base.Integer, name: base.String, title: base.String,
png_sticker: typing.Union[base.InputFile, base.String], emojis: base.String,
contains_masks: typing.Union[base.Boolean, None] = None,
mask_position: typing.Union[types.MaskPosition, None] = None) -> base.Boolean:
"""
Use this method to create new sticker set owned by a user. The bot will be able to edit the created sticker set.
Source: https://core.telegram.org/bots/api#createnewstickerset
:param user_id: User identifier of created sticker set owner
:type user_id: :obj:`base.Integer`
:param name: Short name of sticker set, to be used in t.me/addstickers/ URLs (e.g., animals).
:type name: :obj:`base.String`
:param title: Sticker set title, 1-64 characters
:type title: :obj:`base.String`
:param png_sticker: Png image with the sticker, must be up to 512 kilobytes in size,
dimensions must not exceed 512px, and either width or height must be exactly 512px.
:type png_sticker: :obj:`typing.Union[base.InputFile, base.String]`
:param emojis: One or more emoji corresponding to the sticker
:type emojis: :obj:`base.String`
:param contains_masks: Pass True, if a set of mask stickers should be created
:type contains_masks: :obj:`typing.Union[base.Boolean, None]`
:param mask_position: A JSON-serialized object for position where the mask should be placed on faces
:type mask_position: :obj:`typing.Union[types.MaskPosition, None]`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
mask_position = prepare_arg(mask_position)
payload = generate_payload(**locals(), exclude=['png_sticker'])
result = await self.send_file('png_sticker', api.Methods.CREATE_NEW_STICKER_SET, png_sticker, payload)
return result
async def add_sticker_to_set(self, user_id: base.Integer, name: base.String,
png_sticker: typing.Union[base.InputFile, base.String], emojis: base.String,
mask_position: typing.Union[types.MaskPosition, None] = None) -> base.Boolean:
"""
Use this method to add a new sticker to a set created by the bot.
Source: https://core.telegram.org/bots/api#addstickertoset
:param user_id: User identifier of sticker set owner
:type user_id: :obj:`base.Integer`
:param name: Sticker set name
:type name: :obj:`base.String`
:param png_sticker: Png image with the sticker, must be up to 512 kilobytes in size,
dimensions must not exceed 512px, and either width or height must be exactly 512px.
:type png_sticker: :obj:`typing.Union[base.InputFile, base.String]`
:param emojis: One or more emoji corresponding to the sticker
:type emojis: :obj:`base.String`
:param mask_position: A JSON-serialized object for position where the mask should be placed on faces
:type mask_position: :obj:`typing.Union[types.MaskPosition, None]`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
mask_position = prepare_arg(mask_position)
payload = generate_payload(**locals(), exclude=['png_sticker'])
result = await self.send_file('png_sticker', api.Methods.ADD_STICKER_TO_SET, png_sticker, payload)
return result
async def set_sticker_position_in_set(self, sticker: base.String, position: base.Integer) -> base.Boolean:
"""
Use this method to move a sticker in a set created by the bot to a specific position.
Source: https://core.telegram.org/bots/api#setstickerpositioninset
:param sticker: File identifier of the sticker
:type sticker: :obj:`base.String`
:param position: New sticker position in the set, zero-based
:type position: :obj:`base.Integer`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.SET_STICKER_POSITION_IN_SET, payload)
return result
async def delete_sticker_from_set(self, sticker: base.String) -> base.Boolean:
"""
Use this method to delete a sticker from a set created by the bot.
The following methods and objects allow your bot to work in inline mode.
Source: https://core.telegram.org/bots/api#deletestickerfromset
:param sticker: File identifier of the sticker
:type sticker: :obj:`base.String`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.DELETE_STICKER_FROM_SET, payload)
return result
async def answer_inline_query(self, inline_query_id: base.String,
results: typing.List[types.InlineQueryResult],
cache_time: typing.Union[base.Integer, None] = None,
is_personal: typing.Union[base.Boolean, None] = None,
next_offset: typing.Union[base.String, None] = None,
switch_pm_text: typing.Union[base.String, None] = None,
switch_pm_parameter: typing.Union[base.String, None] = None) -> base.Boolean:
"""
Use this method to send answers to an inline query.
No more than 50 results per query are allowed.
Source: https://core.telegram.org/bots/api#answerinlinequery
:param inline_query_id: Unique identifier for the answered query
:type inline_query_id: :obj:`base.String`
:param results: A JSON-serialized array of results for the inline query
:type results: :obj:`typing.List[types.InlineQueryResult]`
:param cache_time: The maximum amount of time in seconds that the result of the
inline query may be cached on the server. Defaults to 300.
:type cache_time: :obj:`typing.Union[base.Integer, None]`
:param is_personal: Pass True, if results may be cached on the server side only
for the user that sent the query. By default, results may be returned to any user who sends the same query
:type is_personal: :obj:`typing.Union[base.Boolean, None]`
:param next_offset: Pass the offset that a client should send in the
next query with the same text to receive more results.
Pass an empty string if there are no more results or if you don‘t support pagination.
Offset length can’t exceed 64 bytes.
:type next_offset: :obj:`typing.Union[base.String, None]`
:param switch_pm_text: If passed, clients will display a button with specified text that
switches the user to a private chat with the bot and sends the bot a start message
with the parameter switch_pm_parameter
:type switch_pm_text: :obj:`typing.Union[base.String, None]`
:param switch_pm_parameter: Deep-linking parameter for the /start message sent to the bot when
user presses the switch button. 1-64 characters, only A-Z, a-z, 0-9, _ and - are allowed.
:type switch_pm_parameter: :obj:`typing.Union[base.String, None]`
:return: On success, True is returned.
:rtype: :obj:`base.Boolean`
"""
results = prepare_arg(results)
payload = generate_payload(**locals())
result = await self.request(api.Methods.ANSWER_INLINE_QUERY, payload)
return result
# === Payments ===
# https://core.telegram.org/bots/api#payments
async def send_invoice(self, chat_id: base.Integer, title: base.String,
description: base.String, payload: base.String,
provider_token: base.String, start_parameter: base.String,
currency: base.String, prices: typing.List[types.LabeledPrice],
provider_data: typing.Union[typing.Dict, None] = None,
photo_url: typing.Union[base.String, None] = None,
photo_size: typing.Union[base.Integer, None] = None,
photo_width: typing.Union[base.Integer, None] = None,
photo_height: typing.Union[base.Integer, None] = None,
need_name: typing.Union[base.Boolean, None] = None,
need_phone_number: typing.Union[base.Boolean, None] = None,
need_email: typing.Union[base.Boolean, None] = None,
need_shipping_address: typing.Union[base.Boolean, None] = None,
is_flexible: typing.Union[base.Boolean, None] = None,
disable_notification: typing.Union[base.Boolean, None] = None,
reply_to_message_id: typing.Union[base.Integer, None] = None,
reply_markup: typing.Union[types.InlineKeyboardMarkup, None] = None) -> types.Message:
"""
Use this method to send invoices.
Source: https://core.telegram.org/bots/api#sendinvoice
:param chat_id: Unique identifier for the target private chat
:type chat_id: :obj:`base.Integer`
:param title: Product name, 1-32 characters
:type title: :obj:`base.String`
:param description: Product description, 1-255 characters
:type description: :obj:`base.String`
:param payload: Bot-defined invoice payload, 1-128 bytes.
This will not be displayed to the user, use for your internal processes.
:type payload: :obj:`base.String`
:param provider_token: Payments provider token, obtained via Botfather
:type provider_token: :obj:`base.String`
:param start_parameter: Unique deep-linking parameter that can be used to generate this
invoice when used as a start parameter
:type start_parameter: :obj:`base.String`
:param currency: Three-letter ISO 4217 currency code, see more on currencies
:type currency: :obj:`base.String`
:param prices: Price breakdown, a list of components
(e.g. product price, tax, discount, delivery cost, delivery tax, bonus, etc.)
:type prices: :obj:`typing.List[types.LabeledPrice]`
:param provider_data: JSON-encoded data about | |
Culture-Glendale Heights","Tricoci University of Beauty Culture-Glendale Heights"),
("Tricoci University of Beauty Culture-Highland","Tricoci University of Beauty Culture-Highland"),
("Tricoci University of Beauty Culture-Indianapolis","Tricoci University of Beauty Culture-Indianapolis"),
("Tricoci University of Beauty Culture-Libertyville","Tricoci University of Beauty Culture-Libertyville"),
("Tricoci University of Beauty Culture-Peoria","Tricoci University of Beauty Culture-Peoria"),
("Tricoci University of Beauty Culture-Rockford","Tricoci University of Beauty Culture-Rockford"),
("Trident Technical College","Trident Technical College"),
("Trident University International","Trident University International"),
("Trine University","Trine University"),
("Trine University-Regional/Non-Traditional Campuses","Trine University-Regional/Non-Traditional Campuses"),
("Trinidad State Junior College","Trinidad State Junior College"),
("Trinity Baptist College","Trinity Baptist College"),
("Trinity Bible College","Trinity Bible College"),
("Trinity Christian College","Trinity Christian College"),
("Trinity College of Florida","Trinity College of Florida"),
("Trinity College of Nursing & Health Sciences","Trinity College of Nursing & Health Sciences"),
("Trinity College of Puerto Rico","Trinity College of Puerto Rico"),
("Trinity College","Trinity College"),
("Trinity Episcopal School for Ministry","Trinity Episcopal School for Ministry"),
("Trinity Health System School of Nursing","Trinity Health System School of Nursing"),
("Trinity International University-Florida","Trinity International University-Florida"),
("Trinity International University-Illinois","Trinity International University-Illinois"),
("Trinity Law School","Trinity Law School"),
("Trinity Lutheran College","Trinity Lutheran College"),
("Trinity Lutheran Seminary","Trinity Lutheran Seminary"),
("Trinity University","Trinity University"),
("Trinity Valley Community College","Trinity Valley Community College"),
("Trinity Vocational Center","Trinity Vocational Center"),
("Trinity Washington University","Trinity Washington University"),
("Triton College","Triton College"),
("Trocaire College","Trocaire College"),
("Troy University","Troy University"),
("Truckee Meadows Community College","Truckee Meadows Community College"),
("Truett-McConnell College","Truett-McConnell College"),
("Truman Medical Center School of Nurse Anesthesia","Truman Medical Center School of Nurse Anesthesia"),
("Truman State University","Truman State University"),
("Trumbull Business College","Trumbull Business College"),
("Trumbull Career & Technical Center","Trumbull Career & Technical Center"),
("Tucson College of Beauty","Tucson College of Beauty"),
("Tucson College","Tucson College"),
("Tufts University","Tufts University"),
("Tulane University of Louisiana","Tulane University of Louisiana"),
("Tulare Beauty College","Tulare Beauty College"),
("Tulsa Community College","Tulsa Community College"),
("Tulsa Technology Center-Broken Arrow Campus","Tulsa Technology Center-Broken Arrow Campus"),
("Tulsa Technology Center-Lemley Campus","Tulsa Technology Center-Lemley Campus"),
("Tulsa Technology Center-Owasso Campus","Tulsa Technology Center-Owasso Campus"),
("Tulsa Technology Center-Peoria Campus","Tulsa Technology Center-Peoria Campus"),
("Tulsa Technology Center-Riverside Campus","Tulsa Technology Center-Riverside Campus"),
("Tulsa Technology Center-Sand Springs Campus","Tulsa Technology Center-Sand Springs Campus"),
("Tulsa Welding School-Jacksonville","Tulsa Welding School-Jacksonville"),
("Tulsa Welding School-Jacksonville","Tulsa Welding School-Jacksonville"),
("Tulsa Welding School-Tulsa","Tulsa Welding School-Tulsa"),
("Tulsa Welding School-Tulsa","Tulsa Welding School-Tulsa"),
("Tunxis Community College","Tunxis Community College"),
("Turning Point Beauty College","Turning Point Beauty College"),
("Turtle Mountain Community College","Turtle Mountain Community College"),
("Tusculum College","Tusculum College"),
("Tuskegee University","Tuskegee University"),
("Twin City Beauty College","Twin City Beauty College"),
("Twin Rivers Adult School","Twin Rivers Adult School"),
("Tyler Junior College","Tyler Junior College"),
("U S Grant Joint Vocational School","U S Grant Joint Vocational School"),
("UEI College-Fresno","UEI College-Fresno"),
("UEI College-Santa Cruz","UEI College-Santa Cruz"),
("Uintah Basin Applied Technology College","Uintah Basin Applied Technology College"),
("Ukiah Adult School","Ukiah Adult School"),
("Ulster County BOCES-Practical Nursing Program","Ulster County BOCES-Practical Nursing Program"),
("Ulster County Community College","Ulster County Community College"),
("Ultimate Medical Academy-Clearwater","Ultimate Medical Academy-Clearwater"),
("Ultimate Medical Academy-Tampa","Ultimate Medical Academy-Tampa"),
("Ultrasound Medical Institute","Ultrasound Medical Institute"),
("Umpqua Community College","Umpqua Community College"),
("Unification Theological Seminary","Unification Theological Seminary"),
("Union College","Union College"),
("Union College","Union College"),
("Union College","Union College"),
("Union County College","Union County College"),
("Union County Vocational Technical School","Union County Vocational Technical School"),
("Union Graduate College","Union Graduate College"),
("Union Institute & University","Union Institute & University"),
("Union Presbyterian Seminary","Union Presbyterian Seminary"),
("Union Theological Seminary in the City of New York","Union Theological Seminary in the City of New York"),
("Union University","Union University"),
("Unitech Training Academy-Alexandria","Unitech Training Academy-Alexandria"),
("Unitech Training Academy-Houma","Unitech Training Academy-Houma"),
("Unitech Training Academy-Lafayette","Unitech Training Academy-Lafayette"),
("Unitech Training Academy-Lake Charles","Unitech Training Academy-Lake Charles"),
("Unitech Training Academy-West Monroe","Unitech Training Academy-West Monroe"),
("United Beauty College","United Beauty College"),
("United Education Institute-Huntington Park Campus","United Education Institute-Huntington Park Campus"),
("United Medical and Business Institute","United Medical and Business Institute"),
("United States Air Force Academy","United States Air Force Academy"),
("United States Coast Guard Academy","United States Coast Guard Academy"),
("United States Merchant Marine Academy","United States Merchant Marine Academy"),
("United States Military Academy","United States Military Academy"),
("United States Naval Academy","United States Naval Academy"),
("United States Sports Academy","United States Sports Academy"),
("United States University","United States University"),
("United Talmudical Seminary","United Talmudical Seminary"),
("United Technical Center","United Technical Center"),
("United Theological Seminary of the Twin Cities","United Theological Seminary of the Twin Cities"),
("United Theological Seminary","United Theological Seminary"),
("United Tribes Technical College","United Tribes Technical College"),
("Unitek College","Unitek College"),
("Unity College","Unity College"),
("Unity Cosmetology College","Unity Cosmetology College"),
("UnityPoint Health-Des Moines School of Radiologic Technology","UnityPoint Health-Des Moines School of Radiologic Technology"),
("Universal Barber College","Universal Barber College"),
("Universal Career School","Universal Career School"),
("Universal College of Beauty Inc-Los Angeles 1","Universal College of Beauty Inc-Los Angeles 1"),
("Universal College of Beauty Inc-Los Angeles 2","Universal College of Beauty Inc-Los Angeles 2"),
("Universal College of Healing Arts","Universal College of Healing Arts"),
("Universal Spa Training Academy","Universal Spa Training Academy"),
("Universal Technical Institute - Dall<NAME>","Universal Technical Institute - <NAME>"),
("Universal Technical Institute of Arizona Inc","Universal Technical Institute of Arizona Inc"),
("Universal Technical Institute of Arizona Inc-Motorcycle Mechanics Institute Division","Universal Technical Institute of Arizona Inc-Motorcycle Mechanics Institute Division"),
("Universal Technical Institute of California Inc","Universal Technical Institute of California Inc"),
("Universal Technical Institute of Illinois Inc","Universal Technical Institute of Illinois Inc"),
("Universal Technical Institute of Massachusetts Inc","Universal Technical Institute of Massachusetts Inc"),
("Universal Technical Institute of Northern California Inc","Universal Technical Institute of Northern California Inc"),
("Universal Technical Institute of Pennsylvania Inc","Universal Technical Institute of Pennsylvania Inc"),
("Universal Technical Institute of Texas Inc.","Universal Technical Institute of Texas Inc."),
("Universal Technical Institute-Auto Motorcycle & Marine Mechanics Institute Division-Orlando","Universal Technical Institute-Auto Motorcycle & Marine Mechanics Institute Division-Orlando"),
("Universal Technology College of Puerto Rico","Universal Technology College of Puerto Rico"),
("Universal Therapeutic Massage Institute","Universal Therapeutic Massage Institute"),
("Universal Training Institute","Universal Training Institute"),
("Universidad Adventista de las Antillas","Universidad Adventista de las Antillas"),
("Universidad Central Del Caribe","Universidad Central Del Caribe"),
("Universidad Del Este","Universidad Del Este"),
("Universidad Del Turabo","Universidad Del Turabo"),
("Universidad Internacional Iberoamericana","Universidad Internacional Iberoamericana"),
("Universidad Metropolitana","Universidad Metropolitana"),
("Universidad Pentecostal Mizpa","Universidad Pentecostal Mizpa"),
("Universidad Politecnica de Puerto Rico","Universidad Politecnica de Puerto Rico"),
("Universidad Teologica del Caribe","Universidad Teologica del Caribe"),
("Universidad del Sagrado Corazon","Universidad del Sagrado Corazon"),
("University Academy of Hair Design","University Academy of Hair Design"),
("University System of Maryland","University System of Maryland"),
("University System of Maryland-Research Centers","University System of Maryland-Research Centers"),
("University System of New Hampshire System Office","University System of New Hampshire System Office"),
("University at Buffalo","University at Buffalo"),
("University of Advancing Technology","University of Advancing Technology"),
("University of Aesthetics-Chicago","University of Aesthetics-Chicago"),
("University of Aesthetics-Downers Grove","University of Aesthetics-Downers Grove"),
("University of Akron Main Campus","University of Akron Main Campus"),
("University of Akron Wayne College","University of Akron Wayne College"),
("University of Alabama System Office","University of Alabama System Office"),
("University of Alabama at Birmingham","University of Alabama at Birmingham"),
("University of Alabama in Huntsville","University of Alabama in Huntsville"),
("University of Alaska Anchorage","University of Alaska Anchorage"),
("University of Alaska Fairbanks","University of Alaska Fairbanks"),
("University of Alaska Southeast","University of Alaska Southeast"),
("University of Alaska System of Higher Education","University of Alaska System of Higher Education"),
("University of Antelope Valley","University of Antelope Valley"),
("University of Arizona","University of Arizona"),
("University of Arkansas Community College-Batesville","University of Arkansas Community College-Batesville"),
("University of Arkansas Community College-Hope","University of Arkansas Community College-Hope"),
("University of Arkansas Community College-Morrilton","University of Arkansas Community College-Morrilton"),
("University of Arkansas System Office","University of Arkansas System Office"),
("University of Arkansas at Little Rock","University of Arkansas at Little Rock"),
("University of Arkansas at Monticello","University of Arkansas at Monticello"),
("University of Arkansas at Pine Bluff","University of Arkansas at Pine Bluff"),
("University of Arkansas for Medical Sciences","University of Arkansas for Medical Sciences"),
("University of Arkansas","University of Arkansas"),
("University of Arkansas-Fort Smith","University of Arkansas-Fort Smith"),
("University of Baltimore","University of Baltimore"),
("University of Bridgeport","University of Bridgeport"),
("University of California-Berkeley","University of California-Berkeley"),
("University of California-Davis","University of California-Davis"),
("University of California-Hastings College of Law","University of California-Hastings College of Law"),
("University of California-Irvine","University of California-Irvine"),
("University of California-Los Angeles","University of California-Los Angeles"),
("University of California-Merced","University of California-Merced"),
("University of California-Riverside","University of California-Riverside"),
("University of California-San Diego","University of California-San Diego"),
("University of California-San Francisco","University of California-San Francisco"),
("University of California-Santa Barbara","University of California-Santa Barbara"),
("University of California-Santa Cruz","University of California-Santa Cruz"),
("University of California-System Administration Central Office","University of California-System Administration Central Office"),
("University of Central Arkansas","University of Central Arkansas"),
("University of Central Florida","University of Central Florida"),
("University of Central Missouri","University of Central Missouri"),
("University of Central Oklahoma","University of Central Oklahoma"),
("University of Charleston","University of Charleston"),
("University of Chicago","University of Chicago"),
("University of Cincinnati-Blue Ash College","University of Cincinnati-Blue Ash College"),
("University of Cincinnati-Clermont College","University of Cincinnati-Clermont College"),
("University of Cincinnati-Main Campus","University of Cincinnati-Main Campus"),
("University of Colorado Boulder","University of Colorado Boulder"),
("University of Colorado Colorado Springs","University of Colorado Colorado Springs"),
("University of Colorado Denver","University of Colorado Denver"),
("University of Colorado System Office","University of Colorado System Office"),
("University of Connecticut","University of Connecticut"),
("University of Connecticut-Avery Point","University of Connecticut-Avery Point"),
("University of Connecticut-Stamford","University of Connecticut-Stamford"),
("University of Connecticut-Tri-Campus","University of Connecticut-Tri-Campus"),
("University of Cosmetology Arts & Sciences-Harlingen","University of Cosmetology Arts & Sciences-Harlingen"),
("University of Cosmetology Arts & Sciences-La Joya","University of Cosmetology Arts & Sciences-La Joya"),
("University of Cosmetology Arts & Sciences-McAllen","University of Cosmetology Arts & Sciences-McAllen"),
("University of Cosmetology Arts & Sciences-San Antonio Jamar","University of Cosmetology Arts & Sciences-San Antonio Jamar"),
("University of Cosmetology Arts & Sciences-San Antonio Perrin","University of Cosmetology Arts & Sciences-San Antonio Perrin"),
("University of Dallas","University of Dallas"),
("University of Dayton","University of Dayton"),
("University of Delaware","University of Delaware"),
("University of Denver","University of Denver"),
("University of Detroit Mercy","University of Detroit Mercy"),
("University of Dubuque","University of Dubuque"),
("University of East-West Medicine","University | |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/11_dcmm.ipynb (unless otherwise specified).
__all__ = ['dcmm']
# Internal Cell
#exporti
import numpy as np
from .latent_factor_fxns import forecast_marginal_lf_dcmm, forecast_path_lf_dcmm
from .dglm import bern_dglm, pois_dglm
from .update import update_F
from scipy.special import expit
# Cell
class dcmm:
def __init__(self,
a0_bern = None,
R0_bern = None,
nregn_bern = 0,
ntrend_bern = 0,
nlf_bern = 0,
nhol_bern = 0,
seasPeriods_bern = [],
seasHarmComponents_bern = [],
deltrend_bern = 1, delregn_bern = 1,
delhol_bern = 1,
delseas_bern = 1, dellf_bern = 1,
a0_pois = None,
R0_pois = None,
nregn_pois = 0,
ntrend_pois = 0,
nlf_pois = 0,
nhol_pois = 0,
seasPeriods_pois = [],
seasHarmComponents_pois = [],
deltrend_pois = 1, delregn_pois = 1,
delhol_pois = 1,
delseas_pois = 1, dellf_pois = 1,
rho = 1,
interpolate=True,
adapt_discount=False):
"""
:param a0_bern: Prior mean vector for bernoulli DGLM
:param R0_bern: Prior covariance matrix for bernoulli DGLM
:param nregn_bern: Number of regression components in bernoulli DGLM
:param ntrend_bern: Number of trend components in bernoulli DGLM
:param nlf_bern: Number of latent factor components in bernoulli DGLM
:param seasPeriods_bern: List of periods of seasonal components in bernoulli DGLM
:param seasHarmComponents_bern: List of harmonic components included for each period in bernoulli DGLM
:param deltrend_bern: Discount factor on trend components in bernoulli DGLM
:param delregn_bern: Discount factor on regression components in bernoulli DGLM
:param delhol_bern: Discount factor on holiday component in bernoulli DGLM (currently deprecated)
:param delseas_bern: Discount factor on seasonal components in bernoulli DGLM
:param dellf_bern: Discount factor on latent factor components in bernoulli DGLM
:param a0_pois: Prior mean vector for poisson DGLM
:param R0_pois: Prior covariance matrix for poisson DGLM
:param nregn_pois: Number of regression components in poisson DGLM
:param ntrend_pois: Number of trend components in poisson DGLM
:param nlf_pois: Number of latent factor components in poisson DGLM
:param seasPeriods_pois: List of periods of seasonal components in poisson DGLM
:param seasHarmComponents_pois: List of harmonic components included for each period in poisson DGLM
:param deltrend_pois: Discount factor on trend components in poisson DGLM
:param delregn_pois: Discount factor on regression components in poisson DGLM
:param delhol_pois: Discount factor on holiday component in poisson DGLM (currently deprecated)
:param delseas_pois: Discount factor on seasonal components in poisson DGLM
:param dellf_pois: Discount factor on latent factor components in poisson DGLM
:param rho: Discount factor for random effects extension in poisson DGLM (smaller rho increases variance)
"""
self.bern_mod = bern_dglm(a0=a0_bern,
R0=R0_bern,
nregn=nregn_bern,
ntrend=ntrend_bern,
nlf=nlf_bern,
nhol=nhol_bern,
seasPeriods=seasPeriods_bern,
seasHarmComponents=seasHarmComponents_bern,
deltrend=deltrend_bern, delregn=delregn_bern,
delhol=delhol_bern, delseas=delseas_bern,
dellf=dellf_bern,
interpolate=interpolate,
adapt_discount=adapt_discount)
self.pois_mod = pois_dglm(a0=a0_pois,
R0=R0_pois,
nregn=nregn_pois,
ntrend=ntrend_pois,
nlf=nlf_pois,
nhol=nhol_pois,
seasPeriods=seasPeriods_pois,
seasHarmComponents=seasHarmComponents_pois,
deltrend=deltrend_pois, delregn=delregn_pois,
delhol=delhol_pois, delseas=delseas_pois,
dellf=dellf_pois,
rho=rho,
interpolate=interpolate,
adapt_discount=adapt_discount)
self.t = 0
# X is a list or tuple of length 2. The first component is data for the bernoulli DGLM, the next is for the Poisson DGLM.
    def update(self, y = None, X = None):
        """
        Update the DCMM with one observation y (count scale), given covariates X.

        :param y: Observed count; None marks a missing observation.
        :param X: Covariates — a list/tuple of length 2 (bernoulli, poisson),
            or a single value that make_pair duplicates for both models.
        """
        X = self.make_pair(X)
        if y is None:
            # Missing observation: both models evolve forward with no data.
            self.bern_mod.update(y=y)
            self.pois_mod.update(y=y)
        elif y == 0:
            # A zero is direct evidence for the bernoulli model; the shifted
            # poisson model treats it as missing (np.nan).
            self.bern_mod.update(y = 0, X = X[0])
            self.pois_mod.update(y = np.nan, X = X[1])
        else: # only update the bernoulli model if we have significant uncertainty in the forecast
            # get the lower end forecast on the logit scale
            F = update_F(self.bern_mod, X[0], F=self.bern_mod.F.copy())
            ft, qt = self.bern_mod.get_mean_and_var(F, self.bern_mod.a, self.bern_mod.R)
            fcast_logit_lb = ft - np.sqrt(qt)
            # translate to a probability for a rough idea of whether we're already pretty confident for this forecast
            if expit(fcast_logit_lb) < 0.975:
                self.bern_mod.update(y=1, X = X[0])
            else:
                # Already near-certain of a nonzero, so skip the bernoulli update
                # by passing a missing value.
                self.bern_mod.update(y=np.nan, X=X[0])
            self.pois_mod.update(y = y - 1, X = X[1]) # Shifted Y values in the Poisson DGLM
        self.t += 1
def update_lf_sample(self, y = None, X = None, phi_samps = None, parallel=False):
X = self.make_pair(X)
phi_samps = self.make_pair(phi_samps)
if y is None:
self.bern_mod.update_lf_sample(y=y)
self.pois_mod.update_lf_sample(y=y)
elif y == 0:
self.bern_mod.update_lf_sample(y = 0, X = X[0], phi_samps = phi_samps[0], parallel = parallel)
self.pois_mod.update_lf_sample(y = np.nan, X = X[1], phi_samps = phi_samps[1], parallel = parallel)
else:
self.bern_mod.update_lf_sample(y = 1, X = X[0], phi_samps = phi_samps[0], parallel = parallel)
# Shifted Y values in the Poisson DGLM
self.pois_mod.update_lf_sample(y =y - 1, X = X[1], phi_samps = phi_samps[1], parallel = parallel)
self.t += 1
def update_lf_analytic(self, y = None, X = None, phi_mu = None, phi_sigma = None):
X = self.make_pair(X)
phi_mu = self.make_pair(phi_mu)
phi_sigma = self.make_pair(phi_sigma)
if y is None:
self.bern_mod.update_lf_analytic(y=y)
self.pois_mod.update_lf_analytic(y=y)
elif y == 0:
self.bern_mod.update_lf_analytic(y = 0, X = X[0], phi_mu = phi_mu[0], phi_sigma = phi_sigma[0])
self.pois_mod.update_lf_analytic(y = np.nan, X = X[1], phi_mu = phi_mu[1], phi_sigma = phi_sigma[1])
else:
self.bern_mod.update_lf_analytic(y = 1, X = X[0], phi_mu = phi_mu[0], phi_sigma = phi_sigma[0])
# Shifted Y values in the Poisson DGLM
self.pois_mod.update_lf_analytic(y =y - 1, X = X[1], phi_mu = phi_mu[1], phi_sigma = phi_sigma[1])
self.t += 1
def forecast_marginal(self, k, X = None, nsamps = 1, mean_only = False, state_mean_var = False):
X = self.make_pair(X)
if mean_only:
mean_bern = self.bern_mod.forecast_marginal(k, X[0], nsamps, mean_only)
mean_pois = self.pois_mod.forecast_marginal(k, X[1], nsamps, mean_only)
return mean_bern * (mean_pois + 1)
elif state_mean_var:
mv_bern = self.bern_mod.forecast_marginal(k, X[0], state_mean_var = state_mean_var)
mv_pois = self.pois_mod.forecast_marginal(k, X[1], state_mean_var = state_mean_var)
return mv_bern, mv_pois
else:
samps_bern = self.bern_mod.forecast_marginal(k, X[0], nsamps)
samps_pois = self.pois_mod.forecast_marginal(k, X[1], nsamps) + np.ones([nsamps]) # Shifted Y values in the Poisson DGLM
return samps_bern * samps_pois
def forecast_marginal_lf_analytic(self, k, X = None, phi_mu = None, phi_sigma = None, nsamps = 1, mean_only = False, state_mean_var = False):
X = self.make_pair(X)
phi_mu = self.make_pair(phi_mu)
phi_sigma = self.make_pair(phi_sigma)
if mean_only:
mean_bern = self.bern_mod.forecast_marginal_lf_analytic(k, X[0], phi_mu[0], phi_sigma[0], nsamps, mean_only)
mean_pois = self.pois_mod.forecast_marginal_lf_analytic(k, X[1], phi_mu[1], phi_sigma[1], nsamps, mean_only)
return np.array([[mean_bern * (mean_pois + 1)]])
elif state_mean_var:
mv_bern = self.bern_mod.forecast_marginal_lf_analytic(k, X[0], phi_mu[0], phi_sigma[0], state_mean_var = state_mean_var)
mv_pois = self.pois_mod.forecast_marginal_lf_analytic(k, X[1], phi_mu[1], phi_sigma[1], state_mean_var = state_mean_var)
return mv_bern, mv_pois
else:
samps_bern = self.bern_mod.forecast_marginal_lf_analytic(k, X[0], phi_mu = phi_mu[0], phi_sigma = phi_sigma[0], nsamps = nsamps)
samps_pois = self.pois_mod.forecast_marginal_lf_analytic(k, X[1], phi_mu = phi_mu[1], phi_sigma = phi_sigma[1], nsamps = nsamps) + np.ones([nsamps]) # Shifted Y values in the Poisson DGLM
return samps_bern * samps_pois
    def forecast_marginal_lf_analytic_new(self, k, X = None, phi_mu = None, phi_sigma = None, nsamps = 1, mean_only = False, state_mean_var = False):
        # Variant of forecast_marginal_lf_analytic: the mean_only and
        # state_mean_var branches are identical, but the sampling branch
        # delegates to the module-level helper forecast_marginal_lf_dcmm
        # (defined elsewhere -- TODO confirm it is in scope at runtime).
        X = self.make_pair(X)
        phi_mu = self.make_pair(phi_mu)
        phi_sigma = self.make_pair(phi_sigma)
        if mean_only:
            # Point forecast: E[y] = E[bern] * (E[pois] + 1), as a 1x1 array.
            mean_bern = self.bern_mod.forecast_marginal_lf_analytic(k, X[0], phi_mu[0], phi_sigma[0], nsamps, mean_only)
            mean_pois = self.pois_mod.forecast_marginal_lf_analytic(k, X[1], phi_mu[1], phi_sigma[1], nsamps, mean_only)
            return np.array([[mean_bern * (mean_pois + 1)]])
        elif state_mean_var:
            # State mean/variance summaries for each sub-model.
            mv_bern = self.bern_mod.forecast_marginal_lf_analytic(k, X[0], phi_mu[0], phi_sigma[0], state_mean_var = state_mean_var)
            mv_pois = self.pois_mod.forecast_marginal_lf_analytic(k, X[1], phi_mu[1], phi_sigma[1], state_mean_var = state_mean_var)
            return mv_bern, mv_pois
        else:
            # NOTE(review): only the Bernoulli-side inputs (X[0], phi_mu[0],
            # phi_sigma[0]) are forwarded -- verify against the helper's signature.
            return forecast_marginal_lf_dcmm(self, k, X[0], phi_mu[0], phi_sigma[0], nsamps=nsamps)
    def forecast_marginal_lf_sample(self, k, X = None, phi_samps = None, nsamps = 1, mean_only = False):
        """Marginal k-step forecast using sampled latent factors."""
        # NOTE(review): nsamps is not forwarded to the sub-models; the number
        # of draws presumably comes from phi_samps and must equal nsamps for
        # the np.ones([nsamps]) shift below to broadcast -- confirm.
        X = self.make_pair(X)
        phi_samps = self.make_pair(phi_samps)
        samps_bern = self.bern_mod.forecast_marginal_lf_sample(k, X[0], phi_samps[0], mean_only)
        samps_pois = self.pois_mod.forecast_marginal_lf_sample(k, X[1], phi_samps[1], mean_only) + np.ones([nsamps]) # Shifted Y values in the Poisson DGLM
        return samps_bern * samps_pois
def forecast_path_lf_sample(self, k, X = None, phi_samps=None, nsamps = 1):
X = self.make_pair(X)
phi_samps = self.make_pair(phi_samps)
samps_bern = self.bern_mod.forecast_path_lf_sample(k, X[0], phi_samps[0], nsamps)
samps_pois = self.pois_mod.forecast_path_lf_sample(k, X[1], phi_samps[1], nsamps) + np.ones([nsamps, k]) # Shifted Y values in the Poisson DGLM
return samps_bern * samps_pois
def forecast_path(self, k, X = None, nsamps = 1):
X = self.make_pair(X)
samps_bern = self.bern_mod.forecast_path(k, X[0], nsamps)
samps_pois = self.pois_mod.forecast_path(k, X[1], nsamps) + np.ones([nsamps, k]) # Shifted Y values in the Poisson DGLM
return samps_bern * samps_pois
def forecast_path_copula(self, k, X = None, nsamps = 1, **kwargs):
X = self.make_pair(X)
samps_bern = self.bern_mod.forecast_path_copula(k, X[0], nsamps, **kwargs)
samps_pois = self.pois_mod.forecast_path_copula(k, X[1], nsamps, **kwargs) + np.ones([nsamps, k]) # Shifted Y values in the Poisson DGLM
return samps_bern * samps_pois
def forecast_path_lf_copula(self, k, X = None, phi_mu = None, phi_sigma = None, phi_psi = None, nsamps = 1, **kwargs):
X = self.make_pair(X)
if k == 2 and isinstance(phi_mu, (list, tuple)):
if not isinstance(phi_mu[0], (list, tuple)):
phi_mu = (phi_mu, phi_mu)
phi_sigma = (phi_sigma, phi_sigma)
phi_psi = (phi_psi, phi_psi)
else:
phi_mu = self.make_pair(phi_mu)
phi_sigma = self.make_pair(phi_sigma)
phi_psi = self.make_pair(phi_psi)
samps_bern = self.bern_mod.forecast_path_lf_copula(k, X[0], phi_mu = phi_mu[0], phi_sigma = phi_sigma[0], phi_psi = phi_psi[0], nsamps = nsamps, **kwargs)
samps_pois = self.pois_mod.forecast_path_lf_copula(k, X[1], phi_mu = phi_mu[1], | |
#!/usr/bin/python3 -u
import logging
import os
import sys
import time
import struct
from typing import Tuple, Any
from configparser import ConfigParser
from argparse import ArgumentParser, Namespace
from collections.abc import Mapping
from pathlib import Path
from attiny_i2c import ATTiny
#from attiny_i2c_new import ATTiny
### Global configuration of the daemon. You should know what you do if you change
### these values.
# Version information
major = 2
minor = 13
patch = 1
# config file is in the same directory as the script:
_configfile_default = str(Path(__file__).parent.absolute()) + "/attiny_daemon.cfg"
_shutdown_cmd = "sudo systemctl poweroff" # sudo allows us to start as user 'pi'
_reboot_cmd = "sudo systemctl reboot" # sudo allows us to start as user 'pi'
_time_const = 1.0 # used as a pause between i2c communications, the ATTiny is slow
_num_retries = 10 # the number of retries when reading from or writing to the ATTiny
# These are the different values reported back by the ATTiny depending on its config
button_level = 2**3 # bit flag (8): button pressed
SL_INITIATED = 2 # the value we use to signal that we are shutting down
# Maps shutdown-level values reported by the ATTiny to log messages.
shutdown_levels = {
    # 0: Normal mode
    0: "Everything is normal.",
    # 2 is reserved for us signalling the ATTiny that we are shutting down
    # 4-15: Maybe shutdown or restart, depending on configuration
    2**2: "No external voltage detected. We are on battery power.",
    button_level: "Button has been pressed. Reacting according to configuration.",
    # >16: Definitely shut down
    2**7: "Battery is at warn level. Shutting down.",
}
# Here we store the button functions that are called depending on the configuration
button_functions = {
    "nothing": lambda: logging.info("Button pressed. Configured to do nothing."),
    "shutdown": lambda: os.system(_shutdown_cmd),
    "reboot": lambda: os.system(_reboot_cmd)
}
# this is the minimum reboot time we assume the RPi needs, used for a warning message
minimum_boot_time = 30
def main(*args):
    """Daemon entry point.

    Connects to the ATTiny UPS over I2C, merges configuration, then polls
    the ATTiny's shutdown level forever, reacting with shutdown/reboot or
    the configured button action. Exits non-zero on communication failure
    so systemd can restart us.
    """
    # Startup of the daemon
    args = parse_cmdline(args)
    setup_logger(args.nodaemon)
    config = Config(args.cfgfile)
    config.read_config()
    logging.info("ATTiny Daemon version " + str(major) + "." + str(minor) + "." + str(patch))
    attiny = ATTiny(config[Config.I2C_BUS], config[Config.I2C_ADDRESS], _time_const, _num_retries)
    if attiny.get_last_access() < 0:
        logging.error("Cannot access ATTiny")
        exit(1)
    (a_major, a_minor, a_patch) = attiny.get_version()
    logging.info("ATTiny firmware version " + str(a_major) + "." + str(a_minor) + "." + str(a_patch))
    if major != a_major:
        logging.error("Daemon and Firmware major version mismatch. This might lead to serious problems. Check both versions.")
    config.merge_and_sync_values(attiny)
    logging.info("Merging completed")
    # loop until stopped or error
    fast_exit = False
    set_unprimed = False
    try:
        while True:
            should_shutdown = attiny.should_shutdown()
            if should_shutdown == 0xFFFF:
                # We have a big problem
                logging.error("Lost connection to ATTiny.")
                # disable to fasten restart
                # set_unprimed = True  # we still try to reset primed
                fast_exit = True
                exit(1)  # executes finally clause and lets the system restart the daemon
            if should_shutdown > SL_INITIATED:
                # we will not exit the process but wait for the systemd to shut us down
                # using SIGTERM. This does not execute the finally clause and leaves
                # everything as it is currently configured
                fallback = "Unknown shutdown_level " + str(should_shutdown) + ". Shutting down."
                logging.warning(shutdown_levels.get(should_shutdown, fallback))
                if should_shutdown > 16:
                    # Levels above 16 always mean power down.
                    attiny.set_should_shutdown(SL_INITIATED)  # we are shutting down
                    logging.info("shutting down now...")
                    os.system(_shutdown_cmd)
                elif (should_shutdown & button_level) != 0:
                    # BUGFIX: the original used '|', which is non-zero for every
                    # level and made this branch fire unconditionally (e.g. on
                    # battery power). '&' tests whether the button bit is set.
                    # We execute the button command and reset the level to normal.
                    attiny.set_should_shutdown(0)
                    button_functions[config[Config.BUTTON_FUNCTION]]()
            logging.debug("Sleeping for " + str(config[Config.SLEEPTIME]) + " seconds.")
            time.sleep(config[Config.SLEEPTIME])
    except KeyboardInterrupt:
        logging.info("Terminating daemon: cleaning up and exiting")
        # Ctrl-C means we do not run as daemon
        set_unprimed = True
    except Exception as e:
        logging.error("An exception occurred: '" + str(e) + "' Exiting...")
    finally:
        if not fast_exit:
            # will not be executed on SIGTERM, leaving primed set to the config value
            primed = config[Config.PRIMED]
            if args.nodaemon or set_unprimed:
                primed = False
            if not primed:
                logging.info("Trying to reset primed flag")
                attiny.set_primed(primed)
        del attiny
def parse_cmdline(args: Tuple[Any]) -> Namespace:
    """Parse the daemon's command-line arguments from the given sequence."""
    parser = ArgumentParser(description='ATTiny Daemon')
    parser.add_argument('--cfgfile', metavar='file', required=False,
                        help='full path and name of the configfile')
    parser.add_argument('--nodaemon', required=False, action='store_true',
                        help='use normal output formatting')
    return parser.parse_args(args)
def setup_logger(nodaemon: bool) -> None:
    """Configure the root logger: INFO level, and (when running as a
    daemon) systemd-style output via SystemdHandler."""
    root_log = logging.getLogger()
    root_log.setLevel("INFO")
    if not nodaemon:
        # Daemon mode: prefix records with sd-daemon priority tags.
        root_log.addHandler(SystemdHandler())
class SystemdHandler(logging.Handler):
    """Logging handler that prefixes each record with the sd-daemon
    priority tag so systemd/journald picks up the severity.

    Reference: http://0pointer.de/public/systemd-man/sd-daemon.html
    """

    # Map logging levels to sd-daemon priority prefixes.
    PREFIX = {
        # EMERG <0>
        # ALERT <1>
        logging.CRITICAL: "<2>",
        logging.ERROR: "<3>",
        logging.WARNING: "<4>",
        # NOTICE <5>
        logging.INFO: "<6>",
        logging.DEBUG: "<7>",
        logging.NOTSET: "<7>",
    }

    def __init__(self, stream=sys.stdout):
        logging.Handler.__init__(self)
        self.stream = stream

    def emit(self, record):
        try:
            line = self.PREFIX[record.levelno] + self.format(record)
            # journald treats each line as a record: escape embedded newlines.
            self.stream.write(line.replace("\n", "\\n") + "\n")
            self.stream.flush()
        except Exception:
            self.handleError(record)
class Config(Mapping):
    """Read-only mapping of daemon configuration values, backed by a
    ConfigParser file with defaults. Values equal to MAX_INT act as the
    'not configured, take it from the ATTiny' sentinel."""
    # Section and option names used in the config file.
    DAEMON_SECTION = "attinydaemon"
    I2C_BUS = 'i2c bus'
    I2C_ADDRESS = 'i2c address'
    TIMEOUT = 'timeout'
    SLEEPTIME = 'sleeptime'
    PRIMED = 'primed'
    BAT_V_COEFFICIENT = 'battery voltage coefficient'
    BAT_V_CONSTANT = 'battery voltage constant'
    EXT_V_COEFFICIENT = 'external voltage coefficient'
    EXT_V_CONSTANT = 'external voltage constant'
    T_COEFFICIENT = 'temperature coefficient'
    T_CONSTANT = 'temperature constant'
    FORCE_SHUTDOWN = 'force shutdown'
    LED_OFF_MODE = 'led off mode'
    WARN_VOLTAGE = 'warn voltage'
    UPS_SHUTDOWN_VOLTAGE = 'ups shutdown voltage'
    RESTART_VOLTAGE = 'restart voltage'
    LOG_LEVEL = 'loglevel'
    BUTTON_FUNCTION = 'button function'
    UPS_CONFIG = 'ups configuration'
    VEXT_SHUTDOWN = 'vext off is shutdown'
    PULSE_LENGTH = 'pulse length'
    PULSE_LENGTH_ON = 'pulse length on'
    PULSE_LENGTH_OFF = 'pulse length off'
    SW_RECOVERY_DELAY = 'switch recovery delay'
    # Sentinel meaning "not set in the config file".
    MAX_INT = sys.maxsize
    # Defaults fed to ConfigParser before the file is read.
    DEFAULT_CONFIG = {
        DAEMON_SECTION: {
            I2C_ADDRESS: '0x37',
            I2C_BUS: '1',
            TIMEOUT: str(MAX_INT),
            SLEEPTIME: str(MAX_INT),
            PRIMED: 'False',
            BAT_V_COEFFICIENT: str(MAX_INT),
            BAT_V_CONSTANT: str(MAX_INT),
            EXT_V_COEFFICIENT: str(MAX_INT),
            EXT_V_CONSTANT: str(MAX_INT),
            T_COEFFICIENT: str(MAX_INT),
            T_CONSTANT: str(MAX_INT),
            FORCE_SHUTDOWN: 'True',
            LED_OFF_MODE: '0',
            WARN_VOLTAGE: str(MAX_INT),
            UPS_SHUTDOWN_VOLTAGE: str(MAX_INT),
            RESTART_VOLTAGE: str(MAX_INT),
            BUTTON_FUNCTION: "nothing",
            UPS_CONFIG: "0",
            VEXT_SHUTDOWN: 'False',
            PULSE_LENGTH: "200",
            PULSE_LENGTH_ON: "0",
            PULSE_LENGTH_OFF: "0",
            SW_RECOVERY_DELAY: "1000",
            LOG_LEVEL: 'DEBUG'
        }
    }
    def __init__(self, cfgfile):
        """Remember the config file path (explicit argument wins over the
        module default) and prepare the parser and value storage."""
        global _configfile_default # simpler to change than a class variable
        if cfgfile:
            self.configfile_name = cfgfile
        else:
            self.configfile_name = _configfile_default
        # NOTE(review): self.config appears unused by the visible methods
        # (values live in self._storage) -- confirm before removing.
        self.config = {}
        self.parser = ConfigParser(allow_no_value=True)
        self._storage = dict()
    def __getitem__(self, key):
        # Mapping protocol: read-only access to the parsed option values.
        return self._storage[key]
    def __iter__(self):
        return iter(self._storage)
    def __len__(self):
        return len(self._storage)
    def read_config(self):
        """Load defaults, then the config file (if any), then convert every
        option into a typed value in self._storage. Exits the process if a
        value cannot be converted."""
        self.parser.read_dict(self.DEFAULT_CONFIG)
        if not os.path.isfile(self.configfile_name):
            logging.info("No Config File. Trying to create one.")
            # self.write_config()
        else:
            try:
                self.parser.read(self.configfile_name)
            except Exception:
                logging.warning("cannot read config file. Using default values")
        try:
            # base 0 lets the file use 0x.. hex notation for the address
            self._storage[self.I2C_ADDRESS] = int(self.parser.get(self.DAEMON_SECTION, self.I2C_ADDRESS), 0)
            self._storage[self.I2C_BUS] = self.parser.getint(self.DAEMON_SECTION, self.I2C_BUS)
            self._storage[self.TIMEOUT] = self.parser.getint(self.DAEMON_SECTION, self.TIMEOUT)
            self._storage[self.SLEEPTIME] = self.parser.getint(self.DAEMON_SECTION, self.SLEEPTIME)
            self._storage[self.PRIMED] = self.parser.getboolean(self.DAEMON_SECTION, self.PRIMED)
            self._storage[self.BAT_V_COEFFICIENT] = self.parser.getint(self.DAEMON_SECTION, self.BAT_V_COEFFICIENT)
            self._storage[self.BAT_V_CONSTANT] = self.parser.getint(self.DAEMON_SECTION, self.BAT_V_CONSTANT)
            self._storage[self.EXT_V_COEFFICIENT] = self.parser.getint(self.DAEMON_SECTION, self.EXT_V_COEFFICIENT)
            self._storage[self.EXT_V_CONSTANT] = self.parser.getint(self.DAEMON_SECTION, self.EXT_V_CONSTANT)
            self._storage[self.T_COEFFICIENT] = self.parser.getint(self.DAEMON_SECTION, self.T_COEFFICIENT)
            self._storage[self.T_CONSTANT] = self.parser.getint(self.DAEMON_SECTION, self.T_CONSTANT)
            self._storage[self.FORCE_SHUTDOWN] = self.parser.getboolean(self.DAEMON_SECTION, self.FORCE_SHUTDOWN)
            self._storage[self.LED_OFF_MODE] = self.parser.getint(self.DAEMON_SECTION, self.LED_OFF_MODE)
            self._storage[self.WARN_VOLTAGE] = self.parser.getint(self.DAEMON_SECTION, self.WARN_VOLTAGE)
            self._storage[self.UPS_SHUTDOWN_VOLTAGE] = self.parser.getint(self.DAEMON_SECTION, self.UPS_SHUTDOWN_VOLTAGE)
            self._storage[self.RESTART_VOLTAGE] = self.parser.getint(self.DAEMON_SECTION, self.RESTART_VOLTAGE)
            self._storage[self.BUTTON_FUNCTION] = self.parser.get(self.DAEMON_SECTION, self.BUTTON_FUNCTION)
            # base 0 allows hex/binary notation for the UPS configuration too
            self._storage[self.UPS_CONFIG] = int(self.parser.get(self.DAEMON_SECTION, self.UPS_CONFIG), 0)
            self._storage[self.VEXT_SHUTDOWN] = self.parser.getboolean(self.DAEMON_SECTION, self.VEXT_SHUTDOWN)
            self._storage[self.PULSE_LENGTH] = self.parser.getint(self.DAEMON_SECTION, self.PULSE_LENGTH)
            self._storage[self.PULSE_LENGTH_ON] = self.parser.getint(self.DAEMON_SECTION, self.PULSE_LENGTH_ON)
            self._storage[self.PULSE_LENGTH_OFF] = self.parser.getint(self.DAEMON_SECTION, self.PULSE_LENGTH_OFF)
            self._storage[self.SW_RECOVERY_DELAY] = self.parser.getint(self.DAEMON_SECTION, self.SW_RECOVERY_DELAY)
            # the log level option directly reconfigures the root logger
            logging.getLogger().setLevel(self.parser.get(self.DAEMON_SECTION, self.LOG_LEVEL))
            logging.debug("config variables are set")
        except Exception as e:
            logging.error("Cannot convert option: " + str(e))
            exit(1)
def write_config(self):
try:
cfgfile = open(self.configfile_name, 'w')
self.parser.write(cfgfile)
cfgfile.close()
except Exception:
logging.warning("cannot write config file.")
@staticmethod
def calc_sleeptime(val):
global minimum_boot_time
# we should have at least 30 seconds to boot
# before the timeout occurs
sleeptime = val - minimum_boot_time
if sleeptime < 10:
sleeptime = int(val / 2)
if sleeptime < minimum_boot_time:
logging.warning("Sleeptime is low. Ensure that the Raspberry can boot in " + str(sleeptime) + " seconds or change the config file.")
return sleeptime
# not the perfect place for the method, but good enough
def merge_and_sync_values(self, attiny):
logging.debug("Merge Values and save if necessary")
changed_config = False
attiny_primed = attiny.get_primed()
attiny_timeout = attiny.get_timeout()
attiny_force_shutdown = attiny.get_force_shutdown()
attiny_led_off_mode = attiny.get_led_off_mode()
attiny_ups_configuration = attiny.get_ups_configuration()
attiny_pulse_length = attiny.get_pulse_length()
attiny_pulse_length_on = attiny.get_pulse_length_on()
attiny_pulse_length_off = attiny.get_pulse_length_off()
attiny_switch_recovery_delay = attiny.get_switch_recovery_delay()
attiny_vext_off_is_shutdown = attiny.get_vext_off_is_shutdown()
if self._storage[self.TIMEOUT] == self.MAX_INT:
# timeout was not set in the config file
# we will get timeout, primed, ups configuration,
# pulse length, pulse length on, pulse length off,
# switch recovery delay, vext_is_shutdown and
# force_shutdown from the ATTiny
logging.debug("Getting Timeout from ATTiny")
self._storage[self.PRIMED] = attiny_primed
self._storage[self.TIMEOUT] = attiny_timeout
self._storage[self.FORCE_SHUTDOWN] = attiny_force_shutdown
self._storage[self.LED_OFF_MODE] = attiny_led_off_mode
self._storage[self.UPS_CONFIG] = attiny_ups_configuration
self._storage[self.PULSE_LENGTH] = attiny_pulse_length
self._storage[self.PULSE_LENGTH_ON] = attiny_pulse_length_on
self._storage[self.PULSE_LENGTH_OFF] = attiny_pulse_length_off
self._storage[self.SW_RECOVERY_DELAY] = attiny_switch_recovery_delay
self._storage[self.VEXT_SHUTDOWN] = attiny_vext_off_is_shutdown
self.parser.set(self.DAEMON_SECTION, self.TIMEOUT,
str(self._storage[self.TIMEOUT]))
self.parser.set(self.DAEMON_SECTION, self.PRIMED,
str(self._storage[self.PRIMED]))
self.parser.set(self.DAEMON_SECTION, self.FORCE_SHUTDOWN,
str(self._storage[self.FORCE_SHUTDOWN]))
self.parser.set(self.DAEMON_SECTION, self.LED_OFF_MODE,
str(self._storage[self.LED_OFF_MODE]))
self.parser.set(self.DAEMON_SECTION, self.UPS_CONFIG,
str(self._storage[self.UPS_CONFIG]))
self.parser.set(self.DAEMON_SECTION, self.PULSE_LENGTH,
str(self._storage[self.PULSE_LENGTH]))
self.parser.set(self.DAEMON_SECTION, self.PULSE_LENGTH_ON,
str(self._storage[self.PULSE_LENGTH_ON]))
self.parser.set(self.DAEMON_SECTION, self.PULSE_LENGTH_OFF,
str(self._storage[self.PULSE_LENGTH_OFF]))
self.parser.set(self.DAEMON_SECTION, self.SW_RECOVERY_DELAY,
str(self._storage[self.SW_RECOVERY_DELAY]))
self.parser.set(self.DAEMON_SECTION, self.VEXT_SHUTDOWN,
str(self._storage[self.VEXT_SHUTDOWN]))
changed_config = True
else:
if attiny_timeout != self._storage[self.TIMEOUT]:
logging.debug("Writing Timeout to ATTiny")
attiny.set_timeout(self._storage[self.TIMEOUT])
if | |
# <gh_stars>0
# Copyright 2017-2020 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
import imp
import ast
import logging
import os
import re
import sys
import types
# Python 2's ast module has no NameConstant; substitute a dummy type so
# isinstance(x, NameConstant) is simply False there.
try:
    # pylint: disable=ungrouped-imports
    from ast import NameConstant
except ImportError:
    class FakeType(object):
        pass
    NameConstant = FakeType
log = logging.getLogger("guild")
class Script(object):
    """Static view of a Python source file.

    Parses the file with the ast module (never imports or executes it) and
    collects its imports, function/method calls, and top-level literal
    assignments (exposed as `params`).
    """

    def __init__(self, src, mod_package=None, sys_path=None):
        self.src = src
        self.name = _script_name(src)
        self.mod_package = mod_package
        self.sys_path = sys_path
        self._parsed = False
        self._imports = []
        self._calls = []
        self._params = {}
        self._parse()

    def __lt__(self, x):
        return self.__cmp__(x) < 0

    def __cmp__(self, x):
        # Order scripts by source path (py2-style three-way compare).
        return (self.src > x.src) - (self.src < x.src)

    @property
    def imports(self):
        return self._imports

    @property
    def calls(self):
        return self._calls

    @property
    def params(self):
        return self._params

    def _parse(self):
        assert not self._parsed
        # BUGFIX: close the source file deterministically -- the original
        # open(...).read() leaked the handle until garbage collection.
        with open(self.src, "r") as f:
            parsed = ast.parse(f.read())
        for node in ast.walk(parsed):
            self._safe_apply_node(node)
        self._parsed = True

    def _safe_apply_node(self, node):
        # A malformed node must not abort the whole parse; log and continue.
        try:
            self._apply_node(node)
        except Exception as e:
            self._handle_apply_node_error(e, node)

    def _handle_apply_node_error(self, e, node):
        msg = "error applying AST node %s from %s:%s:" % (
            node.__class__.__name__,
            self.src,
            node.lineno,
        )
        if log.getEffectiveLevel() <= logging.DEBUG:
            log.exception(msg)
        else:
            msg += " %s (use 'guild --debug ...' for more information)" % e
            log.warning(msg)

    def _apply_node(self, node):
        # Dispatch on the node types we collect.
        if isinstance(node, ast.ImportFrom):
            self._apply_import_from(node)
        elif isinstance(node, ast.Import):
            self._apply_import(node)
        elif isinstance(node, ast.Call):
            self._maybe_apply_call(node)
        elif isinstance(node, ast.Assign):
            self._apply_assign(node)

    def _apply_import_from(self, node):
        if node.module:
            self._imports.append(node.module)
        if node.names:
            self._apply_import(node)

    def _apply_import(self, node):
        for name in node.names:
            if isinstance(name, ast.alias):
                self._imports.append(name.name)

    def _maybe_apply_call(self, node):
        call = Call(node)
        if call.name:
            self._calls.append(call)

    def _apply_assign(self, node):
        # Only module-level (column 0) assigns count as params.
        if node.col_offset == 0:
            self._try_apply_param(node)

    def _try_apply_param(self, node):
        try:
            val = _try_param_val(node.value)
        except TypeError:
            # Not a literal value -- not a param.
            pass
        else:
            for target in node.targets:
                if not isinstance(target, ast.Name):
                    continue
                self._params[target.id] = val
def _try_param_val(val):
    """Return the Python value of a literal AST node.

    Raises TypeError when the node is not a supported literal.
    """
    if isinstance(val, ast.Num):
        return val.n
    elif isinstance(val, ast.Str):
        return val.s
    elif isinstance(val, NameConstant):
        return val.value
    elif isinstance(val, ast.Name):
        # Python 2 parses True/False/None as plain names.
        if val.id == "True":
            return True
        elif val.id == "False":
            return False
        elif val.id == "None":
            return None
    raise TypeError(val)

class Call(object):
    """Wrapper around an ast.Call exposing the called name and literal
    keyword argument values."""

    def __init__(self, node):
        self.node = node
        self.name = self._func_name(node.func)

    def _func_name(self, func):
        if isinstance(func, ast.Name):
            return func.id
        elif isinstance(func, ast.Attribute):
            return func.attr
        elif isinstance(func, ast.Call):
            # e.g. decorator factories: f(...)(...)
            return self._func_name(func.func)
        elif isinstance(func, ast.Subscript):
            return None
        else:
            raise AssertionError(func)

    def kwarg_param(self, name):
        """Return the literal value of keyword argument `name`, or None if
        the keyword is absent or its value is not a literal."""
        for kw in self.node.keywords:
            if kw.arg == name:
                try:
                    # BUGFIX: the original computed this value but discarded
                    # it and always returned None.
                    return _try_param_val(kw.value)
                except TypeError:
                    return None
        return None
def _script_name(src):
    """Module-style name of a script path: its basename without extension."""
    return os.path.splitext(os.path.basename(src))[0]
class Result(Exception):
    """Raised by wrapper callbacks to short-circuit the wrapped callable;
    `value` becomes the call's return value."""
    def __init__(self, value):
        super(Result, self).__init__(value)
        self.value = value
class MethodWrapper(object):
    """Replaces a class method with a wrapper that invokes registered
    callbacks before the original.

    A callback may raise Result(value) to make the call return `value`
    instead of invoking the original method; later callbacks still run and
    the last Result wins. The wrapper is discoverable on the method via its
    __wrapper__ attribute, and unwraps itself when the last callback is
    removed.
    """
    @staticmethod
    def for_method(method):
        # Returns the wrapper previously installed on `method`, if any.
        return getattr(method, "__wrapper__", None)
    def __init__(self, func, cls, name):
        self._func = func
        self._cls = cls
        self._name = name
        self._cbs = []
        self._wrap()
    def _wrap(self):
        wrapper = self._wrapper()
        wrapper.__name__ = "%s_wrapper" % self._name
        wrapper.__wrapper__ = self
        # Install the wrapper in place of the original method.
        setattr(self._cls, self._name, wrapper)
    def _wrapper(self):
        def wrapper(wrapped_self, *args, **kw):
            wrapped_bound = self._bind(wrapped_self)
            # `marker` distinguishes "no callback supplied a result".
            marker = object()
            result = marker
            for cb in self._cbs:
                try:
                    cb(wrapped_bound, *args, **kw)
                except Result as e:
                    # Callback short-circuits: remember its value.
                    result = e.value
                except KeyboardInterrupt:
                    raise
            if result is marker:
                # No callback provided a result: call the original method.
                return wrapped_bound(*args, **kw)
            else:
                return result
        return wrapper
    def _bind(self, wrapped_self):
        # Bound-method stand-in for the original (pre-wrap) function.
        return lambda *args, **kw: self._func(wrapped_self, *args, **kw)
    def add_cb(self, cb):
        self._cbs.append(cb)
    def remove_cb(self, cb):
        try:
            self._cbs.remove(cb)
        except ValueError:
            pass
        if not self._cbs:
            # Last callback gone: restore the original method.
            self.unwrap()
    def unwrap(self):
        setattr(self._cls, self._name, self._func)
def listen_method(cls, method_name, cb):
    """Register cb to run before cls.<method_name>, wrapping the method on
    first registration."""
    method = getattr(cls, method_name)
    wrapper = MethodWrapper.for_method(method)
    if wrapper is None:
        # First listener: install a wrapper around the method.
        wrapper = MethodWrapper(method, cls, method_name)
    wrapper.add_cb(cb)
def remove_method_listener(method, cb):
    """Detach cb from a wrapped method; no-op if the method is unwrapped."""
    wrapper = MethodWrapper.for_method(method)
    if wrapper is None:
        return
    wrapper.remove_cb(cb)
def remove_method_listeners(method):
    """Remove all listeners from a wrapped method and restore the original."""
    wrapper = MethodWrapper.for_method(method)
    if wrapper is None:
        return
    wrapper.unwrap()
class FunctionWrapper(object):
    """Module-level analogue of MethodWrapper: replaces a module function
    with a wrapper that invokes registered callbacks before the original.

    A callback may raise Result(value) to make the call return `value`;
    the wrapper unwraps itself when its last callback is removed.
    """
    @staticmethod
    def for_function(function):
        # Returns the wrapper previously installed on `function`, if any.
        return getattr(function, "__wrapper__", None)
    def __init__(self, func, mod, name):
        self._func = func
        self._mod = mod
        self._name = name
        self._cbs = []
        self._wrap()
    def _wrap(self):
        wrapper = self._wrapper()
        wrapper.__name__ = "%s_wrapper" % self._name
        wrapper.__wrapper__ = self
        # Install the wrapper in place of the original module attribute.
        setattr(self._mod, self._name, wrapper)
    def _wrapper(self):
        def wrapper(*args, **kw):
            # `marker` distinguishes "no callback supplied a result".
            marker = object()
            result = marker
            for cb in self._cbs:
                try:
                    cb(self._func, *args, **kw)
                except Result as e:
                    # Callback short-circuits: remember its value.
                    result = e.value
                except KeyboardInterrupt:
                    raise
            if result is marker:
                return self._func(*args, **kw)
            else:
                return result
        return wrapper
    def add_cb(self, cb):
        self._cbs.append(cb)
    def remove_cb(self, cb):
        try:
            self._cbs.remove(cb)
        except ValueError:
            pass
        if not self._cbs:
            # Last callback gone: restore the original function.
            self.unwrap()
    def unwrap(self):
        setattr(self._mod, self._name, self._func)
def listen_function(module, function_name, cb):
    """Register cb to run before module.<function_name>, wrapping the
    function on first registration."""
    function = getattr(module, function_name)
    wrapper = FunctionWrapper.for_function(function)
    if wrapper is None:
        # First listener: install a wrapper around the function.
        wrapper = FunctionWrapper(function, module, function_name)
    wrapper.add_cb(cb)
def remove_function_listener(function, cb):
    """Detach cb from a wrapped function; no-op if unwrapped."""
    wrapper = FunctionWrapper.for_function(function)
    if wrapper is None:
        return
    wrapper.remove_cb(cb)
def remove_function_listeners(function):
    """Remove all listeners from a wrapped function and restore the original."""
    wrapper = FunctionWrapper.for_function(function)
    if wrapper is None:
        return
    wrapper.unwrap()
def scripts_for_dir(dir, exclude=None):
    """Return a Script for every *.py file in `dir` whose path matches no
    fnmatch pattern in `exclude`."""
    import glob
    import fnmatch
    patterns = list(exclude or [])
    scripts = []
    for src in glob.glob(os.path.join(dir, "*.py")):
        if any(fnmatch.fnmatch(src, pattern) for pattern in patterns):
            continue
        scripts.append(Script(src))
    return scripts
def exec_script(filename, globals=None, mod_name="__main__"):
"""Execute a Python script.
This function can be used to execute a Python module as code
rather than import it. Importing a module to execute it is not an
option if importing has a side-effect of starting threads, in
which case this function can be used.
`mod_name` is ``__main__`` by default but may be an alternative
module name. `mod_name` may include a package.
Reference:
https://docs.python.org/2/library/threading.html#importing-in-threaded-code
"""
globals = globals or {}
package_name, mod_name = split_mod_name(mod_name)
_ensure_parent_mod_loaded(package_name)
node_filter = _node_filter_for_globals(globals) if globals else None
src = open(filename, "r").read()
code = _compile_script(src, filename, node_filter)
script_globals = dict(globals)
script_globals.update(
{"__package__": package_name, "__name__": mod_name, "__file__": filename}
)
exec(code, script_globals)
return script_globals
def split_mod_name(mod_name):
    """Split a dotted module name into (package, leaf); package is "" for
    top-level names."""
    package, _sep, leaf = mod_name.rpartition(".")
    return package, leaf
def _ensure_parent_mod_loaded(parent_mod_name):
    """Import the parent package, when one is given, so package-relative
    execution machinery can find it."""
    if not parent_mod_name:
        return
    try:
        __import__(parent_mod_name)
    except ValueError:
        assert False, parent_mod_name
def _node_filter_for_globals(globals):
    """Filters ast nodes in support of setting globals for exec.

    Returns a predicate that rejects the FIRST top-level assignment of each
    name occurring in `globals` (so the caller-provided value supplies the
    initial value) and keeps everything else, including later re-assignments.
    """
    names = set(globals.keys())
    removed = set()
    def keep(node):
        if not isinstance(node, ast.Assign):
            return True
        for target in node.targets:
            if not isinstance(target, ast.Name) or target.id in removed:
                return True
            if target.id in names:
                # First assignment of a seeded global: drop it.
                removed.add(target.id)
                return False
        return True
    return keep
def _compile_script(src, filename, node_filter=None):
    """Compile `src` to a code object, optionally filtering top-level AST
    nodes, with absolute-import semantics forced on."""
    import __future__
    tree = ast.parse(src, filename)
    if node_filter:
        tree = _filter_nodes(tree, node_filter)
    return compile(tree, filename, "exec",
                   flags=__future__.absolute_import.compiler_flag,
                   dont_inherit=True)
def _filter_nodes(root, node_filter):
    """Recursively filter the bodies of Module and If nodes, keeping only
    children accepted by node_filter."""
    if isinstance(root, (ast.Module, ast.If)):
        kept = []
        for child in root.body:
            if node_filter(child):
                kept.append(_filter_nodes(child, node_filter))
        root.body = kept
    return root
def update_refs(module, ref_spec, new_val, recurse=False, seen=None):
    """Replace attributes of `module` matching `ref_spec` with `new_val`,
    optionally recursing into imported modules (cycle-safe via `seen`)."""
    seen = seen or set()
    if module in seen:
        return
    seen.add(module)
    for name, val in module.__dict__.items():
        if _match_ref(name, val, ref_spec):
            module.__dict__[name] = new_val
        elif recurse and isinstance(val, types.ModuleType):
            update_refs(val, ref_spec, new_val, recurse, seen)

def _match_ref(name, val, ref_spec):
    """True when (name, val) matches ref_spec = (name, type, attrs)."""
    target_name, target_type, target_attrs = ref_spec
    if name != target_name:
        return False
    if not isinstance(val, target_type):
        return False
    return _match_ref_attrs(val, target_attrs)

def _match_ref_attrs(val, attrs):
    """True when every attr in `attrs` exists on `val` with an equal value."""
    undef = object()
    return all(getattr(val, name, undef) == attrs[name] for name in attrs)
def is_python_script(opspec):
    """True if `opspec` names an existing file ending in .py."""
    return opspec.endswith(".py") and os.path.isfile(opspec)
def script_module(script_path, cwd="."):
    """Module path of a script: its extension-less path relative to `cwd`."""
    base, _ext = os.path.splitext(script_path)
    return os.path.relpath(base, cwd)
def safe_module_name(s):
    """Strip a trailing .py (any case) and replace dashes so the result is
    usable as a module name."""
    if s.lower().endswith(".py"):
        s = s[:-3]
    return s.replace("-", "_")
# Cache of (main_mod, model_paths) -> _find_module results.
__modules = {}

def find_module(main_mod, model_paths):
    """Memoized wrapper around _find_module, keyed on the module spec and
    the (tuple-ized) model paths."""
    cache_key = (main_mod, tuple(model_paths))
    if cache_key not in __modules:
        __modules[cache_key] = _find_module(main_mod, model_paths)
    return __modules[cache_key]
def _find_module(main_mod, model_paths):
    """Locate the file for module spec `main_mod`, searching each model
    path (plus sys.path) via imp.find_module.

    Returns (sys_path_entry, module_file_path); raises ImportError when the
    module cannot be found or is a package without a __main__.
    """
    for model_path in model_paths:
        main_mod_sys_path, module = _split_module(main_mod, model_path)
        # Copied from guild.op_main
        parts = module.split(".")
        module_path = parts[0:-1]
        module_name_part = parts[-1]
        for sys_path_item in [main_mod_sys_path] + sys.path:
            cur_path = os.path.join(sys_path_item, *module_path)
            try:
                f, maybe_mod_path, _desc = imp.find_module(module_name_part, [cur_path])
            except ImportError:
                # Not here -- keep scanning the remaining path entries.
                pass
            else:
                if f:
                    # A plain module file was found; close the open handle.
                    f.close()
                else:
                    # A package directory: it must contain a __main__.
                    maybe_mod_path = _find_package_main(maybe_mod_path)
                    if not maybe_mod_path:
                        raise ImportError(
                            "No module named %s.__main__ ('%s' is a package "
                            "and cannot be directly executed)" % (module, module)
                        )
                return main_mod_sys_path, maybe_mod_path
    raise ImportError("No module named %s" % main_mod)
def _find_package_main(mod_path):
    """Path of the package's __main__ module (source preferred over
    compiled), or None if the package has none."""
    for candidate_name in ("__main__.py", "__main__.pyc"):
        candidate = os.path.join(mod_path, candidate_name)
        if os.path.exists(candidate):
            return candidate
    return None
def _split_module(main_mod, gf_dir):
    """Split 'subdir/mod' into (sys-path dir under gf_dir, module name);
    a bare name maps to gf_dir/'.'."""
    head, sep, tail = main_mod.rpartition("/")
    if not sep:
        head = "."
    return os.path.join(gf_dir, head), tail
def test_package_version(version, req):
req = _parse_req(req)
matches = list(req.specifier.filter({version: ""}, prereleases=True))
| |
# generate the test file
test_file = determine_tsv_file(
filename="raganato_ALL",
data_folder=data_folder,
cut_multisense=cut_multisense,
)
corpus = ColumnCorpus(
data_folder=data_folder,
column_format=columns,
test_file=test_file, # corpus only has test data
in_memory=in_memory,
tag_to_bioes=tag_to_bioes,
column_delimiter="\t",
document_separator_token="-DOCSTART-",
banned_sentences=banned_sentences,
autofind_splits=False,
sample_missing_splits=sample_missing_splits_in_each_corpus,
)
corpora.append(corpus)
for filename in filenames:
# make column file and save to data_folder
new_filename = determine_tsv_file(
filename=filename,
data_folder=data_folder,
cut_multisense=cut_multisense,
)
corpus = ColumnCorpus(
data_folder=data_folder,
column_format=columns,
train_file=new_filename,
in_memory=in_memory,
tag_to_bioes=tag_to_bioes,
column_delimiter="\t",
document_separator_token="-DOCSTART-",
banned_sentences=banned_sentences,
autofind_splits=False,
sample_missing_splits=sample_missing_splits_in_each_corpus,
)
corpora.append(corpus)
print("...done!")
super(WSD_UFSAC, self).__init__(
corpora,
sample_missing_splits=sample_missing_splits_in_multicorpus,
name=name,
)
class WSD_RAGANATO_ALL(ColumnCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        # NOTE(review): mutable default argument; appears to be passed through
        # unmodified to ColumnCorpus, but consider a None default -- confirm.
        columns={0: "text", 3: "sense"},
        tag_to_bioes=None,
        label_name_map: Dict[str, str] = None,
        banned_sentences: List[str] = None,
        sample_missing_splits: bool = True,
        cut_multisense: bool = True,
    ):
        """
        Initialize raganato_ALL (concatenation of all SensEval and SemEval all-words tasks) provided in UFSAC https://github.com/getalp/UFSAC
        When first initializing the corpus the whole UFSAC data is downloaded.
        """
        if not base_path:
            base_path = flair.cache_root / "datasets"
        else:
            base_path = Path(base_path)
        dataset_name = "wsd_ufsac"
        data_folder = base_path / dataset_name
        original_data_folder = data_folder / "original_data"
        # We check if the UFSAC data has already been downloaded. If not, we download it.
        # Note that this downloads more datasets than just SemCor. But the size of the download is only around 190 Mb (around 4.5 Gb unpacked)
        if not original_data_folder.exists():
            # create folder
            data_folder.mkdir(parents=True)
            # download data
            import gdown
            url = "https://drive.google.com/uc?id=1Oigo3kzRosz2VjyA44vpJZ58tDFyLRMO"
            output = data_folder / (dataset_name + ".tar")
            gdown.download(url, str(output), quiet=False)
            output = data_folder / (dataset_name + ".tar")
            unpack_file(file=output, unpack_to=data_folder, mode="tar", keep=False)
            os.rename(data_folder / "ufsac-public-2.1", original_data_folder)
        # Convert the raganato_ALL XML into a TSV column file for ColumnCorpus.
        train_file = determine_tsv_file(
            filename="raganato_ALL",
            data_folder=data_folder,
            cut_multisense=cut_multisense,
        )
        super(WSD_RAGANATO_ALL, self).__init__(
            data_folder=data_folder,
            column_format=columns,
            train_file=train_file,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            column_delimiter="\t",
            autofind_splits=False,
            tag_to_bioes=tag_to_bioes,
            label_name_map=label_name_map,
            banned_sentences=banned_sentences,
            sample_missing_splits=sample_missing_splits,
        )
class WSD_SEMCOR(ColumnCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        columns=None,
        tag_to_bioes=None,
        label_name_map: Dict[str, str] = None,
        banned_sentences: List[str] = None,
        sample_missing_splits: Union[bool, str] = True,
        cut_multisense: bool = True,
        use_raganato_ALL_as_test_data: bool = False,
    ):
        """
        Initialize SemCor provided in UFSAC https://github.com/getalp/UFSAC
        When first initializing the corpus the whole UFSAC data is downloaded.

        :param base_path: folder to store/find the dataset; defaults to the flair cache root.
        :param in_memory: if True, keep the corpus in memory.
        :param columns: column format of the generated TSV file; defaults to {0: "text", 3: "sense"}.
        :param tag_to_bioes: optional tag-scheme conversion, passed through to ColumnCorpus.
        :param label_name_map: optional mapping to rename labels.
        :param banned_sentences: sentences to exclude from the corpus.
        :param sample_missing_splits: whether missing dev/test splits are sampled from train.
        :param cut_multisense: if True, reduce multi-sense annotations to the first sense.
        :param use_raganato_ALL_as_test_data: if True, use raganato_ALL as the test split.
        """
        # Avoid the mutable-default-argument pitfall: build the column map per call
        # instead of sharing one dict across all instances.
        if columns is None:
            columns = {0: "text", 3: "sense"}

        if not base_path:
            base_path = flair.cache_root / "datasets"
        else:
            base_path = Path(base_path)

        dataset_name = "wsd_ufsac"
        data_folder = base_path / dataset_name
        original_data_folder = data_folder / "original_data"

        # Check whether the UFSAC data has already been downloaded. If not, download it.
        # Note that this downloads more datasets than just SemCor, but the download
        # is only around 190 Mb (around 4.5 Gb unpacked).
        if not original_data_folder.exists():
            # exist_ok guards against a partially completed earlier attempt that
            # created the folder but never finished unpacking
            data_folder.mkdir(parents=True, exist_ok=True)

            # download data
            import gdown

            url = "https://drive.google.com/uc?id=1Oigo3kzRosz2VjyA44vpJZ58tDFyLRMO"
            output = data_folder / (dataset_name + ".tar")
            gdown.download(url, str(output), quiet=False)

            unpack_file(file=output, unpack_to=data_folder, mode="tar", keep=False)
            os.rename(data_folder / "ufsac-public-2.1", original_data_folder)

        if use_raganato_ALL_as_test_data:
            # In this case no test data should be generated by sampling from train data.
            # But if sample_missing_splits is truthy, the dev set may still be sampled.
            if sample_missing_splits:
                sample_missing_splits = "only_dev"

            # generate the test file
            test_file = determine_tsv_file(
                filename="raganato_ALL",
                data_folder=data_folder,
                cut_multisense=cut_multisense,
            )
        else:
            test_file = None

        train_file = determine_tsv_file(filename="semcor", data_folder=data_folder, cut_multisense=cut_multisense)

        super(WSD_SEMCOR, self).__init__(
            data_folder=data_folder,
            column_format=columns,
            train_file=train_file,
            test_file=test_file,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            column_delimiter="\t",
            autofind_splits=False,
            tag_to_bioes=tag_to_bioes,
            label_name_map=label_name_map,
            banned_sentences=banned_sentences,
            sample_missing_splits=sample_missing_splits,
        )
class WSD_WORDNET_GLOSS_TAGGED(ColumnCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        columns=None,
        tag_to_bioes=None,
        label_name_map: Dict[str, str] = None,
        banned_sentences: List[str] = None,
        sample_missing_splits: Union[bool, str] = True,
        use_raganato_ALL_as_test_data: bool = False,
    ):
        """
        Initialize Princeton WordNet Gloss Corpus provided in UFSAC https://github.com/getalp/UFSAC
        When first initializing the corpus the whole UFSAC data is downloaded.

        :param base_path: folder to store/find the dataset; defaults to the flair cache root.
        :param in_memory: if True, keep the corpus in memory.
        :param columns: column format of the generated TSV file; defaults to {0: "text", 3: "sense"}.
        :param tag_to_bioes: optional tag-scheme conversion, passed through to ColumnCorpus.
        :param label_name_map: optional mapping to rename labels.
        :param banned_sentences: sentences to exclude from the corpus.
        :param sample_missing_splits: whether missing dev/test splits are sampled from train.
        :param use_raganato_ALL_as_test_data: if True, use raganato_ALL as the test split.
        """
        # Avoid the mutable-default-argument pitfall: build the column map per call
        # instead of sharing one dict across all instances.
        if columns is None:
            columns = {0: "text", 3: "sense"}

        if not base_path:
            base_path = flair.cache_root / "datasets"
        else:
            base_path = Path(base_path)

        dataset_name = "wsd_ufsac"
        data_folder = base_path / dataset_name
        original_data_folder = data_folder / "original_data"

        # Check whether the UFSAC data has already been downloaded. If not, download it.
        # Note that this downloads more datasets than just WordNet Gloss Tagged, but the
        # download is only around 190 Mb (around 4.5 Gb unpacked).
        if not original_data_folder.exists():
            # exist_ok guards against a partially completed earlier attempt that
            # created the folder but never finished unpacking
            data_folder.mkdir(parents=True, exist_ok=True)

            # download data
            import gdown

            url = "https://drive.google.com/uc?id=1Oigo3kzRosz2VjyA44vpJZ58tDFyLRMO"
            output = data_folder / (dataset_name + ".tar")
            gdown.download(url, str(output), quiet=False)

            unpack_file(file=output, unpack_to=data_folder, mode="tar", keep=False)
            os.rename(data_folder / "ufsac-public-2.1", original_data_folder)

        if use_raganato_ALL_as_test_data:
            # In this case no test data should be generated by sampling from train data.
            # But if sample_missing_splits is truthy, the dev set may still be sampled.
            if sample_missing_splits:
                sample_missing_splits = "only_dev"

            # generate the test file
            test_file = determine_tsv_file(filename="raganato_ALL", data_folder=data_folder, cut_multisense=True)
        else:
            test_file = None

        train_file = determine_tsv_file(
            filename="wngt", data_folder=data_folder, cut_multisense=False
        )  # does not have multisense!

        super(WSD_WORDNET_GLOSS_TAGGED, self).__init__(
            data_folder=data_folder,
            column_format=columns,
            train_file=train_file,
            test_file=test_file,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            column_delimiter="\t",
            autofind_splits=False,
            tag_to_bioes=tag_to_bioes,
            label_name_map=label_name_map,
            banned_sentences=banned_sentences,
            sample_missing_splits=sample_missing_splits,
        )
class WSD_MASC(ColumnCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        columns=None,
        tag_to_bioes=None,
        label_name_map: Dict[str, str] = None,
        banned_sentences: List[str] = None,
        sample_missing_splits: Union[bool, str] = True,
        cut_multisense: bool = True,
        use_raganato_ALL_as_test_data: bool = False,
    ):
        """
        Initialize MASC (Manually Annotated Sub-Corpus) provided in UFSAC https://github.com/getalp/UFSAC
        When first initializing the corpus the whole UFSAC data is downloaded.

        :param base_path: folder to store/find the dataset; defaults to the flair cache root.
        :param in_memory: if True, keep the corpus in memory.
        :param columns: column format of the generated TSV file; defaults to {0: "text", 3: "sense"}.
        :param tag_to_bioes: optional tag-scheme conversion, passed through to ColumnCorpus.
        :param label_name_map: optional mapping to rename labels.
        :param banned_sentences: sentences to exclude from the corpus.
        :param sample_missing_splits: whether missing dev/test splits are sampled from train.
        :param cut_multisense: if True, reduce multi-sense annotations to the first sense.
        :param use_raganato_ALL_as_test_data: if True, use raganato_ALL as the test split.
        """
        # Avoid the mutable-default-argument pitfall: build the column map per call
        # instead of sharing one dict across all instances.
        if columns is None:
            columns = {0: "text", 3: "sense"}

        if not base_path:
            base_path = flair.cache_root / "datasets"
        else:
            base_path = Path(base_path)

        dataset_name = "wsd_ufsac"

        # default dataset folder is the cache root
        data_folder = base_path / dataset_name
        original_data_folder = data_folder / "original_data"

        # Check whether the UFSAC data has already been downloaded. If not, download it.
        # Note that this downloads more datasets than just MASC, but the download
        # is only around 190 Mb (around 4.5 Gb unpacked).
        if not original_data_folder.exists():
            # exist_ok guards against a partially completed earlier attempt that
            # created the folder but never finished unpacking
            data_folder.mkdir(parents=True, exist_ok=True)

            # download data
            import gdown

            url = "https://drive.google.com/uc?id=1Oigo3kzRosz2VjyA44vpJZ58tDFyLRMO"
            output = data_folder / (dataset_name + ".tar")
            gdown.download(url, str(output), quiet=False)

            unpack_file(file=output, unpack_to=data_folder, mode="tar", keep=False)
            os.rename(data_folder / "ufsac-public-2.1", original_data_folder)

        if use_raganato_ALL_as_test_data:
            # In this case no test data should be generated by sampling from train data.
            # But if sample_missing_splits is truthy, the dev set may still be sampled.
            if sample_missing_splits:
                sample_missing_splits = "only_dev"

            # generate the test file
            test_file = determine_tsv_file(
                filename="raganato_ALL",
                data_folder=data_folder,
                cut_multisense=cut_multisense,
            )
        else:
            test_file = None

        train_file = determine_tsv_file(filename="masc", data_folder=data_folder, cut_multisense=cut_multisense)

        super(WSD_MASC, self).__init__(
            data_folder=data_folder,
            column_format=columns,
            train_file=train_file,
            test_file=test_file,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            column_delimiter="\t",
            autofind_splits=False,
            tag_to_bioes=tag_to_bioes,
            label_name_map=label_name_map,
            banned_sentences=banned_sentences,
            sample_missing_splits=sample_missing_splits,
        )
class WSD_OMSTI(ColumnCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        columns=None,
        tag_to_bioes=None,
        label_name_map: Dict[str, str] = None,
        banned_sentences: List[str] = None,
        sample_missing_splits: Union[bool, str] = True,
        cut_multisense: bool = True,
        use_raganato_ALL_as_test_data: bool = False,
    ):
        """
        Initialize OMSTI (One Million Sense-Tagged Instances) provided in UFSAC https://github.com/getalp/UFSAC
        When first initializing the corpus the whole UFSAC data is downloaded.

        :param base_path: folder to store/find the dataset; defaults to the flair cache root.
        :param in_memory: if True, keep the corpus in memory.
        :param columns: column format of the generated TSV file; defaults to {0: "text", 3: "sense"}.
        :param tag_to_bioes: optional tag-scheme conversion, passed through to ColumnCorpus.
        :param label_name_map: optional mapping to rename labels.
        :param banned_sentences: sentences to exclude from the corpus.
        :param sample_missing_splits: whether missing dev/test splits are sampled from train.
        :param cut_multisense: if True, reduce multi-sense annotations to the first sense.
        :param use_raganato_ALL_as_test_data: if True, use raganato_ALL as the test split.
        """
        # Avoid the mutable-default-argument pitfall: build the column map per call
        # instead of sharing one dict across all instances.
        if columns is None:
            columns = {0: "text", 3: "sense"}

        if not base_path:
            base_path = flair.cache_root / "datasets"
        else:
            base_path = Path(base_path)

        dataset_name = "wsd_ufsac"

        # default dataset folder is the cache root
        data_folder = base_path / dataset_name
        original_data_folder = data_folder / "original_data"

        # Check whether the UFSAC data has already been downloaded. If not, download it.
        # Note that this downloads more datasets than just OMSTI, but the download
        # is only around 190 Mb (around 4.5 Gb unpacked).
        if not original_data_folder.exists():
            # exist_ok guards against a partially completed earlier attempt that
            # created the folder but never finished unpacking
            data_folder.mkdir(parents=True, exist_ok=True)

            # download data
            import gdown

            url = "https://drive.google.com/uc?id=1Oigo3kzRosz2VjyA44vpJZ58tDFyLRMO"
            output = data_folder / (dataset_name + ".tar")
            gdown.download(url, str(output), quiet=False)

            unpack_file(file=output, unpack_to=data_folder, mode="tar", keep=False)
            os.rename(data_folder / "ufsac-public-2.1", original_data_folder)

        if use_raganato_ALL_as_test_data:
            # In this case no test data should be generated by sampling from train data.
            # But if sample_missing_splits is truthy, the dev set may still be sampled.
            if sample_missing_splits:
                sample_missing_splits = "only_dev"

            # generate the test file
            test_file = determine_tsv_file(
                filename="raganato_ALL",
                data_folder=data_folder,
                cut_multisense=cut_multisense,
            )
        else:
            test_file = None

        train_file = determine_tsv_file(filename="omsti", data_folder=data_folder, cut_multisense=cut_multisense)

        super(WSD_OMSTI, self).__init__(
            data_folder=data_folder,
            column_format=columns,
            train_file=train_file,
            test_file=test_file,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            column_delimiter="\t",
            autofind_splits=False,
            tag_to_bioes=tag_to_bioes,
            label_name_map=label_name_map,
            banned_sentences=banned_sentences,
            sample_missing_splits=sample_missing_splits,
        )
class WSD_TRAINOMATIC(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
in_memory: bool = True,
columns={0: "text", 3: "sense"},
tag_to_bioes=None,
label_name_map: Dict[str, str] = None,
banned_sentences: List[str] = None,
sample_missing_splits: Union[bool, str] = True,
use_raganato_ALL_as_test_data: bool = False,
):
"""
Initialize Train-O-Matic provided in | |
<gh_stars>0
"""
Functions for use with general model notebooks
"""
import os

import gdal
import numpy as np
import ogr
def process_raster_data(src, method, NCOL, NROW, gt, shapeproj, hnoflo, conversion=1.0):
    '''
    Project a raster data source (ESRI grid, GeoTiff, .IMG and many other
    formats) onto the model grid and return it as a 2D numpy array.

    src : string
        complete path to raster data source
    method : gdal resampling method, e.g. gdal.GRA_NearestNeighbour (nearest
        neighbour), gdal.GRA_Bilinear (2x2 kernel), gdal.GRA_Cubic (4x4),
        gdal.GRA_CubicSpline (4x4), gdal.GRA_Lanczos (6x6), gdal.GRA_Average,
        gdal.GRA_Mode, gdal.GRA_Max, gdal.GRA_Min, gdal.GRA_Med, gdal.GRA_Q1,
        gdal.GRA_Q3 (statistics over all non-NODATA contributing pixels)
    NCOL, NROW : number of columns and rows of the target (model) grid
    gt : geotransform list
    shapeproj : coordinate reference system of NHDPlus (or other desired projection)
    hnoflo : value used for missing data
    conversion : float
        factor applied to raw data values to change units

    returns:
    2D array of the raster source projected onto the model grid, or an array
    filled with hnoflo (and a warning message) when the source does not exist.
    '''
    NCOL = int(NCOL)
    NROW = int(NROW)
    hnoflo = float(hnoflo)

    # guard clause: missing source -> hnoflo-filled grid plus a warning
    if not os.path.exists(src):
        grid = np.ones((NROW, NCOL)) * hnoflo
        print('Data not processed for\n{}\n Check that the file exists and path is correct'.format(src))
        return grid

    source_raster = gdal.Open(src)
    target = make_grid(NCOL, NROW, gt, shapeproj, hnoflo)
    gdal.ReprojectImage(source_raster, target, source_raster.GetProjection(), shapeproj, method)

    grid = target.GetRasterBand(1).ReadAsArray() * conversion

    # release the GDAL datasets (GDAL closes on dereference)
    target = None
    source_raster = None

    return grid
def process_vector_data(src, attribute, NCOL, NROW, gt, shapeproj, hnoflo):
    '''
    Takes a vector data source (ESRI shapefile) and rasterizes it onto the
    model grid, returning a 2D numpy array.

    src : complete path to vector data source
    attribute : field in the data table to assign to rasterized pixels
    NCOL, NROW : number of columns and rows of the target (model) grid
    gt : geotransform list
    shapeproj : coordinate reference system of NHDPlus
    hnoflo : value used for missing data

    returns:
    2D array of the vector source rasterized onto the model grid, or an array
    filled with hnoflo (and a warning message) when the source does not exist.
    '''
    if os.path.exists(src):
        datasource = ogr.Open(src)
        layer = datasource.GetLayer()

        # burn the attribute values into a blank in-memory raster aligned
        # with the model grid (background value 0)
        raster = make_grid(NCOL, NROW, gt, shapeproj, 0)
        args = 'ATTRIBUTE={}'.format(attribute)
        gdal.RasterizeLayer(raster, [1], layer, options=[args])
        grid = raster.GetRasterBand(1).ReadAsArray()

        # BUG FIX: the original clobbered the `src` path parameter with the
        # in-memory raster and then assigned None to an undefined name `dst`;
        # release the raster and the OGR handles explicitly instead.
        raster = None
        layer = None
        datasource = None
    else:
        grid = np.ones((NROW, NCOL)) * hnoflo
        print('Data not processed for\n{}\n Check that the file exists and path is correct'.format(src))
    return grid
def make_raster(dst_file, data, NCOL, NROW, gt, proj, nodata):
    '''
    Write a 2D numpy array to a GeoTiff file.

    dst_file : name of the file to write
    data : 2D numpy array
    NCOL, NROW : number of columns and rows; may coincide with a MODFLOW grid
    gt : 6-element geotransform list [C, A, B, F, E, D] describing the upper
        left pixel (letters from the original GDAL documentation):
        C = x map coordinate of the upper left corner of the upper left pixel
        A = x distance from C to the upper right corner of that pixel
        B = x distance from C to the lower left corner of that pixel
        F = y map coordinate of the upper left corner of the upper left pixel
        E = y distance from C to the lower left corner of that pixel
        D = y distance from C to the upper right corner of that pixel
        With no rotation B=D=0; with square cells A=-E.
    proj : projection of the GeoTiff
    nodata : value to use as missing data in the GeoTiff
    '''
    import gdal

    tiff_driver = gdal.GetDriverByName("GTiff")
    out_raster = tiff_driver.Create(dst_file, NCOL, NROW, 1, gdal.GDT_Float32)
    out_raster.SetGeoTransform(gt)
    out_raster.SetProjection(proj)

    out_band = out_raster.GetRasterBand(1)
    out_band.SetNoDataValue(nodata)
    out_band.WriteArray(data)

    # dereferencing flushes and closes the dataset
    out_raster = None
def make_grid(NCOL, NROW, gt, proj, nodata):  # NOTE: in NB1_v3 nodata defaults to hnoflow; here, it must be provided
    '''
    Create a blank (zero-filled) raster image in memory.

    NCOL, NROW : number of columns and rows; may coincide with a MODFLOW grid
    gt : 6-element geotransform list [C, A, B, F, E, D] describing the upper
        left pixel (letters from the original GDAL documentation):
        C = x map coordinate of the upper left corner of the upper left pixel
        A = x distance from C to the upper right corner of that pixel
        B = x distance from C to the lower left corner of that pixel
        F = y map coordinate of the upper left corner of the upper left pixel
        E = y distance from C to the lower left corner of that pixel
        D = y distance from C to the upper right corner of that pixel
        With no rotation B=D=0; with square cells A=-E.
    proj : projection to stamp on the in-memory raster
    nodata : value to use as missing data
    '''
    import gdal

    mem_driver = gdal.GetDriverByName('MEM')
    blank = mem_driver.Create('', NCOL, NROW, 1, gdal.GDT_Float32)
    blank.SetGeoTransform(gt)
    blank.SetProjection(proj)

    band = blank.GetRasterBand(1)
    band.SetNoDataValue(nodata)
    band.WriteArray(np.zeros((NROW, NCOL)))

    return blank
def process_mohp_data(tif_files):
    '''
    Loop over a list of MOHP tif files and project each onto the model grid.
    Similar to "process_raster_data" except that a transformation from the
    ESRI WKT format to a generic WKT format is needed. When the MOHP data
    source is finalized, this function can be modified to work with the
    final format.

    tif_files : list of complete paths to MOHP raster sources (one per
        stream order)

    requires global variables (for now):
    NCOL, NROW : number of columns and rows
    gt : geotransform list
    shapeproj : coordinate reference system of NHDPlus (or other desired projection)
    hnoflo : to be used as missing data value (from model_spec.py)

    returns:
    2D array with one column per stream-order MOHP raster and one row per
    model cell (NCOL * NROW rows). Zeros when tif_files is empty.
    '''
    import gdal
    gdal.UseExceptions()
    import osr

    arr = np.zeros((NCOL * NROW, len(tif_files)))
    for col, src in enumerate(tif_files):
        hp = gdal.Open(src)

        # BUG FIX: make_grid requires a nodata value; use hnoflo, the model's
        # missing-data marker (same choice as in process_raster_data)
        dest = make_grid(NCOL, NROW, gt, shapeproj, hnoflo)

        # convert the ESRI WKT projection to generic OGC WKT
        srs = osr.SpatialReference()
        srs.ImportFromWkt(hp.GetProjection())
        srs.MorphFromESRI()
        hp.SetProjection(srs.ExportToWkt())

        gdal.ReprojectImage(hp, dest, hp.GetProjection(), shapeproj, gdal.GRA_NearestNeighbour)
        hp_grd = dest.GetRasterBand(1).ReadAsArray()
        # scale raw values by 1/10000 -- presumably MOHP is stored as an
        # integer fraction; TODO confirm against the MOHP data documentation
        hp_grd = hp_grd / 10000.

        # BUG FIX: release the dataset that was actually created ('dest';
        # the original assigned None to an unrelated name 'dst')
        dest = None
        hp = None

        arr[:, col] = hp_grd.ravel()
    return arr
def make_clockwise(coords):
'''
Function to determine direction of vertices of a polygon (clockwise or CCW).
Probably not needed, but here just in case.
coords : array with dim (n, 2)
n is number of vertices in the polygon. The last vertex is the same
as the first to close the polygon. The first column is x and the second is y.
'''
# if the points are counterclockwise, reverse them
x1 = coords[:-1, 0]
x2 = coords[1:, 0]
y1 = coords[:-1, 1]
y2 = coords[1:, 1]
ccw = np.sum((x2 - x1) * (y2 + y1)) < 0
if ccw:
| |
- carry on
continue
# Yes, we got a note.
# Well, maybe the child fooled us and is just playing dead?
if os.WIFSTOPPED (wstat) or \
os.WIFCONTINUED (wstat) :
# we don't care if someone stopped/resumed the child -- that
# is up to higher powers. For our purposes, the child is
# alive. Ha!
continue
# not stopped, poor thing... - soooo, what happened?? But hey,
# either way, its dead -- make sure it stays dead, to avoid
# zombie apocalypse...
self.child = None
self.finalize (wstat=wstat)
return output
# --------------------------------------------------------------------
#
def alive (self, recover=False) :
    """
    Try to determine if the child process is still active.  If not, mark
    the child as dead and close all IO descriptors etc. (:func:`finalize`).

    If `recover` is `True` and the child is indeed dead, we attempt to
    re-initialize it (:func:`initialize`).  We only do that for so many
    times (`self.recover_max`) before giving up -- at that point it seems
    likely that the child exits due to a re-occurring operations condition.

    Note that upstream consumers of the :class:`PTYProcess` should be
    careful to only use `recover=True` when they can indeed handle
    a disconnected/reconnected client at that point, i.e. if there are no
    assumptions on persistent state beyond those in control of the upstream
    consumers themselves.

    :param recover: attempt to restart a dead child (bounded by
        ``self.recover_max``).
    :returns: True if the child is alive, False otherwise.
    """

    with self.rlock :

        # do we have a child which we can check?
        if self.child :

            wstat = None
            while True :

                # hey, kiddo, whats up?
                try :
                    wpid, wstat = os.waitpid (self.child, os.WNOHANG)

                except OSError as e :
                    if e.errno == errno.ECHILD :
                        # child disappeared, go to zombie cleanup routine
                        break

                    # BUG FIX: `raise "..."` raised a plain string, which is
                    # a TypeError on Python 3, and the '%s' placeholder was
                    # never filled in.  Raise the exception type the sibling
                    # `read` method uses, carrying the original error.
                    raise se.NoSuccess ("waitpid failed on wait (%s)" % e)

                # did we get a note about child termination?
                if 0 == wpid :
                    # nope, all is well - carry on
                    return True

                # Yes, we got a note.
                # Well, maybe the child fooled us and is just playing dead?
                if os.WIFSTOPPED (wstat) or \
                   os.WIFCONTINUED (wstat) :
                    # we don't care if someone stopped/resumed the child --
                    # that is up to higher powers.  For our purposes, the
                    # child is alive.  Ha!
                    continue

                break

            # so its dead -- make sure it stays dead, to avoid zombie
            # apocalypse...
            self.child = None
            self.finalize (wstat=wstat)

        # check if we can attempt a post-mortem revival though
        if not recover :
            # nope, we are on holy ground - revival not allowed.
            return False

        # we are allowed to revive!  So can we try one more time...
        # pleeeease??  (for cats, allow up to 9 attempts; for Buddhists,
        # always allow to reincarnate, etc.)
        if self.recover_attempts >= self.recover_max :
            # nope, its gone for good - just report the sad news
            return False

        # MEDIIIIC!!!!
        self.recover_attempts += 1
        self.initialize ()

        # well, now we don't trust the child anymore, of course!  So we
        # check again.  Yes, this is recursive -- but note that
        # recover_attempts get incremented on every iteration, and this will
        # eventually lead to call termination (tm).
        return self.alive (recover=True)
# --------------------------------------------------------------------
#
def autopsy (self) :
    """
    Return a diagnostics information string for a dead child process:
    exit code, exit signal, and the last output seen (tail of the cache).
    If the child is in fact still alive, a "false alarm" message is
    returned instead.
    """

    with self.rlock :

        if self.child :
            # Boooh!
            return "false alarm, process %s is alive!" % self.child

        # try a last read to grab whatever we can get (from cache)
        data = ''
        try :
            data  = self.tail
            data += self.read (size=0, timeout=-1)
        except Exception :
            # best effort only -- a failed read must not mask the
            # diagnostics below (but don't swallow KeyboardInterrupt etc.,
            # as the original bare `except:` did)
            pass

        ret  = ""
        ret += " exit code : %s\n" % self.exit_code
        ret += " exit signal: %s\n" % self.exit_signal
        ret += " last output: %s\n" % data

        return ret
# --------------------------------------------------------------------
#
def read (self, size=0, timeout=0, _force=False) :
    """
    read some data from the child.  By default, the method reads whatever is
    available on the next read, up to _CHUNKSIZE, but other read sizes can
    be specified.

    The method will return whatever data it has at timeout::

      timeout == 0 : return the content of the first successful read, with
                     whatever data up to 'size' have been found.
      timeout <  0 : return after first read attempt, even if no data have
                     been available.

    If no data are found, the method returns an empty string (not None).

    This method will not fill the cache, but will just read whatever data it
    needs (FIXME).

    Note: the returned lines do *not* get '\\\\r' stripped.
    """

    with self.rlock :

        # set when we hit EOF (MacOS only, see below), so that the outer
        # exception handler can re-raise the EOF error unchanged
        found_eof = False

        try:
            # start the timeout timer right now.  Note that even if timeout
            # is short, and child.poll is slow, we will nevertheless attempt
            # at least one read...
            start = time.time ()
            ret = ""

            # read until we have enough data, or hit timeout ceiling...
            while True :

                # first, lets see if we still have data in the cache we
                # can return
                if len (self.cache) :

                    # size == 0: caller takes everything we have
                    if not size :
                        ret = self.cache
                        self.cache = ""
                        # `tail` keeps the last 256 chars ever returned,
                        # for diagnostics (see autopsy)
                        self.tail += ret
                        self.tail = self.tail[-256:]
                        return ret

                    # we don't even need all of the cache
                    elif size <= len (self.cache) :
                        ret = self.cache[:size]
                        self.cache = self.cache[size:]
                        self.tail += ret
                        self.tail = self.tail[-256:]
                        return ret

                # otherwise we need to read some more data, right? idle
                # wait 'til the next data chunk arrives, or 'til _POLLDELAY
                rlist, _, _ = select.select ([self.parent_out], [], [],
                                             _POLLDELAY)

                # got some data?
                for f in rlist:
                    # read whatever we still need
                    readsize = _CHUNKSIZE
                    if size:
                        readsize = size - len(ret)
                    buf = os.read (f, readsize)

                    # a zero-length read signals EOF on the pty (observed
                    # on MacOS) -- finalize and raise
                    if len(buf) == 0 and sys.platform == 'darwin' :
                        self.logger.debug ("read : MacOS EOF")
                        self.finalize ()
                        found_eof = True
                        raise se.NoSuccess("unexpected EOF: %s" % self.tail)

                    # normalize line endings before caching
                    tmp = buf.decode('utf-8')
                    self.cache += tmp.replace ('\r', '')

                    # printable copy for debug logging only
                    log = tmp.replace ('\r', '')
                    log = log.replace ('\n', '\\n')

                    if len(log) > _DEBUG_MAX :
                        self.logger.debug ("read : [%5d] [%5d] (%s ... %s)"
                                          % (f, len(log), log[:30], log[-30:]))
                    else :
                        self.logger.debug ("read : [%5d] [%5d] (%s)"
                                          % (f, len(log), log))

                    # lets see if we still got any data in the cache we
                    # can return
                    if len (self.cache) :

                        if not size :
                            ret = self.cache
                            self.cache = ""
                            self.tail += ret
                            self.tail = self.tail[-256:]
                            return ret

                        # we don't even need all of the cache
                        elif size <= len (self.cache) :
                            ret = self.cache[:size]
                            self.cache = self.cache[size:]
                            self.tail += ret
                            self.tail = self.tail[-256:]
                            return ret

                # at this point, we do not have sufficient data -- only
                # return on timeout
                if timeout == 0 :
                    # only return if we have data
                    if len (self.cache) :
                        ret = self.cache
                        self.cache = ""
                        self.tail += ret
                        self.tail = self.tail[-256:]
                        return ret

                elif timeout < 0 :
                    # return whether we have data or not
                    ret = self.cache
                    self.cache = ""
                    self.tail += ret
                    self.tail = self.tail[-256:]
                    return ret

                else : # timeout > 0
                    # return if timeout is reached
                    now = time.time ()
                    if (now - start) > timeout :
                        ret = self.cache
                        self.cache = ""
                        self.tail += ret
                        self.tail = self.tail[-256:]
                        return ret

        except Exception as e :
            if found_eof :
                # propagate the EOF error raised above unchanged
                raise e
            raise se.NoSuccess ("read from process failed '%s' : (%s)"
                               % (e, self.tail))
# ----------------------------------------------------------------
#
def find (self, patterns, timeout=0) :
"""
This methods reads bytes from the child process until a | |
TS>1e4 or is extended
renamed = np.array([s.distance<close_tol for name,s in df[~df.fl8y].iterrows()],bool)
topsr=[s.distance<0.001 and s.otherid.startswith('PSR') for name,s in df.iterrows()]
print ('Sources failing criteria :\n\tRenamed : {} ({} were PSRs)'.format(sum(renamed), sum(topsr)))
too_close = np.array([close_tol<d<0.5 for d in df.distance],bool);
print ('\ttoo close: {}'.format(sum(too_close) ))
not_close = np.array([d>0.5 for d in df.distance],bool);
strong_or_extended = (df.otherts>1e4) | df.other_extended
print ('\tnearest is strong or extended: {}'.format(sum(strong_or_extended)))
subset = df[not_close & (~strong_or_extended)]
print ('remain: {}'.format(len(subset)))
return subset
def rejected_analysis(self, df=None, close_tol=0.15):
    """
    Analyze the pointlike sources that do not appear in the gtlike list
    (self.gdf), attach nearest-4FGL-neighbor info to them, and return the
    subset that is neither a rename nor explained by a nearby strong or
    extended source.

    df : DataFrame or None -- source list to compare; defaults to self.df
    close_tol : float -- separation (deg) below which a match is treated
        as a simple rename
    """
    df = self.df if df is None else df
    gnames = set(self.gdf.index)
    pnames = set(df.index)
    gdiff = gnames.difference(pnames)  # in gtlike only (not used below)
    pdiff = pnames.difference(gnames)  # in pointlike only -> "rejected"
    self.rejected_count = len(pdiff)
    self.rejected = rej = df.loc[list(pdiff)]
    add_galactic(self.rejected)

    # look for nearest 4FGL source in rejected list: add name, distance to DataFrame
    close = tools.find_close(rej, self.gdf)
    rej.loc[:,'otherid'] = close.otherid
    rej.loc[:, 'distance'] = close.distance
    # TS and extendedness of the nearest gtlike match, looked up row by row
    rej.loc[:,'otherts'] = [self.gdf.loc[s.otherid].ts for name,s in rej.iterrows() ]
    rej.loc[:,'other_extended'] = [self.gdf.loc[s.otherid].extended for name,s in rej.iterrows() ]

    # create subset that are
    #  * not just a rename,
    #  * more than 0.5 deg away
    #  * closest does not have TS>1e4 or is extended
    renamed = np.array([s.distance<close_tol for name,s in rej.iterrows()],bool)
    topsr=[s.distance<0.001 and s.otherid.startswith('PSR') for name,s in rej.iterrows()]
    print ('Sources rejected by gtlike:\n\tRenamed : {} ({} were PSRs)'.format(sum(renamed), sum(topsr)))
    too_close = np.array([close_tol<d<0.5 for d in rej.distance],bool);
    print ('\ttoo close: {}'.format(sum(too_close) ))
    not_close = np.array([d>0.5 for d in rej.distance],bool);
    strong_or_extended = (rej.otherts>1e4) | rej.other_extended
    print ('\tnearest is strong or extended: {}'.format(sum(strong_or_extended)))
    subset = rej[not_close & (~strong_or_extended)]
    # NOTE(review): rejected_count set above (full rejected list) is
    # overwritten here with the filtered subset size -- confirm downstream
    # consumers expect the subset count
    self.rejected_count = len(subset)
    return subset
def rejected_source_info(self, **kwargs):
    """Info about pointlike sources rejected by gtlike analysis
    Look only at those which are:
    <ul><li>Not just a rename</li>
    <li>More than 0.5 deg from any 4FGL source</li>
    <li>Closest source has TS<1e4</li> and is not extended</li>
    </ul>
    TS and locations of the %(rejected_count)s sources with TS>50 <b>not</b> in 4FGL.
    %(rejected_4fgl)s
    """
    # NOTE: the docstring above is runtime-templated (%(...)s substitution
    # for the HTML report) -- do not edit its placeholders casually.
    subset = self.seed_subset.query('b4fgl==False')

    # special treatment for those with TS>50
    dfx = subset.query('ts>50')
    # NOTE(review): this relies on Python-2 `map` returning a list -- `sd`
    # is iterated twice below, which would silently yield empty results with
    # a Python-3 map generator; verify the target interpreter
    sd = map (SkyDir, dfx.ra, dfx.dec)
    glon = np.array(map(lambda x: x.l(), sd))
    glon[glon>180]-=360  # wrap galactic longitude to (-180, 180]
    glat = map(lambda x: x.b(), sd)
    dfx.loc[:,'singlat'] = np.sin(np.radians(glat))
    self.rejected_count = len(dfx)

    # write the selected columns out for the report download link
    t= dfx['ts ra dec distance otherid otherts singlat pindex beta acat locqual'.split()];
    t.index.name='name'
    t.to_csv('rejected_4fgl.csv')
    print ('wrote file {}'.format('rejected_4fgl.csv'))

    fig = self.source_info_plots(subset, **kwargs)

    # HTML table substituted into the docstring template above
    self.rejected_4fgl =html_table(t,
        name=self.plotfolder+'/rejected',
        heading='<h4>{} Not in 4FGL w/ TS>50 </h4>'.format(len(t)),
        href=True, float_format=FloatFormat(2) )
    return fig
def lost_source_info(self, **kwargs):
    """Info on lost sources
    Check the 4FGL nickname list for sources that are not in this model. Remove those that are close, but
    renamed in the uw list as LAT pulsars.
    <p>
    Left: TS values
    Right: locations, showing the high TS values
    <p> %(pulsar_rename)s
    <p> Link to a csv file containing a list of the %(number_lost)s sources that were lost:
    <a href="../../%(lost_sources)s?download=true">%(lost_sources)s</a>
    These are entries for which the NickName field does not have a corresponding source.
    """
    # NOTE: the docstring above is runtime-templated (%(...)s substitution
    # for the HTML report) -- do not edit its placeholders casually.

    # first, those in 4FGL without corresponding source in this list
    lost = self.gdf.query('~(uw_ts>0)')
    close = tools.find_close(lost, self.df)
    close['ts'] = lost.ts
    # NOTE(review): the `map` calls below rely on Python-2 semantics (map
    # returning a list); under Python 3 these would need list(map(...)) --
    # verify the target interpreter
    close['roi'] = map( lambda i: int(self.df.loc[close.iloc[i].otherid].roiname[-4:]), range(len(close)))

    # to remove PSR guys
    ppp = np.array(map(lambda n:not n.startswith('PSR'), close.index))
    qqq = np.array(map(lambda n: not n.startswith('PSR'), close.otherid))
    pr = list(close[~qqq].otherid)
    self.pulsar_rename = 'LAT pulsars added since UW names used for 4FGL nicknames:<br>{}'.format(pr)

    # check for renamed sources
    near=close[(ppp)&(qqq)].query('distance<0.1').sort_values(by='ts', ascending=False)
    print (near)

    # these are truuly lost
    far = close[(ppp)&(qqq)].query('distance>0.1').sort_values(by='ts', ascending=False)

    fig = self.source_info_plots(lost.loc[far.index], **kwargs)

    # write the lost-source list for the report download link
    lost_name = '{}/lost_sources.csv'.format(self.plotfolder)
    lost.index.name='name'
    self.number_lost = len(far)
    lost.loc[far.index,'sname ra dec ts pindex eflux100 r95'.split()].to_csv(lost_name)
    print ('Wrote file "{}" with info on {} missing sources'.format(lost_name, self.number_lost))
    self.lost_sources = lost_name
    return fig
    def load_pickled_info(self, path='psc_check/info', debug=False):
        # Load the per-ROI pickle files written by the "psccheck" task and
        # assemble a DataFrame (cached as self.ts_df) comparing pointlike
        # ("pt") and gtlike ("gt") fits: TS, chisq, energy flux, index.
        # The `debug` flag returns intermediate objects for inspection.
        # NOTE(review): relies on Python 2 semantics -- map() must return a
        # list, and pickle.load(open(f)) uses text-mode files.
        # if hasattr(self, 'ts_df'):
        #     return self.ts_df
        # get the TS and chisq values
        ff =sorted(glob.glob(path+'/*'))
        txt = 'read {} pickle files from {}'.format(len(ff), path)
        print (txt)
        assert len(ff)>0, 'No psc_check files found: must run "psccheck"'
        dd = map(lambda f:pickle.load(open(f)), ff)
        z = dict()
        gtmodel=dict()
        # Each file holds (pointlike, gtlike) entry pairs; from the usage
        # below, an entry is (nickname, model, ts, chisq).
        for roi,d in enumerate(dd):
            for a,b in d:
                try:
                    # integrated energy flux, scaled by 1e6
                    eflux_pt=a[1].i_flux(e_weight=1)*1e6;
                    eflux_gt=b[1].i_flux(e_weight=1)*1e6
                except Exception as msg:
                    print (b[0],msg)
                    eflux_pt=eflux_gt=np.nan
                z[b[0]] = dict(
                    ts_pt=a[2], ts_gt=b[2],
                    chisq_pt=a[3], chisq_gt=b[3],
                    eflux_pt=eflux_pt, eflux_gt=eflux_gt,
                    nickname=a[0], roi=roi,
                    index_pt=a[1][1],
                    )
                gtmodel[a[0]]=b[1]
        q = self.ts_df=pd.DataFrame(z).T
        self.gtmodel =gtmodel
        if debug:
            return q
        # add positional info, using nickname field as a key into the model dataframe (which has compressed names)
        nicknames = map(lambda n:n.replace(' ',''), self.ts_df.nickname.values)
        # check for now missing nicknames
        indexset= set(self.df.index);
        nicknameset = set(nicknames)
        missing_nicknames = list(nicknameset.difference(indexset))
        if len(missing_nicknames)>0:
            print ('Warning: following nicknames not in current model: {}'.format(np.array(missing_nicknames)))
        # keep only the rows whose nickname is still in the current model
        nicknames = list(indexset.intersection(nicknameset))
        cnick = [n.replace(' ','') for n in nicknames]
        qv = [n.replace(' ','') for n in q.nickname.values];
        ok = np.array([name in cnick for name in qv], bool)
        q = self.ts_df = q[ok]
        if debug:
            return nicknames
        sdir = self.df.loc[nicknames,'skydir'].values
        if debug: return sdir
        # galactic coordinates, longitude wrapped to (-180, 180]
        glon = np.array(map(lambda s:s.l(), sdir),float)
        glon[glon>180]-=360
        glat = map(lambda s:s.b(), sdir)
        # NOTE(review): singlat is computed but not used in this method
        singlat = np.sin(np.radians(glat))
        q['glon']=glon; q['glat']=glat
        # construct quality difference
        a,b = q.chisq_pt.values, q.chisq_gt.values
        for x in a,b:
            # a missing chisq counts as a fixed bad value
            x[pd.isna(x)]=100
        delta = np.array( b-a, float )#/ np.array(q.ts_pt,float)**power, float)
        q['delta']=delta
        # flux ratio
        q['eflux_ratio'] = q.eflux_pt/q.eflux_gt
        return q
    def comparison_plots(self):
        """Comparison plots for corresponding sources
        <br>Upper Left: Test Statistic Comparison; the UW value is for the full energy range, so is nearly always greater.
        <br>Center left: Localization radius comparison. The UW one is almost always better since it has more data
        <br>Center right: Spectral index comparison. This is always the index used for the fit spectral model
        """
        # NOTE(review): the docstring above is rendered into the HTML report,
        # so its text is part of the output -- edit with care.
        # NOTE(review): skymodel and dfuw are assigned but unused here.
        skymodel=self.skymodel
        df=self.gdf; dfuw=self.df
        # mask flagging pulsars, overplotted in red by cplot
        psr = np.array([name.startswith('PSR') for name in df.index],bool);
        dfok = df
        # Plot the ratio b/a versus a, clipping both axes to the given limits.
        def cplot(ax, a,b, xlim, label, ylim=(0.,2.),xscale='log'):
            ax.semilogx(a.clip(*xlim), (b/a).clip(*ylim), '.b');
            ax.semilogx(a[psr].clip(*xlim), (b/a)[psr].clip(*ylim), '.r', label='PSR');
            ax.axhline(1.0, ls='--', color='g');
            ax.set( xlabel=label, ylabel='UW/gtlike ratio', xlim=xlim,
                ylim=ylim, xscale=xscale)
            ax.set_xlabel(label,fontsize=14)
            ax.grid(alpha=0.5)
            ax.legend()
            #ax.set_yticks([0.6, 0.8, 1.0, 1.2, 1.4, 1.6])
            #ax.set_yticklabels(['0.6','0.8','1.0', '1.2', '1.4', '1.6'])
        fig, axx = plt.subplots(4,1, figsize=(12,15))
        plt.subplots_adjust(left=0.05, top = 0.95, hspace=0.3 )
        cplot(axx[0], dfok.ts, dfok.uw_ts, (20,1e5), 'TS')
        cplot(axx[1], dfok.r95,dfok.uw_r95,(8e-3,0.5),'R95 [deg]')
        # NOTE(review): 1.602e-6 is erg per MeV -- presumably converts the uw
        # eflux units to match the gtlike erg-based values; verify direction.
        cplot(axx[2], dfok.eflux100, dfok.uw_eflux100/1.602e-6, (4e-7, 1e-3),
            'eflux100 [erg/(s cm^2)]')
        cplot(axx[3], dfok.pindex, dfok.uw_pindex, (1.0, 3.5),'pindex', xscale='linear')
        fig.suptitle('Comparison of values for common sources', fontsize=14);
        fig.set_facecolor('white')
        return fig
    def setup_quality(self):
        # Load the pickled pointlike-vs-gtlike fit info and cache it on
        # self.info for quality_check_plots().
        #takes some time
        self.info = self.load_pickled_info()
def quality_check_plots(self, ylim=(-5,25), tsmin=100):
"""Fit quality check
Compare fit consistency values of this model with that for the %(cat)s model, that is,
as caculated with the pointlike implementation,
but using the %(cat)s spectra determined by gtlike.
<br><b>Upper Left:</b> Scatter plot of the TS value difference, normalized by the square root of the uw value
<br><b>Upper Right:</b> Histogram of the normalized TS difference, for TS_uw>100.
<br><b>Lower Left: </b> Positions of sources in each tail
<br><b>Lower Right: </b> Positions of sources in each tail, along gal. plane
<b><h3>%(deltax2_positive)s</h3>
<b><h3>%(deltax2_positive_psr)s</h3>
<b><h3>%(deltax2_negative)s</h3>
"""
if not hasattr(self, 'info'):
self.setup_quality()
q = self.info
delta_clip = q.delta.clip(*ylim)
delta_label = '(chi2_uw - chi2_g)/sqrt(TS_uw)'
# make a table of the outliers
neg =(q.delta<=ylim[0]) & (q.ts_pt>tsmin)
pos =(q.delta>=ylim[1]) & (q.ts_pt>tsmin)
psr = np.array([name.startswith('PSR') for name in q.nickname])
print ('Outliers (above TS={}): {} negative, {} positive'.format(tsmin, sum(neg), sum(pos)))
try:
self.deltax2_positive=html_table(q[pos & ~psr].sort_values(by='delta', ascending=False),
name=self.plotfolder+'/deltax2_positive',
heading='<h4>pointlike better: (non-pulsars) {}</h4>'.format(sum(pos & ~psr)),
href=True, href_pattern='psc_check/sed/%s*.jpg', )
self.deltax2_positive_psr=html_table(q[pos & psr].sort_values(by='delta', ascending=False),
name=self.plotfolder+'/deltax2_positive_psr',
heading='<h4>pointlike better (pulsars): {}</h4>'.format(sum(pos & psr)),
href=True, href_pattern='psc_check/sed/%s*.jpg', )
if sum(neg)>0:
self.deltax2_negative=html_table(q[neg].sort_values(by='delta', ascending=True),
name=self.plotfolder+'/deltax2_negative',
heading='<h4>gtlike better: {}</h4>'.format(sum(neg)),
href=True, href_pattern='psc_check/sed/%s*.jpg', )
else:
self.deltax2_negative = '<h4>No gtlike fits better than Delta TS = {}</h4>'.format(np.abs(ylim[0]))
except Exception as msg:
print ('Failed to create tables of of outliers: "{}"'.format(msg))
fig, axy = plt.subplots(2,2, figsize=(15,10))
plt.subplots_adjust(wspace=0.25)
axx = axy.flatten()
ax = axx[0] # a)
ridge = np.array((abs(q.glon)<60.) & (abs(q.glat)<5), bool)
ax.semilogx(q.ts_pt.clip(10, 1e5), delta_clip, '.b')
ax.semilogx(q[ridge].ts_pt.clip(10, 1e5), delta_clip[ridge], '.r', label='ridge')
ax.axhline(0, color='orange')
ax.set(ylabel=delta_label)
ax.legend(loc='lower right')
ax = axx[1] # b)
hkw = dict(bins= np.linspace(ylim[0],ylim[1],36), histtype='step', lw=2, log=False)
delta_clip_ts = delta_clip[q.ts_pt>100]
ax.hist(delta_clip_ts,**hkw);
hkw.update(histtype='stepfilled')
ax.hist(delta_clip_ts[delta_clip_ts<=ylim[0]], color='green', **hkw)
ax.hist(delta_clip_ts[delta_clip_ts>=ylim[1]], color='red', **hkw)
ax.axvline(0, color='orange')
ax.set_xlabel(delta_label)
ax = axx[2] # c)
cut = (q.delta>=10) | (q.delta<=-2)
singlat = np.sin(np.radians(q.glat))
self.basic_skyplot(ax, q.glon[cut], singlat[cut],
delta_clip[cut], s=20, cmap=plt.get_cmap('coolwarm'));
ax = axx[3] # d)
self.basic_skyplot(ax, q.glon[cut], singlat[cut],
delta_clip[cut], s=20, cmap=plt.get_cmap('coolwarm'), aspect=5*180.);
ax.set(ylim | |
<reponame>pandegroup/vs-utils<gh_stars>10-100
"""
Helper Classes and Functions for docking fingerprint computation.
The code below contains heavily modified parts of Jacob Durrant's
NNScore 2.0.1. The following notice is copied from the original NNScore
file:
# NNScore 2.01 is released under the GNU General Public License (see
# http://www.gnu.org/licenses/gpl.html).
# If you have any questions, comments, or suggestions, please don't
# hesitate to contact me, <NAME>, at jdurrant [at] ucsd [dot]
# edu. If you use NNScore 2.01 in your work, please cite [REFERENCE
# HERE].
"""
import math
import os
import subprocess
import openbabel
import numpy as np
__author__ = "<NAME> and <NAME>"
__license__ = "GNU General Public License"
def force_partial_charge_computation(mol):
  """Trigger Gasteiger partial-charge computation for a molecule.

  Calling GetPartialCharge() on any atom makes openbabel compute partial
  charges for the whole molecule. This indirection is needed because the
  python openbabel API does not expose the OBGastChrg object that performs
  the computation.

  Parameters
  ----------
  mol: OBMol
    Molecule on which we compute partial charges.
  """
  for atom in openbabel.OBMolAtomIter(mol):
    atom.GetPartialCharge()
def pdbqt_to_pdb(input_file, output_directory):
  """Convert pdbqt file to pdb file.

  Invokes the external ``obabel`` executable and writes
  ``<basename>.pdb`` into *output_directory*.

  Parameters
  ----------
  input_file: String
    Path to input file.
  output_directory: String
    Path to desired output directory.
  """
  # Everything before the first ".", so "x.pdbqt" -> "x".
  basename = os.path.basename(input_file).split(".")[0]
  pdb_output = os.path.join(output_directory, basename + ".pdb")
  # Parenthesized single-argument form prints identically on Python 2 and 3;
  # the original py2-only print statements broke importing under Python 3.
  print("About to write to")
  print(pdb_output)
  with open(pdb_output, "wb") as outfile:
    obabel_command = ["obabel", "-ipdbqt", input_file, "-opdb"]
    # NOTE(review): the obabel exit status is ignored (best-effort), as in
    # the original code.
    subprocess.Popen(obabel_command, stdout=outfile).wait()
def hydrogenate_and_compute_partial_charges(input_file, input_format,
                                            output_directory,
                                            hyd_output=None,
                                            pdbqt_output=None,
                                            verbose=False):
  """Outputs a hydrogenated pdb and a pdbqt with partial charges.
  Takes an input file in specified format. Generates two outputs:
  -) A pdb file that contains a hydrogenated (at pH 7.4) version of
  original compound.
  -) A pdbqt file that has computed Gasteiger partial charges. This pdbqt
  file is build from the hydrogenated pdb.
  TODO(rbharath): Can do a bit of refactoring between this function and
  pdbqt_to_pdb.
  Parameters
  ----------
  input_file: String
    Path to input file.
  input_format: String
    Name of input format.
  output_directory: String
    Path to desired output directory.
  hyd_output: String, optional
    Path for the hydrogenated pdb output. Defaults to
    "<basename>_hyd.pdb" inside output_directory.
  pdbqt_output: String, optional
    Path for the pdbqt output. Defaults to "<basename>_hyd.pdbqt" inside
    output_directory.
  verbose: bool
    When True, print progress messages (Python 2 print statements).
  """
  basename = os.path.basename(input_file).split(".")[0]
  if hyd_output is None:
    hyd_output = os.path.join(output_directory, basename + "_hyd.pdb")
  if pdbqt_output is None:
    pdbqt_output = os.path.join(output_directory, basename + "_hyd.pdbqt")
  if verbose:
    print "Create pdb with hydrogens added"
  hyd_conversion = openbabel.OBConversion()
  hyd_conversion.SetInAndOutFormats(input_format, "pdb")
  mol = openbabel.OBMol()
  hyd_conversion.ReadFile(mol, input_file)
  # AddHydrogens(polaronly, correctForPH, pH)
  mol.AddHydrogens(True, True, 7.4)
  hyd_conversion.WriteFile(mol, hyd_output)
  if verbose:
    print "Create a pdbqt file from the hydrogenated pdb above."
  charge_conversion = openbabel.OBConversion()
  charge_conversion.SetInAndOutFormats("pdb", "pdbqt")
  if verbose:
    print "Make protein rigid."
  # Output options passed to the pdbqt writer; per the message above, "c"
  # and "r" make the output rigid -- NOTE(review): confirm against the
  # openbabel pdbqt format documentation.
  charge_conversion.AddOption("c", charge_conversion.OUTOPTIONS)
  charge_conversion.AddOption("r", charge_conversion.OUTOPTIONS)
  if verbose:
    print "Preserve hydrogens"
  charge_conversion.AddOption("h", charge_conversion.OUTOPTIONS)
  if verbose:
    print "Preserve atom indices"
  charge_conversion.AddOption("p", charge_conversion.OUTOPTIONS)
  if verbose:
    print "preserve atom indices."
  # NOTE(review): the two verbose messages above are near-duplicates; the
  # "n" option presumably preserves atom names -- looks like a copy-paste
  # slip in the messages, verify against obabel docs.
  charge_conversion.AddOption("n", charge_conversion.OUTOPTIONS)
  if verbose:
    print "About to run obabel conversion."
  mol = openbabel.OBMol()
  charge_conversion.ReadFile(mol, hyd_output)
  force_partial_charge_computation(mol)
  charge_conversion.WriteFile(mol, pdbqt_output)
class AromaticRing(object):
  """Holds information about an aromatic ring."""
  def __init__(self, center, indices, plane_coeff, radius):
    """
    Initializes an aromatic.
    Parameters
    ----------
    center: float
      Center of the ring.
      NOTE(review): declared float, but presumably a Point -- verify
      against callers.
    indices: list
      List of the atom indices for ring atoms.
    plane_coeff: list
      A list of elements [a, b, c, d] that define a plane by equation
      a x + b y + c z = d.
    radius: float
      Ring radius from center.
    """
    self.center = center
    self.indices = indices
    # Plane coefficients [a, b, c, d] of the ring plane: a*x + b*y + c*z = d.
    self.plane_coeff = plane_coeff
    self.radius = radius
def average_point(points):
  """Return a Point at the arithmetic mean of the given points.

  Parameters
  ----------
  points: list
    List of Point objects.

  Returns
  -------
  Point
    Coordinate-wise average of the inputs, or the origin when *points*
    is empty.
  """
  # Accumulate in a float array. The previous int accumulator
  # (np.array([0, 0, 0])) forced .astype(int) on every addend, truncating
  # fractional coordinates and producing wrong averages.
  coords = np.zeros(3)
  for point in points:
    coords += point.as_array()
  if len(points) > 0:
    return Point(coords=coords / len(points))
  return Point(coords=coords)
class Point(object):
  """
  Simple implementation for a point in 3-space.
  """
  def __init__(self, x=None, y=None, z=None, coords=None):
    """
    Inputs can be specified either by explicitly providing x, y, z coords
    or by providing a numpy array of length 3.

    Parameters
    ----------
    x: float
      X-coord.
    y: float
      Y-coord.
    z: float
      Z-coord.
    coords: np.ndarray
      Should be of length 3 in format np.array([x, y, z])

    Raises
    ------
    ValueError: If no coordinates are provided.
    """
    # Compare against None explicitly: 0 is a valid but falsy coordinate,
    # so the previous `if x and y and z` wrongly rejected points with any
    # zero component (e.g. Point(0, 1, 2) raised ValueError).
    if x is not None and y is not None and z is not None:
      self.coords = np.array([x, y, z])
    elif coords is not None:  # Implicit eval doesn't work on numpy arrays.
      self.coords = coords
    else:
      raise ValueError("Must specify coordinates for Point!")

  # TODO(bramsundar): Should this be __copy__?
  def copy_of(self):
    """Return a copy of this point."""
    return Point(coords=np.copy(self.coords))

  def dist_to(self, point):
    """Distance (in 2-norm) from this point to another."""
    return np.linalg.norm(self.coords - point.coords)

  def magnitude(self):
    """Magnitude of this point (in 2-norm)."""
    return np.linalg.norm(self.coords)

  def as_array(self):
    """Return the coordinates of this point as array."""
    return self.coords
class Atom(object):
"""
Implements a container class for atoms. This class contains useful
annotations about the atom.
"""
def __init__(self, atomname="", residue="",
coordinates=Point(coords=np.array([99999, 99999, 99999])),
element="", pdb_index="", line="", atomtype="",
indices_of_atoms_connecting=None, charge=0, resid=0,
chain="", structure="", comment=""):
"""
Initializes an atom.
Assumes that atom is loaded from a PDB file.
Parameters
----------
atomname: string
Name of atom. Note that atomname is not the same as residue since
atomnames often have extra annotations (e.g., CG, NZ, etc).
residue: string:
Name of protein residue this atom belongs to.
element: string
Name of atom's element.
coordinate: point
A point object (x, y, z are in Angstroms).
pdb_index: string
Index of the atom in source PDB file.
line: string
The line in the PDB file which specifies this atom.
atomtype: string
Element of atom. This differs from atomname which typically has extra
annotations (e.g. CA, OA, HD, etc)
IndicesOfAtomConnecting: list
The indices (in a PDB object) of all atoms bonded to this one.
charge: float
Associated electrostatic charge.
resid: int
The residue number in the receptor (listing the protein as a chain from
N-Terminus to C-Terminus). Assumes this is a protein atom.
chain: string
Chain identifier for molecule. See PDB spec.
structure: string
One of ALPHA, BETA, or OTHER for the type of protein secondary
structure this atom resides in (assuming this is a receptor atom).
comment: string
Either LIGAND or RECEPTOR depending on whether this is a ligand or
receptor atom.
"""
self.atomname = atomname
self.residue = residue
self.coordinates = coordinates
self.element = element
self.pdb_index = pdb_index
self.line = line
self.atomtype = atomtype
if indices_of_atoms_connecting is not None:
self.indices_of_atoms_connecting = indices_of_atoms_connecting
else:
self.indices_of_atoms_connecting = []
self.charge = charge
self.resid = resid
self.chain = chain
self.structure = structure
self.comment = comment
def copy_of(self):
"""Make a copy of this atom."""
theatom = Atom()
theatom.atomname = self.atomname
theatom.residue = self.residue
theatom.coordinates = self.coordinates.copy_of()
theatom.element = self.element
theatom.pdb_index = self.pdb_index
theatom.line = self.line
theatom.atomtype = self.atomtype
theatom.indices_of_atoms_connecting = self.indices_of_atoms_connecting[:]
theatom.charge = self.charge
theatom.resid = self.resid
theatom.chain = self.chain
theatom.structure = self.structure
theatom.comment = self.comment
return theatom
def create_pdb_line(self, index):
"""
Generates appropriate ATOM line for pdb file.
Parameters
----------
index: int
Index in associated PDB file.
"""
output = "ATOM "
output = (output + str(index).rjust(6) + self.atomname.rjust(5) +
self.residue.rjust(4) + self.chain.rjust(2) +
str(self.resid).rjust(4))
coords = self.coordinates.as_array() # [x, y, z]
output = output + ("%.3f" % coords[0]).rjust(12)
output = output + ("%.3f" % coords[1]).rjust(8)
output = output + ("%.3f" % coords[2]).rjust(8)
output = output + self.element.rjust(24)
return output
def number_of_neighbors(self):
"""Reports number of neighboring atoms."""
return len(self.indices_of_atoms_connecting)
def add_neighbor_atom_indices(self, indices):
"""
Adds atoms with provided PDB indices as neighbors.
Parameters
----------
index: list
List of indices of neighbors in PDB object.
"""
for index in indices:
if index not in self.indices_of_atoms_connecting:
self.indices_of_atoms_connecting.append(index)
def side_chain_or_backbone(self):
"""Determine whether receptor atom belongs to residue sidechain or backbone.
"""
# TODO(rbharath): Should this be an atom function?
if (self.atomname.strip() == "CA" or self.atomname.strip() == "C"
or self.atomname.strip() == "O" or self.atomname.strip() == "N"):
return "BACKBONE"
else:
return "SIDECHAIN"
def read_atom_pdb_line(self, line):
"""
TODO(rbharath): This method probably belongs in the PDB class, and not
in the Atom class.
Reads an ATOM or HETATM line from PDB and instantiates fields.
Atoms in PDBs are represented by ATOM or HETATM statements. ATOM and
HETATM statements follow the following record format:
(see | |
:param Height: 高。0-3000。
数值必须是2的倍数,0是原始宽度
:type Height: int
:param Fps: 帧率,默认0。
范围0-60
:type Fps: int
:param Gop: 关键帧间隔,单位:秒。
范围2-6
:type Gop: int
:param Rotate: 旋转角度,默认0。
可取值:0,90,180,270
:type Rotate: int
:param Profile: 编码质量:
baseline/main/high。
:type Profile: str
:param BitrateToOrig: 当设置的码率>原始码率时,是否以原始码率为准。
0:否, 1:是
默认 0。
:type BitrateToOrig: int
:param HeightToOrig: 当设置的高度>原始高度时,是否以原始高度为准。
0:否, 1:是
默认 0。
:type HeightToOrig: int
:param FpsToOrig: 当设置的帧率>原始帧率时,是否以原始帧率为准。
0:否, 1:是
默认 0。
:type FpsToOrig: int
:param AdaptBitratePercent: 极速高清视频码率压缩比。
极速高清目标码率=VideoBitrate * (1-AdaptBitratePercent)
取值范围:0.0到0.5
:type AdaptBitratePercent: float
:param ShortEdgeAsHeight: 是否以短边作为高度,0:否,1:是。默认0。
:type ShortEdgeAsHeight: int
"""
self.TemplateId = None
self.Vcodec = None
self.Acodec = None
self.AudioBitrate = None
self.Description = None
self.VideoBitrate = None
self.Width = None
self.NeedVideo = None
self.NeedAudio = None
self.Height = None
self.Fps = None
self.Gop = None
self.Rotate = None
self.Profile = None
self.BitrateToOrig = None
self.HeightToOrig = None
self.FpsToOrig = None
self.AdaptBitratePercent = None
self.ShortEdgeAsHeight = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.Vcodec = params.get("Vcodec")
self.Acodec = params.get("Acodec")
self.AudioBitrate = params.get("AudioBitrate")
self.Description = params.get("Description")
self.VideoBitrate = params.get("VideoBitrate")
self.Width = params.get("Width")
self.NeedVideo = params.get("NeedVideo")
self.NeedAudio = params.get("NeedAudio")
self.Height = params.get("Height")
self.Fps = params.get("Fps")
self.Gop = params.get("Gop")
self.Rotate = params.get("Rotate")
self.Profile = params.get("Profile")
self.BitrateToOrig = params.get("BitrateToOrig")
self.HeightToOrig = params.get("HeightToOrig")
self.FpsToOrig = params.get("FpsToOrig")
self.AdaptBitratePercent = params.get("AdaptBitratePercent")
self.ShortEdgeAsHeight = params.get("ShortEdgeAsHeight")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ModifyLiveTranscodeTemplateResponse(AbstractModel):
    """ModifyLiveTranscodeTemplate response structure.
    """
    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request;
            required when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None
    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyPullStreamConfigRequest(AbstractModel):
    """ModifyPullStreamConfig request structure.
    """

    def __init__(self):
        r"""
        :param ConfigId: Configuration ID, as returned by
            CreatePullStreamConfig or listed by DescribePullStreamConfigs.
        :type ConfigId: str
        :param FromUrl: Source URL to pull from; live streams and VOD files
            are supported. Notes: 1. separate multiple VOD URLs with spaces;
            2. at most 10 URLs; 3. supported formats: FLV, RTMP, HLS, MP4;
            4. use the standard three-level form, e.g.
            http://test.com/live/stream.flv.
        :type FromUrl: str
        :param ToUrl: Destination URL to push to; must currently be a
            Tencent domain. 1. RTMP only; 2. standard three-level form.
        :type ToUrl: str
        :param AreaId: Region ID: 1-Shenzhen, 2-Shanghai, 3-Tianjin,
            4-Hong Kong (China). Pass IspId along with any change.
        :type AreaId: int
        :param IspId: ISP ID: 1-Telecom, 2-Mobile, 3-Unicom, 4-Other.
            When AreaId is 4, IspId must be Other. Pass AreaId along with
            any change.
        :type IspId: int
        :param StartTime: Start time in UTC, e.g. 2019-01-08T10:00:00Z
            (ISO 8601; Beijing time is UTC + 8 hours).
        :type StartTime: str
        :param EndTime: End time in UTC, e.g. 2019-01-08T10:00:00Z. Must be
            later than StartTime and the current time, and within 7 days of
            StartTime.
        :type EndTime: str
        """
        self.ConfigId = None
        self.FromUrl = None
        self.ToUrl = None
        self.AreaId = None
        self.IspId = None
        self.StartTime = None
        self.EndTime = None

    def _deserialize(self, params):
        for key in ("ConfigId", "FromUrl", "ToUrl", "AreaId",
                    "IspId", "StartTime", "EndTime"):
            setattr(self, key, params.get(key))
        leftover = set(params) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ModifyPullStreamConfigResponse(AbstractModel):
    """ModifyPullStreamConfig response structure.
    """
    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request;
            required when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None
    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyPullStreamStatusRequest(AbstractModel):
    """ModifyPullStreamStatus request structure.
    """

    def __init__(self):
        r"""
        :param ConfigIds: List of configuration IDs.
        :type ConfigIds: list of str
        :param Status: Target status: 0 invalid, 2 running, 4 paused.
        :type Status: str
        """
        self.ConfigIds = None
        self.Status = None

    def _deserialize(self, params):
        for key in ("ConfigIds", "Status"):
            setattr(self, key, params.get(key))
        leftover = set(params) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ModifyPullStreamStatusResponse(AbstractModel):
    """ModifyPullStreamStatus response structure.
    """
    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request;
            required when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None
    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class MonitorStreamPlayInfo(AbstractModel):
    """Playback monitoring data.
    """

    def __init__(self):
        r"""
        :param PlayDomain: Playback domain name.
        :type PlayDomain: str
        :param StreamName: Stream ID.
        :type StreamName: str
        :param Rate: Playback bitrate; 0 means the original bitrate.
        :type Rate: int
        :param Protocol: Playback protocol: Unknown, Flv, Hls, Rtmp or
            Huyap2p.
        :type Protocol: str
        :param Bandwidth: Bandwidth in Mbps.
        :type Bandwidth: float
        :param Online: Online viewer count; sampled once per minute as the
            number of TCP connections at the sample point.
        :type Online: int
        :param Request: Request count.
        :type Request: int
        """
        self.PlayDomain = None
        self.StreamName = None
        self.Rate = None
        self.Protocol = None
        self.Bandwidth = None
        self.Online = None
        self.Request = None

    def _deserialize(self, params):
        for key in ("PlayDomain", "StreamName", "Rate", "Protocol",
                    "Bandwidth", "Online", "Request"):
            setattr(self, key, params.get(key))
        leftover = set(params) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class PlayAuthKeyInfo(AbstractModel):
    """Playback authentication key information.
    """

    def __init__(self):
        r"""
        :param DomainName: Domain name.
        :type DomainName: str
        :param Enable: Whether authentication is enabled: 0 disabled,
            1 enabled.
        :type Enable: int
        :param AuthKey: Authentication key.
        :type AuthKey: str
        :param AuthDelta: Validity period in seconds.
        :type AuthDelta: int
        :param AuthBackKey: Backup authentication key.
        :type AuthBackKey: str
        """
        self.DomainName = None
        self.Enable = None
        self.AuthKey = None
        self.AuthDelta = None
        self.AuthBackKey = None

    def _deserialize(self, params):
        for key in ("DomainName", "Enable", "AuthKey",
                    "AuthDelta", "AuthBackKey"):
            setattr(self, key, params.get(key))
        leftover = set(params) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class PlayCodeTotalInfo(AbstractModel):
    """Total occurrence count per HTTP status code; most HTTP protocol
    return codes are supported.
    """

    def __init__(self):
        r"""
        :param Code: HTTP code; one of 400, 403, 404, 500, 502, 503, 504.
        :type Code: str
        :param Num: Total number of occurrences.
        :type Num: int
        """
        self.Code = None
        self.Num = None

    def _deserialize(self, params):
        for key in ("Code", "Num"):
            setattr(self, key, params.get(key))
        leftover = set(params) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class PlayDataInfoByStream(AbstractModel):
    """Per-stream playback information.
    """

    def __init__(self):
        r"""
        :param StreamName: Stream name.
        :type StreamName: str
        :param TotalFlux: Total traffic in MB.
        :type TotalFlux: float
        """
        self.StreamName = None
        self.TotalFlux = None

    def _deserialize(self, params):
        for key in ("StreamName", "TotalFlux"):
            setattr(self, key, params.get(key))
        leftover = set(params) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class PlayStatInfo(AbstractModel):
    """Playback information queried by province/ISP.
    """

    def __init__(self):
        r"""
        :param Time: Data time point.
        :type Time: str
        :param Value: Value of bandwidth / traffic / request count /
            concurrent connections / download speed; 0 when no data is
            available.
            Note: this field may return null, meaning no valid value.
        :type Value: float
        """
        self.Time = None
        self.Value = None

    def _deserialize(self, params):
        for key in ("Time", "Value"):
            setattr(self, key, params.get(key))
        leftover = set(params) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class PlaySumStatInfo(AbstractModel):
    """Aggregate playback statistics.
    """

    def __init__(self):
        r"""
        :param Name: Domain name or stream ID.
        :type Name: str
        :param AvgFluxPerSecond: Average download speed in MB/s, computed
            as the mean of the per-minute download speeds.
        :type AvgFluxPerSecond: float
        :param TotalFlux: Total traffic in MB.
        :type TotalFlux: float
        :param TotalRequest: Total request count.
        :type TotalRequest: int
        """
        self.Name = None
        self.AvgFluxPerSecond = None
        self.TotalFlux = None
        self.TotalRequest = None

    def _deserialize(self, params):
        for key in ("Name", "AvgFluxPerSecond",
                    "TotalFlux", "TotalRequest"):
            setattr(self, key, params.get(key))
        leftover = set(params) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ProIspPlayCodeDataInfo(AbstractModel):
    """Playback error-code information.
    """

    def __init__(self):
        r"""
        :param CountryAreaName: Country or region.
        :type CountryAreaName: str
        :param ProvinceName: Province.
        :type ProvinceName: str
        :param IspName: ISP.
        :type IspName: str
        :param Code2xx: Number of occurrences of 2xx error codes.
        :type Code2xx: int
        :param Code3xx: Number of occurrences of 3xx error codes.
        :type Code3xx: int
        :param Code4xx: Number of occurrences of 4xx error codes.
        :type Code4xx: int
        :param Code5xx: Number of occurrences of 5xx error codes.
        :type Code5xx: int
        """
        self.CountryAreaName = None
        self.ProvinceName = None
        self.IspName = None
        self.Code2xx = None
        self.Code3xx = None
        self.Code4xx = None
        self.Code5xx = None

    def _deserialize(self, params):
        for key in ("CountryAreaName", "ProvinceName", "IspName",
                    "Code2xx", "Code3xx", "Code4xx", "Code5xx"):
            setattr(self, key, params.get(key))
        leftover = set(params) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ProIspPlaySumInfo(AbstractModel):
    """Playback information aggregated by province/ISP.
    """

    def __init__(self):
        r"""
        :param Name: Province / ISP / country or region.
        :type Name: str
        :param TotalFlux: Total traffic in MB.
        :type TotalFlux: float
        :param TotalRequest: Total request count.
        :type TotalRequest: int
        :param AvgFluxPerSecond: Average download traffic in MB/s.
        :type AvgFluxPerSecond: float
        """
        self.Name = None
        self.TotalFlux = None
        self.TotalRequest = None
        self.AvgFluxPerSecond = None

    def _deserialize(self, params):
        for key in ("Name", "TotalFlux",
                    "TotalRequest", "AvgFluxPerSecond"):
            setattr(self, key, params.get(key))
        leftover = set(params) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class PublishTime(AbstractModel):
    """Stream publishing time.
    """

    def __init__(self):
        r"""
        :param PublishTime: Publishing time in UTC format, e.g.
            2018-06-29T19:00:00Z.
        :type PublishTime: str
        """
        self.PublishTime = None

    def _deserialize(self, params):
        for key in ("PublishTime",):
            setattr(self, key, params.get(key))
        leftover = set(params) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class PullStreamConfig(AbstractModel):
"""拉流配置。
"""
def __init__(self):
r"""
:param ConfigId: 拉流配置 ID。
:type ConfigId: str
:param FromUrl: 源 URL。
:type FromUrl: str
:param ToUrl: 目的 URL。
:type ToUrl: str
:param AreaName: 区域名。
:type AreaName: str
:param IspName: 运营商名。
:type IspName: str
:param StartTime: | |
<reponame>elecbrick/a2fomu<gh_stars>1-10
#!/usr/bin/env python3
#
# a2fomu_build.py - Part of a2fomu - Copyright (c) 2020-2021 <NAME>
#
# This file contains a significant contribution from foboot-bitstream.py
# which was made available under the Apache License, Version 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
# Import the litex build environment to integrate the deps/ directory and the
# external programs this lxbuildenv project relies on.
lx_dependencies = ["riscv", "icestorm", "yosys", "nextpnr-ice40"]
import lxbuildenv
# base hardware definitions: migen
from migen import Module, Signal, Instance, ClockDomain, If
from migen.fhdl.specials import TSTriple
from migen.fhdl.structure import ResetSignal
from migen.fhdl.decorators import ClockDomainsRenamer
# SoC modules and integrator: LiteX
from litex.build.lattice.platform import LatticePlatform
from litex.build.generic_platform import Pins, Subsignal
from litex.build.sim.platform import SimPlatform
from litex.soc.integration.doc import AutoDoc, ModuleDoc
from litex.soc.integration.soc_core import SoCCore
from litex.soc.cores.cpu import CPUNone
from litex.soc.integration.builder import Builder
from litex.soc.interconnect import wishbone
from litex.soc.cores import up5kspram, spi_flash
from litex_boards.targets.fomu import _CRG
import litex.soc.doc as lxsocdoc
# USB controller: ValentyUSB
from valentyusb.usbcore import io as usbio
from valentyusb.usbcore.cpu import epmem, unififo, epfifo, dummyusb, eptri
from valentyusb.usbcore.endpoint import EndpointType
# Fomu and ice40 modules:
from rtl.fomutouch import TouchPads
from rtl.romgen import RandomFirmwareROM, FirmwareROM
from rtl.sbwarmboot import SBWarmBoot
from rtl.apple2 import Apple2
from rtl.pdpspram import PDP_SPRAM
# Generic Python modules
import argparse
import os
# Simulations Interface: Create I/O pins that can interface with standard test
# suites.
# Top-level I/O resources exposed to the simulation build.  Each entry mirrors
# a physical Fomu resource so the same SoC design can be elaborated against
# either the real board definition or the simulator.
sim_io = [
    # define top level connection between FOMU and simulator
    ("clk", 0,
        # 48 MHz reference clock plus a 12 MHz clock input
        Subsignal("clk48", Pins(1)),
        Subsignal("clk12", Pins(1)),
    ),
    ("reset", 0, Pins(1)),
    ("user_led_n", 0, Pins(1)),  # user LED (name suggests active-low)
    # three-channel RGB LED
    ("rgb_led", 0,
        Subsignal("r", Pins(1)),
        Subsignal("g", Pins(1)),
        Subsignal("b", Pins(1)),
    ),
    # four user touch pads (driven by rtl.fomutouch.TouchPads on hardware)
    ("touch_pins", 0,
        Subsignal("user_touch_0", Pins(1)),
        Subsignal("user_touch_1", Pins(1)),
        Subsignal("user_touch_2", Pins(1)),
        Subsignal("user_touch_3", Pins(1)),
    ),
    # USB D+/D- pair, pullup control, and a transmit-enable for the simulator
    ("usb", 0,
        Subsignal("d_p", Pins(1)),
        Subsignal("d_n", Pins(1)),
        Subsignal("pullup", Pins(1)),
        Subsignal("tx_en", Pins(1)),
    ),
    # single-lane SPI flash interface
    ("spiflash", 0,
        Subsignal("cs_n", Pins(1)),
        Subsignal("clk", Pins(1)),
        Subsignal("mosi", Pins(1)),
        Subsignal("miso", Pins(1)),
        Subsignal("wp", Pins(1)),
        Subsignal("hold", Pins(1)),
    ),
    # quad-lane SPI flash interface
    ("spiflash4x", 0,
        Subsignal("cs_n", Pins(1)),
        Subsignal("clk", Pins(1)),
        Subsignal("dq", Pins(4)),
    ),
]
# Connector mapping naming the individual touch-pad wires for the simulator.
sim_connectors = [("touch_pins", "user_touch_0, user_touch_1, user_touch_2, user_touch_3")]
# Clock and reset signals that the simulator needs for proper emulation.
class sim_CRG(Module):
    """Clock/reset generator used when building for the simulator.

    The simulator drives a 48 MHz clock; the 12 MHz system and USB domains
    are derived from it by a divide-by-4 counter.  The external ``reset``
    pin is fanned out to every resettable clock domain.
    """

    def __init__(self, platform):
        clk = platform.request("clk")
        rst = platform.request("reset")
        clk12 = Signal()
        # Same domain names as the hardware _CRG so the SoC is unchanged.
        self.clock_domains.cd_sys = ClockDomain()
        self.clock_domains.cd_usb_12 = ClockDomain()
        self.clock_domains.cd_usb_48 = ClockDomain()
        self.clock_domains.cd_usb_48_to_12 = ClockDomain()
        #clk12 = clk.clk12
        clk48 = clk.clk48
        #self.comb += clk.clk12.eq(clk12)
        #self.comb += clk.clk48.eq(clk48)
        self.comb += self.cd_usb_48.clk.eq(clk48)
        self.comb += self.cd_usb_48_to_12.clk.eq(clk48)
        # derive 12MHz clock by division of 48MHz clock
        clk12_counter = Signal(2)
        self.sync.usb_48_to_12 += clk12_counter.eq(clk12_counter + 1)
        self.comb += clk12.eq(clk12_counter[1])
        # Uncomment the following to enable 48MHz Risc-V for faster simulation
        # Warning: it breaks USB communication as all data will be sent 4x
        #self.comb += self.cd_sys.clk.eq(clk48)
        # Use the following for FPGA timing
        self.comb += self.cd_sys.clk.eq(clk12)
        self.comb += self.cd_usb_12.clk.eq(clk12)
        self.comb += [
            ResetSignal("sys").eq(rst),
            ResetSignal("usb_12").eq(rst),
            ResetSignal("usb_48").eq(rst),
            # Must not reset cd_usb_48_to_12 otherwise clock divider halts
            # and sys_clk domain fails to reset
        ]
class sim_Platform(SimPlatform):
    """Verilator simulation platform exposing the same resources as Fomu.

    :param revision: board revision tag; stored only for parity with the
                     hardware ``Platform`` class.
    :param toolchain: simulation toolchain passed through to SimPlatform.
    """

    def __init__(self, revision=None, toolchain="verilator"):
        # NOTE(review): the original body assigned an unused local
        # ``default_clk_name = "clk12"`` that was never read or passed on;
        # that dead assignment has been removed.
        SimPlatform.__init__(self,
                             "sim",
                             sim_io,
                             connectors=sim_connectors,
                             toolchain=toolchain)
        self.revision = revision
        # 2 MB SPI flash with 6 dummy cycles, matching the dvt/pvt boards.
        self.spi_size = 2 * 1024 * 1024
        self.spi_dummy = 6

    def create_programmer(self):
        """Simulated hardware cannot be programmed."""
        raise ValueError("programming is not supported")
def add_fsm_state_names():
    """Hack the FSM module to add state names to the output.

    Monkey-patches migen's ``FSM._lower_controls`` so that, alongside the
    encoded ``state`` register, every FSM also carries ``state_name`` /
    ``next_state_name`` registers holding the ASCII state name packed into
    an integer — presumably to make simulation traces human-readable.
    """
    from migen.fhdl.visit import NodeTransformer
    from migen.genlib.fsm import NextState, NextValue, _target_eq
    from migen.fhdl.bitcontainer import value_bits_sign

    class My_LowerNext(NodeTransformer):
        # Replacement for migen's internal lowering transformer: same job,
        # but also drives the state-name signal on every NextState.
        def __init__(self, next_state_signal, next_state_name_signal, encoding,
                     aliases):
            self.next_state_signal = next_state_signal
            self.next_state_name_signal = next_state_name_signal
            self.encoding = encoding
            self.aliases = aliases
            # (target, next_value_ce, next_value)
            self.registers = []

        def _get_register_control(self, target):
            # Return the (ce, value) pair already allocated for ``target``;
            # raises KeyError when this target has not been seen yet.
            for x in self.registers:
                if _target_eq(target, x[0]):
                    return x[1], x[2]
            raise KeyError

        def visit_unknown(self, node):
            if isinstance(node, NextState):
                # Resolve state aliases to the canonical state object.
                try:
                    actual_state = self.aliases[node.state]
                except KeyError:
                    actual_state = node.state
                # Drive both the binary encoding and the big-endian ASCII
                # representation of the state's name.
                return [
                    self.next_state_signal.eq(self.encoding[actual_state]),
                    self.next_state_name_signal.eq(
                        int.from_bytes(actual_state.encode(), byteorder="big"))
                ]
            elif isinstance(node, NextValue):
                try:
                    next_value_ce, next_value = self._get_register_control(
                        node.target)
                except KeyError:
                    # First NextValue for this target: allocate the shadow
                    # register and its clock-enable, then remember them.
                    related = node.target if isinstance(node.target,
                                                        Signal) else None
                    next_value = Signal(bits_sign=value_bits_sign(node.target),
                                        related=related)
                    next_value_ce = Signal(related=related)
                    self.registers.append(
                        (node.target, next_value_ce, next_value))
                return next_value.eq(node.value), next_value_ce.eq(1)
            else:
                # Anything else passes through untouched.
                return node

    import migen.genlib.fsm as fsm

    def my_lower_controls(self):
        # Signals are sized to hold the longest state name (8 bits per
        # character) and reset to the FSM's initial state name.
        self.state_name = Signal(len(max(self.encoding, key=len)) * 8,
                                 reset=int.from_bytes(
                                     self.reset_state.encode(),
                                     byteorder="big"))
        self.next_state_name = Signal(len(max(self.encoding, key=len)) * 8,
                                      reset=int.from_bytes(
                                          self.reset_state.encode(),
                                          byteorder="big"))
        self.comb += self.next_state_name.eq(self.state_name)
        self.sync += self.state_name.eq(self.next_state_name)
        return My_LowerNext(self.next_state, self.next_state_name,
                            self.encoding, self.state_aliases)

    # Install the patched lowering hook globally.
    fsm.FSM._lower_controls = my_lower_controls
class Platform(LatticePlatform):
    """Hardware Fomu platform, parameterized by board revision.

    The original implementation repeated an almost identical four-line body
    for every revision; the per-revision differences are consolidated into
    the ``_REVISIONS`` table below.  It also forwards the ``toolchain``
    argument instead of silently hard-coding "icestorm" (the default is
    unchanged, so existing callers behave identically).

    :param revision: one of "evt", "dvt", "pvt", "hacker".
    :param toolchain: FPGA toolchain name (default "icestorm").
    :raises ValueError: if ``revision`` is not a known board revision.
    """

    # revision -> (litex_boards platform module, device, SPI flash size,
    #              SPI dummy cycles)
    _REVISIONS = {
        "evt": ("fomu_evt", "ice40-up5k-sg48", 16 * 1024 * 1024, 6),
        "dvt": ("fomu_pvt", "ice40-up5k-uwg30", 2 * 1024 * 1024, 6),
        "pvt": ("fomu_pvt", "ice40-up5k-uwg30", 2 * 1024 * 1024, 6),
        "hacker": ("fomu_hacker", "ice40-up5k-uwg30", 2 * 1024 * 1024, 4),
    }

    def __init__(self, revision=None, toolchain="icestorm"):
        self.revision = revision
        try:
            board, device, spi_size, spi_dummy = self._REVISIONS[revision]
        except KeyError:
            raise ValueError("Unrecognized revision: {}. Known values: evt, dvt, pvt, hacker".format(revision)) from None
        # Local import keeps the module's top-level import block unchanged.
        import importlib
        board_module = importlib.import_module("litex_boards.platforms." + board)
        LatticePlatform.__init__(self, device, board_module._io,
                                 board_module._connectors, toolchain=toolchain)
        self.spi_size = spi_size
        self.spi_dummy = spi_dummy

    def create_programmer(self):
        """Programming is not handled by this build script."""
        raise ValueError("programming is not supported in this environment")
class BaseSoC(SoCCore, AutoDoc):
"""A2Fomu SoC and Bootloader
Fomu is an FPGA that fits entirely within a USB port.
A2Fomu is an Apple II clone inside of Fomu along with an operating
system for its control processor. The SoC contains a small ROM that
loads the OS from Flash memory into RAM.
"""
SoCCore.csr_map = {
#"ctrl": 0, # LiteX - many better uses for the space
"apple2": 0,
"crg": 1, # user - no registers in the default clock module
#"uart_phy": 2, # Fomu PVT has no pins for uart
#"uart": 3, # Fomu PVT has no pins for uart
#"identifier_mem": 4, # unnecessary
"timer0": 5, # provided by default (optional)
#"cpu_or_bridge": 8, # Nothing here
"usb": 9,
#"picorvspi": 10,
"touch": 11,
"reboot": 12,
"rgb": 13,
#"version": 14,
"lxspi": 15,
#"messible": 16,
}
SoCCore.mem_map = {
"rom": 0x00000000, # (default shadow @0x80000000)
"sram": 0x10000000, # (default shadow @0x90000000)
"spiflash": 0x20000000, # (default shadow @0xa0000000)
"a2ram": 0xC0000000, # (default shadow @0xc0000000)
"csr": 0xe0000000, # (default shadow @0xe0000000)
"vexriscv_debug": 0xf00f0000,
}
interrupt_map = {
"timer0": 2,
"usb": 3,
}
interrupt_map.update(SoCCore.interrupt_map)
def __init__(self, platform, boot_source="rand",
gdb_debug=None, usb_wishbone=False, bios_file=None,
use_dsp=False, placer="heap", output_dir="build",
pnr_seed=0,
warmboot_offsets=None,
**kwargs):
# Disable integrated RAM unless using simulator - we'll add it later
self.integrated_sram_size = 0
self.output_dir = output_dir
if kwargs["sim"]:
clk_freq = int(48e6)
self.submodules.crg = sim_CRG(platform)
self.integrated_sram_size = 0 # 0x8000,
else:
clk_freq = int(12e6)
self.submodules.crg = _CRG(platform, clk_freq)
SoCCore.__init__(self, platform, clk_freq,
integrated_sram_size=self.integrated_sram_size, with_uart=False,
with_ctrl=False, csr_data_width=32, **kwargs)
if gdb_debug is not None:
if gdb_debug == "uart":
from litex.soc.cores.uart import UARTWishboneBridge
self.submodules.uart_bridge = UARTWishboneBridge(platform.request("serial"), clk_freq, baudrate=115200)
self.add_wb_master(self.uart_bridge.wishbone)
elif gdb_debug == "usb":
usb_wishbone = True
elif gdb_debug == "spi":
import spibone
# Add SPI Wishbone bridge
debug_device = [
("spidebug", 0,
Subsignal("mosi", Pins("dbg:0")),
Subsignal("miso", Pins("dbg:1")),
Subsignal("clk", Pins("dbg:2")),
Subsignal("cs_n", Pins("dbg:3")),
)
]
platform.add_extension(debug_device)
spi_pads = platform.request("spidebug")
self.submodules.spibone = ClockDomainsRenamer("usb_12")(spibone.SpiWishboneBridge(spi_pads, wires=4))
self.add_wb_master(self.spibone.wishbone)
if hasattr(self, "cpu") and not isinstance(self.cpu, CPUNone):
self.cpu.use_external_variant("rtl/VexRiscv_Fomu_NoMMU_Debug.v")
os.path.join(output_dir, "gateware")
# This was needed for an earlier version of LiteX
#self.register_mem("vexriscv_debug", 0xf00f0000, self.cpu.debug_bus, 0x100)
else:
if hasattr(self, "cpu") and not isinstance(self.cpu, CPUNone):
#self.cpu.use_external_variant("rtl/VexRiscv_Fomu.v")
self.cpu.use_external_variant("rtl/VexRiscv_Fomu_NoMMU.v")
# SPRAM- UP5K has four blocks of Single Port RAM (SPRAM). This is split
# evenly between the Risc-V and the 6502 for main memory, 64kB each.
spram_size = 64*1024
if not kwargs["sim"]:
self.submodules.spram = up5kspram.Up5kSPRAM(size=spram_size)
else:
self.submodules.spram = wishbone.SRAM(spram_size, read_only=False, init=[])
self.register_mem("sram", self.mem_map["sram"], self.spram.bus, spram_size)
# Add a Messible for device->host communications
#Doug
#self.submodules.messible = Messible()
# Apple II specific modules here
a2mem_size = 64*1024
if not kwargs["sim"]:
a2mem = PDP_SPRAM(sim=kwargs["sim"])
else:
self.submodules.spram = wishbone.SRAM(spram_size, read_only=False, init=[])
self.submodules.a2mem = a2mem
self.register_mem("a2ram", self.mem_map["a2ram"], self.a2mem.bus, a2mem_size)
print("=====================\n", gdb_debug, gdb_debug!=None, "\n=====================\n")
self.submodules.apple2 = Apple2(platform, a2mem, minimal=(gdb_debug!=None))
if not kwargs["no_cpu"]:
bios_size = 0x2000 # Fomu standard 8 Kb ROM
if kwargs['sim']:
# 64kB ROM used in place of flash during simulation
bios_size = 0x10000
else:
# ROM consumes a large quantity of the limited EBR block RAM
# 1 KB ROM that just initializes flash and jumps to it
bios_size = 0x3000 # 12kB max size using all ebr
bios_size = 0x2400 # 9kB
bios_size = 0x2000 # 8kB foboot failsafe size
bios_size = 0x1000 # 4kB bootloader
bios_size = 0x0800 # 2kB bootloader
if boot_source == "bios" or bios_file is not None:
kwargs['cpu_reset_address'] = 0
if bios_file is None:
self.integrated_rom_size = bios_size
self.submodules.rom = wishbone.SRAM(bios_size, read_only=True, init=[])
self.register_rom(self.rom.bus, bios_size)
else:
self.firmware_rom_size = bios_size
self.submodules.firmware_rom = FirmwareROM(bios_size, bios_file)
self.add_constant("ROM_DISABLE", 1)
self.register_rom(self.firmware_rom.bus, bios_size)
elif boot_source == "rand":
kwargs['cpu_reset_address'] = 0
self.submodules.random_rom = RandomFirmwareROM(bios_size)
self.add_constant("ROM_DISABLE", 1)
self.register_rom(self.random_rom.bus, bios_size)
| |
POLY_FT4 *PrintOBJ_TORCHL__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076570)
SetType(0x80076570, "struct POLY_FT4 *PrintOBJ_TORCHR__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076600)
SetType(0x80076600, "struct POLY_FT4 *PrintOBJ_TORCHL2__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076690)
SetType(0x80076690, "struct POLY_FT4 *PrintOBJ_TORCHR2__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076720)
SetType(0x80076720, "struct POLY_FT4 *PrintOBJ_SARC__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x8007674C)
SetType(0x8007674C, "struct POLY_FT4 *PrintOBJ_FLAMEHOLE__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076778)
SetType(0x80076778, "struct POLY_FT4 *PrintOBJ_FLAMELVR__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800767A4)
SetType(0x800767A4, "struct POLY_FT4 *PrintOBJ_WATER__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800767D0)
SetType(0x800767D0, "struct POLY_FT4 *PrintOBJ_BOOKLVR__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800767FC)
SetType(0x800767FC, "struct POLY_FT4 *PrintOBJ_TRAPL__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076828)
SetType(0x80076828, "struct POLY_FT4 *PrintOBJ_TRAPR__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076854)
SetType(0x80076854, "struct POLY_FT4 *PrintOBJ_BOOKSHELF__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076880)
SetType(0x80076880, "struct POLY_FT4 *PrintOBJ_WEAPRACK__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800768AC)
SetType(0x800768AC, "struct POLY_FT4 *PrintOBJ_BARREL__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800768D8)
SetType(0x800768D8, "struct POLY_FT4 *PrintOBJ_BARRELEX__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076A30)
SetType(0x80076A30, "struct POLY_FT4 *PrintOBJ_SHRINEL__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076AFC)
SetType(0x80076AFC, "struct POLY_FT4 *PrintOBJ_SHRINER__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076BC8)
SetType(0x80076BC8, "struct POLY_FT4 *PrintOBJ_SKELBOOK__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076BF4)
SetType(0x80076BF4, "struct POLY_FT4 *PrintOBJ_BOOKCASEL__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076C20)
SetType(0x80076C20, "struct POLY_FT4 *PrintOBJ_BOOKCASER__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076C4C)
SetType(0x80076C4C, "struct POLY_FT4 *PrintOBJ_BOOKSTAND__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076C78)
SetType(0x80076C78, "struct POLY_FT4 *PrintOBJ_BOOKCANDLE__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076C9C)
SetType(0x80076C9C, "struct POLY_FT4 *PrintOBJ_BLOODFTN__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076CC8)
SetType(0x80076CC8, "struct POLY_FT4 *PrintOBJ_DECAP__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076CF4)
SetType(0x80076CF4, "struct POLY_FT4 *PrintOBJ_TCHEST1__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076D20)
SetType(0x80076D20, "struct POLY_FT4 *PrintOBJ_TCHEST2__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076D4C)
SetType(0x80076D4C, "struct POLY_FT4 *PrintOBJ_TCHEST3__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076D78)
SetType(0x80076D78, "struct POLY_FT4 *PrintOBJ_BLINDBOOK__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076DA4)
SetType(0x80076DA4, "struct POLY_FT4 *PrintOBJ_BLOODBOOK__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076DD0)
SetType(0x80076DD0, "struct POLY_FT4 *PrintOBJ_PEDISTAL__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076DFC)
SetType(0x80076DFC, "struct POLY_FT4 *PrintOBJ_PURIFYINGFTN__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076E28)
SetType(0x80076E28, "struct POLY_FT4 *PrintOBJ_ARMORSTAND__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076E54)
SetType(0x80076E54, "struct POLY_FT4 *PrintOBJ_ARMORSTANDN__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076E80)
SetType(0x80076E80, "struct POLY_FT4 *PrintOBJ_GOATSHRINE__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076EAC)
SetType(0x80076EAC, "struct POLY_FT4 *PrintOBJ_CAULDRON__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076ED8)
SetType(0x80076ED8, "struct POLY_FT4 *PrintOBJ_MURKYFTN__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076F04)
SetType(0x80076F04, "struct POLY_FT4 *PrintOBJ_TEARFTN__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076F30)
SetType(0x80076F30, "struct POLY_FT4 *PrintOBJ_ALTBOY__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076F5C)
SetType(0x80076F5C, "struct POLY_FT4 *PrintOBJ_MCIRCLE1__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800770F0)
SetType(0x800770F0, "struct POLY_FT4 *PrintOBJ_STORYBOOK__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x8007711C)
SetType(0x8007711C, "struct POLY_FT4 *PrintOBJ_STORYCANDLE__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80077140)
SetType(0x80077140, "struct POLY_FT4 *PrintOBJ_STEELTOME__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x8007716C)
SetType(0x8007716C, "struct POLY_FT4 *PrintOBJ_WARARMOR__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80077198)
SetType(0x80077198, "struct POLY_FT4 *PrintOBJ_WARWEAP__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800771C4)
SetType(0x800771C4, "struct POLY_FT4 *PrintOBJ_TBCROSS__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800771F0)
SetType(0x800771F0, "struct POLY_FT4 *PrintOBJ_WEAPONRACK__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x8007721C)
SetType(0x8007721C, "struct POLY_FT4 *PrintOBJ_WEAPONRACKN__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80077248)
SetType(0x80077248, "struct POLY_FT4 *PrintOBJ_MUSHPATCH__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80077274)
SetType(0x80077274, "struct POLY_FT4 *PrintOBJ_LAZSTAND__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800772A0)
SetType(0x800772A0, "struct POLY_FT4 *PrintOBJ_SLAINHERO__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800772CC)
SetType(0x800772CC, "struct POLY_FT4 *PrintOBJ_SIGNCHEST__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800772F8)
SetType(0x800772F8, "struct POLY_FT4 *PRIM_GetCopy__FP8POLY_FT4_addr_800772F8(struct POLY_FT4 *Prim)")
del_items(0x80077334)
SetType(0x80077334, "void PRIM_CopyPrim__FP8POLY_FT4T0_addr_80077334(struct POLY_FT4 *Dest, struct POLY_FT4 *Source)")
del_items(0x8007735C)
SetType(0x8007735C, "void PRIM_GetPrim__FPP8POLY_FT4_addr_8007735C(struct POLY_FT4 **Prim)")
del_items(0x800773D8)
SetType(0x800773D8, "struct TextDat *GetBlockTexDat__7CBlocks_addr_800773D8(struct CBlocks *this)")
del_items(0x800773E4)
SetType(0x800773E4, "int GetNumOfFrames__7TextDatii_addr_800773E4(struct TextDat *this, int Creature, int Action)")
del_items(0x8007741C)
SetType(0x8007741C, "struct CCreatureHdr *GetCreature__7TextDati_addr_8007741C(struct TextDat *this, int Creature)")
del_items(0x80077494)
SetType(0x80077494, "int GetNumOfCreatures__7TextDat_addr_80077494(struct TextDat *this)")
del_items(0x800774A8)
SetType(0x800774A8, "struct FRAME_HDR *GetFr__7TextDati_addr_800774A8(struct TextDat *this, int FrNum)")
del_items(0x800774C4)
SetType(0x800774C4, "void gamemenu_on__Fv()")
del_items(0x800774CC)
SetType(0x800774CC, "void gamemenu_off__Fv()")
del_items(0x800774D4)
SetType(0x800774D4, "void LoadPalette__FPCc(char *pszFileName)")
del_items(0x800774DC)
SetType(0x800774DC, "void LoadRndLvlPal__Fi(int l)")
del_items(0x800774E4)
SetType(0x800774E4, "void ResetPal__Fv()")
del_items(0x800774EC)
SetType(0x800774EC, "void SetFadeLevel__Fi(int fadeval)")
del_items(0x8007751C)
SetType(0x8007751C, "bool GetFadeState__Fv()")
del_items(0x80077528)
SetType(0x80077528, "void SetPolyXY__FP8POLY_GT4PUc(struct POLY_GT4 *gt4, unsigned char *coords)")
del_items(0x8007762C)
SetType(0x8007762C, "void DrawFadedScreen__Fv()")
del_items(0x8007776C)
SetType(0x8007776C, "void BlackPalette__Fv()")
del_items(0x80077828)
SetType(0x80077828, "void PaletteFadeInTask__FP4TASK(struct TASK *T)")
del_items(0x800778AC)
SetType(0x800778AC, "bool PaletteFadeIn__Fi(int fr)")
del_items(0x80077904)
SetType(0x80077904, "void PaletteFadeOutTask__FP4TASK(struct TASK *T)")
del_items(0x8007799C)
SetType(0x8007799C, "bool PaletteFadeOut__Fi(int fr)")
del_items(0x800779F0)
SetType(0x800779F0, "void PRIM_GetPrim__FPP8POLY_FT4_addr_800779F0(struct POLY_FT4 **Prim)")
del_items(0x80077A6C)
SetType(0x80077A6C, "void M_CheckEFlag__Fi(int i)")
del_items(0x80077A8C)
SetType(0x80077A8C, "void M_ClearSquares__Fi(int i)")
del_items(0x80077BF8)
SetType(0x80077BF8, "unsigned char IsSkel__Fi(int mt)")
del_items(0x80077C34)
SetType(0x80077C34, "void NewMonsterAnim__FiR10AnimStructii(int i, struct AnimStruct *anim, int md, int AnimType)")
del_items(0x80077C80)
SetType(0x80077C80, "unsigned char M_Ranged__Fi(int i)")
del_items(0x80077CC8)
SetType(0x80077CC8, "unsigned char M_Talker__Fi(int i)")
del_items(0x80077D28)
SetType(0x80077D28, "void M_Enemy__Fi(int i)")
del_items(0x800782E8)
SetType(0x800782E8, "void ClearMVars__Fi(int i)")
del_items(0x8007835C)
SetType(0x8007835C, "void InitMonster__Fiiiii(int i, int rd, int mtype, int x, int y)")
del_items(0x800787A8)
SetType(0x800787A8, "int AddMonster__FiiiiUc(int x, int y, int dir, int mtype, int InMap)")
del_items(0x80078858)
SetType(0x80078858, "void M_StartStand__Fii(int i, int md)")
del_items(0x8007899C)
SetType(0x8007899C, "void M_UpdateLeader__Fi(int i)")
del_items(0x80078A94)
SetType(0x80078A94, "void ActivateSpawn__Fiiii(int i, int x, int y, int dir)")
del_items(0x80078B3C)
SetType(0x80078B3C, "unsigned char SpawnSkeleton__Fiii(int ii, int x, int y)")
del_items(0x80078D2C)
SetType(0x80078D2C, "void M_StartSpStand__Fii(int i, int md)")
del_items(0x80078E0C)
SetType(0x80078E0C, "unsigned char PosOkMonst__Fiii(int i, int x, int y)")
del_items(0x80079088)
SetType(0x80079088, "unsigned char CanPut__Fii(int i, int j)")
del_items(0x80079390)
SetType(0x80079390, "unsigned short GetAutomapType__FiiUc(int x, int y, unsigned char view)")
del_items(0x8007969C)
SetType(0x8007969C, "void SetAutomapView__Fii(int x, int y)")
del_items(0x8001F458)
SetType(0x8001F458, "void TICK_InitModule()")
del_items(0x8001F478)
SetType(0x8001F478, "void TICK_Set(unsigned long Val)")
del_items(0x8001F488)
SetType(0x8001F488, "unsigned long TICK_Get()")
del_items(0x8001F498)
SetType(0x8001F498, "void TICK_Update()")
del_items(0x8001F4B8)
SetType(0x8001F4B8, "unsigned long TICK_GetAge(unsigned long OldTick)")
del_items(0x8001F4E4)
SetType(0x8001F4E4, "char *TICK_GetDateString()")
del_items(0x8001F4F4)
SetType(0x8001F4F4, "char *TICK_GetTimeString()")
del_items(0x8001F504)
SetType(0x8001F504, "unsigned char GU_InitModule()")
del_items(0x8001F530)
SetType(0x8001F530, "void GU_SetRndSeed(unsigned long *Tab)")
del_items(0x8001F560)
SetType(0x8001F560, "unsigned long GU_GetRnd()")
del_items(0x8001F5F0)
SetType(0x8001F5F0, "long GU_GetSRnd()")
del_items(0x8001F610)
SetType(0x8001F610, "unsigned long GU_GetRndRange(unsigned int Range)")
del_items(0x8001F64C)
SetType(0x8001F64C, "unsigned int GU_AlignVal(unsigned int w, unsigned int round)")
del_items(0x8001F670)
SetType(0x8001F670, "void main()")
del_items(0x8001F6C0)
SetType(0x8001F6C0, "unsigned char DBG_OpenModule()")
del_items(0x8001F6C8)
SetType(0x8001F6C8, "void DBG_PollHost()")
del_items(0x8001F6D0)
SetType(0x8001F6D0, "void DBG_Halt()")
del_items(0x8001F6D8)
SetType(0x8001F6D8, "void DBG_SendMessage(char *e)")
del_items(0x8001F6F0)
SetType(0x8001F6F0, "void DBG_SetMessageHandler(void (*Func)())")
del_items(0x8001F700)
SetType(0x8001F700, "void DBG_Error(char *Text, char *File, int Line)")
del_items(0x8001F72C)
SetType(0x8001F72C, "void DBG_SetErrorFunc(void (*EFunc)())")
del_items(0x8001F73C)
SetType(0x8001F73C, "void SendPsyqString(char *e)")
del_items(0x8001F744)
SetType(0x8001F744, "void DBG_SetPollRoutine(void (*Func)())")
del_items(0x8001F754)
SetType(0x8001F754, "unsigned long GTIMSYS_GetTimer()")
del_items(0x8001F778)
SetType(0x8001F778, "void GTIMSYS_ResetTimer()")
del_items(0x8001F79C)
SetType(0x8001F79C, "unsigned long GTIMSYS_InitTimer()")
del_items(0x8001F9D0)
SetType(0x8001F9D0, "void GAL_SetErrorChecking(unsigned char OnOff)")
del_items(0x8001F9E0)
SetType(0x8001F9E0, "long GAL_SplitBlock(long CurBlock, unsigned long Size)")
del_items(0x8001FB14)
SetType(0x8001FB14, "void GAL_InitModule()")
del_items(0x8001FBCC)
SetType(0x8001FBCC, "unsigned char GAL_AddMemType(struct MEM_INIT_INFO *M)")
del_items(0x8001FCEC)
SetType(0x8001FCEC, "long GAL_Alloc(unsigned long Size, unsigned long Type, char *Name)")
del_items(0x8001FE84)
SetType(0x8001FE84, "void *GAL_Lock(long Handle)")
del_items(0x8001FEE4)
SetType(0x8001FEE4, "unsigned char GAL_Unlock(long Handle)")
del_items(0x8001FF60)
SetType(0x8001FF60, "unsigned char GAL_Free(long Handle)")
del_items(0x80020000)
SetType(0x80020000, "unsigned long GAL_GetFreeMem(unsigned long Type)")
del_items(0x80020074)
SetType(0x80020074, "unsigned long GAL_GetUsedMem(unsigned long Type)")
del_items(0x800200E8)
SetType(0x800200E8, "unsigned long GAL_LargestFreeBlock(unsigned long Type)")
del_items(0x80020164)
SetType(0x80020164, "void AttachHdrToList(struct MEM_HDR **Head, struct MEM_HDR *Block)")
del_items(0x80020184)
SetType(0x80020184, "void DetachHdrFromList(struct MEM_HDR **Head, struct MEM_HDR *Block)")
del_items(0x800201D0)
SetType(0x800201D0, "unsigned char IsActiveValidHandle(long Handle)")
del_items(0x80020200)
SetType(0x80020200, "void *AlignPtr(void *P, unsigned long Align)")
del_items(0x80020230)
SetType(0x80020230, "unsigned long AlignSize(unsigned long Size, unsigned long Align)")
del_items(0x80020260)
SetType(0x80020260, "struct MEM_HDR *FindClosestSizedBlock(struct MEM_HDR *Head, unsigned long Size)")
del_items(0x800202B8)
SetType(0x800202B8, "struct MEM_HDR *FindHighestMemBlock(struct MEM_HDR *Head, unsigned long Size)")
del_items(0x80020320)
SetType(0x80020320, "struct MEM_HDR *FindLowestMemBlock(struct MEM_HDR *Head, unsigned long Size)")
del_items(0x80020388)
SetType(0x80020388, "struct MEM_INIT_INFO *GetMemInitInfoBlockFromType(unsigned long Type)")
del_items(0x800203C4)
SetType(0x800203C4, "void MergeToEmptyList(struct MEM_INIT_INFO *MI, struct MEM_HDR *M)")
del_items(0x80020498)
SetType(0x80020498, "long GAL_AllocAt(unsigned long Size, void *Addr, unsigned long Type, char *Name)")
del_items(0x80020574)
SetType(0x80020574, "long LoAlloc(struct MEM_INIT_INFO *M, struct MEM_HDR *Block, void *Addr, unsigned long Size, char *Name)")
del_items(0x8002070C)
SetType(0x8002070C, "struct MEM_HDR *FindBlockInTheseBounds(struct MEM_HDR *Head, void *Addr, unsigned long Size)")
del_items(0x80020778)
SetType(0x80020778, "struct MEM_HDR *GetFreeMemHdrBlock()")
del_items(0x80020800)
SetType(0x80020800, "void ReleaseMemHdrBlock(struct MEM_HDR *Index)")
del_items(0x80020840)
SetType(0x80020840, "void GAL_IterateEmptyMem(unsigned long MemType, void (*Func)())")
del_items(0x800208C4)
SetType(0x800208C4, "void GAL_IterateUsedMem(unsigned long MemType, void (*Func)())")
del_items(0x80020960)
SetType(0x80020960, "unsigned char GAL_SetMemName(long Hnd, char *Text)")
del_items(0x800209C8)
SetType(0x800209C8, "unsigned long GAL_TotalMem(unsigned long Type)")
del_items(0x80020A1C)
SetType(0x80020A1C, "void *GAL_MemBase(unsigned long Type)")
del_items(0x80020A70)
SetType(0x80020A70, "unsigned char GAL_DefragMem(unsigned long type)")
del_items(0x80020AF4)
SetType(0x80020AF4, "unsigned char GSetError(enum GAL_ERROR_CODE Err)")
del_items(0x80020B50)
SetType(0x80020B50, "unsigned char GAL_CheckMem(unsigned long Type)")
del_items(0x80020C4C)
SetType(0x80020C4C, "unsigned char CheckCollisions(struct MEM_INIT_INFO *M, struct MEM_HDR *MemHdr)")
del_items(0x80020CF8)
SetType(0x80020CF8, "unsigned char AreBlocksColliding(struct MEM_HDR *Hdr1, struct MEM_HDR *Hdr2)")
del_items(0x80020D50)
SetType(0x80020D50, "char *GAL_GetErrorText(enum GAL_ERROR_CODE Err)")
del_items(0x80020D80)
SetType(0x80020D80, "enum GAL_ERROR_CODE GAL_GetLastErrorCode()")
del_items(0x80020D90)
SetType(0x80020D90, "char *GAL_GetLastErrorText()")
del_items(0x80020DB8)
SetType(0x80020DB8, "int GAL_HowManyEmptyRegions(unsigned long Type)")
del_items(0x80020E20)
SetType(0x80020E20, "int GAL_HowManyUsedRegions(unsigned long Type)")
del_items(0x80020E88)
SetType(0x80020E88, "void GAL_SetTimeStamp(int Time)")
del_items(0x80020E98)
SetType(0x80020E98, "void GAL_IncTimeStamp()")
del_items(0x80020EB8)
SetType(0x80020EB8, "int GAL_GetTimeStamp()")
del_items(0x80020EC8)
SetType(0x80020EC8, "long GAL_AlignSizeToType(unsigned long Size, unsigned long MemType)")
del_items(0x80020F18)
SetType(0x80020F18, "long GAL_AllocMultiStruct(struct GAL_STRUCT *G, unsigned long Type, char *Name)")
del_items(0x80020F68)
SetType(0x80020F68, "unsigned int GAL_ProcessMultiStruct(struct GAL_STRUCT *G, unsigned long Type)")
del_items(0x80021014)
SetType(0x80021014, "long GAL_GetSize(long hnd)")
del_items(0x80021068)
SetType(0x80021068, "unsigned char GazDefragMem(unsigned long MemType)")
del_items(0x800211D0)
SetType(0x800211D0, | |
from collections import defaultdict
from itertools import chain
from functools import reduce
import operator
import copy
from core.timeline import Timer, Event, Listener, now
from core.log import log
from core.ctx import Static
from core.acl import allow_acl
class ModifierDict(defaultdict):
    """Two-level registry of modifiers keyed by type, then order.

    Layout: ``self[mod_type][mod_order] -> list`` of modifier objects.  Each
    stored modifier must expose ``mod_type``, ``mod_order``, ``buff_capped``
    and a ``get()`` returning its current numeric value.
    """

    # Per-type cap applied to the capped portion of the "buff" order sum;
    # types not listed here fall back to a cap of 2.0 (see sub_mod).
    BUFF_CAPS_FOR_TYPE = {"maxhp": 0.3}

    def __init__(self, *args, **kwargs):
        if args:
            # defaultdict-style construction (e.g. copy/pickle passes the
            # default_factory as the first positional argument).
            super().__init__(*args, **kwargs)
        else:
            # Fresh registry: each type maps to a dict of order -> list.
            # (idiom fix: `list` instead of `lambda: []`)
            super().__init__(lambda: defaultdict(list))

    def append(self, modifier):
        """Register *modifier* under its (type, order) slot."""
        self[modifier.mod_type][modifier.mod_order].append(modifier)

    def remove(self, modifier):
        """Unregister *modifier*; raises ValueError if it was never appended."""
        self[modifier.mod_type][modifier.mod_order].remove(modifier)

    @staticmethod
    def mod_mult(a, b):
        """Default fold step: multiply the accumulator by (1 + order sub-total)."""
        return a * (1 + b)

    def mod(self, mtype, operator=None, initial=1):
        """Fold all order sub-totals of *mtype* with *operator*.

        Defaults to the multiplicative ``mod_mult`` with an initial value of 1;
        callers may pass e.g. ``operator.add`` with ``initial=1`` for an
        additive multiplier.  (The parameter name shadows the stdlib module by
        design — callers pass it as a keyword.)
        """
        operator = operator or ModifierDict.mod_mult
        return reduce(
            operator,
            [self.sub_mod(mtype, order) for order in self[mtype]],
            initial,
        )

    def sub_mod(self, mtype, morder):
        """Sum modifier values in one (type, order) slot.

        For the "buff" order, modifiers flagged ``buff_capped`` are summed
        separately and clamped to the per-type cap before being combined with
        the uncapped sum; every other order is a plain sum.
        """
        if morder == "buff":
            capped_sum = 0
            uncapped_sum = 0
            for modifier in self[mtype][morder]:
                if modifier.buff_capped:
                    capped_sum += modifier.get()
                else:
                    uncapped_sum += modifier.get()
            capped_sum = min(capped_sum, ModifierDict.BUFF_CAPS_FOR_TYPE.get(mtype, 2.0))
            return capped_sum + uncapped_sum
        return sum(modifier.get() for modifier in self[mtype][morder])
class Modifier(object):
    """A single named stat modifier registered in the shared ModifierDict.

    Shared state lives in ``_static``: the global modifier registry
    (``all_modifiers``), an optional condition resolver (``g_condition``) and
    the set of damage sources.  A modifier activates itself on construction.
    """

    _static = Static({"all_modifiers": ModifierDict(), "g_condition": None, "damage_sources": set()})

    def __init__(self, name, mtype, order, value, condition=None, get=None):
        self.mod_name = name
        self.mod_type = mtype
        self.mod_order = order
        self.mod_value = value
        self.mod_condition = condition
        if self.mod_condition:
            # Prime the shared condition resolver so the condition is registered.
            self._static.g_condition(self.mod_condition)
        # Optional gate callable: when it returns falsy, get() yields 0.
        self.mod_get = get
        # Only "buff"-order modifiers count against the buff cap
        # (see ModifierDict.sub_mod).
        self.buff_capped = order == "buff"
        self._mod_active = 0
        # Register immediately upon construction.
        self.on()

    def get(self):
        """Return the effective value; 0 when gated off by mod_get or condition."""
        if callable(self.mod_get) and not self.mod_get():
            return 0
        if self.mod_condition is not None and not self._static.g_condition(self.mod_condition):
            return 0
        return self.mod_value

    def on(self, modifier=None):
        """Activate and register in the global registry; idempotent.

        NOTE(review): the *modifier* argument exists for subclass overrides
        (see KillerModifier); this base implementation always registers self.
        """
        if self._mod_active == 1:
            return self
        if modifier is None:
            modifier = self
        self._static.all_modifiers.append(self)
        self._mod_active = 1
        return self

    def off(self, modifier=None):
        """Deactivate and deregister; idempotent."""
        if self._mod_active == 0:
            return self
        self._mod_active = 0
        if modifier is None:
            modifier = self
        self._static.all_modifiers.remove(self)
        return self

    def __enter__(self):
        self.on()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.off()

    def __repr__(self):
        return "<%s %s %s %s>" % (
            self.mod_name,
            self.mod_type,
            self.mod_order,
            self.get(),
        )
class KillerModifier(Modifier):
    """Modifier applied against targets afflicted by given killer conditions.

    Instead of registering under its own ``mod_type``, it registers once per
    condition in *killer_condition* under the synthetic type
    ``"<condition>_killer"``.
    """

    def __init__(self, name, order, value, killer_condition):
        # Iterable of affliction names this killer modifier applies to.
        self.killer_condition = killer_condition
        super().__init__(f"{name}_killer", f"{killer_condition}_killer", order, value)

    def on(self, modifier=None):
        """Activate: register under every killer-condition slot; idempotent."""
        if self._mod_active == 1:
            return self
        if modifier is None:
            modifier = self
        if modifier.mod_condition is not None:
            if not self._static.g_condition(modifier.mod_condition):
                return self
        for kcondition in self.killer_condition:
            self._static.all_modifiers[f"{kcondition}_killer"][self.mod_order].append(modifier)
        self._mod_active = 1
        return self

    def off(self, modifier=None):
        """Deactivate: remove from every killer-condition slot; idempotent."""
        if self._mod_active == 0:
            return self
        self._mod_active = 0
        if modifier is None:
            modifier = self
        # NOTE(review): on() appends *modifier* but off() removes *self*; the
        # two only differ when a caller passes an explicit modifier — confirm
        # this asymmetry is intended.
        for kcondition in self.killer_condition:
            self._static.all_modifiers[f"{kcondition}_killer"][self.mod_order].remove(self)
        return self
class CrisisModifier(Modifier):
    """Attack modifier that grows quadratically with the percentage of HP lost."""

    def __init__(self, name, scale, hp):
        self.hp_scale = scale
        self.hp_lost = 100 - hp
        super().__init__("mod_{}_crisis".format(name), "att", "crisis", self.c_mod_value())

    def c_mod_value(self):
        """Current crisis value: scale * (hp_lost %)^2, normalized to [0, scale]."""
        lost = self.hp_lost
        return self.hp_scale * (lost ** 2) / 10000

    def get(self):
        # Recompute on every read so HP changes are always reflected.
        current = self.c_mod_value()
        self.mod_value = current
        return current

    def on(self):
        # Refresh the cached value before (re-)registering.
        self.mod_value = self.c_mod_value()
        return super().on()
# Registry of buff types by name — presumably populated elsewhere in this
# module (not visible in this chunk); TODO confirm who writes to it.
bufftype_dict = {}
class Buff(object):
    """A timed (or permanent) effect wrapping a Modifier.

    Manages the full lifecycle: activation/registration, stacking by name,
    timed expiry via a Timer, pause/resume, and the special-cased effect
    types ("defense", "maxhp", "regen", "heal", "effect").
    """

    # Cap on the summed capped "maxhp" buff value
    # (mirrors ModifierDict.BUFF_CAPS_FOR_TYPE["maxhp"]).
    MAXHP_BUFF_CAP = 0.30
    # Shared state: the global list of live buffs and the adventurer instance.
    _static = Static({"all_buffs": [], "adv": None})
    DB_DURATION = 15  # usual doublebuff effect duration for offensive buffs, note that regen lasts 20s

    def __init__(
        self,
        name="<buff_noname>",
        value=0,
        duration=0,
        mtype="att",
        morder=None,
        modifier=None,
        hidden=False,
        source=None,
    ):
        self.name = name
        self.__value = value
        # A duration of -1 means "lasts until explicitly turned off".
        self.duration = duration
        self.mod_type = mtype
        # Crit buffs default to the "chance" order; everything else to "buff".
        self.mod_order = morder or ("chance" if self.mod_type == "crit" else "buff")
        self.bufftype = "misc" if hidden else "self"
        self.source = source
        # Sources not starting with "s" or "ds" (i.e. not skill-sourced,
        # presumably) only benefit from EX buff-time — TODO confirm.
        if self.source is not None and source[0] != "s" and source[0:2] != "ds":
            self.bufftime = self._ex_bufftime
        else:
            self.bufftime = self._bufftime
        self.buff_end_timer = Timer(self.buff_end_proc)
        if modifier:
            # Adopt the caller-supplied modifier and surface its get().
            self.modifier = modifier
            self.get = self.modifier.get
        elif mtype != "effect":
            # Create a modifier whose value is routed through Buff.get so it
            # reads 0 while the buff is inactive.
            self.modifier = Modifier("mod_" + self.name, self.mod_type, self.mod_order, 0)
            self.modifier.get = self.get
        else:
            self.modifier = None
        # Pre-built event used by count_team_buff to probe the damage formula.
        self.dmg_test_event = Event("dmg_formula")
        self.dmg_test_event.dmg_coef = 1
        self.dmg_test_event.dname = "test"
        self.hidden = hidden
        self.__stored = 0  # 1 while registered in _static.all_buffs
        self.__active = 0  # 1 while the buff's effect applies
        self.buffevent = Event("buff")
        self.pause_time = -1
        self.refresh_time = -1
        # self.on()

    def logwrapper(self, *args):
        # Log only when the buff is visible to the user.
        if not self.hidden:
            log("buff", *args)

    def _no_bufftime(self):
        # Buff-time multiplier variants; one of these is bound to self.bufftime.
        return 1

    def _ex_bufftime(self):
        # 1 + EX-order buff-time bonus only.
        return 1 + self._static.adv.sub_mod("buff", "ex")

    def _bufftime(self):
        # Full additive buff-time multiplier.
        return self._static.adv.mod("buff", operator=operator.add)

    def _debufftime(self):
        return self._static.adv.mod("debuff", operator=operator.add)

    def any_bufftime(self):
        self.bufftime = self._bufftime
        return self

    def no_bufftime(self):
        self.bufftime = self._no_bufftime
        return self

    def ex_bufftime(self):
        self.bufftime = self._ex_bufftime
        return self

    def value(self, newvalue=None):
        """Getter/setter: with an argument, log and update; otherwise return value."""
        if newvalue is not None:
            self.logwrapper(
                self.name,
                f"{self.mod_type}({self.mod_order}): {newvalue:.02f}",
                "buff value change",
            )
            return self.set(newvalue)
        else:
            return self.__value

    @allow_acl
    def get(self):
        # Effective value: 0 while inactive.
        if self.__active:
            return self.__value
        else:
            return 0

    def set(self, v, d=None):
        """Set the raw value and optionally a new duration; returns self."""
        self.__value = v
        if d != None:
            self.duration = d
        return self

    def stack(self):
        """Number of currently active buffs sharing this buff's name."""
        stack = 0
        for i in self._static.all_buffs:
            if i.name == self.name:
                if i.__active != 0:
                    stack += 1
        return stack

    def valuestack(self):
        """Return (summed value, count) over active buffs with this name."""
        stack = 0
        value = 0
        for i in self._static.all_buffs:
            if i.name == self.name:
                if i.__active != 0:
                    stack += 1
                    value += i.__value
        return value, stack

    def effect_on(self):
        """Apply the buff's side effect based on mod_type."""
        value = self.get()
        if self.mod_type == "defense" and value > 0:
            # Positive defense buffs trigger the defchain event.
            db = Event("defchain")
            db.source = self.source
            db.on()
            if self.bufftype == "team":
                # 15 here matches DB_DURATION — presumably the doublebuff
                # duration; TODO confirm.
                log("buff", "doublebuff", 15 * self.bufftime())
                if self.bufftime == self._bufftime:
                    self._static.adv.slots.c.set_need_bufftime()
        elif self.mod_type == "maxhp":
            # Only apply while under the max-HP buff cap.
            if self._static.adv.sub_mod("maxhp", "buff") < Buff.MAXHP_BUFF_CAP:
                self.modifier.on()
                percent = value * 100
                log("heal", self.name, self._static.adv.max_hp * value, "team" if self.bufftype == "team" else "self")
                self._static.adv.add_hp(percent)
        # FIXME: heal formula 1day twust
        elif self.mod_type == "regen" and value != 0:
            # Periodic self set_hp tick every 3.9s.
            self.set_hp_event = Event("set_hp")
            self.set_hp_event.delta = value
            self.regen_timer = Timer(self.hp_regen, 3.9, True).on()
        elif self.mod_type == "heal" and value != 0:
            # Periodic heal tick every 2.9s, routed through the heal formula.
            self.set_hp_event = Event("heal_make")
            self.set_hp_event.name = self.name
            self.set_hp_event.delta = self._static.adv.heal_formula(self.source, value)
            self.set_hp_event.target = "team" if self.bufftype == "team" else "self"
            self.regen_timer = Timer(self.hp_regen, 2.9, True).on()
        else:
            # Plain stat buffs just enable their modifier.
            return self.modifier and self.modifier.on()

    def effect_off(self):
        """Undo effect_on: stop tick timers or disable the modifier."""
        if self.mod_type in ("regen", "heal"):
            self.regen_timer.off()
        else:
            return self.modifier and self.modifier.off()

    def buff_end_proc(self, e):
        """Timer callback: buff timed out — deactivate and deregister."""
        self.logwrapper(
            self.name,
            f"{self.mod_type}({self.mod_order}): {self.value():.02f}",
            "buff end <timeout>",
        )
        self.__active = 0
        if self.__stored:
            self._static.all_buffs.remove(self)
            self.__stored = 0
        value, stack = self.valuestack()
        if stack > 0:
            self.logwrapper(
                self.name,
                f"{self.mod_type}({self.mod_order}): {value:.02f}",
                f"buff stack <{stack}>",
            )
        self.effect_off()

    def count_team_buff(self):
        """Log the relative damage contribution of active team/debuff buffs.

        Runs the damage formula twice (without, then with team buffs) using
        temporary baseline and placeholder modifiers, then logs the ratio.
        """
        if self.bufftype == "self":
            return
        # Baseline modifiers every run is assumed to have.
        base_mods = [
            Modifier("base_cc", "crit", "chance", 0.12),
            Modifier("base_killer", "killer", "passive", 0.30),
        ]
        self.dmg_test_event.modifiers = ModifierDict()
        for mod in base_mods:
            self.dmg_test_event.modifiers.append(mod)
        for b in filter(lambda b: b.get() and b.bufftype == "simulated_def", self._static.all_buffs):
            self.dmg_test_event.modifiers.append(b.modifier)
        self.dmg_test_event()
        no_team_buff_dmg = self.dmg_test_event.dmg
        placeholders = []
        for b in filter(
            lambda b: b.get() and b.bufftype in ("team", "debuff"),
            self._static.all_buffs,
        ):
            # Some buff types are substituted with placeholder att modifiers
            # so they participate in the damage formula comparably.
            placehold = None
            if b.modifier.mod_type == "s":
                placehold = Modifier("placehold_sd", "att", "sd", b.modifier.get() / 2)
            elif b.modifier.mod_type == "spd":
                placehold = Modifier("placehold_spd", "att", "spd", b.modifier.get())
            elif b.modifier.mod_type.endswith("_killer"):
                placehold = Modifier("placehold_k", "killer", "passive", b.modifier.get())
            if placehold:
                self.dmg_test_event.modifiers.append(placehold)
                placeholders.append(placehold)
            else:
                self.dmg_test_event.modifiers.append(b.modifier)
        self.dmg_test_event()
        team_buff_dmg = self.dmg_test_event.dmg
        log("buff", "team", team_buff_dmg / no_team_buff_dmg - 1)
        # Deactivate all temporary modifiers created above.
        for mod in chain(base_mods, placeholders):
            mod.off()

    def on(self, duration=None):
        """Activate (or refresh) the buff; duration is scaled by bufftime()."""
        d = max(-1, (duration or self.duration) * self.bufftime())
        if d != -1 and self.bufftime == self._bufftime:
            self._static.adv.slots.c.set_need_bufftime()
        if self.__active == 0:
            self.__active = 1
            if self.__stored == 0:
                self._static.all_buffs.append(self)
                self.__stored = 1
            if d >= 0:
                self.buff_end_timer.on(d)
            proc_type = "start"
        else:
            if d >= 0:
                if self.buff_end_timer.online:
                    self.buff_end_timer.on(d)
                else:
                    # Timer currently paused/offline: remember the refresh
                    # duration so resume() can restore it.
                    self.refresh_time = d
            else:
                return self
            proc_type = "refresh"
        self.logwrapper(
            self.name,
            f"{self.mod_type}({self.mod_order}): {self.value():.02f}",
            f"buff {proc_type} <{d:.02f}s>",
        )
        value, stack = self.valuestack()
        if stack > 1:
            log(
                "buff",
                self.name,
                f"{self.mod_type}({self.mod_order}): {value:.02f}",
                f"buff stack <{stack}>",
            )
        self.effect_on()
        self.buffevent.buff = self
        self.buffevent.on()
        return self

    def hp_regen(self, t):
        # Periodic tick for regen/heal buffs.
        self.set_hp_event.on()

    def off(self):
        """Explicitly deactivate the buff (does not deregister from all_buffs)."""
        if self.__active == 0:
            return
        self.logwrapper(
            self.name,
            f"{self.mod_type}({self.mod_order}): {self.value():.02f}",
            f"buff end <turn off>",
        )
        self.__active = 0
        self.buff_end_timer.off()
        self.effect_off()
        return self

    @property
    def adv(self):
        # Shared adventurer instance.
        return self._static.adv

    @allow_acl
    def timeleft(self):
        """Seconds until expiry, or -1 for a permanent buff."""
        return -1 if self.duration == -1 else (self.buff_end_timer.timing - now())

    def add_time(self, delta):
        """Extend (or shorten) the running expiry timer by *delta* seconds."""
        self.buff_end_timer.add(delta)

    def pause(self):
        """Stop the expiry timer, remembering the remaining time."""
        self.pause_time = self.timeleft()
        if self.pause_time > 0:
            log("pause", self.name, self.pause_time)
            self.buff_end_timer.off()

    def resume(self):
        """Restart the expiry timer using the larger of pause/refresh time."""
        self.pause_time = max(self.pause_time, self.refresh_time)
        if self.pause_time > 0:
            log("resume", self.name, self.pause_time, now() + self.pause_time)
            self.buff_end_timer.on(self.pause_time)
        self.pause_time = -1
    # def __repr__(self):
    #     return f'{self.modifier}({self.buff_end_timer})'
class EffectBuff(Buff):
    """Buff whose on/off behavior is fully delegated to caller-supplied callbacks."""

    def __init__(self, name, duration, effect_on, effect_off, source=None):
        super().__init__(name, 1, duration, "effect", source=source)
        # Replace Buff's effect hooks with the provided callables.
        self.effect_on = effect_on
        self.effect_off = effect_off
        # Force visible "self" buff type regardless of the hidden default.
        self.bufftype = "self"
class ModeAltBuff(Buff):
def __init__(self, name, duration=-1, uses=None, hidden=False, source=None):
super().__init__(name, 1, duration, "effect", hidden=hidden, | |
#!/usr/bin/env python
# encoding: utf-8
import pytest
import time
import re
import os
import tarfile
import requests
import tarfile
import docker
import json
import urllib
import yaml
import subprocess
from shutil import copy, copytree, rmtree
from executor import Executor
from docker.types import Mount
# Code to suppress insecure https warnings
import urllib3
from urllib3.exceptions import InsecureRequestWarning, SubjectAltNameWarning
urllib3.disable_warnings(InsecureRequestWarning)
urllib3.disable_warnings(SubjectAltNameWarning)
# Default test platform; overwritten per-run from the --platform CLI option
# in pytest_generate_tests.  (A module-level `global` statement is a no-op;
# kept as written.)
global PLATFORM
PLATFORM = "debian-9"
# Older Splunk version used by upgrade-path scenarios.
OLD_SPLUNK_VERSION = "7.3.4"
def pytest_generate_tests(metafunc):
# This is called for every test. Only get/set command line arguments
# if the argument is specified in the list of test "fixturenames".
option_value = metafunc.config.option.platform
global PLATFORM
PLATFORM = option_value
class TestDockerSplunk(Executor):
@classmethod
def setup_class(cls):
    # Delegate to Executor.setup_class using the platform chosen via --platform.
    super(TestDockerSplunk, cls).setup_class(PLATFORM)
def setup_method(self, method):
    """Per-test setup: wipe stale containers and reset deployment state."""
    # Make sure all running containers are removed
    self._clean_docker_env()
    self.compose_file_name = self.project_name = self.DIR = None
def teardown_method(self, method):
    """Per-test teardown: bring down any compose deployment, clean containers,
    remove the scratch directory, and reset state."""
    if self.compose_file_name and self.project_name:
        if self.DIR:
            compose_path = os.path.join(self.DIR, self.compose_file_name)
            command = "docker-compose -p {} -f {} down --volumes --remove-orphans".format(self.project_name, compose_path)
        else:
            command = "docker-compose -p {} -f test_scenarios/{} down --volumes --remove-orphans".format(self.project_name, self.compose_file_name)
        out, err, rc = self._run_command(command)
        self._clean_docker_env()
    if self.DIR:
        # Scratch dir may already be gone; best-effort removal.
        try:
            rmtree(self.DIR)
        except OSError:
            pass
    self.compose_file_name, self.project_name, self.DIR = None, None, None
def test_compose_3idx1cm_custom_repl_factor(self):
    """End-to-end: 3 indexers + 1 cluster master, with replication_factor=2
    and search_factor=1 injected via a generated default.yml."""
    self.project_name = self.generate_random_string()
    # Generate default.yml
    cid = self.client.create_container(self.SPLUNK_IMAGE_NAME, tty=True, command="create-defaults")
    self.client.start(cid.get("Id"))
    output = self.get_container_logs(cid.get("Id"))
    self.client.remove_container(cid.get("Id"), v=True, force=True)
    # Get the password
    password = re.search(r"^ password: (.*?)\n", output, flags=re.MULTILINE|re.DOTALL).group(1).strip()
    # NOTE(review): sibling tests assert != "null"; the "<PASSWORD>" sentinel
    # here looks like a redaction artifact — confirm which is intended.
    assert password and password != "<PASSWORD>"
    # Change repl factor & search factor
    output = re.sub(r' replication_factor: 3', r''' replication_factor: 2''', output)
    output = re.sub(r' search_factor: 3', r''' search_factor: 1''', output)
    # Write the default.yml to a file
    with open(os.path.join(self.SCENARIOS_DIR, "defaults", "{}.yml".format(self.project_name)), "w") as f:
        f.write(output)
    # Standup deployment
    try:
        self.compose_file_name = "3idx1cm.yaml"
        container_count, rc = self.compose_up(defaults_url="/tmp/defaults/{}.yml".format(self.project_name))
        assert rc == 0
        # Wait for containers to come up
        assert self.wait_for_containers(container_count, label="com.docker.compose.project={}".format(self.project_name), timeout=600)
        # Get container logs
        container_mapping = {"cm1": "cm", "idx1": "idx", "idx2": "idx", "idx3": "idx"}
        for container in container_mapping:
            # Check ansible version & configs
            ansible_logs = self.get_container_logs("{}_{}_1".format(self.project_name, container))
            self.check_ansible(ansible_logs)
            # Check values in log output
            inventory_json = self.extract_json("{}_{}_1".format(self.project_name, container))
            self.check_common_keys(inventory_json, container_mapping[container])
            try:
                assert inventory_json["splunk_indexer"]["hosts"] == ["idx1", "idx2", "idx3"]
                assert inventory_json["splunk_cluster_master"]["hosts"] == ["cm1"]
            except KeyError as e:
                self.logger.error(e)
                raise e
        # Check Splunkd on all the containers
        assert self.check_splunkd("admin", self.password)
        # Verify the cluster master reports the customized factors via REST
        containers = self.client.containers(filters={"label": "com.docker.compose.project={}".format(self.project_name)})
        assert len(containers) == 4
        for container in containers:
            container_name = container["Names"][0].strip("/").split("_")[1]
            splunkd_port = self.client.port(container["Id"], 8089)[0]["HostPort"]
            if container_name == "cm1":
                # Check the replication factor & search factor
                url = "https://localhost:{}/services/cluster/config/config?output_mode=json".format(splunkd_port)
                kwargs = {"auth": ("admin", self.password), "verify": False}
                status, content = self.handle_request_retry("GET", url, kwargs)
                assert status == 200
                assert json.loads(content)["entry"][0]["content"]["replication_factor"] == 2
                assert json.loads(content)["entry"][0]["content"]["search_factor"] == 1
    except Exception as e:
        self.logger.error(e)
        raise e
    finally:
        # Best-effort cleanup of the generated defaults file.
        try:
            os.remove(os.path.join(self.SCENARIOS_DIR, "defaults", "{}.yml".format(self.project_name)))
        except OSError as e:
            pass
def test_compose_1idx3sh1cm1dep(self):
    """End-to-end: 1 indexer, 3-member SHC, cluster master and deployer with a
    shared default.yml, plus an example app distributed from the appserver."""
    self.project_name = self.generate_random_string()
    # Generate default.yml -- for SHC, we need a common default.yml otherwise things won't work
    cid = self.client.create_container(self.SPLUNK_IMAGE_NAME, tty=True, command="create-defaults")
    self.client.start(cid.get("Id"))
    output = self.get_container_logs(cid.get("Id"))
    self.client.remove_container(cid.get("Id"), v=True, force=True)
    # Get the password
    password = re.search(r"^ password: (.*?)\n", output, flags=re.MULTILINE|re.DOTALL).group(1).strip()
    assert password and password != "null"
    # Write the default.yml to a file
    with open(os.path.join(self.SCENARIOS_DIR, "defaults", "{}.yml".format(self.project_name)), "w") as f:
        f.write(output)
    # Tar the app before spinning up the scenario
    with tarfile.open(os.path.join(self.FIXTURES_DIR, "{}.tgz".format(self.project_name)), "w:gz") as tar:
        tar.add(self.EXAMPLE_APP, arcname=os.path.basename(self.EXAMPLE_APP))
    # Standup deployment
    try:
        self.compose_file_name = "1idx3sh1cm1dep.yaml"
        container_count, rc = self.compose_up(defaults_url="/tmp/defaults/{}.yml".format(self.project_name), apps_url="http://appserver/{}.tgz".format(self.project_name))
        assert rc == 0
        # Wait for containers to come up
        assert self.wait_for_containers(container_count, label="com.docker.compose.project={}".format(self.project_name), timeout=600)
        # Get container logs
        container_mapping = {"sh1": "sh", "sh2": "sh", "sh3": "sh", "cm1": "cm", "idx1": "idx", "dep1": "dep"}
        for container in container_mapping:
            # Check ansible version & configs
            ansible_logs = self.get_container_logs("{}_{}_1".format(self.project_name, container))
            self.check_ansible(ansible_logs)
            # Check values in log output
            inventory_json = self.extract_json("{}_{}_1".format(self.project_name, container))
            self.check_common_keys(inventory_json, container_mapping[container])
            try:
                assert inventory_json["splunk_indexer"]["hosts"] == ["idx1"]
                assert inventory_json["splunk_search_head_captain"]["hosts"] == ["sh1"]
                assert inventory_json["splunk_search_head"]["hosts"] == ["sh2", "sh3"]
                assert inventory_json["splunk_cluster_master"]["hosts"] == ["cm1"]
                assert inventory_json["splunk_deployer"]["hosts"] == ["dep1"]
            except KeyError as e:
                self.logger.error(e)
                raise e
        # Check Splunkd on all the containers
        assert self.check_splunkd("admin", self.password)
        # Make sure apps are installed, and shcluster is setup properly
        containers = self.client.containers(filters={"label": "com.docker.compose.project={}".format(self.project_name)})
        assert len(containers) == 7
        for container in containers:
            # Skip the nginx container
            if "nginx" in container["Image"]:
                continue
            container_name = container["Names"][0].strip("/").split("_")[1]
            splunkd_port = self.client.port(container["Id"], 8089)[0]["HostPort"]
            if container_name in {"sh1", "sh2", "sh3", "idx1"}:
                # Check the app and version
                url = "https://localhost:{}/servicesNS/nobody/splunk_app_example/configs/conf-app/launcher?output_mode=json".format(splunkd_port)
                kwargs = {"auth": ("admin", self.password), "verify": False}
                status, content = self.handle_request_retry("GET", url, kwargs)
                assert status == 200
                assert json.loads(content)["entry"][0]["content"]["version"] == "0.0.1"
            # Make sure preferred captain is set
            if container_name == "sh1":
                url = "https://localhost:{}/servicesNS/nobody/system/configs/conf-server/shclustering?output_mode=json".format(splunkd_port)
                kwargs = {"auth": ("admin", self.password), "verify": False}
                status, content = self.handle_request_retry("GET", url, kwargs)
                assert json.loads(content)["entry"][0]["content"]["preferred_captain"] == "1"
        # Search results won't return the correct results immediately :(
        time.sleep(30)
        # Retry the distinct-host search a few times to absorb indexing lag.
        RETRIES = 10
        IMPLICIT_WAIT = 6
        for n in range(RETRIES):
            try:
                self.logger.info("Attempt #{}: checking internal search host count".format(n+1))
                search_providers, distinct_hosts = self.search_internal_distinct_hosts("{}_sh1_1".format(self.project_name), password=self.password)
                assert len(search_providers) == 2
                assert "idx1" in search_providers and "sh1" in search_providers
                assert distinct_hosts == 6
                break
            except Exception as e:
                self.logger.error("Attempt #{} error: {}".format(n+1, str(e)))
                if n < RETRIES-1:
                    time.sleep(IMPLICIT_WAIT)
                    continue
                raise e
    except Exception as e:
        self.logger.error(e)
        raise e
    finally:
        # Best-effort cleanup of generated defaults and the app tarball.
        try:
            os.remove(os.path.join(self.SCENARIOS_DIR, "defaults", "{}.yml".format(self.project_name)))
            os.remove(os.path.join(self.FIXTURES_DIR, "{}.tgz".format(self.project_name)))
        except OSError as e:
            pass
def test_compose_1uf1so(self):
    """End-to-end: one universal forwarder feeding one standalone instance."""
    # Standup deployment
    self.compose_file_name = "1uf1so.yaml"
    self.project_name = self.generate_random_string()
    container_count, rc = self.compose_up()
    assert rc == 0
    # Wait for containers to come up
    assert self.wait_for_containers(container_count, label="com.docker.compose.project={}".format(self.project_name))
    # Get container logs
    container_mapping = {"so1": "so", "uf1": "uf"}
    for container in container_mapping:
        # Check ansible version & configs
        ansible_logs = self.get_container_logs("{}_{}_1".format(self.project_name, container))
        self.check_ansible(ansible_logs)
        # Check values in log output
        inventory_json = self.extract_json("{}_{}_1".format(self.project_name, container))
        self.check_common_keys(inventory_json, container_mapping[container])
        try:
            assert inventory_json["splunk_standalone"]["hosts"] == ["so1"]
        except KeyError as e:
            self.logger.error(e)
            raise e
    # Search results won't return the correct results immediately :(
    time.sleep(30)
    search_providers, distinct_hosts = self.search_internal_distinct_hosts("{}_so1_1".format(self.project_name), password=self.password)
    assert len(search_providers) == 1
    assert search_providers[0] == "so1"
    # Both so1 and uf1 should be reporting internal events.
    assert distinct_hosts == 2
def test_compose_3idx1cm_default_repl_factor(self):
    """End-to-end: 3 indexers + 1 cluster master with the stock default.yml;
    verifies the default replication_factor=3 / search_factor=3."""
    self.project_name = self.generate_random_string()
    # Generate default.yml
    cid = self.client.create_container(self.SPLUNK_IMAGE_NAME, tty=True, command="create-defaults")
    self.client.start(cid.get("Id"))
    output = self.get_container_logs(cid.get("Id"))
    self.client.remove_container(cid.get("Id"), v=True, force=True)
    # Get the password
    password = re.search(r"^ password: (.*?)\n", output, flags=re.MULTILINE|re.DOTALL).group(1).strip()
    # NOTE(review): sibling tests assert != "null"; the "<PASSWORD>" sentinel
    # here looks like a redaction artifact — confirm which is intended.
    assert password and password != "<PASSWORD>"
    # Write the default.yml to a file
    with open(os.path.join(self.SCENARIOS_DIR, "defaults", "{}.yml".format(self.project_name)), "w") as f:
        f.write(output)
    # Standup deployment
    try:
        self.compose_file_name = "3idx1cm.yaml"
        container_count, rc = self.compose_up(defaults_url="/tmp/defaults/{}.yml".format(self.project_name))
        assert rc == 0
        # Wait for containers to come up
        assert self.wait_for_containers(container_count, label="com.docker.compose.project={}".format(self.project_name), timeout=600)
        # Get container logs
        container_mapping = {"cm1": "cm", "idx1": "idx", "idx2": "idx", "idx3": "idx"}
        for container in container_mapping:
            # Check ansible version & configs
            ansible_logs = self.get_container_logs("{}_{}_1".format(self.project_name, container))
            self.check_ansible(ansible_logs)
            # Check values in log output
            inventory_json = self.extract_json("{}_{}_1".format(self.project_name, container))
            self.check_common_keys(inventory_json, container_mapping[container])
            try:
                assert inventory_json["splunk_indexer"]["hosts"] == ["idx1", "idx2", "idx3"]
                assert inventory_json["splunk_cluster_master"]["hosts"] == ["cm1"]
            except KeyError as e:
                self.logger.error(e)
                raise e
        # Check Splunkd on all the containers
        assert self.check_splunkd("admin", self.password)
        # Verify the cluster master reports the default factors via REST
        containers = self.client.containers(filters={"label": "com.docker.compose.project={}".format(self.project_name)})
        assert len(containers) == 4
        for container in containers:
            container_name = container["Names"][0].strip("/").split("_")[1]
            splunkd_port = self.client.port(container["Id"], 8089)[0]["HostPort"]
            if container_name == "cm1":
                # Check the replication factor & search factor
                url = "https://localhost:{}/services/cluster/config/config?output_mode=json".format(splunkd_port)
                kwargs = {"auth": ("admin", self.password), "verify": False}
                status, content = self.handle_request_retry("GET", url, kwargs)
                assert status == 200
                assert json.loads(content)["entry"][0]["content"]["replication_factor"] == 3
                assert json.loads(content)["entry"][0]["content"]["search_factor"] == 3
    except Exception as e:
        self.logger.error(e)
        raise e
    finally:
        # Best-effort cleanup of the generated defaults file.
        try:
            os.remove(os.path.join(self.SCENARIOS_DIR, "defaults", "{}.yml".format(self.project_name)))
        except OSError as e:
            pass
def test_compose_1so1cm_connected(self):
    """End-to-end: standalone pre-connected to a cluster master; verifies both
    instances appear as Connected search heads on the CM."""
    # Standup deployment
    self.compose_file_name = "1so1cm_connected.yaml"
    self.project_name = self.generate_random_string()
    container_count, rc = self.compose_up()
    assert rc == 0
    # Wait for containers to come up
    assert self.wait_for_containers(container_count, label="com.docker.compose.project={}".format(self.project_name))
    # Get container logs
    container_mapping = {"so1": "so", "cm1": "cm"}
    for container in container_mapping:
        # Check ansible version & configs
        ansible_logs = self.get_container_logs("{}_{}_1".format(self.project_name, container))
        self.check_ansible(ansible_logs)
        # Check values in log output
        inventory_json = self.extract_json("{}_{}_1".format(self.project_name, container))
        self.check_common_keys(inventory_json, container_mapping[container])
    # Check Splunkd on all the containers
    assert self.check_splunkd("admin", self.password)
    # Check connections
    containers = self.client.containers(filters={"label": "com.docker.compose.project={}".format(self.project_name)})
    for container in containers:
        container_name = container["Names"][0].strip("/").split('_')[1]
        splunkd_port = self.client.port(container["Id"], 8089)[0]["HostPort"]
        if container_name == "cm1":
            # Ask the CM which search heads it knows about.
            status, content = self.handle_request_retry("GET", "https://localhost:{}/services/cluster/master/searchheads?output_mode=json".format(splunkd_port),
                                                        {"auth": ("admin", self.password), "verify": False})
            assert status == 200
            output = json.loads(content)
            assert len(output["entry"]) == 2
            for sh in output["entry"]:
                assert sh["content"]["label"] in ["cm1", "so1"]
                assert sh["content"]["status"] == "Connected"
def test_compose_1so1cm_unconnected(self):
# Standup | |
if len(rest) >= 32:
try:
my_uuid = uuid.UUID(rest[:32])
rest = rest[32:]
except ValueError:
pass
if not rest:
kwargs = dict()
elif rest[0] == '?':
kwargs = BaseLocator.parse_args(rest[1:])
else:
raise ValueError
return UntitledLocator(my_uuid, **kwargs)
def to_url(self):
    # Serialize this untitled locator as "untitled://<uuid-hex>?<args>",
    # the inverse of the from_url parser.
    args_str = BaseLocator.generate_args(self.kwargs)
    url_tuple = ('untitled', '', self._uuid.hex, args_str, '')
    return urlparse.urlunsplit(url_tuple)
def __hash__(self):
    # Hash on the uuid only, consistent with __eq__ (kwargs are ignored).
    return self._uuid.int
def __eq__(self, other):
    # Equal only when the concrete types match and the uuids agree;
    # kwargs are not compared, consistent with __hash__.
    return type(self) == type(other) and self._uuid == other._uuid
def __ne__(self, other):
    # Explicit inverse of __eq__ (required in Python 2, where it is not derived).
    return not self.__eq__(other)
def is_untitled(self):
    # This locator class always represents an unsaved, autosave-backed document.
    return True
@classmethod
def all_untitled_temporaries(cls):
    """Scan the autosave directory and return one locator per untitled uuid.

    Multiple autosave files may share a uuid; only the first occurrence
    creates a locator.
    """
    autosave_dir = SaveTemporariesMixin.get_autosave_dir()
    prefix_len = len(cls.UNTITLED_PREFIX)
    locators = {}
    for fname in os.listdir(autosave_dir):
        if not fname.startswith(cls.UNTITLED_PREFIX):
            continue
        if not os.path.isfile(os.path.join(autosave_dir, fname)):
            continue
        my_uuid = uuid.UUID(fname[prefix_len:prefix_len + 32])
        if my_uuid not in locators:
            locators[my_uuid] = cls(my_uuid)
    return locators.values()
class XMLFileLocator(SaveTemporariesMixin, BaseLocator):
    """Locator addressing a document stored as an uncompressed XML file.

    View-selection state (version node/tag, mashup trail/version, parameter
    exploration) is carried via keyword arguments round-tripped through the
    URL query string.
    """

    def __init__(self, filename, **kwargs):
        self._name = self.real_filename(filename)
        self._vnode = kwargs.get('version_node', None)
        self._vtag = kwargs.get('version_tag', '')
        self._mshptrail = kwargs.get('mashuptrail', None)
        # 'mashupVersion' takes precedence over the legacy 'mashup' key.
        if 'mashupVersion' in kwargs:
            self._mshpversion = kwargs.get('mashupVersion', None)
        else:
            self._mshpversion = kwargs.get('mashup', None)
        self._parameterexploration = kwargs.get('parameterExploration', None)
        self.kwargs = kwargs

    def load(self, type):
        # Prefer an autosave temporary over the named file when one exists.
        # (Parameter name shadows the builtin `type`; kept for API compatibility.)
        fname = self.get_temporary()
        if fname:
            obj = io.open_from_xml(fname, type)
        else:
            obj = io.open_from_xml(self._name, type)
        obj.locator = self
        return obj

    def save(self, obj, do_copy=True, version=None):
        # Accepts either a raw object or a SaveBundle wrapping one; a bundle
        # is unwrapped before saving and re-wrapped for the return value.
        is_bundle = False
        if type(obj) == type(SaveBundle(None)):
            is_bundle = True
            save_bundle = obj
            obj = save_bundle.get_primary_obj()
        obj = io.save_to_xml(obj, self._name, version)
        obj.locator = self
        # Only remove the temporaries if save succeeded!
        self.clean_temporaries()
        if is_bundle:
            return SaveBundle(save_bundle.bundle_type, obj)
        return obj

    def is_valid(self):
        # Valid iff the backing file currently exists on disk.
        return os.path.isfile(self._name)

    def _get_name(self):
        return str(self._name)
    name = property(_get_name)

    def _get_short_filename(self):
        # Basename without its extension.
        return os.path.splitext(os.path.basename(self._name))[0]
    short_filename = property(_get_short_filename)

    def _get_short_name(self):
        # Python 2: decode the byte-string filename using the FS encoding.
        name = self._get_short_filename()
        enc = sys.getfilesystemencoding() or locale.getpreferredencoding()
        return name.decode(enc)
    short_name = property(_get_short_name)

    @classmethod
    def from_url(cls, url):
        """Build a locator from a file:// URL or a bare filename."""
        if '://' in url:
            scheme, path = url.split('://', 1)
            if scheme != 'file':
                raise ValueError
        else:
            url = BaseLocator.convert_filename_to_url(url)
        # Temporarily teach urlparse that file:// URLs may carry a query part.
        old_uses_query = urlparse.uses_query
        urlparse.uses_query = urlparse.uses_query + ['file']
        scheme, host, path, args_str, fragment = urlparse.urlsplit(url)
        urlparse.uses_query = old_uses_query
        # De-urlencode pathname
        path = url2pathname(str(path))
        kwargs = BaseLocator.parse_args(args_str)
        return cls(os.path.abspath(path), **kwargs)

    def to_url(self):
        # Inverse of from_url: a file:// URL with kwargs in the query string.
        args_str = BaseLocator.generate_args(self.kwargs)
        url_tuple = ('file', '',
                     pathname2url(os.path.abspath(self._name)),
                     args_str, '')
        return urlparse.urlunsplit(url_tuple)

    def serialize(self, dom, element):
        """serialize(dom, element) -> None
        Convert this object to an XML representation.
        """
        locator = dom.createElement('locator')
        locator.setAttribute('type', 'file')
        node = dom.createElement('name')
        filename = dom.createTextNode(str(self._name))
        node.appendChild(filename)
        locator.appendChild(node)
        element.appendChild(locator)

    @staticmethod
    def parse(element):
        """ parse(element) -> XMLFileLocator or None
        Parse an XML object representing a locator and returns a
        XMLFileLocator object.
        """
        if str(element.getAttribute('type')) == 'file':
            for n in element.childNodes:
                if n.localName == "name":
                    filename = str(n.firstChild.nodeValue).strip(" \n\t")
                    return XMLFileLocator(filename)
            return None
        else:
            return None

    #ElementTree port
    def to_xml(self, node=None):
        """to_xml(node: ElementTree.Element) -> ElementTree.Element
        Convert this object to an XML representation.
        """
        if node is None:
            node = ElementTree.Element('locator')
        node.set('type', 'file')
        childnode = ElementTree.SubElement(node,'name')
        # Python 2: filename is stored as a latin-1 byte string.
        childnode.text = self._name.decode('latin-1')
        return node

    @staticmethod
    def from_xml(node):
        """from_xml(node:ElementTree.Element) -> XMLFileLocator or None
        Parse an XML object representing a locator and returns a
        XMLFileLocator object."""
        if node.tag != 'locator':
            return None
        #read attributes
        data = node.get('type', '')
        type = str(data)
        if type == 'file':
            for child in node.getchildren():
                if child.tag == 'name':
                    filename = child.text.encode('latin-1').strip()
                    return XMLFileLocator(filename)
        return None

    def __str__(self):
        return '<%s vistrail_name="%s" />' % (self.__class__.__name__, self._name)

    ###########################################################################
    # Operators

    def __eq__(self, other):
        # Equality is by filename only; subclasses compare the same way.
        if not isinstance(other, XMLFileLocator):
            return False
        return self._name == other._name

    def __ne__(self, other):
        # Python 2: __ne__ is not derived from __eq__ automatically.
        return not self.__eq__(other)
class ZIPFileLocator(XMLFileLocator):
    """Files are compressed in zip format. The temporaries are
    still in xml"""
    def __init__(self, filename, **kwargs):
        XMLFileLocator.__init__(self, filename, **kwargs)
        # directory holding the unpacked bundle; set by load()/save()
        self.tmp_dir = None
    def load(self, type):
        """Load a SaveBundle, either from an existing temporary XML file
        or by unpacking the zip archive at self._name."""
        fname = self.get_temporary()
        if fname:
            from vistrails.db.domain import DBVistrail
            obj = io.open_from_xml(fname, type)
            return SaveBundle(DBVistrail.vtType, obj)
        else:
            (save_bundle, tmp_dir) = io.open_bundle_from_zip_xml(type, self._name)
            self.tmp_dir = tmp_dir
            for obj in save_bundle.get_db_objs():
                obj.locator = self
            return save_bundle
    def save(self, save_bundle, do_copy=True, version=None):
        """Write the bundle back to the zip archive and return the
        (possibly copied) saved bundle."""
        if do_copy:
            # make sure we create a fresh temporary directory if we're
            # duplicating the vistrail
            tmp_dir = None
        else:
            # otherwise, use the existing temp directory if one is set
            tmp_dir = self.tmp_dir
        (save_bundle, tmp_dir) = io.save_bundle_to_zip_xml(save_bundle, self._name, tmp_dir, version)
        self.tmp_dir = tmp_dir
        for obj in save_bundle.get_db_objs():
            obj.locator = self
        # Only remove the temporaries if save succeeded!
        self.clean_temporaries()
        return save_bundle
    def close(self):
        """Release the temporary directory created while the zip was open."""
        if self.tmp_dir is not None:
            io.close_zip_xml(self.tmp_dir)
            self.tmp_dir = None
    ###########################################################################
    # Operators
    def __eq__(self, other):
        if not isinstance(other, ZIPFileLocator):
            return False
        return self._name == other._name
    def __ne__(self, other):
        return not self.__eq__(other)
    @staticmethod
    def parse(element):
        """ parse(element) -> ZIPFileLocator or None
        Parse an XML object representing a locator and returns a
        ZIPFileLocator object.
        """
        if str(element.getAttribute('type')) == 'file':
            for n in element.childNodes:
                if n.localName == "name":
                    filename = str(n.firstChild.nodeValue).strip(" \n\t")
                    return ZIPFileLocator(filename)
            return None
        else:
            return None
    #ElementTree port
    @staticmethod
    def from_xml(node):
        """from_xml(node:ElementTree.Element) -> ZIPFileLocator or None
        Parse an XML object representing a locator and returns a
        ZIPFileLocator object."""
        if node.tag != 'locator':
            return None
        # read attributes
        loc_type = str(node.get('type', ''))
        if loc_type == 'file':
            # iterate children directly: Element.getchildren() was
            # deprecated and removed in Python 3.9
            for child in node:
                if child.tag == 'name':
                    filename = child.text.encode('latin-1').strip()
                    return ZIPFileLocator(filename)
        return None
# class URLLocator(ZIPFileLocator):
# def load(self, type):
class DBLocator(BaseLocator):
cache = {}
cache_timestamps = {}
connections = {}
cache_connections = {}
def __init__(self, host, port, database, user, passwd, name=None,
**kwargs):
self._host = host
self._port = int(port)
self._db = database
self._user = user
self._passwd = <PASSWORD>
self._name = name
self._hash = ''
self.kwargs = kwargs
self._obj_id = self.kwargs.get('obj_id', None)
self._obj_type = self.kwargs.get('obj_type', None)
self._conn_id = self.kwargs.get('connection_id', None)
self._vnode = self.kwargs.get('version_node', None)
self._vtag = self.kwargs.get('version_tag', None)
self._mshptrail = self.kwargs.get('mashuptrail', None)
if 'mashupVersion' in self.kwargs:
self._mshpversion = self.kwargs.get('mashupVersion', None)
else:
self._mshpversion = self.kwargs.get('mashup', None)
self._parameterexploration = self.kwargs.get('parameterExploration', None)
    # ---- read-only accessors --------------------------------------------
    def _get_host(self):
        # database server host name
        return self._host
    host = property(_get_host)
    def _get_port(self):
        # database server port (stored as int by __init__)
        return self._port
    port = property(_get_port)
    def _get_db(self):
        # database (schema) name
        return self._db
    db = property(_get_db)
    def _get_obj_id(self):
        # id of the referenced object, or None
        return self._obj_id
    obj_id = property(_get_obj_id)
    def _get_obj_type(self):
        # vtType of the referenced object, or None
        return self._obj_type
    obj_type = property(_get_obj_type)
    def _get_connection_id(self):
        # id of the pooled connection, or None
        return self._conn_id
    connection_id = property(_get_connection_id)
    def _get_name(self):
        # fully qualified "host:port:db:name" identifier
        return self._host + ':' + str(self._port) + ':' + self._db + ':' + \
            str(self._name)
    name = property(_get_name)
    def _get_short_filename(self):
        # display name only (no host/port/db prefix)
        return str(self._name)
    short_filename = property(_get_short_filename)
    def _get_short_name(self):
        # NOTE(review): `unicode` is Python 2 only; this property fails
        # under Python 3 — confirm before porting
        name = self._name
        if not isinstance(name, unicode):
            name = name.decode('ascii')
        return name
    short_name = property(_get_short_name)
def hash(self):
node = self.to_xml()
xml_string = ElementTree.tostring(node)
#print "hash", xml_string
return hashlib.sha224(xml_string).hexdigest()
def is_valid(self):
if self._conn_id is not None \
and self._conn_id in DBLocator.connections:
return True
try:
self.get_connection()
except Exception:
return False
return True
def get_connection(self):
if self._conn_id is not None \
and DBLocator.connections.has_key(self._conn_id):
connection = DBLocator.connections[self._conn_id]
if io.ping_db_connection(connection):
return connection
else:
if self._conn_id is None:
if DBLocator.cache_connections.has_key(self._hash):
connection = DBLocator.cache_connections[self._hash]
if io.ping_db_connection(connection):
debug.log("Reusing cached connection")
return connection
if len(DBLocator.connections.keys()) == 0:
self._conn_id = 1
else:
self._conn_id = max(DBLocator.connections.keys()) + 1
config = {'host': self._host,
'port': self._port,
'db': self._db,
'user': self._user,
'passwd': <PASSWORD>}
#print "config:", config
connection = io.open_db_connection(config)
DBLocator.connections[self._conn_id] = connection
DBLocator.cache_connections[self._hash] = connection
return connection
def load(self, type, tmp_dir=None):
self._hash = self.hash()
#print "LLoad Big|type", type
if DBLocator.cache.has_key(self._hash):
save_bundle = DBLocator.cache[self._hash]
obj = save_bundle.get_primary_obj()
ts = self.get_db_modification_time(obj.vtType)
#debug.log("cached time: %s, db time: %s"%(DBLocator.cache_timestamps[self._hash],ts))
if DBLocator.cache_timestamps[self._hash] == ts:
#debug.log("using cached vistrail")
self._name = obj.db_name
# If thumbnail cache was cleared, get thumbs from db
if tmp_dir is not None:
for absfname in save_bundle.thumbnails:
if not os.path.isfile(absfname):
save_bundle.thumbnails = io.open_thumbnails_from_db(self.get_connection(), type, self.obj_id, tmp_dir)
break
return save_bundle
#debug.log("loading vistrail from db")
connection = self.get_connection()
if type == DBWorkflow.vtType:
return io.open_from_db(connection, type, self.obj_id)
save_bundle = io.open_bundle_from_db(type, connection, self.obj_id, tmp_dir)
primary_obj = save_bundle.get_primary_obj()
self._name = primary_obj.db_name
#print "locator db name:", self._name
for obj in save_bundle.get_db_objs():
obj.locator = self
_hash = self.hash()
DBLocator.cache[self._hash] = save_bundle.do_copy()
DBLocator.cache_timestamps[self._hash] = primary_obj.db_last_modified
return save_bundle
def save(self, save_bundle, do_copy=False, version=None):
connection = self.get_connection()
for obj in save_bundle.get_db_objs():
obj.db_name = self._name
save_bundle = io.save_bundle_to_db(save_bundle, connection, do_copy, version)
primary_obj = save_bundle.get_primary_obj()
self._obj_id = primary_obj.db_id
self._obj_type = primary_obj.vtType
for obj in save_bundle.get_db_objs():
obj.locator = self
#update the cache with a copy of the new bundle
self._hash = self.hash()
DBLocator.cache[self._hash] = save_bundle.do_copy()
DBLocator.cache_timestamps[self._hash] = primary_obj.db_last_modified
return save_bundle
def get_db_modification_time(self, obj_type=None):
if obj_type is None:
if self.obj_type is None:
obj_type = DBVistrail.vtType
else:
obj_type = self.obj_type
ts = io.get_db_object_modification_time(self.get_connection(),
| |
# jenkins_job_wrecker/modules/scm.py (from javiergayala/jenkins-job-wrecker)
# encoding=utf8
import jenkins_job_wrecker.modules.base
class Scm(jenkins_job_wrecker.modules.base.Base):
    """Translator for Jenkins <scm> job sections."""
    component = 'scm'
    def gen_yml(self, yml_parent, data):
        """Append the YAML equivalent of the *data* scm XML node to
        *yml_parent*, dispatching on either the element tag or its
        'class' attribute (tag takes precedence)."""
        out = []
        handlers = self.registry.registry[self.component]
        xml_class = data.attrib.get('class')
        class_key = None
        if xml_class is not None:
            if xml_class == 'hudson.scm.NullSCM':
                # job has no SCM configured
                return None
            if xml_class == 'org.jenkinsci.plugins.multiplescms.MultiSCM':
                # recurse into each wrapped SCM definition
                for sub_scm in data[0]:
                    self.gen_yml(yml_parent, sub_scm)
                return
            class_key = xml_class.split('.')[-1].lower()
        tag_key = data.tag.split('.')[-1].lower()
        for key in (tag_key, class_key):
            if key is not None and key in handlers:
                self.registry.dispatch(self.component, key, data, out)
                yml_parent.append(['scm', out])
                return
        raise NotImplementedError('%s scm not supported' % data.attrib['class'])
def gitscm(top, parent):
    """Translate a Jenkins git <scm> XML element into a JJB 'git' entry.

    top -- the git scm XML element
    parent -- list that receives the resulting {'git': {...}} mapping
    Raises NotImplementedError for constructs JJB cannot express.
    """
    git = {}
    for child in top:
        if child.tag == 'configVersion':
            continue    # we don't care
        elif child.tag == 'userRemoteConfigs':
            if len(list(child)) != 1:
                # expected "hudson.plugins.git.UserRemoteConfig" tag
                raise NotImplementedError("%s not supported with %i "
                                          "children" % (child.tag,
                                                        len(list(child))))
            for setting in child[0]:
                if setting.tag == 'credentialsId':
                    git['credentials-id'] = setting.text
                else:
                    git[setting.tag] = setting.text
        elif child.tag == 'gitTool':
            git['git-tool'] = child.text
        elif child.tag == 'excludedUsers':
            if child.text:
                users = child.text.split()
                git['excluded-users'] = users
        elif child.tag == 'buildChooser':
            if child.attrib['class'] == \
                    'hudson.plugins.git.util.DefaultBuildChooser':
                continue
            else:
                # see JJB's jenkins_jobs/modules/scm.py
                # for other build choosers
                raise NotImplementedError("%s build "
                                          "chooser" % child.attrib['class'])
        elif child.tag == 'disableSubmodules':
            # 'false' is the default and needs no explicit YAML.
            if child.text == 'true':
                raise NotImplementedError("TODO: %s" % child.tag)
        elif child.tag == 'recursiveSubmodules':
            # 'false' is the default and needs no explicit YAML.
            if child.text == 'true':
                raise NotImplementedError("TODO: %s" % child.tag)
        elif child.tag == 'authorOrCommitter':
            # 'false' is the default and needs no explicit YAML.
            if child.text == 'true':
                git['use-author'] = True
        elif child.tag == 'useShallowClone':
            # 'false' is the default and needs no explicit YAML.
            if child.text == 'true':
                git['shallow-clone'] = True
        elif child.tag == 'ignoreNotifyCommit':
            # 'false' is the default and needs no explicit YAML.
            if child.text == 'true':
                git['ignore-notify'] = True
        elif child.tag == 'wipeOutWorkspace':
            git['wipe-workspace'] = (child.text == 'true')
        elif child.tag == 'skipTag':
            # 'false' is the default and needs no explicit YAML.
            if child.text == 'true':
                git['skip-tag'] = True
        elif child.tag == 'pruneBranches':
            # 'false' is the default and needs no explicit YAML.
            if child.text == 'true':
                git['prune'] = True
        elif child.tag == 'remotePoll':
            # 'false' is the default and needs no explicit YAML.
            if child.text == 'true':
                git['fastpoll'] = True
        elif child.tag == 'relativeTargetDir':
            # If it's empty, no explicit 'basedir' YAML needed.
            if child.text:
                git['basedir'] = child.text
        elif child.tag == 'reference':
            # If it's empty, we're good
            if child.text or len(list(child)) > 0:
                raise NotImplementedError(child.tag)
        elif child.tag == 'gitConfigName':
            # If it's empty, we're good
            if child.text or len(list(child)) > 0:
                raise NotImplementedError(child.tag)
        elif child.tag == 'gitConfigEmail':
            # If it's empty, we're good
            if child.text or len(list(child)) > 0:
                raise NotImplementedError(child.tag)
        elif child.tag == 'scmName':
            # If it's empty, we're good
            if child.text or len(list(child)) > 0:
                raise NotImplementedError(child.tag)
        elif child.tag == 'branches':
            if child[0][0].tag != 'name':
                raise NotImplementedError("%s XML not supported"
                                          % child[0][0].tag)
            branches = []
            for item in child:
                for branch in item:
                    branches.append(branch.text)
            git['branches'] = branches
        elif child.tag == 'doGenerateSubmoduleConfigurations':
            if len(list(child)) != 0:
                raise NotImplementedError("%s not supported with %i children"
                                          % (child.tag, len(list(child))))
            # JJB doesn't handle this element anyway. Just continue on.
            continue
        elif child.tag == 'submoduleCfg':
            if len(list(child)) > 0:
                raise NotImplementedError("%s not supported with %i children"
                                          % (child.tag, len(list(child))))
        elif child.tag == 'browser':
            # XXX: blunt hammer: just use the "auto" browser for everything.
            git['browser'] = 'auto'
        elif child.tag == 'extensions':
            for extension in child:
                # hudson.plugins.git.extensions.impl.RelativeTargetDirectory
                if extension.tag == 'hudson.plugins.git.extensions.impl.RelativeTargetDirectory':
                    if len(list(extension)) != 1:
                        # expected <relativeTargetDir>
                        raise NotImplementedError("%s not supported with %i children" % (extension.tag, len(list(extension))))
                    if extension[0].tag != 'relativeTargetDir':
                        raise NotImplementedError("%s XML not supported" % extension[0].tag)
                    git['basedir'] = extension[0].text
                elif extension.tag == 'hudson.plugins.git.extensions.impl.CheckoutOption':
                    if len(list(extension)) != 1:
                        # expected <timeout>
                        raise NotImplementedError("%s not supported with %i children" % (extension.tag, len(list(extension))))
                    if extension[0].tag != 'timeout':
                        # BUG FIX: this previously reported child[0][0].tag
                        # (a branch name) instead of the offending tag.
                        raise NotImplementedError("%s XML not supported" % extension[0].tag)
                    git['timeout'] = extension[0].text
                elif extension.tag == 'hudson.plugins.git.extensions.impl.WipeWorkspace':
                    if len(list(extension)) != 0:
                        raise NotImplementedError("%s not supported with %i children" % (extension.tag, len(list(extension))))
                    git['wipe-workspace'] = True
                elif extension.tag == 'hudson.plugins.git.extensions.impl.LocalBranch':
                    # assumes a single <localBranch> child — IndexError otherwise
                    git['local-branch'] = extension[0].text
                elif extension.tag == 'hudson.plugins.git.extensions.impl.PerBuildTag':
                    pass
                elif extension.tag == 'hudson.plugins.git.extensions.impl.CleanBeforeCheckout':
                    clean_dict = {'before': True}
                    if 'clean' in git:  # after has already been added
                        git['clean'].update(clean_dict)
                    else:  # Need to create dict for git['clean']
                        git['clean'] = clean_dict
                elif extension.tag == 'hudson.plugins.git.extensions.impl.CleanCheckout':
                    clean_dict = {'after': True}
                    if 'clean' in git:  # before has already been added
                        git['clean'].update(clean_dict)
                    else:  # Need to create dict for git['clean']
                        git['clean'] = clean_dict
                elif extension.tag == 'hudson.plugins.git.extensions.impl.PathRestriction':
                    paths = {'includedRegions': 'included-regions',
                             'excludedRegions': 'excluded-regions'}
                    for jxml, jjb in list(paths.items()):
                        if extension.find(jxml) is not None:
                            regions = extension.find(jxml).text
                            if regions is not None:
                                git[jjb] = regions.splitlines()
                else:
                    raise NotImplementedError("%s not supported" % extension.tag)
        else:
            raise NotImplementedError("cannot handle XML %s" % child.tag)
    parent.append({'git': git})
def mercurialscm(top, parent):
    """Translate a Jenkins Mercurial <scm> XML element into a JJB 'hg'
    entry appended to *parent*. Unrecognized child tags are ignored."""
    browser_names = {
        'hudson.plugins.mercurial.browser.BitBucket': 'bitbucketweb',
        'hudson.plugins.mercurial.browser.FishEye': 'fisheye',
        'hudson.plugins.mercurial.browser.GoogleCode': 'googlecode',
        'hudson.plugins.mercurial.browser.HgWeb': 'hgweb',
        'hudson.plugins.mercurial.browser.KilnHG': 'kilnhg',
        'hudson.plugins.mercurial.browser.RhodeCode': 'rhodecode',
        'hudson.plugins.mercurial.browser.RhodeCodeLegacy': 'rhodecode-pre-1.2',
    }
    hg = {}
    for node in top:
        tag = node.tag
        if tag == 'source':
            hg['url'] = node.text
        elif tag == 'credentialsId':
            hg['credentials-id'] = node.text
        elif tag == 'revisionType':
            hg['revision-type'] = node.text.lower()
        elif tag == 'revision':
            hg['revision'] = node.text
        elif tag == 'modules':
            pass
        elif tag == 'clean':
            hg['clean'] = (node.text == 'true')
        elif tag == 'subdir':
            hg['subdir'] = node.text
        elif tag == 'disableChangeLog':
            hg['disable-changelog'] = (node.text == 'true')
        elif tag == 'browser' and 'class' in node.attrib:
            browser_class = node.attrib['class']
            if browser_class == 'hudson.plugins.mercurial.browser.Kallithea':
                # Not supported by JJB
                raise NotImplementedError("%s is not yet supported by jenkins-job-builder." %
                                          browser_class)
            if browser_class in browser_names:
                hg['browser'] = browser_names[browser_class]
            url_node = node.find('url')
            if url_node is not None:
                hg['browser-url'] = url_node.text
    parent.append({'hg': hg})
def subversionscm(top, parent):
# Parameters:
# url (str) - URL of the svn repository
# basedir (str) - location relative to the workspace root to checkout to (default '.')
# credentials-id (str) - optional argument to specify the ID of credentials to use
# repo-depth (str) - Repository depth. Can be one of 'infinity', 'empty',
# 'files', 'immediates' or 'unknown'. (default 'infinity')
# ignore-externals (bool) - Ignore Externals. (default false)
# workspaceupdater (str) - optional argument to specify
# workspaceupdater -
# optional argument to specify how to update the workspace (default wipeworkspace)
# supported values:
# wipeworkspace - deletes the workspace before checking out
# revertupdate - do an svn revert then an svn update
# emulateclean - delete unversioned/ignored files then update
# update - do an svn update as much as possible
# excluded-users (list(str)) - list of users to ignore revisions from when polling for changes (if polling is enabl
# included-regions (list(str)) - list of file/folders to include (optional)
# excluded-regions (list(str)) - list of file/folders to exclude (optional)
# excluded-commit-messages (list(str)) - list of commit messages to exclude (optional)
# exclusion-revprop-name (str) - revision svn-property to ignore (optional)
# ignore-property-changes-on-directories (bool) - ignore svn-property only changes of directories (default false)
# filter-changelog (bool) - If set Jenkins will apply the same inclusion and exclusion patterns for displaying chan
# repos (list) - list of repositories to checkout (optional)
# viewvc-url (str) -
# URL of the svn web interface (optional)
# Repo:
# url (str) - URL for the repository
# basedir (str) - Location relative to the workspace root to checkout to (default '.')
# credentials-id - optional ID of credentials to use
# repo-depth - Repository depth. Can be one of 'infinity', 'empty', 'files', 'immediates' or 'unknown'. (de
# ignore-externals - Ignore Externals. (default false)
svn = {}
for child in top:
if child.tag == 'remote':
svn['url'] = child.text if child.text else ''
elif child.tag == 'local':
svn['basedir'] = child.text if child.text else ''
elif child.tag == 'credentialsId':
svn['credentials-id'] = child.text if child.text else ''
elif child.tag == 'depthOption':
svn['repo-depth'] = child.text if child.text else ''
elif child.tag == 'ignoreExternalsOption':
| |
1.1, 1, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3],
# "marker": {
# "color": "rgb(255, 127, 14)",
# "line": {
# "color": "rgb(255, 255, 255)",
# "width": 0.5
# },
# "opacity": 0.74,
# "size": 8
# },
# "mode": "markers",
# "name": "versicolor",
# "type": "scatter",
# "uid": "9c1f01",
"xaxis": "x",
"yaxis": "y"
}
# Scatter-trace data dicts: each holds x/y value arrays plus the subplot
# axis assignment; the styling attributes were left commented out.
trace18 = {
  "x": [2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2, 1.9, 2.1, 2, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2, 2, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2, 2.3, 1.8],
  "y": [2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2, 1.9, 2.1, 2, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2, 2, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2, 2.3, 1.8],
  # "marker": {
  #   "color": "rgb(44, 160, 44)",
  #   "line": {
  #     "color": "rgb(255, 255, 255)",
  #     "width": 0.5
  #   },
  #   "opacity": 0.74,
  #   "size": 8
  # },
  # "mode": "markers",
  # "name": "virginica",
  # "type": "scatter",
  # "uid": "ec780b",
  "xaxis": "x",
  "yaxis": "y"
}
trace19 = {
  "x": [0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2],
  "y": [3.5, 3, 3.2, 3.1, 3.6, 3.9, 3.4, 3.4, 2.9, 3.1, 3.7, 3.4, 3, 3, 4, 4.4, 3.9, 3.5, 3.8, 3.8, 3.4, 3.7, 3.6, 3.3, 3.4, 3, 3.4, 3.5, 3.4, 3.2, 3.1, 3.4, 4.1, 4.2, 3.1, 3.2, 3.5, 3.6, 3, 3.4, 3.5, 2.3, 3.2, 3.5, 3.8, 3, 3.8, 3.2, 3.7, 3.3],
  # "marker": {
  #   "color": "rgb(31, 119, 180)",
  #   "line": {
  #     "color": "rgb(255, 255, 255)",
  #     "width": 0.5
  #   },
  #   "opacity": 0.74,
  #   "size": 8
  # },
  # "mode": "markers",
  # "name": "setosa",
  # "type": "scatter",
  # "uid": "8ec916",
  "xaxis": "x",
  "yaxis": "y2"
}
trace20 = {
  "x": [1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1, 1.3, 1.4, 1, 1.5, 1, 1.4, 1.3, 1.4, 1.5, 1, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1, 1.1, 1, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3],
  "y": [3.2, 3.2, 3.1, 2.3, 2.8, 2.8, 3.3, 2.4, 2.9, 2.7, 2, 3, 2.2, 2.9, 2.9, 3.1, 3, 2.7, 2.2, 2.5, 3.2, 2.8, 2.5, 2.8, 2.9, 3, 2.8, 3, 2.9, 2.6, 2.4, 2.4, 2.7, 2.7, 3, 3.4, 3.1, 2.3, 3, 2.5, 2.6, 3, 2.6, 2.3, 2.7, 3, 2.9, 2.9, 2.5, 2.8],
  # "marker": {
  #   "color": "rgb(255, 127, 14)",
  #   "line": {
  #     "color": "rgb(255, 255, 255)",
  #     "width": 0.5
  #   },
  #   "opacity": 0.74,
  #   "size": 8
  # },
  # "mode": "markers",
  # "name": "versicolor",
  # "type": "scatter",
  # "uid": "cf0bba",
  "xaxis": "x",
  "yaxis": "y2"
}
trace21 = {
  "x": [2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2, 1.9, 2.1, 2, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2, 2, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2, 2.3, 1.8],
  "y": [3.3, 2.7, 3, 2.9, 3, 3, 2.5, 2.9, 2.5, 3.6, 3.2, 2.7, 3, 2.5, 2.8, 3.2, 3, 3.8, 2.6, 2.2, 3.2, 2.8, 2.8, 2.7, 3.3, 3.2, 2.8, 3, 2.8, 3, 2.8, 3.8, 2.8, 2.8, 2.6, 3, 3.4, 3.1, 3, 3.1, 3.1, 3.1, 2.7, 3.2, 3.3, 3, 2.5, 3, 3.4, 3],
  # "marker": {
  #   "color": "rgb(44, 160, 44)",
  #   "line": {
  #     "color": "rgb(255, 255, 255)",
  #     "width": 0.5
  #   },
  #   "opacity": 0.74,
  #   "size": 8
  # },
  # "mode": "markers",
  # "name": "virginica",
  # "type": "scatter",
  # "uid": "e00102",
  "xaxis": "x",
  "yaxis": "y2"
}
trace22 = {
  "x": [0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2],
  "y": [5.1, 4.9, 4.7, 4.6, 5, 5.4, 4.6, 5, 4.4, 4.9, 5.4, 4.8, 4.8, 4.3, 5.8, 5.7, 5.4, 5.1, 5.7, 5.1, 5.4, 5.1, 4.6, 5.1, 4.8, 5, 5, 5.2, 5.2, 4.7, 4.8, 5.4, 5.2, 5.5, 4.9, 5, 5.5, 4.9, 4.4, 5.1, 5, 4.5, 4.4, 5, 5.1, 4.8, 5.1, 4.6, 5.3, 5],
  # "marker": {
  #   "color": "rgb(31, 119, 180)",
  #   "line": {
  #     "color": "rgb(255, 255, 255)",
  #     "width": 0.5
  #   },
  #   "opacity": 0.74,
  #   "size": 8
  # },
  # "mode": "markers",
  # "name": "setosa",
  # "type": "scatter",
  # "uid": "4e091a",
  "xaxis": "x",
  "yaxis": "y3"
}
trace23 = {
  "x": [1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1, 1.3, 1.4, 1, 1.5, 1, 1.4, 1.3, 1.4, 1.5, 1, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1, 1.1, 1, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3],
  "y": [7, 6.4, 6.9, 5.5, 6.5, 5.7, 6.3, 4.9, 6.6, 5.2, 5, 5.9, 6, 6.1, 5.6, 6.7, 5.6, 5.8, 6.2, 5.6, 5.9, 6.1, 6.3, 6.1, 6.4, 6.6, 6.8, 6.7, 6, 5.7, 5.5, 5.5, 5.8, 6, 5.4, 6, 6.7, 6.3, 5.6, 5.5, 5.5, 6.1, 5.8, 5, 5.6, 5.7, 5.7, 6.2, 5.1, 5.7],
  # "marker": {
  #   "color": "rgb(255, 127, 14)",
  #   "line": {
  #     "color": "rgb(255, 255, 255)",
  #     "width": 0.5
  #   },
  #   "opacity": 0.74,
  #   "size": 8
  # },
  # "mode": "markers",
  # "name": "versicolor",
  # "type": "scatter",
  # "uid": "aad08e",
  "xaxis": "x",
  "yaxis": "y3"
}
trace24 = {
  "x": [2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2, 1.9, 2.1, 2, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2, 2, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2, 2.3, 1.8],
  "y": [6.3, 5.8, 7.1, 6.3, 6.5, 7.6, 4.9, 7.3, 6.7, 7.2, 6.5, 6.4, 6.8, 5.7, 5.8, 6.4, 6.5, 7.7, 7.7, 6, 6.9, 5.6, 7.7, 6.3, 6.7, 7.2, 6.2, 6.1, 6.4, 7.2, 7.4, 7.9, 6.4, 6.3, 6.1, 7.7, 6.3, 6.4, 6, 6.9, 6.7, 6.9, 5.8, 6.8, 6.7, 6.7, 6.3, 6.5, 6.2, 5.9],
  # "marker": {
  #   "color": "rgb(44, 160, 44)",
  #   "line": {
  #     "color": "rgb(255, 255, 255)",
  #     "width": 0.5
  #   },
  #   "opacity": 0.74,
  #   "size": 8
  # },
  # "mode": "markers",
  # "name": "virginica",
  # "type": "scatter",
  # "uid": "a45457",
  "xaxis": "x",
  "yaxis": "y3"
}
trace25 = {
  "x": [3.5, 3, 3.2, 3.1, 3.6, 3.9, 3.4, 3.4, 2.9, 3.1, 3.7, 3.4, 3, 3, 4, 4.4, 3.9, 3.5, 3.8, 3.8, 3.4, 3.7, 3.6, 3.3, 3.4, 3, 3.4, 3.5, 3.4, 3.2, 3.1, 3.4, 4.1, 4.2, 3.1, 3.2, 3.5, 3.6, 3, 3.4, 3.5, 2.3, 3.2, 3.5, 3.8, 3, 3.8, 3.2, 3.7, 3.3],
  "y": [1.4, 1.4, 1.3, 1.5, 1.4, 1.7, 1.4, 1.5, 1.4, 1.5, 1.5, 1.6, 1.4, 1.1, 1.2, 1.5, 1.3, 1.4, 1.7, 1.5, 1.7, 1.5, 1, 1.7, 1.9, 1.6, 1.6, 1.5, 1.4, 1.6, 1.6, 1.5, 1.5, 1.4, 1.5, 1.2, 1.3, 1.4, 1.3, 1.5, 1.3, 1.3, 1.3, 1.6, 1.9, 1.4, 1.6, 1.4, 1.5, 1.4],
  # "marker": {
  #   "color": "rgb(31, 119, 180)",
  #   "line": {
  #     "color": "rgb(255, 255, 255)",
  #     "width": 0.5
  #   },
  #   "opacity": 0.74,
  #   "size": 8
  # },
  # "mode": "markers",
  # "name": "setosa",
  # "type": "scatter",
  # "uid": "431578",
  "xaxis": "x2",
  "yaxis": "y"
}
trace26 = {
"x": [3.2, 3.2, 3.1, 2.3, 2.8, 2.8, 3.3, 2.4, 2.9, 2.7, 2, 3, 2.2, 2.9, 2.9, 3.1, 3, 2.7, 2.2, 2.5, | |
from time import time
import autograd.numpy as np
import autograd.numpy.random as npr
import scipy
import ssm
def test_sample(T=10, K=4, D=3, M=2):
    """
    Test that we can construct and sample an HMM for every combination of
    transition and observation model, with and without a sampling prefix,
    exogenous inputs, and observation noise.
    """
    transition_names = [
        "standard",
        "sticky",
        "constrained",
        "inputdriven",
        "recurrent",
        "recurrent_only",
        "rbf_recurrent",
        "nn_recurrent"
    ]
    observation_names = [
        "gaussian",
        "diagonal_gaussian",
        "t",
        "diagonal_t",
        "exponential",
        "bernoulli",
        "categorical",
        "poisson",
        "vonmises",
        "ar",
        "no_input_ar",
        "diagonal_ar",
        "independent_ar",
        "robust_ar",
        "no_input_robust_ar",
        "diagonal_robust_ar"
    ]
    def each_hmm(num_inputs):
        # yield a freshly constructed HMM for every model combination
        for trans in transition_names:
            for obs in observation_names:
                yield ssm.HMM(K, D, M=num_inputs,
                              transitions=trans, observations=obs)
    # Sample basic (no prefix, inputs, etc.)
    for hmm in each_hmm(0):
        hmm.sample(T)
    # Sample with prefix
    for hmm in each_hmm(0):
        zpre, xpre = hmm.sample(3)
        hmm.sample(T, prefix=(zpre, xpre))
    # Sample with inputs
    for hmm in each_hmm(M):
        zpre, xpre = hmm.sample(3, input=npr.randn(3, M))
        hmm.sample(T, prefix=(zpre, xpre), input=npr.randn(T, M))
    # Sample without noise
    for hmm in each_hmm(M):
        zpre, xpre = hmm.sample(3, input=npr.randn(3, M))
        hmm.sample(T, prefix=(zpre, xpre), input=npr.randn(T, M),
                   with_noise=False)
def test_constrained_hmm(T=100, K=3, D=3):
    """Fit an HMM with a constrained transition matrix and check that
    masked-out transitions stay at exactly zero probability."""
    hmm = ssm.HMM(K, D, M=0,
                  transitions="constrained",
                  observations="gaussian")
    z, x = hmm.sample(T)
    # disallow transitions wherever the mask is False
    transition_mask = np.array([
        [1, 0, 1],
        [1, 0, 0],
        [1, 0, 1],
    ]).astype(bool)
    transition_kwargs = dict(
        transition_mask=transition_mask
    )
    # (an unused randomly-initialized `init_Ps` matrix was removed here)
    fit_hmm = ssm.HMM(K, D, M=0,
                      transitions="constrained",
                      observations="gaussian",
                      transition_kwargs=transition_kwargs)
    fit_hmm.fit(x)
    learned_Ps = fit_hmm.transitions.transition_matrix
    assert np.all(learned_Ps[~transition_mask] == 0)
def test_hmm_likelihood(T=1000, K=5, D=2):
    """Compare ssm's HMM log-probability with pyhsmm's log-likelihood on
    data sampled from a known Gaussian-emission HMM."""
    # Ground-truth parameters: sticky transitions, Gaussian emissions
    trans = npr.rand(K, K)
    trans /= trans.sum(axis=1, keepdims=True)
    trans = 0.75 * np.eye(K) + 0.25 * trans
    means = npr.randn(K, D)
    noise_var = 0.01
    # Roll out states and observations from the true model
    states = np.zeros(T, dtype=int)
    obs = np.zeros((T, D))
    for t in range(T):
        if t > 0:
            states[t] = np.random.choice(K, p=trans[states[t - 1]])
        obs[t] = means[states[t]] + np.sqrt(noise_var) * npr.randn(D)
    # Reference likelihood from pyhsmm
    from pyhsmm.models import HMM as OldHMM
    from pybasicbayes.distributions import Gaussian
    oldhmm = OldHMM([Gaussian(mu=means[k], sigma=noise_var * np.eye(D))
                     for k in range(K)],
                    trans_matrix=trans,
                    init_state_distn="uniform")
    true_lkhd = oldhmm.log_likelihood(obs)
    # Build the equivalent ssm HMM and compare
    hmm = ssm.HMM(K, D, observations="diagonal_gaussian")
    hmm.transitions.log_Ps = np.log(trans)
    hmm.observations.mus = means
    hmm.observations.sigmasq = noise_var * np.ones((K, D))
    test_lkhd = hmm.log_probability(obs)
    assert np.allclose(true_lkhd, test_lkhd)
def test_big_hmm_likelihood(T=50000, K=50, D=50):
    # same check as test_hmm_likelihood, just at a much larger scale
    test_hmm_likelihood(T=T, K=K, D=D)
def test_expectations(T=1000, K=20, D=2):
    """Compare ssm's posterior state expectations and expected transition
    counts with pyhsmm's E-step on the same sampled data."""
    # Ground-truth HMM: sticky transitions, Gaussian emissions
    trans = npr.rand(K, K)
    trans /= trans.sum(axis=1, keepdims=True)
    trans = 0.75 * np.eye(K) + 0.25 * trans
    means = npr.randn(K, D)
    noise_var = 0.01
    # Sample a trajectory from the true model
    states = np.zeros(T, dtype=int)
    obs = np.zeros((T, D))
    for t in range(T):
        if t > 0:
            states[t] = np.random.choice(K, p=trans[states[t - 1]])
        obs[t] = means[states[t]] + np.sqrt(noise_var) * npr.randn(D)
    # Reference expectations from pyhsmm's E-step
    from pyhsmm.models import HMM as OldHMM
    from pyhsmm.basic.distributions import Gaussian
    oldhmm = OldHMM([Gaussian(mu=means[k], sigma=noise_var * np.eye(D))
                     for k in range(K)],
                    trans_matrix=trans,
                    init_state_distn="uniform")
    oldhmm.add_data(obs)
    ref = oldhmm.states_list.pop()
    ref.E_step()
    true_Ez = ref.expected_states
    true_E_trans = ref.expected_transcounts
    # Same posterior quantities from ssm
    hmm = ssm.HMM(K, D, observations="diagonal_gaussian")
    hmm.transitions.log_Ps = np.log(trans)
    hmm.observations.mus = means
    hmm.observations.sigmasq = noise_var * np.ones((K, D))
    test_Ez, test_Ezzp1, _ = hmm.expected_states(obs)
    test_E_trans = test_Ezzp1.sum(0)
    print(true_E_trans.round(3))
    print(test_E_trans.round(3))
    assert np.allclose(true_Ez, test_Ez)
    assert np.allclose(true_E_trans, test_E_trans)
def test_viterbi(T=1000, K=20, D=2):
    """Check that ssm's most_likely_states matches pyhsmm's Viterbi path."""
    # Ground-truth HMM: sticky transitions, Gaussian emissions
    trans = npr.rand(K, K)
    trans /= trans.sum(axis=1, keepdims=True)
    trans = 0.75 * np.eye(K) + 0.25 * trans
    means = npr.randn(K, D)
    noise_var = 0.01
    # Sample a trajectory from the true model
    states = np.zeros(T, dtype=int)
    obs = np.zeros((T, D))
    for t in range(T):
        if t > 0:
            states[t] = np.random.choice(K, p=trans[states[t - 1]])
        obs[t] = means[states[t]] + np.sqrt(noise_var) * npr.randn(D)
    # Reference Viterbi path from pyhsmm
    from pyhsmm.models import HMM as OldHMM
    from pyhsmm.basic.distributions import Gaussian
    oldhmm = OldHMM([Gaussian(mu=means[k], sigma=noise_var * np.eye(D))
                     for k in range(K)],
                    trans_matrix=trans,
                    init_state_distn="uniform")
    oldhmm.add_data(obs)
    ref = oldhmm.states_list.pop()
    ref.Viterbi()
    z_star = ref.stateseq
    # Same decoding from ssm
    hmm = ssm.HMM(K, D, observations="diagonal_gaussian")
    hmm.transitions.log_Ps = np.log(trans)
    hmm.observations.mus = means
    hmm.observations.sigmasq = noise_var * np.ones((K, D))
    z_star2 = hmm.most_likely_states(obs)
    assert np.allclose(z_star, z_star2)
def test_hmm_mp_perf(T=10000, K=100, D=20):
    """Benchmark ssm's forward/backward message passing against pyhsmm's
    and assert that both produce the same messages.

    Note: D is accepted for signature symmetry with the other perf tests
    but is unused here.
    """
    # Make parameters
    pi0 = np.ones(K) / K
    Ps = npr.rand(T-1, K, K)
    Ps /= Ps.sum(axis=2, keepdims=True)
    ll = npr.randn(T, K)
    out1 = np.zeros((T, K))
    out2 = np.zeros((T, K))
    # Run the PyHSMM message passing code
    from pyhsmm.internals.hmm_messages_interface import messages_forwards_log, messages_backwards_log
    tic = time()
    messages_forwards_log(Ps, ll, pi0, out1)
    pyhsmm_dt = time() - tic
    print("PyHSMM Fwd: ", pyhsmm_dt, "sec")
    # Run the SSM message passing code
    from ssm.messages import forward_pass, backward_pass
    forward_pass(pi0, Ps, ll, out2) # Call once to compile, then time it
    tic = time()
    forward_pass(pi0, Ps, ll, out2)
    smm_dt = time() - tic
    print("SMM Fwd: ", smm_dt, "sec")
    assert np.allclose(out1, out2)
    # Backward pass
    tic = time()
    messages_backwards_log(Ps, ll, out1)
    pyhsmm_dt = time() - tic
    print("PyHSMM Bwd: ", pyhsmm_dt, "sec")
    backward_pass(Ps, ll, out2) # Call once to compile, then time it
    tic = time()
    backward_pass(Ps, ll, out2)
    smm_dt = time() - tic
    print("SMM (Numba) Bwd: ", smm_dt, "sec")
    assert np.allclose(out1, out2)
def test_hmm_likelihood_perf(T=10000, K=50, D=20):
    """Benchmark log-likelihood evaluation: pyhsmm vs ssm's HMM, plus
    timing of ssm's ARHMM likelihood and expectations on the same data."""
    # Create a true HMM
    A = npr.rand(K, K)
    A /= A.sum(axis=1, keepdims=True)
    A = 0.75 * np.eye(K) + 0.25 * A
    C = npr.randn(K, D)
    sigma = 0.01
    # Sample from the true HMM
    z = np.zeros(T, dtype=int)
    y = np.zeros((T, D))
    for t in range(T):
        if t > 0:
            z[t] = np.random.choice(K, p=A[z[t-1]])
        y[t] = C[z[t]] + np.sqrt(sigma) * npr.randn(D)
    # Compare to pyhsmm answer
    from pyhsmm.models import HMM as OldHMM
    from pybasicbayes.distributions import Gaussian
    oldhmm = OldHMM([Gaussian(mu=C[k], sigma=sigma * np.eye(D)) for k in range(K)],
                    trans_matrix=A,
                    init_state_distn="uniform")
    states = oldhmm.add_data(y)
    tic = time()
    true_lkhd = states.log_likelihood()
    pyhsmm_dt = time() - tic
    print("PyHSMM: ", pyhsmm_dt, "sec. Val: ", true_lkhd)
    # Make an HMM with these parameters
    hmm = ssm.HMM(K, D, observations="gaussian")
    hmm.transitions.log_Ps = np.log(A)
    hmm.observations.mus = C
    hmm.observations._sqrt_Sigmas = np.sqrt(sigma) * np.array([np.eye(D) for k in range(K)])
    tic = time()
    test_lkhd = hmm.log_probability(y)
    smm_dt = time() - tic
    print("SMM HMM: ", smm_dt, "sec. Val: ", test_lkhd)
    # Time an ARHMM's log probability on the same data
    arhmm = ssm.HMM(K, D, observations="ar")
    tic = time()
    arhmm.log_probability(y)
    arhmm_dt = time() - tic
    print("SSM ARHMM: ", arhmm_dt, "sec.")
    # Time a fresh ARHMM's posterior expectations as well
    arhmm = ssm.HMM(K, D, observations="ar")
    tic = time()
    arhmm.expected_states(y)
    arhmm_dt = time() - tic
    print("SSM ARHMM Expectations: ", arhmm_dt, "sec.")
def test_trace_product():
    """Check ssm.util.trace_product against np.trace of explicit products."""
    # Batched case: trace over the trailing two axes of each product.
    A = np.random.randn(100, 50, 10)
    B = np.random.randn(100, 10, 50)
    assert np.allclose(ssm.util.trace_product(A, B),
                       np.trace(A @ B, axis1=1, axis2=2))

    # Unbatched rectangular and degenerate 1x1 cases.
    for m, n in [(50, 10), (1, 1)]:
        A = np.random.randn(m, n)
        B = np.random.randn(n, m)
        assert np.allclose(ssm.util.trace_product(A, B),
                           np.trace(A @ B))
def test_SLDSStructuredMeanField_entropy():
"""Test correctness of the entropy calculation for the
SLDSStructuredMeanFieldVariationalPosterior class.
"""
def entropy_mv_gaussian(J, h):
mu = np.linalg.solve(J, h)
sigma = np.linalg.inv(J)
mv_normal = scipy.stats.multivariate_normal(mu, sigma)
return mv_normal.entropy()
def make_lds_parameters(T, D, N, U):
m0 = np.zeros(D)
S0 = np.eye(D)
As = 0.99 * np.eye(D)
Bs = np.zeros((D, U))
Qs = 0.1 * np.eye(D)
Cs = npr.randn(N, D)
Ds = np.zeros((N, U))
Rs = 0.1 * np.eye(N)
us = np.zeros((T, U))
ys = np.sin(2 * np.pi * np.arange(T) / 50)[:, None] * npr.randn(1, N) + 0.1 * npr.randn(T, N)
return m0, S0, As, Bs, Qs, Cs, Ds, Rs, us, ys
def cumsum(v,strict=False):
if not strict:
return np.cumsum(v,axis=0)
else:
out = np.zeros_like(v)
out[1:] = np.cumsum(v[:-1],axis=0)
return out
def bmat(blocks):
rowsizes = [row[0].shape[0] for row in blocks]
colsizes = [col[0].shape[1] for col in zip(*blocks)]
rowstarts = cumsum(rowsizes,strict=True)
colstarts = cumsum(colsizes,strict=True)
nrows, ncols = sum(rowsizes), sum(colsizes)
out = np.zeros((nrows,ncols))
for i, (rstart, rsz) in enumerate(zip(rowstarts, rowsizes)):
for j, (cstart, csz) in enumerate(zip(colstarts, colsizes)):
out[rstart:rstart+rsz,cstart:cstart+csz] = blocks[i][j]
return out
def lds_to_dense_infoparams(params):
m0, S0, As, Bs, Qs, Cs, Ds, Rs, us, ys = params
mu_init = m0
sigma_init = S0
A, B, sigma_states = As, Bs, Qs
C, D, sigma_obs = Cs, Ds, Rs
data = ys
inputs = us
# Copied from PYLDS tests/test_dense.py
T, n = data.shape[0], D.shape[0]
# mu_init, sigma_init = | |
#!/usr/bin/env python
from math import copysign
import rospy, cv2, cv_bridge, numpy
from tf.transformations import decompose_matrix, euler_from_quaternion
from sensor_msgs.msg import Image
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from ros_numpy import numpify
import numpy as np
from kobuki_msgs.msg import Led
from kobuki_msgs.msg import Sound
import smach
import smach_ros
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import PoseWithCovarianceStamped
from ar_track_alvar_msgs.msg import AlvarMarkers
import tf
from sensor_msgs.msg import Joy
from nav_msgs.srv import SetMap
from nav_msgs.msg import OccupancyGrid
# ---- Line-follower PID state ----
integral = 0
previous_error = 0
# ---- Odometry-derived pose estimate ----
cur_pos = [0, 0]
cur_heading = 0
# Twist currently commanded by the image-processing callback.
current_twist = Twist()
# Three flag to check touching the line or not
stop_line_flag = False
flag_line_flag = False
backing_flag = False
# ---- Publishers for base motion, LEDs and sounds ----
twist_pub = rospy.Publisher("/cmd_vel_mux/input/teleop", Twist, queue_size=1)
led_pub_1 = rospy.Publisher('/mobile_base/commands/led1', Led, queue_size=1)
led_pub_2 = rospy.Publisher('/mobile_base/commands/led2', Led, queue_size=1)
sound_pub = rospy.Publisher('/mobile_base/commands/sound', Sound, queue_size=1)
# ---- Motion limits and turn timing ----
max_rotate_vel = 0.5
max_linear_vel = 0.25
# Time-scaled quantity used with max_rotate_vel to produce ~90 degree
# turns (empirically tuned; not literal radians).
degree_ninty = 4.0 / 2
# Detection counters for locations 1 and 2.
counter_loc1 = 0
counter_loc2 = 0
# Which course location the robot is currently working on.
location_index = 1
#
# ---- Object shape codes ----
TRIANGLE = 1
CIRCLE = 2
RECTANGLE = 3
object_type = CIRCLE
current_type = 0
# checking object or not
isChecking = False
# moving back from loc2 or not
is_loc2_backing = False
# the stop times of location 3
loc3_stop_time = 2
# the index of checked object of location 3
loc3_step_index = 1
# to change the size of window, so that it can see the vertical red line
is_finishing_loc2 = False
time_after_stop = 2
# flag of moving forward
moving_after_stop_flag = False
# --------------new global variable-----------------
# some of the waypoints
is_selecting_second_line = False # stay on line to go to the location 4
is_end_of_line = False # stop on the end of the line before go to location 4
# move_base action client (created during startup).
client = None
# Pose as [(x, y, z), (qx, qy, qz, qw)].
current_pose = [(0, 0, 0), (0, 0, 0, 0)]
linear_velocity = 0.15
rotate_velocity = 0.43
# AR tag id currently being tracked.
current_id = 2
target_pose = None
search_origin = [(0, 0, 0), (0, 0, 0, 0)]
# ---- Servoing thresholds and gains for approaching an AR tag ----
y_threshold = 0.05
x_threshold = 0.05
y_scale = 0.2
x_scale = 0.5
max_angular_speed = 0.4
min_angular_speed = 0.2
max_linear_speed = 0.3
min_linear_speed = 0.1
goal_x = 0.9
long_goal = None
found = False
unmarked_location = None
loc4_task_id = 1
amcl_pose = None
# Map-frame waypoints as ((position), (orientation quaternion)).
waypoints = [[(4.03520061705, -1.38057946519, 0.0), (0.0, 0.0, -0.641725485096, 0.766934418173)], # 1
             [(3.36223502017, -1.54772256866, 0.0), (0.0, 0.0, -0.68204527927, 0.731309945937)], # 2
             [(2.6380713061, -1.69560516301, 0.0), (0.0, 0.0, -0.667204765293, 0.744874352606)], # 3
             [(1.89158623165, -1.81141808092, 0.0), (0.0, 0.0, -0.67349647024, 0.739190438639)], # 4
             [(1.25000321697, -2.02173721367, 0.0), (0.0, 0.0, -0.628555672845, 0.777764595578)], # 5
             [(3.09692334536, -0.53059310309, 0.0), (0.0, 0.0, 0.772863523771, 0.634572276123)], # 6
             [(2.20892687623, -0.440633218432, 0.0), (0.0, 0.0, 0.76868255071, 0.6396304685)], # 7
             [(1.11810920579, -1.11707101113, 0.0), (0.0, 0.0, -0.992131524061, 0.125199995865)]] # 8
line_wayspoints = [[(0.809008239523, 0.0995233932844, 0.0), (0.0, 0.0, -0.107391319377, 0.994216829732)],
                   [(1.19946615995, -0.0487476900574, 0.0), (0.0, 0.0, -0.488053527053, 0.872813699899)]]
center_waypoints = [(2.30430805763, -1.27184294231, 0.0), (0.0, 0.0, 0.0715265400681, 0.997438696896)]
off_ramp_exit_pose = [(3.7551093735, 0.792378819378, 0.0), (0.0, 0.0, 0.823169254763, 0.567796070798)]
ar_sub = None
# new flag for loc2
# To show whether it is moving toward the object
is_moving_loc2 = False
# To show whether it is arrived at the end of the line
is_end_loc2 = False
# location 1 states
class moving_forward(smach.State):
    """Follow the line until a stop line or flag line event is raised.

    Outcomes:
        'stop'      -- the vision callback set stop_line_flag; robot halted.
        'flag_line' -- the vision callback set flag_line_flag; timer reset.
        'moving'    -- no event; keep publishing the follower twist.
    """
    def __init__(self):
        smach.State.__init__(self, outcomes=['stop', 'flag_line', 'moving'],
                             input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'stop_flag', 'flag_flag'],
                             output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'stop_flag', 'flag_flag'])
    def execute(self, userdata):
        global stop_line_flag, flag_line_flag, moving_after_stop_flag
        if stop_line_flag == True:
            # Stop line seen: clear both flags, record the time, halt the base.
            stop_line_flag = False
            userdata.cur_time = rospy.Time.now()
            twist_pub.publish(Twist())
            flag_line_flag = False
            return 'stop'
        elif flag_line_flag == True:
            # Flag line seen: record the time but do not stop the base here.
            flag_line_flag = False
            userdata.cur_time = rospy.Time.now()
            return 'flag_line'
        # if userdata.cur_time + rospy.Duration(1) < rospy.Time.now():
        #
        #     userdata.cur_time = rospy.Time.now()
        #     userdata.flag_line_flag = cur_heading
        #     temp_twist = Twist()
        #     temp_twist.linear.x = 0
        #     temp_twist.angular.z = max_rotate_vel
        #     twist_pub.publish(temp_twist)
        #     return 'flag_line'
        # else:
        #     twist_pub.publish(current_twist)
        #     return 'moving'
        else:
            # No line events: keep following with the twist computed by the
            # image callback.
            twist_pub.publish(current_twist)
            return 'moving'
class stop(smach.State):
    """Hold the robot still for three seconds, then resume line following."""

    def __init__(self):
        smach.State.__init__(self, outcomes=['keep', 'recover'],
                             input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'],
                             output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'])

    def execute(self, userdata):
        global flag_line_flag, loc3_stop_time
        still_waiting = userdata.cur_time + rospy.Duration(3) > rospy.Time.now()
        if still_waiting:
            # Keep publishing a zero twist so the base stays stopped.
            twist_pub.publish(Twist())
            flag_line_flag = False
            return 'keep'
        # Three seconds elapsed: resume the line-following twist.
        twist_pub.publish(current_twist)
        userdata.cur_time = rospy.Time.now()
        flag_line_flag = False
        if location_index == 3:
            # Each stop at location 3 consumes one of its allotted stops.
            loc3_stop_time -= 1
        return 'recover'
class moving_after_stop(smach.State):
    """Drive forward for two seconds after a stop, ignoring line events.

    Outcomes:
        'moving'     -- still within the two-second window.
        'stop'       -- window elapsed; return to normal line following.
        'loc4_start' -- window elapsed and location 3 has one stop left;
                        begin the location 4 sequence.
    """
    def __init__(self):
        smach.State.__init__(self, outcomes=['moving', 'stop', 'loc4_start'],
                             input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'],
                             output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'])
    def execute(self, userdata):
        global stop_line_flag, flag_line_flag, is_selecting_second_line
        if userdata.cur_time + rospy.Duration(2.0) > rospy.Time.now():
            # Keep driving; suppress any line detections during the window.
            twist_pub.publish(current_twist)
            stop_line_flag = False
            flag_line_flag = False
            return 'moving'
        else:
            twist_pub.publish(current_twist)
            userdata.cur_time = rospy.Time.now()
            stop_line_flag = False
            flag_line_flag = False
            if loc3_stop_time == 1:
                # One stop remaining at location 3: switch to the line that
                # leads to location 4.
                is_selecting_second_line = True
                return 'loc4_start'
            return 'stop'
class turning_left(smach.State):
    """Advance briefly, turn ~90 degrees left, then stop to check objects.

    The outcome depends on location_index so each location's checking
    state can be entered after the turn.
    """
    def __init__(self):
        smach.State.__init__(self,
                             outcomes=['moving_a_bit', 'left_turning', 'stop_turning_loc1', 'stop_turning_loc2',
                                       'stop_turning_loc3'],
                             input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'],
                             output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'])
    def execute(self, userdata):
        global flag_line_flag, isChecking, is_moving_loc2
        if userdata.cur_time + rospy.Duration(1.0) > rospy.Time.now():
            # Phase 1 (first second): keep driving forward a little.
            twist_pub.publish(current_twist)
            flag_line_flag = False
            return 'moving_a_bit'
        elif userdata.cur_time + rospy.Duration(degree_ninty / max_rotate_vel + 1.0) > rospy.Time.now():
            # Phase 2: rotate counter-clockwise for the ~90 degree turn time.
            temp_twist = Twist()
            temp_twist.linear.x = 0
            temp_twist.angular.z = max_rotate_vel
            twist_pub.publish(temp_twist)
            flag_line_flag = False
            return 'left_turning'
        else:
            # Phase 3: turn complete; halt and dispatch by location.
            if location_index == 1:
                twist_pub.publish(Twist())
                userdata.cur_time = rospy.Time.now()
                return 'stop_turning_loc1'
            elif location_index == 2:
                twist_pub.publish(Twist())
                userdata.cur_time = rospy.Time.now()
                isChecking = True
                is_moving_loc2 = True
                return 'stop_turning_loc2'
            else:
                twist_pub.publish(Twist())
                userdata.cur_time = rospy.Time.now()
                isChecking = True
                return 'stop_turning_loc3'
class checking_object_loc1(smach.State):
    """Wait for the vision pipeline to count objects at location 1.

    Spins back (clockwise) once a count arrives or after a 10 s timeout,
    and signals the detected count with LEDs and beeps:
        3+ objects -> both LEDs green, three beeps
        2 objects  -> LED1 green, two beeps
        1 object   -> LED2 green, one beep
    """
    def __init__(self):
        smach.State.__init__(self, outcomes=['overtime', 'get_sth', 'not_get_sth'],
                             input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'],
                             output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'])

    def _start_turning_back(self, userdata):
        """Publish a clockwise spin and reset the state timer."""
        temp_twist = Twist()
        temp_twist.linear.x = 0
        temp_twist.angular.z = -max_rotate_vel
        twist_pub.publish(temp_twist)
        userdata.cur_time = rospy.Time.now()

    def execute(self, userdata):
        global counter_loc1, isChecking
        if userdata.cur_time + rospy.Duration(10) < rospy.Time.now():
            # Gave up after 10 seconds without a detection.
            self._start_turning_back(userdata)
            isChecking = False
            return 'overtime'
        elif counter_loc1 > 0:
            print(counter_loc1)
            self._start_turning_back(userdata)
            # Signal the detected count with LEDs and beeps.
            if counter_loc1 > 2:
                led_pub_1.publish(Led.GREEN)
                led_pub_2.publish(Led.GREEN)
                for i in range(3):
                    sound_pub.publish(0)
                    rospy.sleep(1)
            elif counter_loc1 == 2:
                led_pub_1.publish(Led.GREEN)
                sound_pub.publish(0)
                rospy.sleep(1)
                sound_pub.publish(0)
            else:  # exactly one object
                led_pub_2.publish(Led.GREEN)
                sound_pub.publish(0)
            counter_loc1 = 0
            isChecking = False
            return 'get_sth'
        else:
            # Nothing counted yet: keep the vision pipeline checking.
            isChecking = True
            return 'not_get_sth'
class turning_back(smach.State):
    """Spin clockwise back toward the line, then advance to the next location."""

    def __init__(self):
        smach.State.__init__(self, outcomes=['back_turning', 'stop_back'],
                             input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'],
                             output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'])

    def execute(self, userdata):
        global location_index, isChecking, stop_line_flag, flag_line_flag
        turn_time = rospy.Duration((degree_ninty + 0.2) / max_rotate_vel)
        if userdata.cur_time + turn_time > rospy.Time.now():
            # Still rotating: command a clockwise spin.
            spin = Twist()
            spin.linear.x = 0
            spin.angular.z = -max_rotate_vel
            twist_pub.publish(spin)
            isChecking = False
            return 'back_turning'
        # Rotation complete: halt, reset flags and move on to the next location.
        twist_pub.publish(Twist())
        userdata.cur_time = rospy.Time.now()
        location_index += 1
        isChecking = False
        stop_line_flag = False
        flag_line_flag = False
        return 'stop_back'
# location 2 states
class moving_loc2(smach.State):
    """Drive along location 2; once the end is reached with a count,
    signal it and creep forward briefly before stopping.

    LED/beep signalling: 1 object -> LED2 orange, one beep; 2 objects ->
    LED1 orange, two beeps; 3+ -> both LEDs orange, three beeps.
    """
    def __init__(self):
        smach.State.__init__(self, outcomes=['stop', 'moving'],
                             input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'],
                             output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'])
    def execute(self, userdata):
        global isChecking, is_end_loc2
        if counter_loc2 > 0 and is_end_loc2:
            # End of the loc2 line reached with a detection count: signal it.
            # NOTE(review): this block re-runs every tick until the 0.5 s
            # creep below expires, so the LEDs/sounds may publish repeatedly.
            if counter_loc2 == 1:
                led_pub_2.publish(Led.ORANGE)
                sound_pub.publish(0)
            elif counter_loc2 == 2:
                led_pub_1.publish(Led.ORANGE)
                sound_pub.publish(0)
                sound_pub.publish(0)
            else:
                led_pub_1.publish(Led.ORANGE)
                led_pub_2.publish(Led.ORANGE)
                sound_pub.publish(0)
                sound_pub.publish(0)
                sound_pub.publish(0)
            if userdata.cur_time + rospy.Duration(0.5) > rospy.Time.now():
                # Creep straight ahead for half a second.
                temp_twist = Twist()
                temp_twist.linear.x = max_linear_vel
                temp_twist.angular.z = 0
                twist_pub.publish(temp_twist)
                return 'moving'
            else:
                # Creep finished: halt and clear the end-of-line latch.
                twist_pub.publish(Twist())
                isChecking = False
                userdata.cur_time = rospy.Time.now()
                is_end_loc2 = False
                return 'stop'
        else:
            # Normal driving along the line.
            twist_pub.publish(current_twist)
            userdata.cur_time = rospy.Time.now()
            return 'moving'
class back_dirction(smach.State):
    """Rotate roughly 180 degrees so the robot faces back along the line.

    (Class name spelling kept as-is for state machine compatibility.)
    """

    def __init__(self):
        smach.State.__init__(self, outcomes=['stop', 'rotating'],
                             input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'],
                             output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'])

    def execute(self, userdata):
        global is_loc2_backing, is_finishing_loc2
        turn_time = rospy.Duration((degree_ninty * 2 - 0.8) / max_rotate_vel)
        if userdata.cur_time + turn_time > rospy.Time.now():
            # Still turning: command a counter-clockwise spin.
            spin = Twist()
            spin.linear.x = 0
            spin.angular.z = max_rotate_vel
            twist_pub.publish(spin)
            return 'rotating'
        # Turn complete: halt and flag that loc2 back-tracking has begun.
        twist_pub.publish(Twist())
        userdata.cur_time = rospy.Time.now()
        is_loc2_backing = True
        is_finishing_loc2 = True
        return 'stop'
class moving_back_loc2(smach.State):
    """Drive back along the loc2 line; once backing_flag is raised,
    push straight ahead for 2.15 s and then stop."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['stop', 'moving'],
                             input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'],
                             output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'])
    def execute(self, userdata):
        global backing_flag, is_loc2_backing, is_finishing_loc2
        if backing_flag:
            if userdata.cur_time + rospy.Duration(2.15) > rospy.Time.now():
                # Final straight push back toward the main line.
                temp_twist = Twist()
                temp_twist.linear.x = max_linear_vel
                temp_twist.angular.z = 0
                twist_pub.publish(temp_twist)
                is_finishing_loc2 = False
                return 'moving'
            else:
                # Push finished: halt and clear all loc2 backing state.
                twist_pub.publish(Twist())
                userdata.cur_time = rospy.Time.now()
                backing_flag = False
                is_loc2_backing = False
                is_finishing_loc2 = False
                return 'stop'
        else:
            # Still following the line back; keep the follower twist.
            twist_pub.publish(current_twist)
            userdata.cur_time = rospy.Time.now()
            return 'moving'
class finish_loc2(smach.State):
    """Final rotation leaving location 2, then hand over to location 3."""

    def __init__(self):
        smach.State.__init__(self, outcomes=['stop', 'rotating'],
                             input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'],
                             output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'])

    def execute(self, userdata):
        global location_index, flag_line_flag, loc3_stop_time
        turn_time = rospy.Duration((degree_ninty - 0.5) / max_rotate_vel)
        if userdata.cur_time + turn_time > rospy.Time.now():
            # Keep rotating counter-clockwise until the turn time elapses.
            spin = Twist()
            spin.linear.x = 0
            spin.angular.z = max_rotate_vel
            twist_pub.publish(spin)
            return 'rotating'
        # Turn finished: halt and initialise location-3 bookkeeping.
        twist_pub.publish(Twist())
        userdata.cur_time = rospy.Time.now()
        location_index = 3
        flag_line_flag = False
        loc3_stop_time = 2
        return 'stop'
# location 4 states
class moving_on_line(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['stop'],
input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'stop_flag', 'flag_flag'],
output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'stop_flag', 'flag_flag'])
def execute(self, userdata):
global stop_line_flag, flag_line_flag, moving_after_stop_flag, is_end_of_line, is_selecting_second_line, location_index
# send map and initial pose to reset amcl
new_map = rospy.wait_for_message("/map", OccupancyGrid)
# rospy.init_node('set_map_service')
rospy.wait_for_service('set_map')
set_map = rospy.ServiceProxy('set_map', SetMap)
pose_stamped = PoseWithCovarianceStamped()
pose_stamped.header.frame_id = 'map'
pose_stamped.header.stamp = rospy.Time.now()
pose_stamped.pose.pose.position.x = 0
pose_stamped.pose.pose.position.y = 0
pose_stamped.pose.pose.position.z = 0
pose_stamped.pose.pose.orientation.x = | |
<filename>mypy/semanal.py
"""The semantic analyzer.
Bind names to definitions and do various other simple consistency
checks. For example, consider this program:
x = 1
y = x
Here semantic analysis would detect that the assignment 'x = 1'
defines a new variable, the type of which is to be inferred (in a
later pass; type inference or type checking is not part of semantic
analysis). Also, it would bind both references to 'x' to the same
module-level variable node. The second assignment would also be
analyzed, and the type of 'y' marked as being inferred.
Semantic analysis is the first analysis pass after parsing, and it is
subdivided into three passes:
* FirstPass looks up externally visible names defined in a module but
ignores imports and local definitions. It helps enable (some)
cyclic references between modules, such as module 'a' that imports
  module 'b' and uses names defined in b *and* vice versa. The first
pass can be performed before dependent modules have been processed.
* SemanticAnalyzer is the second pass. It does the bulk of the work.
It assumes that dependent modules have been semantically analyzed,
  up to the second pass, unless there is an import cycle.
* ThirdPass checks that type argument counts are valid; for example,
it will reject Dict[int]. We don't do this in the second pass,
since we infer the type argument counts of classes during this
pass, and it is possible to refer to classes defined later in a
file, which would not have the type argument count set yet.
Semantic analysis of types is implemented in module mypy.typeanal.
TODO: Check if the third pass slows down type checking significantly.
We could probably get rid of it -- for example, we could collect all
analyzed types in a collection and check them without having to
traverse the entire AST.
"""
from typing import (
List, Dict, Set, Tuple, cast, Any, overload, TypeVar, Union, Optional
)
from mypy.nodes import (
MypyFile, TypeInfo, Node, AssignmentStmt, FuncDef, OverloadedFuncDef,
ClassDef, Var, GDEF, MODULE_REF, FuncItem, Import,
ImportFrom, ImportAll, Block, LDEF, NameExpr, MemberExpr,
IndexExpr, TupleExpr, ListExpr, ExpressionStmt, ReturnStmt,
RaiseStmt, YieldStmt, AssertStmt, OperatorAssignmentStmt, WhileStmt,
ForStmt, BreakStmt, ContinueStmt, IfStmt, TryStmt, WithStmt, DelStmt,
GlobalDecl, SuperExpr, DictExpr, CallExpr, RefExpr, OpExpr, UnaryExpr,
SliceExpr, CastExpr, TypeApplication, Context, SymbolTable,
SymbolTableNode, BOUND_TVAR, UNBOUND_TVAR, ListComprehension, GeneratorExpr,
FuncExpr, MDEF, FuncBase, Decorator, SetExpr, TypeVarExpr,
StrExpr, PrintStmt, ConditionalExpr, PromoteExpr,
ComparisonExpr, StarExpr, ARG_POS, ARG_NAMED, MroError, type_aliases,
YieldFromStmt, YieldFromExpr, NamedTupleExpr, NonlocalDecl,
SetComprehension, DictionaryComprehension, TYPE_ALIAS, TypeAliasExpr,
YieldExpr, ExecStmt, COVARIANT, CONTRAVARIANT, INVARIANT
)
from mypy.visitor import NodeVisitor
from mypy.traverser import TraverserVisitor
from mypy.errors import Errors
from mypy.types import (
NoneTyp, CallableType, Overloaded, Instance, Type, TypeVarType, AnyType,
FunctionLike, UnboundType, TypeList, ErrorType, TypeVarDef,
replace_leading_arg_type, TupleType, UnionType, StarType, EllipsisType
)
from mypy.nodes import function_type, implicit_module_attrs
from mypy.typeanal import TypeAnalyser, TypeAnalyserPass3, analyze_type_alias
from mypy.exprtotype import expr_to_unanalyzed_type, TypeTranslationError
from mypy.lex import lex
from mypy.parsetype import parse_type
from mypy.sametypes import is_same_type
from mypy import defaults
# Generic type variable used in signatures in this module.
T = TypeVar('T')
# Inferred value of an expression.
ALWAYS_TRUE = 0
ALWAYS_FALSE = 1
TRUTH_VALUE_UNKNOWN = 2
# Map from obsolete name to the current spelling.
obsolete_name_mapping = {
    'typing.Function': 'typing.Callable',
    'typing.typevar': 'typing.TypeVar',
}
# Hard coded type promotions (shared between all Python versions).
# These add extra ad-hoc edges to the subtyping relation. For example,
# int is considered a subtype of float, even though there is no
# subclass relationship.
TYPE_PROMOTIONS = {
    'builtins.int': 'builtins.float',
    'builtins.float': 'builtins.complex',
}
# Hard coded type promotions for Python 3.
#
# Note that the bytearray -> bytes promotion is a little unsafe
# as some functions only accept bytes objects. Here convenience
# trumps safety.
TYPE_PROMOTIONS_PYTHON3 = TYPE_PROMOTIONS.copy()
TYPE_PROMOTIONS_PYTHON3.update({
    'builtins.bytearray': 'builtins.bytes',
})
# Hard coded type promotions for Python 2.
#
# These promotions are unsafe, but we are doing them anyway
# for convenience and also for Python 3 compatibility
# (bytearray -> str).
TYPE_PROMOTIONS_PYTHON2 = TYPE_PROMOTIONS.copy()
TYPE_PROMOTIONS_PYTHON2.update({
    'builtins.str': 'builtins.unicode',
    'builtins.bytearray': 'builtins.str',
})
class SemanticAnalyzer(NodeVisitor):
    """Semantically analyze parsed mypy files.
    The analyzer binds names and does various consistency checks for a
    parse tree. Note that type checking is performed as a separate
    pass.
    This is the second phase of semantic analysis.
    """
    # Library search paths
    lib_path = None # type: List[str]
    # Module name space
    modules = None # type: Dict[str, MypyFile]
    # Global name space for current module
    globals = None # type: SymbolTable
    # Names declared using "global" (separate set for each scope)
    global_decls = None # type: List[Set[str]]
    # Names declared using "nonlocal" (separate set for each scope)
    nonlocal_decls = None # type: List[Set[str]]
    # Local names of function scopes; None for non-function scopes.
    locals = None # type: List[SymbolTable]
    # Nested block depths of scopes
    block_depth = None # type: List[int]
    # TypeInfo of directly enclosing class (or None)
    type = None # type: TypeInfo
    # Stack of outer classes.
    # NOTE(review): comment used to mention a tvars tuple item, but the
    # declared type is List[TypeInfo] — confirm which is current.
    type_stack = None # type: List[TypeInfo]
    # Type variables that are bound by the directly enclosing class
    bound_tvars = None # type: List[SymbolTableNode]
    # Stack of type variables that were bound by outer classes
    tvar_stack = None # type: List[List[SymbolTableNode]]
    # Stack of functions being analyzed
    function_stack = None # type: List[FuncItem]
    loop_depth = 0 # Depth of breakable loops
    cur_mod_id = '' # Current module id (or None) (phase 2)
    imports = None # type: Set[str] # Imported modules (during phase 2 analysis)
    errors = None # type: Errors # Keeps track of generated errors
def __init__(self, lib_path: List[str], errors: Errors,
pyversion: Tuple[int, int] = defaults.PYTHON3_VERSION) -> None:
"""Construct semantic analyzer.
Use lib_path to search for modules, and report analysis errors
using the Errors instance.
"""
self.locals = [None]
self.imports = set()
self.type = None
self.type_stack = []
self.bound_tvars = None
self.tvar_stack = []
self.function_stack = []
self.block_depth = [0]
self.loop_depth = 0
self.lib_path = lib_path
self.errors = errors
self.modules = {}
self.pyversion = pyversion
    def visit_file(self, file_node: MypyFile, fnam: str) -> None:
        """Run second-pass semantic analysis over one parsed file."""
        self.errors.set_file(fnam)
        self.errors.set_ignored_lines(file_node.ignored_lines)
        self.cur_mod_node = file_node
        self.cur_mod_id = file_node.fullname()
        # Stub files (.pyi) get slightly different treatment downstream.
        self.is_stub_file = fnam.lower().endswith('.pyi')
        self.globals = file_node.names
        if 'builtins' in self.modules:
            # Make __builtins__ resolvable as a module reference.
            self.globals['__builtins__'] = SymbolTableNode(
                MODULE_REF, self.modules['builtins'], self.cur_mod_id)
        # Analyze the types of implicit attributes such as __name__/__doc__.
        for name in implicit_module_attrs:
            v = self.globals[name].node
            if isinstance(v, Var):
                v.type = self.anal_type(v.type)
                v.is_ready = True
        # Analyze each top-level definition in order.
        defs = file_node.defs
        for d in defs:
            d.accept(self)
        if self.cur_mod_id == 'builtins':
            # builtins must not re-export names it merely imported.
            remove_imported_names_from_symtable(self.globals, 'builtins')
        self.errors.set_ignored_lines(set())
    def visit_func_def(self, defn: FuncDef) -> None:
        """Analyze a function definition: bind its name in the enclosing
        scope, make its type variables explicit, fix up the implicit
        first argument of methods, and analyze the body."""
        self.errors.push_function(defn.name())
        self.update_function_type_variables(defn)
        self.errors.pop_function()
        if self.is_class_scope():
            # Method definition
            defn.is_conditional = self.block_depth[-1] > 0
            defn.info = self.type
            if not defn.is_decorated:
                if not defn.is_overload:
                    if defn.name() in self.type.names:
                        # Redefinition is only allowed when both defs are
                        # conditional (e.g. under if/else branches).
                        n = self.type.names[defn.name()].node
                        if self.is_conditional_func(n, defn):
                            defn.original_def = cast(FuncDef, n)
                        else:
                            self.name_already_defined(defn.name(), defn)
                    self.type.names[defn.name()] = SymbolTableNode(MDEF, defn)
            if not defn.is_static:
                if not defn.args:
                    self.fail('Method must have at least one argument', defn)
                elif defn.type:
                    # Give the implicit first argument (self/cls) its type.
                    sig = cast(FunctionLike, defn.type)
                    # TODO: A classmethod's first argument should be more
                    # precisely typed than Any.
                    leading_type = AnyType() if defn.is_class else self_type(self.type)
                    defn.type = replace_implicit_first_type(sig, leading_type)
        if self.is_func_scope() and (not defn.is_decorated and
                                     not defn.is_overload):
            # Nested function: bind it in the local scope.
            self.add_local_func(defn, defn)
            defn._fullname = defn.name()
        self.errors.push_function(defn.name())
        self.analyze_function(defn)
        self.errors.pop_function()
def is_conditional_func(self, n: Node, defn: FuncDef) -> bool:
return (isinstance(n, FuncDef) and cast(FuncDef, n).is_conditional and
defn.is_conditional)
    def update_function_type_variables(self, defn: FuncDef) -> None:
        """Make any type variables in the signature of defn explicit.
        Update the signature of defn to contain type variable definitions
        if defn is generic.
        """
        if defn.type:
            functype = cast(CallableType, defn.type)
            typevars = self.infer_type_variables(functype)
            # Do not define a new type variable if already defined in scope.
            typevars = [(name, tvar) for name, tvar in typevars
                        if not self.is_defined_type_var(name, defn)]
            if typevars:
                # Function-scoped type variables get negative ids (-1, -2, ...).
                defs = [TypeVarDef(tvar[0], -i - 1, tvar[1].values, self.object_type(),
                                   tvar[1].variance)
                        for i, tvar in enumerate(typevars)]
                functype.variables = defs
def infer_type_variables(self,
type: CallableType) -> List[Tuple[str, TypeVarExpr]]:
"""Return list of unique type variables referred to in a callable."""
names = [] # type: List[str]
tvars = [] # type: List[TypeVarExpr]
for arg in type.arg_types + [type.ret_type]:
for name, tvar_expr in self.find_type_variables_in_type(arg):
if name not in names:
names.append(name)
tvars.append(tvar_expr)
return list(zip(names, tvars))
def find_type_variables_in_type(
self, type: Type) -> List[Tuple[str, TypeVarExpr]]:
"""Return a list of all unique type variable references in type.
This effectively does partial name binding, results of which are mostly thrown away.
"""
result = [] # type: List[Tuple[str, TypeVarExpr]]
if isinstance(type, UnboundType):
name = type.name
node = self.lookup_qualified(name, type)
if node and node.kind == UNBOUND_TVAR:
| |
union of all dat2 keys,
## and want max of dat2[x][0] for each experiment x.
##
return (expts, dat2, nytot, netot, tsl )
def setTierMax( self, tierMax ):
"""Set the maxium tier and recompute request sizes"""
if tierMax != self.tierMax:
self.tierMax = tierMax
self.requestItemExpAll( )
def summaryByMip( self, pmax=1 ):
bytesPerFloat = 2.
for m in self.mipls:
v = self.volByMip( m, pmax=pmax )
mlg.prnt ( '%12.12s: %6.2fTb' % (m,v*bytesPerFloat*1.e-12) )
def rqlByMip( self, mip):
if mip == 'TOTAL':
mip = self.mips
if type(mip) in [type( '' ),type( u'') ]:
if mip not in self.mips:
mlg.prnt ( self.mips )
raise baseException( 'rqiByMip: Name of mip not recognised: %s' % mip )
l1 = [i for i in self.dq.coll['requestLink'].items if i.mip == mip]
elif type(mip) in [ type( set()), type( [] ) ]:
nf = [ m for m in mip if m not in self.mips]
if len(nf) > 0:
raise baseException( 'rqlByMip: Name of mip(s) not recognised: %s' % str(nf) )
l1 = [i for i in self.dq.coll['requestLink'].items if i.mip in mip]
elif type(mip) == type( dict()):
nf = [ m for m in mip if m not in self.mips]
if len(nf) > 0:
raise baseException( 'rqlByMip: Name of mip(s) not recognised: %s' % str(nf) )
l1 = []
for i in self.dq.coll['requestLink'].items:
if i.mip in mip:
ok = False
if len( mip[i.mip] ) == 0:
ok = True
else:
for ol in self.dq.inx.iref_by_sect[i.uid].a['objectiveLink']:
o = self.dq.inx.uid[ol]
if self.dq.inx.uid[o.oid].label in mip[i.mip]:
ok = True
if ok:
l1.append( i )
else:
raise baseException( 'rqiByMip: "mip" (1st explicit argument) should be type string or set: %s -- %s' % (mip, type(mip)) )
return l1
def rqiByMip( self, mip):
l1 = self.rqlByMip( mip )
if len(l1) == 0:
return []
l2 = []
for i in l1:
if 'requestItem' in self.dq.inx.iref_by_sect[i.uid].a:
for u in self.dq.inx.iref_by_sect[i.uid].a['requestItem']:
l2.append( self.dq.inx.uid[u] )
l20 = self.rqiByMip0( mip )
##for i in l20:
##assert i in l2
return l2
def rqiByMip0( self, mip):
if mip == 'TOTAL':
mip = self.mips
if type(mip) in [type( '' ),type( u'') ]:
if mip not in self.mips:
mlg.prnt ( self.mips )
raise baseException( 'rqiByMip: Name of mip not recognised: %s' % mip )
l1 = [i for i in self.dq.coll['requestItem'].items if i.mip == mip]
elif type(mip) in [ type( set()), type( [] ) ]:
nf = [ m for m in mip if m not in self.mips]
if len(nf) > 0:
raise baseException( 'rqiByMip: Name of mip(s) not recognised: %s' % str(nf) )
l1 = [i for i in self.dq.coll['requestItem'].items if i.mip in mip]
elif type(mip) == type( dict()):
nf = [ m for m in mip if m not in self.mips]
if len(nf) > 0:
raise baseException( 'rqiByMip: Name of mip(s) not recognised: %s' % str(nf) )
l1 = []
for i in self.dq.coll['requestLink'].items:
if i.mip in mip:
ok = False
if len( mip[i.mip] ) == 0:
ok = True
else:
for ol in self.dq.inx.iref_by_sect[i.uid].a['objectiveLink']:
o = self.dq.inx.uid[ol]
if self.dq.inx.uid[o.oid].label in mip[i.mip]:
ok = True
if ok:
if 'requestItem' in self.dq.inx.iref_by_sect[i.uid].a:
for u in self.dq.inx.iref_by_sect[i.uid].a['requestItem']:
l1.append( self.dq.inx.uid[u] )
else:
raise baseException( 'rqiByMip: "mip" (1st explicit argument) should be type string or set: %s -- %s' % (mip, type(mip)) )
return l1
def checkDir(self,odir,msg):
if not os.path.isdir( odir ):
try:
os.mkdir( odir )
except:
print ('\n\nFailed to make directory "%s" for: %s: make necessary subdirectories or run where you have write access' % (odir,msg) )
print ( '\n\n' )
raise
print ('Created directory %s for: %s' % (odir,msg) )
def xlsByMipExpt(self,m,ex,pmax,odir='xls',xls=True,txt=False,txtOpts=None):
mxls = scope_utils.xlsTabs(self,tiermax=self.tierMax,pmax=pmax,xls=xls, txt=txt, txtOpts=txtOpts,odir=odir)
mlab = misc_utils.setMlab( m )
mxls.run( m, mlab=mlab )
def cmvByInvMip( self, mip,pmax=1,includeYears=False, exptFilter=None,exptFilterBlack=None ):
mips = set( self.mips[:] )
if type(mip) == type( '' ):
mips.discard( mip )
else:
for m in mip:
mips.discard( m )
return self.cmvByMip( mips,pmax=pmax,includeYears=includeYears, exptFilter=exptFilter, exptFilterBlack=exptFilterBlack )
def cmvByMip( self, mip,pmax=1,includeYears=False, exptFilter=None, exptFilterBlack=None ):
if exptFilter != None:
assert type(exptFilter) == type( set() ), 'Argument exptFilter must be None or a set: %s' % str(exptFilter)
if exptFilterBlack != None:
assert type(exptFilterBlack) == type( set() ), 'Argument exptFilterBlack must be None or a set: %s' % str(exptFilterBlack)
if exptFilter != None:
assert len( exptFilter.difference( exptFilterBlack ) ) > 0, 'If exptFilter and exptFilterBlack are both set, exptFilter must have non-black listed elements'
l1,ee = self.rvgByMip( mip, includePreset=True, returnLinks=True )
if includeYears:
expys = self.exptYears( l1, ex=exptFilter, exBlack=exptFilterBlack )
cc = collections.defaultdict( set )
ccts = collections.defaultdict( set )
mipsByVar = collections.defaultdict( set )
ss = set()
for pr in ee:
### loop over request var groups.
for i in ee[pr]:
if 'requestVar' in self.dq.inx.iref_by_sect[i.uid].a:
#
# loop over request vars in group
#
for x in self.dq.inx.iref_by_sect[i.uid].a['requestVar']:
i1 = self.dq.inx.uid[x]
##
## BALAJI .... need to override here ... to specified list of CMOR variables ...
## .... or just go through requestVar list and chane every priority ... easieir
##
thisp = i1.priority
if pr != -1:
thisp = pr
if thisp <= pmax:
if includeYears and i1.vid in self.cmvGridId:
##assert i.uid in expys, 'No experiment info found for requestVarGroup: %s' % i.uid
## may have no entry as a consequence of tierMin being set in the requestLink(s).
assert i1.vid in self.cmvGridId, 'No grid identification lookup found for %s: %s' % (i1.label,i1.vid)
assert self.cmvGridId[i1.vid] in ['a','o','si','li'], 'Unexpected grid id: %s: %s:: %s' % (i1.label,i1.vid, self.cmvGridId[i1.vid])
gflg = {'si':'','li':''}.get( self.cmvGridId[i1.vid], self.cmvGridId[i1.vid] )
rtl = True
if i.uid in expys.exptYears:
mipsByVar[i1.vid].add( i.mip )
if rtl:
for e,grd in expys.exptYears[i.uid]:
if exptFilter == None or e in exptFilter:
if grd == 'DEF':
if gflg == 'o' and not self.gridPolicyDefaultNative:
##if gflg == 'o':
grd1 = '1deg'
else:
grd1 = 'native'
else:
grd1 = grd
cc[(i1.vid,e,grd1)].add( expys.exptYears[i.uid][e,grd] )
if i.uid in self.tsliceDict and e in self.tsliceDict[i.uid]:
for thisSlice in self.tsliceDict[i.uid][e]:
ccts[(i1.vid,e)].add( (thisSlice,thisp) )
else:
ccts[(i1.vid,e)].add( (None,thisp) )
else:
for gf in expys.exptYears[i.uid]:
for e,grd in expys.exptYears[i.uid][gf]:
if grd in ["1deg",'2deg'] or gf == gflg:
if exptFilter == None or e in exptFilter:
cc[(i1.vid,e,grd)].add( expys.exptYears[i.uid][gf][e,grd] )
else:
print ( 'SKIPPING %s: %s' % (i1.label,i1.vid) )
ss.add( i1.vid )
if self.intersection and type(mip) == type( set() ) and len(mip) > 1:
sint = set( [k for k in mipsByVar if len( mipsByVar[k] ) == len(mip)] )
print ( 'INTERSECTION: %s out of %s variables [%s]' % (len(sint),len(mipsByVar.keys()),str(mip)) )
xxx = [t for t in cc if t[0] not in sint]
for t in xxx:
del cc[t]
if includeYears:
l2 = collections.defaultdict( dict )
l2x = collections.defaultdict( dict )
##
## this removes lower ranked grids .... but for some groups want different grids for different variable categories
##
if self.gridPolicyTopOnly:
for v,e,g in cc:
l2x[(v,e)][g] = max( list( cc[(v,e,g)] ) )
for v,e in l2x:
if len( l2x[(v,e)].keys() ) == 1:
g,val = list( l2x[(v,e)].items() )[0]
else:
kk = gridSorter.sort( l2x[(v,e)].keys() )
gflg = {'si':'','li':''}.get( self.cmvGridId[v], self.cmvGridId[v] )
g = kk[0]
if g not in l2x[(v,e)]:
print ( '%s not found in %s (%s):' % (g,str(l2x[(v,e)].keys()),str(kk)) )
val = l2x[(v,e)][g]
l2[v][(e,g)] = val
else:
for v,e,g in cc:
l2[v][(e,g)] = max( list( cc[(v,e,g)] ) )
l2ts = collections.defaultdict( dict )
for v in l2:
for e,g in l2[v]:
if (v,e) in ccts:
ccx = collections.defaultdict( set )
for x in ccts[(v,e)]:
ccx[x[0]].add( x[1] )
if len( ccx.keys() ) > 1:
tslp = [ (k,min(ccx[k])) for k in ccx ]
thisTimeSlice = timeSlice( tslp )
rc, ts, msg = thisTimeSlice.sort()
##rc, ts, msg = sortTimeSlice( tslp )
if rc == 1:
l2ts[v][e] = tuple( list(ts) + [g,] )
elif rc == 2:
try:
##(('abrupt5', 'simpleRange', 0, 5), 1), (('abrupt30', 'simpleRange', 121, 150), 1)]
yl = list( range( ts[0][0][2], ts[0][0][3] + 1) ) + list( range( ts[1][0][2], ts[1][0][3] + 1) )
except:
print ( 'FAILED TO GENERATE YEARLIST' )
print ( str((v,e) ) )
print ( 'range( ts[0][0][2], ts[0][0][3] + 1) + range( ts[1][0][2], ts[1][0][3] + | |
"""This module handles the creation of `cross_experiment_key` s and `hyperparameter_key` s for
:class:`hyperparameter_hunter.environment.Environment`, and
:class:`hyperparameter_hunter.experiments.BaseExperiment`, respectively. It also handles the
treatment of complex-typed inputs and their storage in the 'KeyAttributeLookup' subdirectory. The
descendants of :class:`hyperparameter_hunter.key_handler.KeyMaker` defined herein are each
responsible for the generation and saving of their keys, as well as determining whether such a key
already exists
Related
-------
:mod:`hyperparameter_hunter.environment`
This module uses :class:`hyperparameter_hunter.key_handler.CrossExperimentKeyMaker` to set
:attr:`hyperparameter_hunter.environment.Environment.cross_experiment_key`
:mod:`hyperparameter_hunter.experiments`
This module uses :class:`hyperparameter_hunter.key_handler.HyperparameterKeyMaker` to set
:attr:`hyperparameter_hunter.experiments.BaseExperiment.hyperparameter_key`"""
##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter.exceptions import EnvironmentInvalidError, EnvironmentInactiveError
from hyperparameter_hunter.library_helpers.keras_helper import (
keras_callback_to_dict,
parameterize_compiled_keras_model,
)
from hyperparameter_hunter.library_helpers.keras_optimization_helper import initialize_dummy_model
from hyperparameter_hunter.metrics import Metric
from hyperparameter_hunter.sentinels import Sentinel
from hyperparameter_hunter.settings import G
from hyperparameter_hunter.utils.file_utils import write_json, read_json, add_to_json, make_dirs
from hyperparameter_hunter.utils.boltons_utils import remap, default_enter
##################################################
# Import Miscellaneous Assets
##################################################
from abc import ABCMeta, abstractmethod
import base64
from copy import deepcopy
import dill # TODO: Figure out if this can be safely removed
from functools import partial
import hashlib
from inspect import getsourcelines, isclass, getsource
from os import listdir
import os.path
import pandas as pd
from pickle import PicklingError
import re
import shelve
import sys
##################################################
# Import Learning Assets
##################################################
# Keras is an optional dependency. When it cannot be imported, define a
# stand-in `BaseKerasCallback` so that `isinstance(value, BaseKerasCallback)`
# checks elsewhere in this module remain valid and simply never match.
try:
    from keras.callbacks import Callback as BaseKerasCallback
except ModuleNotFoundError:
    class BaseKerasCallback:
        # Deliberately bizarre attribute/value: no real object should ever be
        # an instance of this placeholder class
        placeholder_attribute = """
        Hello, there! I am a `placeholder_attribute` for `BaseKerasCallback` if attempting to import `Keras` raised a
        `ModuleNotFoundError`. You might be wondering what I'm doing here. I'm special because no normal/sane person would make a
        class, or an attribute just like me! That means that if anyone checks to see if something is an instance of yours truly,
        hopefully it won't be! :) Nice to meet you! &*%#))(%#(*&@*HIOV0(#*W*Q()UFIJW_Q)_#R*(*(T{_E_QWO_))T+VMS"W)|GO{>A?C<A/woe0
        """
##################################################
# KeyMaker Base Class:
##################################################
class KeyMaker(metaclass=ABCMeta):
    """Abstract base class that hashes a dict of parameters into a key, saves
    lookup entries for complex-typed parameter values, and checks whether the
    resulting key has been recorded previously. Subclasses define `key_type`,
    `does_key_exist`, and `save_key`."""
    def __init__(self, parameters, **kwargs):
        """Base class to handle making key hashes and checking for their existence. Additionally,
        this class handles saving entries for complex-typed parameters, along with their hashes to
        ensure experiments are reproducible

        Parameters
        ----------
        parameters: Dict
            All the parameters to be included when creating the key hash. Keys should correspond to
            parameter names, and values should be the values of the corresponding keys
        **kwargs: Dict
            Additional arguments

        Attributes
        ----------
        parameters: Dict
            A deep copy of the given `parameters` input
        key: Str, or None
            If a key has been generated for `parameters`, it is saved here. Else, None
        exists: Boolean
            True if `key` is not None, and already exists in `tested_keys_dir`. Else, False
        lookup_dir: Str
            The directory in which complex-typed parameter entries will be saved
        tested_keys_dir: Str, or None
            The directory in which `key` will be saved if it does not already contain `key`"""
        self.parameters = deepcopy(parameters)
        self.key = None
        self.exists = False
        self.lookup_dir = None
        self.tested_keys_dir = None
        # Key-making pipeline: check the active Environment, replace complex
        # values with hashes, build the key, then record prior existence
        self.validate_environment()
        self.handle_complex_types()
        self.make_key()
        self.does_key_exist()
    def __repr__(self):
        # Debug-friendly representation, e.g. "CrossExperimentKeyMaker(key='...')"
        return f"{self.__class__.__name__}(key={self.key!r})"
    def __str__(self):
        # The key hash itself (the string "None" if no key has been made yet)
        return f"{self.key!s}"
    def __eq__(self, other):
        # Compare directly against the key string, so `key_maker == "<hash>"` works
        return self.key == other
    def __ne__(self, other):
        """Instance will always return True for a non-equality check if `key` is unset (None)"""
        return (self.key is None) or (self.key != other)
    ##################################################
    # Core Methods
    ##################################################
    def validate_environment(self):
        """Check that the currently active Environment is suitable"""
        if G.Env is None:
            raise EnvironmentInactiveError("")
        if not all([hasattr(G.Env, _) for _ in ["result_paths", "cross_experiment_key"]]):
            raise EnvironmentInvalidError("")
        try:
            self.lookup_dir = G.Env.result_paths["key_attribute_lookup"]
            self.tested_keys_dir = G.Env.result_paths["tested_keys"]
            # Ensure :attr:`tested_keys_dir` exists before calling :meth:`does_key_exist`, so "None" paths won't be checked
            if os.path.exists(self.tested_keys_dir) is False:
                # TypeError may also be raised if :func:`os.path.exists` receives invalid input
                raise TypeError
        except TypeError:  # Key-making blacklisted
            # `tested_keys_dir` of None signals that key saving is disabled;
            # leave it as None so later steps skip filesystem work
            if self.tested_keys_dir is None:
                return
            make_dirs(self.tested_keys_dir)
    def handle_complex_types(self):
        """Locate complex types in :attr:`parameters`, create hashes for them, add lookup entries
        linking their original values to their hashes, then update their values in
        :attr:`parameters` to their hashes to facilitate Description saving"""
        dataframe_hashes = {}

        def enter(path, key, value):
            """Produce iterable of attributes to remap for instances of :class:`metrics.Metric`"""
            if isinstance(value, Metric):
                metric_attrs = ["name", "metric_function", "direction"]
                return ({}, [(_, getattr(value, _)) for _ in metric_attrs])
            return default_enter(path, key, value)

        def visit(path, key, value):
            """Check whether a parameter is of a complex type. If not, return it unchanged.
            Otherwise, 1) create a hash for its value; 2) save a complex type lookup entry linking
            `key`, `value`, and the hash for `value`; and 3) return the hashed value with `key`,
            instead of the original complex-typed `value`

            Parameters
            ----------
            path: Tuple
                The path of keys that leads to `key`
            key: Str
                The parameter name
            value: *
                The value of the parameter `key`

            Returns
            -------
            Tuple of (`key`, value), in which value is either unchanged or a hash for the original
            `value`"""
            if isinstance(value, BaseKerasCallback):
                return (key, keras_callback_to_dict(value))
            if isinstance(value, Sentinel):
                return (key, value.sentinel)
            elif callable(value) or isinstance(value, pd.DataFrame):
                # TODO: Check here if callable, and using a `Trace`d model/model_initializer
                # TODO: If so, pass extra kwargs to below `make_hash_sha256`, which are eventually given to `hash_callable`
                # TODO: Notably, `ignore_source_lines=True` should be included
                # FLAG: Also, look into adding package version number to hashed attributes
                # NOTE(review): `make_hash_sha256` is defined elsewhere in this module (not visible here)
                hashed_value = make_hash_sha256(value)
                if isinstance(value, pd.DataFrame):
                    # Track hashes so identical DataFrames can be warned about below
                    dataframe_hashes.setdefault(hashed_value, []).append(key)
                if self.tested_keys_dir is not None:  # Key-making not blacklisted
                    try:
                        self.add_complex_type_lookup_entry(path, key, value, hashed_value)
                    except (FileNotFoundError, OSError):
                        # Lookup subdirectory missing - create it, then retry once
                        make_dirs(os.path.join(self.lookup_dir, *path), exist_ok=False)
                        self.add_complex_type_lookup_entry(path, key, value, hashed_value)
                return (key, hashed_value)
            return (key, value)

        self.parameters = remap(self.parameters, visit=visit, enter=enter)
        #################### Check for Identical DataFrames ####################
        for df_hash, df_names in dataframe_hashes.items():
            if len(df_names) > 1:
                G.warn(
                    f"The dataframes: {df_names} have an identical hash: {df_hash!s}. This implies the dataframes are "
                    + "identical, which is probably unintentional. If left alone, scores may be misleading!"
                )
    def add_complex_type_lookup_entry(self, path, key, value, hashed_value):
        """Add lookup entry in `lookup_dir` for a complex-typed parameter, linking
        the parameter `key`, its `value`, and its `hashed_value`

        Parameters
        ----------
        path: Tuple
            The path of keys that leads to `key`
        key: Str
            The parameter name
        value: *
            The value of the parameter `key`
        hashed_value: Str
            The hash produced for `value`"""
        shelve_params = ["model_initializer", "cross_validation_type"]
        lookup_path = partial(os.path.join, self.lookup_dir, *path)
        if isclass(value) or (key in shelve_params):
            # Classes (and selected params) are shelved so the original object can be recovered
            with shelve.open(lookup_path(f"{key}"), flag="c") as s:
                # NOTE: When reading from shelve file, DO NOT add the ".db" file extension
                try:
                    s[hashed_value] = value
                except PicklingError:
                    # "is not the same object" error can be raised due to `Mirror`/`TranslateTrace`
                    # Instead of saving the object that raised the error, save `getsourcelines`
                    # Source lines of traced object are identical to those of its un-traced original
                    s[hashed_value] = getsourcelines(value)
                except Exception:
                    raise
        elif isinstance(value, pd.DataFrame):
            # DataFrames are saved whole as CSV, named by their hash
            make_dirs(lookup_path(key), exist_ok=True)
            value.to_csv(lookup_path(key, f"{hashed_value}.csv"), index=False)
        else:  # Possible types: partial, function, *other
            # Everything else is recorded as source text keyed by hash in a JSON file
            add_to_json(
                file_path=lookup_path(f"{key}.json"),
                data_to_add=getsource(value),
                key=hashed_value,
                condition=lambda _: hashed_value not in _.keys(),
                default={},
            )
    def make_key(self):
        """Set :attr:`key` to an sha256 hash for :attr:`parameters`"""
        self.key = make_hash_sha256(self._filter_parameters_to_hash(deepcopy(self.parameters)))
    @staticmethod
    def _filter_parameters_to_hash(parameters):
        """Produce a filtered version of `parameters` that does not include values that should be
        ignored during hashing

        Parameters
        ----------
        parameters: Dict
            The full dictionary of initial parameters to be filtered

        Returns
        -------
        parameters: Dict
            The filtered version of the given `parameters`"""
        # Base implementation is a pass-through; subclasses may override to drop keys
        return parameters
    ##################################################
    # Abstract Methods
    ##################################################
    @property
    @abstractmethod
    def key_type(self) -> str:
        """Str in ["hyperparameter", "cross_experiment"], denoting the key type being processed"""
    @abstractmethod
    def does_key_exist(self) -> bool:
        """Check if key hash exists among saved keys in the contents of :attr:`tested_keys_dir`"""
    @abstractmethod
    def save_key(self):
        """Save the key hash and the parameters used to make it to :attr:`tested_keys_dir`"""
class CrossExperimentKeyMaker(KeyMaker):
    """KeyMaker dedicated to cross-experiment keys, which determine when
    experiments were executed under sufficiently similar conditions to permit
    proper comparison. Two separate :class:`environment.Environment` instances
    should produce identical `cross_experiment_key` s if their arguments are
    the same (or close enough)."""
    key_type = "cross_experiment"
    def __init__(self, parameters, **kwargs):
        """Build a cross-experiment key from `parameters`.

        Parameters
        ----------
        parameters: Dict
            All the parameters to be included when creating the key hash. Keys should correspond to
            parameter names, and values should be the values of the corresponding keys
        **kwargs: Dict
            Additional arguments supplied to :meth:`key_handler.KeyMaker.__init__`"""
        super().__init__(parameters, **kwargs)
    def does_key_exist(self):
        """Check if a file corresponding to this cross_experiment_key already exists

        Returns
        -------
        Boolean"""
        # Compare against the saved key files (names minus extension)
        saved_keys = (os.path.splitext(filename)[0] for filename in listdir(self.tested_keys_dir))
        self.exists = any(saved_key == self.key for saved_key in saved_keys)
        return self.exists
    def save_key(self):
        """Create a new file for this cross_experiment_key if :attr:`exists` is False"""
        if self.exists:
            G.log(f'{self.key_type}_key "{self.key}" already exists - Skipped saving', 4)
        else:
            write_json(f"{self.tested_keys_dir}/{self.key}.json", {})
            self.exists = True
            G.log(f'Saved {self.key_type}_key: "{self.key}"', 4)
class HyperparameterKeyMaker(KeyMaker):
key_type = "hyperparameter"
def __init__(self, parameters, cross_experiment_key, | |
= R[smax>=a]
Clmin = Cmin[smax>=a]
b1l = np.zeros(al.shape)
b1l[smin/al < 1.0] = np.arcsin(smin/al[smin/al < 1.0])
b2l = np.pi*np.ones(al.shape)
b2l[smin/al < 1.0] = np.pi-np.arcsin(smin/al[smin/al < 1.0])
C1l = np.ones(al.shape)
C1l[smin/al < 1.0] = p*(Rl[smin/al < 1.0]/al[smin/al < 1.0])**2*np.cos(b1l[smin/al < 1.0]/2.0)**4
C2l = np.ones(al.shape)
C2l[smin/al < 1.0] = p*(Rl[smin/al < 1.0]/al[smin/al < 1.0])**2*np.cos(b2l[smin/al < 1.0]/2.0)**4
C2l[C2l<Clmin] = Clmin[C2l<Clmin]
vals = C2l > C1l
C1l[vals] = 0.0
C2l[vals] = 0.0
fl = (al/np.sqrt(p*Rl**2)*(np.sqrt(C1l)-np.sqrt(C2l)))
f[smax<a] = fg
f[smax>=a] = fl
f[smin>a] = 0.0
return f
def one_DoS_bins(self,a,R,p,smin,smax,Cmin):
'''Calculates depth of search for each bin by integrating the
completeness for given semi-major axis and planetary radius
Args:
a (ndarray):
2D grid of semi-major axis bin edges in AU
R (ndarray):
2D grid of planetary radius bin edges in R_Earth
p (float):
expected value of geometric albedo
smin (float):
minimum separation in AU
smax (float):
maximum separation in AU
Cmin (ndarray):
2D grid of minimum contrast
Returns:
f (ndarray):
2D array of depth of search values in each bin
'''
tmp = self.one_DoS_grid(a,R,p,smin,smax,Cmin)
f = 0.25*(tmp[:-1,:-1]+tmp[1:,:-1]+tmp[:-1,1:]+tmp[1:,1:])
return f
    def DoS_sum(self,a,aa,R,RR,pexp,smin,smax,dist,C_inst,WA):
        '''Sums the depth of search over a list of target stars

        Args:
            a (ndarray):
                1D array of semi-major axis bin edge values in AU
            aa (ndarray):
                2D grid of semi-major axis bin edge values in AU
            R (ndarray):
                1D array of planetary radius bin edge values in AU
            RR (ndarray):
                2D grid of planetary radius bin edge values in AU
            pexp (float):
                expected value of geometric albedo
            smin (ndarray):
                1D array of minimum separation values in AU (one per star)
            smax (ndarray):
                1D array of maximum separation values in AU (one per star)
            dist (ndarray):
                1D array of stellar distance values in pc
            C_inst (ndarray):
                instrument contrast at working angle (one curve per star)
            WA (ndarray):
                working angles in arcseconds

        Returns:
            DoS (ndarray):
                2D array of depth of search values summed for input stellar list

        '''
        DoS = np.zeros((aa.shape[0]-1,aa.shape[1]-1))
        for i in xrange(len(smin)):
            # Linear spline of this star's contrast curve vs working angle;
            # ext=3 clamps to the boundary values outside the tabulated range
            Cs = interpolate.InterpolatedUnivariateSpline(WA, C_inst[i], k=1,ext=3)
            Cmin = np.zeros(a.shape)
            # expected value of Cmin calculations for each separation
            for j in xrange(len(a)):
                if a[j] < smin[i]:
                    # orbit never resolvable at this semi-major axis; sentinel contrast
                    Cmin[j] = 1.0
                else:
                    if a[j] > smax[i]:
                        su = smax[i]
                    else:
                        su = a[j]
                    # find expected value of minimum contrast from contrast curve
                    # by integrating over the substitution t = sqrt(1-(s/a)^2)
                    tup = np.sqrt(1.0-(smin[i]/a[j])**2)
                    tlow = np.sqrt(1.0-(su/a[j])**2)
                    f = lambda t,a=a[j],d=dist[i]: Cs(a*np.sqrt(1.0-t**2)/d)
                    val = integrate.quad(f, tlow, tup, epsabs=0,epsrel=1e-3,limit=100)[0]
                    Cmin[j] = val/(tup - tlow)
            # expand the 1D expected-contrast values onto the (a, R) edge grid
            CC,RR = np.meshgrid(Cmin,R)
            tmp = self.one_DoS_bins(aa,RR,pexp,smin[i],smax[i],CC)
            DoS += tmp
        return DoS
    def find_ck(self,amin,amax,smin,smax,Cmin,pexp,Rexp):
        '''Finds ck metric for each star

        Args:
            amin (float):
                minimum semi-major axis value in AU
            amax (float):
                maximum semi-major axis value in AU
            smin (ndarray):
                1D array of minimum separation values in AU
            smax (ndarray):
                1D array of maximum separation values in AU
            Cmin (float):
                minimum contrast value
            pexp (float):
                expected value of geometric albedo
            Rexp (float):
                expected value of planetary radius in AU
        Returns:
            ck (ndarray):
                1D array of ck metric

        '''
        # normalization constant for the log-uniform semi-major axis distribution
        an = 1.0/np.log(amax/amin)
        cg = an*(np.sqrt(1.0-(smax/amax)**2) - np.sqrt(1.0-(smin/amax)**2) + np.log(smax/(np.sqrt(1.0-(smax/amax)**2)+1.0))-np.log(smin/(np.sqrt(1.0-(smin/amax)**2)+1.0)))
        # calculate ck
        anp = an/cg
        # intermediate values: contrast-level breakpoints delimiting the
        # piecewise integration regions below
        k1 = np.cos(0.5*(np.pi-np.arcsin(smin/amax)))**4/amax**2
        k2 = np.cos(0.5*(np.pi-np.arcsin(smax/amax)))**4/amax**2
        k3 = np.cos(0.5*np.arcsin(smax/amax))**4/amax**2
        k4 = 27.0/64.0*smax**(-2)
        k5 = np.cos(0.5*np.arcsin(smin/amax))**4/amax**2
        k6 = 27.0/64.0*smin**(-2)
        # set up symbolic quartic in z; its roots give semi-major axis limits
        # as a function of contrast level k and separation b
        z = sympy.Symbol('z', positive=True)
        k = sympy.Symbol('k', positive=True)
        b = sympy.Symbol('b', positive=True)
        # solve
        sol = solve(z**4 - z**3/sympy.sqrt(k) + b**2/(4*k), z)
        # third and fourth roots give valid roots
        # lambdify these roots
        sol3 = sympy.lambdify((k,b), sol[2], "numpy")
        sol4 = sympy.lambdify((k,b), sol[3], "numpy")
        # find ck
        ck = np.zeros(smin.shape)
        # kmin: contrast floor scaled by expected albedo and radius
        kmin = Cmin/(pexp*Rexp**2)
        for i in xrange(len(ck)):
            if smin[i] == smax[i]:
                # degenerate separation range -> no contribution
                ck[i] = 0.0
            else:
                # equations to integrate: semi-major axis limits (al*/au*) from
                # the quartic roots at smin/smax, combined per region
                al1 = lambda k: sol3(k,smin[i])
                au1 = lambda k: sol4(k,smin[i])
                au2 = lambda k: sol3(k,smax[i])
                al2 = lambda k: sol4(k,smax[i])
                f12 = lambda k: anp[i]/(2.0*np.sqrt(k))*(amax - al1(k))
                f23 = lambda k: anp[i]/(2.0*np.sqrt(k))*(au2(k) - al1(k))
                f34 = lambda k: anp[i]/(2.0*np.sqrt(k))*(amax - al2(k) + au2(k) - al1(k))
                f45 = lambda k: anp[i]/(2.0*np.sqrt(k))*(amax - al1(k))
                f56 = lambda k: anp[i]/(2.0*np.sqrt(k))*(au1(k) - al1(k))
                f35 = lambda k: anp[i]/(2.0*np.sqrt(k))*(amax - al2(k) + au2(k) - al1(k))
                f54 = lambda k: anp[i]/(2.0*np.sqrt(k))*(au1(k) - al2(k) + au2(k) - al1(k))
                f46 = lambda k: anp[i]/(2.0*np.sqrt(k))*(au1(k) - al1(k))
                # the ordering of breakpoints k4 and k5 determines which
                # sequence of integration segments applies; within each branch,
                # integration starts at max(kmin, lowest breakpoint)
                if k4[i] < k5[i]:
                    if kmin < k1[i]:
                        ck[i] = integrate.quad(f12,k1[i],k2[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        if k2[i] != k3[i]:
                            ck[i] += integrate.quad(f23,k2[i],k3[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f34,k3[i],k4[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f45,k4[i],k5[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f56,k5[i],k6[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                    elif (kmin > k1[i]) and (kmin < k2[i]):
                        ck[i] = integrate.quad(f12,kmin,k2[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        if k2[i] != k3[i]:
                            ck[i] += integrate.quad(f23,k2[i],k3[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f34,k3[i],k4[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f45,k4[i],k5[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f56,k5[i],k6[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                    elif (kmin > k2[i]) and (kmin < k3[i]):
                        ck[i] = integrate.quad(f23,kmin,k3[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f34,k3[i],k4[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f45,k4[i],k5[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f56,k5[i],k6[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                    elif (kmin > k3[i]) and (kmin < k4[i]):
                        ck[i] = integrate.quad(f34,kmin,k4[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f45,k4[i],k5[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f56,k5[i],k6[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                    elif (kmin > k4[i]) and (kmin < k5[i]):
                        ck[i] = integrate.quad(f45,kmin,k5[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f56,k5[i],k6[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                    elif (kmin < k6[i]):
                        ck[i] = integrate.quad(f56,kmin,k6[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                    else:
                        # contrast floor above all breakpoints: nothing detectable
                        ck[i] = 0.0
                else:
                    if kmin < k1[i]:
                        ck[i] = integrate.quad(f12,k1[i],k2[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        if k2[i] != k3[i]:
                            ck[i] += integrate.quad(f23,k2[i],k3[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f35,k3[i],k5[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f54,k5[i],k4[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f46,k4[i],k6[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                    elif (kmin > k1[i]) and (kmin < k2[i]):
                        ck[i] = integrate.quad(f12,kmin,k2[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        if k2[i] != k3[i]:
                            ck[i] += integrate.quad(f23,k2[i],k3[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f35,k3[i],k5[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f54,k5[i],k4[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f46,k4[i],k6[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                    elif (kmin > k2[i]) and (kmin < k3[i]):
                        ck[i] = integrate.quad(f23,kmin,k3[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f35,k3[i],k5[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f54,k5[i],k4[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f46,k4[i],k6[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                    elif (kmin > k3[i]) and (kmin < k5[i]):
                        ck[i] = integrate.quad(f35,kmin,k5[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f54,k5[i],k4[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f46,k4[i],k6[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                    elif (kmin > k5[i]) and (kmin < k4[i]):
                        ck[i] = integrate.quad(f54,kmin,k4[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                        ck[i] += integrate.quad(f46,k4[i],k6[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                    elif (kmin < k6[i]):
                        ck[i] = integrate.quad(f46,kmin,k6[i],limit=50,epsabs=0,epsrel=1e-4)[0]
                    else:
                        # contrast floor above all breakpoints: nothing detectable
                        ck[i] = 0.0
        return ck
    def select_obs(self,t0,maxTime,ck):
        '''Selects stars for observation using ortools

        Solves a 0/1 knapsack problem: maximize summed ck metric subject to the
        total integration time staying within maxTime.

        Args:
            t0 (ndarray):
                1D array of integration times in days
            maxTime (float):
                total observation time allotted in days
            ck (ndarray):
                1D array of ck metric

        Returns:
            sInds (ndarray):
                1D array of star indices selected for observation

        '''
        #set up solver
        solver = pywraplp.Solver('SolveIntegerProblem',pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
        #need one var per state: binary decision variable per star
        xs = [ solver.IntVar(0.0,1.0, 'x'+str(j)) for j in range(len(ck)) ]
        #constraint is x_i*t_i < maxtime
        constraint1 = solver.Constraint(-solver.infinity(),maxTime)
        for j,x in enumerate(xs):
            constraint1.SetCoefficient(x, t0[j])
        #objective is max x_i*comp_i
        objective = solver.Objective()
        for j,x in enumerate(xs):
            objective.SetCoefficient(x, ck[j])
        objective.SetMaximization()
        # NOTE(review): solver status `res` is never checked - an infeasible or
        # aborted solve is silently treated like an optimal one; consider verifying
        res = solver.Solve()
        print 'Objective function value: %r' % (solver.Objective().Value())
        #collect result
        xs2 = np.array([x.solution_value() for x in xs])
        # observed star indices for depth of search calculations
        sInds = np.where(xs2>0)[0]
        return sInds
    def plot_dos(self,targ,name,path=None):
        '''Plots depth of search as a filled contour plot with contour lines

        Args:
            targ (str):
                string indicating which key to access from depth of search
                result dictionary
            name (str):
                string indicating what to put in title of figure
            path (str):
                desired path to save figure (pdf, optional)

        '''
        # build plotting axes: bin edges plus bin centers, so the DoS values
        # (defined per bin) can be extrapolated out to the outer edges
        acents = 0.5*(self.result['aedges'][1:]+self.result['aedges'][:-1])
        a = np.hstack((self.result['aedges'][0],acents,self.result['aedges'][-1]))
        a = np.around(a,4)
        Rcents = 0.5*(self.result['Redges'][1:]+self.result['Redges'][:-1])
        R = np.hstack((self.result['Redges'][0],Rcents,self.result['Redges'][-1]))
        R = np.around(R,4)
        DoS = self.result['DoS'][targ]
        # extrapolate to left-most boundary
        tmp = DoS[:,0] + (a[0]-a[1])*((DoS[:,1]-DoS[:,0])/(a[2]-a[1]))
        DoS = np.insert(DoS, 0, tmp, axis=1)
        # extrapolate to right-most boundary
        # NOTE(review): np.insert with index -1 places the new column *before*
        # the current last column, not after it - confirm this is intended
        tmp = DoS[:,-1] + (a[-1]-a[-2])*((DoS[:,-1]-DoS[:,-2])/(a[-2]-a[-3]))
        DoS = np.insert(DoS, -1, tmp, axis=1)
        # extrapolate to bottom-most boundary
        tmp = DoS[0,:] + (R[0]-R[1])*((DoS[1,:]-DoS[0,:])/(R[2]-R[1]))
        DoS = np.insert(DoS, 0, tmp, axis=0)
        # extrapolate to upper-most boundary
        tmp = DoS[-1,:] + (R[-1]-R[-2])*((DoS[-1,:]-DoS[-2,:])/(R[-2]-R[-3]))
        DoS = np.insert(DoS, -1, tmp, axis=0)
        # mask non-positive values so the logarithmic contour levels are valid
        DoS = np.ma.masked_where(DoS<=0.0, DoS)
        fig = plt.figure()
        ax = fig.add_subplot(111)
        cs = ax.contourf(a,R,DoS,locator=ticker.LogLocator())
        cs2 = ax.contour(a,R,DoS,levels=cs.levels[1:],colors='k')
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.set_xlabel('a (AU)')
        ax.set_ylabel('R ($R_\oplus$)')
        ax.set_title('Depth of Search - '+name+' ('+str(self.result['NumObs'][targ])+')')
        cbar = fig.colorbar(cs)
        ax.clabel(cs2, fmt=ticker.LogFormatterMathtext(), colors='k')
        if path != None:
            fig.savefig(path, format='pdf', dpi=600, bbox_inches='tight', pad_inches=0.1)
        plt.show()
def plot_nplan(self,targ,name,path=None):
'''Plots depth of search convolved with occurrence rates as a filled
contour plot with contour lines
Args:
targ (str):
string indicating which key to access from depth of search
result dictionary
name (str):
string indicating what to put in title of figure
path (str):
desired path to save figure (pdf, | |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2007-2020 The scikit-learn developers.
# BSD 3-Clause License
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file is part of https://github.com/scikit-learn/scikit-learn/blob/114616d9f6ce9eba7c1aacd3d4a254f868010e25/sklearn/manifold/_spectral_embedding.py and
# https://github.com/tango4j/Auto-Tuning-Spectral-Clustering.
from collections import Counter
import numpy as np
import torch
from sklearn.cluster._kmeans import k_means
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import MinMaxScaler
from nemo.utils import logging
from nemo.utils.decorators.experimental import experimental
scaler = MinMaxScaler(feature_range=(0, 1))
try:
from torch.linalg import eigh as eigh
TORCH_EIGN = True
except ImportError:
TORCH_EIGN = False
from scipy.linalg import eigh as eigh
logging.warning("Using eigen decomposition from scipy, upgrade torch to 1.9 or higher for faster clustering")
def isGraphFullyConnected(affinity_mat):
    """Return True when the component reachable from node 0 spans every node,
    i.e. the affinity graph forms a single connected component."""
    num_nodes = affinity_mat.shape[0]
    component = getTheLargestComponent(affinity_mat, 0)
    return component.sum() == num_nodes
def getTheLargestComponent(affinity_mat, seg_index):
    """
    Find the largest affinity_mat connected components for each given node.
    This is for checking whether the affinity_mat is fully connected.

    Performs a breadth-first expansion from `seg_index`, treating nonzero
    entries of `affinity_mat` as edges.

    Args:
        affinity_mat: square (N, N) array; nonzero entries denote edges
        seg_index (int): index of the starting node

    Returns:
        connected_nodes: boolean array of shape (N,), True for every node
            reachable from `seg_index`
    """
    num_of_segments = affinity_mat.shape[0]
    # FIX: `np.bool` was deprecated and removed in NumPy 1.24; the builtin
    # `bool` is the supported spelling of the same dtype
    connected_nodes = np.zeros(num_of_segments).astype(bool)
    nodes_to_explore = np.zeros(num_of_segments).astype(bool)
    nodes_to_explore[seg_index] = True
    for k in range(num_of_segments):
        last_num_component = connected_nodes.sum()
        np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
        # stop as soon as an expansion round adds no new nodes
        if last_num_component >= connected_nodes.sum():
            break
        indices = np.where(nodes_to_explore)[0]
        nodes_to_explore.fill(False)
        for i in indices:
            neighbors = affinity_mat[i]
            np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)
    return connected_nodes
def getKneighborsConnections(affinity_mat, p_value):
    """
    Binarize top-p values for each row from the given affinity matrix.
    The p largest entries of row `idx` are marked with 1 in column `idx`
    of the output; all other entries are 0.
    """
    binarized = np.zeros_like(affinity_mat)
    for idx, row_values in enumerate(affinity_mat):
        # indices of the p_value largest values in this row
        top_indices = np.argsort(row_values)[::-1][:p_value]
        binarized[top_indices, idx] = 1
    return binarized
def getAffinityGraphMat(affinity_mat_raw, p_value):
    """
    Binarize the raw affinity matrix (keeping the top-p entries per row) and
    symmetrize the result by averaging it with its transpose.
    """
    binarized = getKneighborsConnections(affinity_mat_raw, p_value)
    return (binarized + binarized.T) * 0.5
def getMinimumConnection(mat, max_N, n_list):
    """
    Generate connections until fully connect all the nodes in the graph.
    If graph is not fully connected, it might generate an inaccurate results.
    """
    p_value = 1
    affinity_mat = getAffinityGraphMat(mat, p_value)
    for p_value in n_list:
        # Check connectivity of the previous graph, then rebuild with the
        # next (larger) p before deciding to stop — mirrors the original
        # ordering exactly.
        was_connected = isGraphFullyConnected(affinity_mat)
        affinity_mat = getAffinityGraphMat(mat, p_value)
        if was_connected or p_value > max_N:
            break
    return affinity_mat, p_value
def getRepeatedList(mapping_argmat, score_mat_size):
    """
    Count the numbers in the mapping dictionary and create lists that contain
    repeated indices to be used for creating the repeated affinity matrix for
    fusing the affinity values.
    """
    occurrence_counts = dict(Counter(mapping_argmat))
    # One entry per base index; 0 when the index never appears.
    return [occurrence_counts.get(k, 0) for k in range(score_mat_size)]
@experimental
def get_argmin_mat(uniq_scale_dict):
    """
    Calculate the mapping between the base scale and other scales. A segment from a longer scale is
    repeatedly mapped to a segment from a shorter scale or the base scale.

    Args:
        uniq_scale_dict (dict) :
            Dictionary of embeddings and timestamps for each scale.

    Returns:
        session_scale_mapping_dict (dict) :
            Dictionary containing argmin arrays indexed by scale index.
    """
    scale_list = sorted(uniq_scale_dict.keys())
    segment_anchor_dict = {}
    for scale_idx in scale_list:
        stamps = uniq_scale_dict[scale_idx]['time_stamps']
        # Each stamp is a whitespace-separated "<start> <end>" string; the
        # anchor of a segment is its temporal midpoint.
        boundaries = np.array([[float(s.split()[0]), float(s.split()[1])] for s in stamps])
        segment_anchor_dict[scale_idx] = boundaries.mean(axis=1)
    # The base scale is assumed to be the one with the largest index.
    base_scale_idx = max(scale_list)
    base_scale_anchor = segment_anchor_dict[base_scale_idx]
    session_scale_mapping_dict = {}
    for scale_idx in scale_list:
        curr_scale_anchor = segment_anchor_dict[scale_idx]
        # Broadcasted |midpoint difference|: entry [b, c] is the distance
        # between base segment b and current-scale segment c.
        distance = np.abs(curr_scale_anchor[np.newaxis, :] - base_scale_anchor[:, np.newaxis])
        session_scale_mapping_dict[scale_idx] = np.argmin(distance, axis=1)
    return session_scale_mapping_dict
@experimental
def getMultiScaleCosAffinityMatrix(uniq_embs_and_timestamps):
    """
    Calculate cosine similarity values among speaker embeddings for each scale then
    apply multiscale weights to calculate the fused similarity matrix.

    Args:
        uniq_embs_and_timestamps: (dict)
            The dictionary containing embeddings, timestamps and multiscale weights.
            If uniq_embs_and_timestamps contains only one scale, single scale diarization
            is performed.

    Returns:
        fused_sim_d (np.array):
            This function generates an affinity matrix that is obtained by calculating
            the weighted sum of the affinity matrices from the different scales.
        base_scale_emb (np.array):
            The base scale embedding (the embeddings from the finest scale)
    """
    uniq_scale_dict = uniq_embs_and_timestamps['scale_dict']
    # The base (finest) scale carries the largest scale index.
    base_scale_idx = max(uniq_scale_dict.keys())
    base_scale_emb = np.array(uniq_scale_dict[base_scale_idx]['embeddings'])
    multiscale_weights = uniq_embs_and_timestamps['multiscale_weights']
    # NOTE(review): score_mat_list is populated but never read afterwards.
    score_mat_list, repeated_mat_list = [], []
    # Maps every base-scale segment to its nearest segment on each scale.
    session_scale_mapping_dict = get_argmin_mat(uniq_scale_dict)
    for scale_idx in sorted(uniq_scale_dict.keys()):
        mapping_argmat = session_scale_mapping_dict[scale_idx]
        score_mat = getCosAffinityMatrix(uniq_scale_dict[scale_idx]['embeddings'])
        score_mat_list.append(score_mat)
        repeat_list = getRepeatedList(mapping_argmat, score_mat.shape[0])
        # Expand this scale's affinity matrix to base-scale resolution by
        # repeating rows and columns according to the segment mapping.
        repeated_mat = np.repeat(np.repeat(score_mat, repeat_list, axis=0), repeat_list, axis=1)
        repeated_mat_list.append(repeated_mat)
    # Weighted average of the per-scale, base-resolution affinity matrices.
    fused_sim_d = np.average(np.array(repeated_mat_list), weights=multiscale_weights, axis=0)
    return fused_sim_d, base_scale_emb
def addAnchorEmb(emb, anchor_sample_n, anchor_spk_n, sigma):
    """
    Add randomly generated synthetic embeddings to make eigen analysis more stable.
    We refer to these embeddings as anchor embeddings.

    emb (np.array):
        The input embedding from the emebedding extractor.
    anchor_sample_n (int):
        The number of embedding samples per speaker.
        anchor_sample_n = 10 is recommended.
    anchor_spk_n (int):
        The number of speakers for synthetic embedding.
        anchor_spk_n = 3 is recommended.
    sigma (int):
        The amplitude of synthetic noise for each embedding vector.
        If sigma value is too small, under-counting could happen.
        If sigma value is too large, over-counting could happen.
        sigma = 50 is recommended.
    """
    emb_dim = emb.shape[1]
    std_org = np.std(emb, axis=0)
    stacked = []
    for _ in range(anchor_spk_n):
        # One random mean vector per synthetic speaker, repeated per sample.
        mean_vec = np.tile(np.random.randn(1, emb_dim), (anchor_sample_n, 1))
        # Noise scaled per-dimension by the std of the real embeddings and
        # normalized by its largest magnitude.
        noise = np.random.randn(anchor_sample_n, emb_dim).T
        noise = np.dot(np.diag(std_org), noise / np.max(np.abs(noise))).T
        stacked.append(mean_vec + sigma * noise)
    stacked.append(emb)
    return np.vstack(stacked)
def getEnhancedSpeakerCount(emb, cuda, random_test_count=5, anchor_spk_n=3, anchor_sample_n=10, sigma=50):
    """
    Calculate the number of speakers using NME analysis with anchor embeddings.

    Args:
        emb (np.array): speaker embeddings (num_segments x emb_dim).
        cuda (bool): whether NMESC may run its eigendecomposition on GPU.
        random_test_count (int): number of randomized trials; the final
            estimate is the most frequent count across trials.
        anchor_spk_n (int): number of synthetic (anchor) speakers per trial.
        anchor_sample_n (int): synthetic embeddings per anchor speaker.
        sigma (int): amplitude of the synthetic noise.

    Returns:
        int: estimated number of real speakers (at least 1).
    """
    est_num_of_spk_list = []
    for seed in range(random_test_count):
        # Seed each trial so the anchor embeddings are reproducible.
        np.random.seed(seed)
        emb_aug = addAnchorEmb(emb, anchor_sample_n, anchor_spk_n, sigma)
        mat = getCosAffinityMatrix(emb_aug)
        nmesc = NMESC(
            mat,
            max_num_speaker=emb.shape[0],
            max_rp_threshold=0.25,
            sparse_search=True,
            sparse_search_volume=30,
            fixed_thres=None,
            NME_mat_size=300,
            cuda=cuda,
        )
        est_num_of_spk, _ = nmesc.NMEanalysis()
        est_num_of_spk_list.append(est_num_of_spk)
    # Majority vote across trials, then subtract the synthetic anchor
    # speakers that were added; never report fewer than one speaker.
    ctt = Counter(est_num_of_spk_list)
    oracle_num_speakers = max(ctt.most_common(1)[0][0] - anchor_spk_n, 1)
    return oracle_num_speakers
def getCosAffinityMatrix(emb):
    """
    Calculate cosine similarity values among speaker embeddings and min-max
    scale them (column-wise) into [0, 1].

    Args:
        emb (np.array): speaker embeddings (num_segments x emb_dim).

    Returns:
        np.array: NxN scaled cosine-similarity (affinity) matrix.
    """
    sim_d = cosine_similarity(emb)
    # Fix: use a fresh, local scaler instead of re-fitting the shared
    # module-level `scaler` instance — that hidden mutable global state is
    # not safe under concurrent calls. fit_transform is equivalent to the
    # previous fit + transform pair.
    sim_d = MinMaxScaler(feature_range=(0, 1)).fit_transform(sim_d)
    return sim_d
def getLaplacian(X):
    """
    Calculate the unnormalized graph Laplacian L = D - A from an affinity
    matrix X, ignoring self-loops.

    Args:
        X (np.array): NxN affinity matrix.

    Returns:
        np.array: NxN Laplacian matrix.
    """
    # Fix: operate on a copy — the previous version zeroed X's diagonal in
    # place, silently mutating the caller's affinity matrix.
    A = X.copy()
    A[np.diag_indices(A.shape[0])] = 0  # drop self-loops
    D = np.diag(np.sum(np.abs(A), axis=1))  # degree matrix
    L = D - A
    return L
def eigDecompose(laplacian, cuda, device=None):
    """
    Eigendecomposition of a (symmetric) Laplacian matrix.

    Uses torch.linalg.eigh when available — optionally on GPU — and the
    scipy fallback otherwise.  Returns (eigenvalues, eigenvectors) as
    numpy arrays in both cases.
    """
    if not TORCH_EIGN:
        # scipy path: eigh already returns numpy arrays
        return eigh(laplacian)
    if cuda:
        if device is None:
            device = torch.cuda.current_device()
        laplacian_t = torch.from_numpy(laplacian).float().to(device)
    else:
        laplacian_t = torch.from_numpy(laplacian).float()
    lambdas, diffusion_map = eigh(laplacian_t)
    return lambdas.cpu().numpy(), diffusion_map.cpu().numpy()
def getLamdaGaplist(lambdas):
    """Return the list of gaps between consecutive (real) eigenvalues."""
    real_lambdas = np.real(lambdas)
    return list(np.diff(real_lambdas))
def estimateNumofSpeakers(affinity_mat, max_num_speaker, is_cuda=False):
    """
    Estimate the number of speakers using eigen decompose on laplacian Matrix.

    Args:
        affinity_mat (np.array): NxN affinity matrix.
        max_num_speaker (int): maximum number of clusters to consider
            for each session.
        is_cuda (bool): if cuda is available, the eigh decomposition is
            computed on GPU.

    Returns:
        tuple: (estimated count, sorted eigenvalues, eigenvalue gap list)
    """
    laplacian = getLaplacian(affinity_mat)
    lambdas, _ = eigDecompose(laplacian, is_cuda)
    lambdas = np.sort(lambdas)
    lambda_gap_list = getLamdaGaplist(lambdas)
    # The speaker count is where the largest eigengap occurs among the
    # first max_num_speaker gaps.
    search_width = min(max_num_speaker, len(lambda_gap_list))
    num_of_spk = np.argmax(lambda_gap_list[:search_width]) + 1
    return num_of_spk, lambdas, lambda_gap_list
class _SpectralClustering:
    """Spectral clustering on a precomputed square affinity matrix.

    Computes a spectral embedding from the graph Laplacian of the affinity
    matrix and clusters the embedding with k-means.
    """
    def __init__(self, n_clusters=8, random_state=0, n_init=10, p_value=10, n_jobs=None, cuda=False):
        # n_clusters: number of clusters (speakers) passed to k-means.
        # random_state / n_init: forwarded to k_means for reproducibility.
        # p_value: stored but not used inside this class — TODO confirm
        #   whether callers rely on it.
        # n_jobs: accepted for API compatibility; unused here.
        self.n_clusters = n_clusters
        self.random_state = random_state
        self.n_init = n_init
        self.p_value = p_value
        self.affinity_matrix_ = None
        self.cuda = cuda

    def predict(self, X):
        """Cluster the square affinity matrix X; returns integer labels."""
        if X.shape[0] != X.shape[1]:
            raise ValueError("The affinity matrix is not a square matrix.")
        self.affinity_matrix_ = X
        labels = self.clusterSpectralEmbeddings(self.affinity_matrix_, n_init=self.n_init, cuda=self.cuda)
        return labels

    def clusterSpectralEmbeddings(self, affinity, n_init=10, cuda=False):
        """Embed the affinity graph spectrally, then k-means the embedding."""
        spectral_emb = self.getSpectralEmbeddings(affinity, n_spks=self.n_clusters, drop_first=False, cuda=cuda)
        _, labels, _ = k_means(spectral_emb, self.n_clusters, random_state=self.random_state, n_init=n_init)
        return labels

    def getSpectralEmbeddings(self, affinity_mat, n_spks=8, drop_first=True, cuda=False):
        """Return the spectral embedding (Laplacian eigenvectors).

        NOTE(review): the ``drop_first`` argument is currently unused.
        """
        if not isGraphFullyConnected(affinity_mat):
            logging.warning("Graph is not fully connected and the clustering result might not be accurate.")
        laplacian = getLaplacian(affinity_mat)
        lambdas_, diffusion_map_ = eigDecompose(laplacian, cuda)
        diffusion_map = diffusion_map_[:, :n_spks]
        # Take rows n_spks..0 of the transpose in reverse order, then keep
        # the first n_spks rows and transpose back.
        embedding = diffusion_map.T[n_spks::-1]
        return embedding[:n_spks].T
class NMESC:
"""
Normalized Maximum Eigengap based Spectral Clustering (NME-SC)
uses Eigengap analysis to get an estimated p-value for
| |
# gh_stars: 0  (repository-metadata artifact; kept as a comment so the module stays importable)
"""Low-level operations on Gaussian output (log) files.
Provides low-level interfaces to manipulate/extract data in Gaussian
output files.
Methods
-------
get_data
Gets data from a GLog file for each quantity label.
Classes
-------
GLogIO
Main class to handle Gaussian output file operations.
"""
import os # Used for file existence check
import re # Used to find keys in log file
import typing as tp
from estampes import parser as ep
from estampes.base import ParseKeyError, QuantityError, TypeData, TypeDCrd, \
TypeDGLog, TypeDOrd, TypeQInfo, TypeQLvl, TypeQOpt, TypeQTag, TypeRSta
from estampes.data.physics import PHYSFACT
# ================
# Module Constants
# ================
# Conversion factor from angstroms to atomic units (bohr).
__ang2au = 1.0 / PHYSFACT.bohr2ang
# Type variable for values that may be given either as str or as int.
_tp_StrInt = tp.TypeVar('_tp_StrInt', str, int)
# TypeSBloc = tp.Optional[tp.Tuple[str, int]]
# TypeQInfos = tp.Tuple[list, tp.List[str, int, TypeSBloc]]
# Parsed quantity data, indexed by quantity label.
TypeQData = tp.Dict[str, tp.Optional[tp.Any]]
# Raw search specification for one quantity label, as produced by
# qlab_to_linkdata (same field order as its return value).
TypeQKwrd = tp.Tuple[
    tp.Union[int, tp.List[int]],  # Link
    tp.Union[str, tp.List[str]],  # Keyword
    tp.Union[_tp_StrInt, tp.List[_tp_StrInt]],  # Jump/Skip function
    tp.Union[str, tp.List[str]],  # Matching pattern for data to extract
    # Block end condition
    tp.Union[tp.Callable[[str], bool], tp.List[tp.Callable[[str], bool]]],
    tp.Union[int, tp.List[int]]  # Number of occurrences
]
# Fully compiled search specification consumed by GLogIO.read_data.
TypeKData = tp.Tuple[
    str,  # Keyword
    int,  # Link
    _tp_StrInt,  # Information on lines to skip after keyword
    tp.Pattern,  # Data extraction matching pattern (compiled)
    int,  # which occurrences to extract
    tp.Callable[[str], bool]  # End-of-block condition
]
# ==============
# Module Classes
# ==============
class GLogIO(object):
    """Main class to handle Gaussian output file operations.

    Attributes
    ----------
    filename : str
        Gaussian output filename.
    version : str
        Version, software-dependent.
    full_version : tuple
        full version:
        * Gaussian
        * Gaussian major and minor revisions, mach and relesase date

    Methods
    -------
    read_data
        Extracts 1 or more data blocks from Gaussian's log file.
    """
    def __init__(self, fname: str,
                 load_pos: bool = True) -> None:
        self.filename = fname
        self.__linkpos = {}
        self.__route = None
        self.__gversion = None
        self.__rte_opt = None
        self.__links = None
        # Parse route/version information immediately.
        self.get_head()
        if load_pos:
            self.__store_linkpos()
        # try:
        #     txt = 'Gaussian Version'
        #     self.__gversion = self.get_data(txt)[txt]
        # except ParseKeyError:
        #     self.__gversion = None

    @property
    def filename(self) -> str:
        """Gets or sets the filename associated to the GLog object."""
        return self.__fname

    @filename.setter
    def filename(self, name: str) -> None:
        if not os.path.exists(name):
            # Fix: the previous message said 'Formatted checkpoint not
            # found', a copy/paste error — this class parses log files.
            raise FileNotFoundError('Gaussian output file not found')
        self.__fname = name

    @property
    def version(self) -> tp.Dict[str, str]:
        """Returns the version of Gaussian used to generate the log file.
        """
        return {key: self.__gversion[key] for key in ('major', 'minor')}

    @property
    def full_version(self) -> tp.Tuple[str, tp.Any]:
        """Returns the full version, for the parser interface"""
        return "Gaussian", self.__gversion

    def get_head(self):
        """Parses and stores the header information: version, route."""
        keydata = []
        qtydata = {}
        key2blk = {}
        i = 0
        for item in ('route', 'swopt', 'swver'):
            qtydata[item] = ep.parse_qlabel(ep.build_qlabel(item))
            key2blk[item] = (i, i)
            i += 1
            link, key, skips, fmt, end, num = qlab_to_linkdata(item)
            keydata.append((key, link, skips, re.compile(fmt), num, end))
        ndata, data = self.read_data(*keydata)
        data = parse_data(qtydata, key2blk, ndata, data)
        self.__route = data['route']['data']
        self.__links = sorted(set([int(item[0]) for item in self.__route]))
        self.__gversion = data['swver']
        self.__rte_opt = data['swopt']['data']
        # Look at verbosity level of output
        i = self.__rte_opt.index('#') + 1
        if i >= len(self.__rte_opt):
            self.__verb = 0
        else:
            key = self.__rte_opt[i].upper()
            if key == 'P':
                self.__verb = 1
            elif key == 'T':
                self.__verb = -1
            else:
                self.__verb = 0

    def read_data(self,
                  *to_find: TypeKData,
                  raise_error: bool = True) -> TypeDGLog:
        """Extracts data corresponding to the keys to find.

        Parameters
        ----------
        to_find
            List of tuples with the following data:

            keyword: str
                keyword to search.
            link: int
                link where keyword should be found (0 if no specific).
            skip: str/int
                lines to skip from the keyword to reach actual data.
            pattern: obj:`re.Pattern`
                Regular expression pattern object.
            niter: int
                Which occurrences of the quantity to extract.
            endcond: function
                End condition function, which takes a string as argument.
        raise_error
            Only raises error if `True`, otherwise proceeds silently.

        Raises
        ------
        ParseKeyError
            Key not found.

        Notes
        -----
        * The system treats each item in to_find separately.
          Post-processing routines should take care of aliases.
        """
        def del_block(iblock: int,
                      block2id: tp.Sequence[tp.Sequence[int]],
                      nocc: tp.Sequence[int],
                      dataid: tp.Sequence[int],
                      blockskp: tp.Sequence[tp.Union[str, int]],
                      blockfmt: tp.Sequence[tp.Pattern],
                      blockend: tp.Callable[[str], bool]) -> int:
            """Deletes a block in the lookup tables.

            Returns
            -------
            int
                Status, as integer
                0: block removed
                1: keylist item removed"""
            istat = 0
            if nocc[iblock] == 0:
                i, j = block2id[iblock]
                del keydata[i][j]
                if not keydata[i]:
                    # No pending block left for this keyword: drop it and
                    # renumber the references of the remaining blocks.
                    del keydata[i]
                    del keylist[i]
                    istat = 1
                    for k in range(len(block2id)):
                        a, b = block2id[k]
                        if a > i:
                            block2id[k][0] -= 1
                        elif a == i and b > j:
                            block2id[k][1] -= 1
            del block2id[iblock]
            del dataid[iblock]
            del blockskp[iblock]
            del blockfmt[iblock]
            del nocc[iblock]
            del blockend[iblock]
            return istat

        n_tofind = len(to_find)
        keylist = []  # List of keywords to search
        keydata = []  # Data associated to each keyword
        lnklist = []  # List of links involved
        lnkdata = {}
        datlist = [[] for _ in range(n_tofind)]  # Data to return
        ndatblk = [0 for _ in range(n_tofind)]
        # Generate list of links and check if jump fast search possible
        fast_mode = True
        for i in range(n_tofind):
            link = abs(to_find[i][1])
            new = link not in lnklist
            if new:
                if link == 0:
                    fast_mode = False
                    lnklist.append(link)
                else:
                    if self.__links is None or link in self.__links:
                        lnklist.append(link)
                    if link not in self.__linkpos:
                        fast_mode = False
        lnklist.sort()
        ind = 0
        for link in lnklist:
            imin = ind
            if fast_mode:
                # Fix: was `[(imin, 0)]` (a 1-element list holding a tuple),
                # which would make the `lnkdata[link][1] = ind` below raise
                # IndexError; the intent is a 2-element [start, end] range.
                lnkdata[link] = [imin, 0]
            for i, item in enumerate(to_find):
                if abs(item[1]) == link:
                    key = item[0]
                    if key not in keylist:
                        keylist.append(key)
                        keydata.append([])
                        j = ind
                        ind += 1
                    else:
                        j = keylist.index(key)
                    keydata[j].append((i, *item[2:]))
            if fast_mode:
                lnkdata[link][1] = ind
        # Sequential Search
        # -----------------
        # Looks for keywords sequentially while reading file
        if not fast_mode:
            block2id = []  # stores real indexes in keylist/keydata
            blockskp = []  # stores the "skip" information
            blockfmt = []  # stores the formats
            blockend = []  # stores the end conditions
            nocc = []  # number of occurrences to extract, used to drop search
            dataid = []  # stores the indexes for the data list
            with open(self.filename, 'r') as fobj:
                for line in fobj:
                    i = -1
                    for kword in keylist:
                        skip = False
                        i += 1
                        if line.startswith(kword):
                            # A new occurrence starts: close any active
                            # block registered for the same keyword first.
                            iblock = 0
                            while iblock < len(block2id):
                                if block2id[iblock][0] == i:
                                    res = del_block(iblock, block2id, nocc,
                                                    dataid, blockskp, blockfmt,
                                                    blockend)
                                    if res == 1:  # keylist empty
                                        i -= 1
                                        skip = True
                                else:
                                    iblock += 1
                            if not skip:
                                for j, block in enumerate(keydata[i]):
                                    # Save data to correct block in keydata
                                    block2id.append([i, j])
                                    dataid.append(block[0])
                                    blockskp.append(block[1])
                                    blockfmt.append(block[2])
                                    nocc.append(block[3])
                                    blockend.append(block[4])
                                    if nocc[-1] > 0:
                                        # Each occurrence gets its own sublist.
                                        datlist[dataid[-1]].append([])
                                        ndatblk[dataid[-1]] += 1
                                    else:
                                        # Only the last occurrence is kept.
                                        datlist[dataid[-1]] = []
                                        ndatblk[dataid[-1]] = 1
                    if block2id:
                        lblock = len(block2id)
                        iblock = 0
                        while iblock < lblock:
                            if isinstance(blockskp[iblock], str):
                                # Skip until a line starting with the marker.
                                if line.startswith(blockskp[iblock]):
                                    blockskp[iblock] = 0
                            if blockskp[iblock] == 0:
                                if blockfmt[iblock].match(line):
                                    res = blockfmt[iblock].match(line)
                                    if nocc[iblock] > 0:
                                        datlist[dataid[iblock]][-1].append(
                                            res.groupdict()['val'])
                                    else:
                                        datlist[dataid[iblock]].append(
                                            res.groupdict()['val'])
                                if blockend[iblock](line):
                                    res = del_block(iblock, block2id, nocc,
                                                    dataid, blockskp, blockfmt,
                                                    blockend)
                                    lblock -= 1
                                else:
                                    iblock += 1
                            else:
                                if isinstance(blockskp[iblock], int):
                                    if blockskp[iblock] > 0:
                                        blockskp[iblock] -= 1
                                iblock += 1
                    if not keylist:
                        # Everything found: stop reading the file.
                        break
        # Fast Search
        # -----------
        else:
            raise NotImplementedError('Fast search not yet ready')
        return ndatblk, datlist

    def __store_linkpos(self):
        """Stores the link header positions in the file if available.

        Loads the keys present in the file and pointers to their
        position to speed up their search.
        Data type and block information are also stored.

        NOTE(review): the implementation is still commented out, so this
        is currently a no-op and fast_mode never activates.
        """
        link_heads = {
            # 1: 'Entering Gaussian System,',
            1: 'Entering Link 1,',
            601: 'Population analysis using the SCF Density.',
            716: 'Full mass-weighted force constant matrix:',
            717: 'Second-order Perturbative Anharmonic Analysis',
            718: 'Generation of the Franck-Condon spectrum'
        }
        # to_search = re.compile(r'''\
        #     (?P<title>[\w\s]+?)\s* # Key
        #     \b(?P<type>[IRC])\b\s* # Data type
        #     (?P<block>N=)?\s+ # N= only set for non-scalar data
        #     (?P<value>[\d\-\+\.E]+) # Block size (N=) or scalar value
        #     \n''', re.VERBOSE)
        # keys = {}
        # with open(self.filename, 'r') as fobj:
        #     fpos = 0
        #     for line in fobj:
        #         res = to_search.match(line)
        #         if res:
        #             keys[res.group(1)] = (
        #                 res.group(2),
        #                 int(res.group(3) and res.group(4) or 0),
        #                 fpos)
        #         fpos += len(line)
        # return keys
# ================
# Module Functions
# ================
def qlab_to_linkdata(qtag: TypeQTag,
qopt: TypeQOpt = None,
dord: TypeDOrd = None,
| |
# ====================================================
# This module is run on the experimenter's computer.
# It controls almost all aspects of the experiment,
# including the highest level of the code.
#
# Upon startup, the module connects to two remote
# instances of SNAP running the LSE_GameClient module.
# ====================================================
# Panda3d
from direct.task.TaskManagerGlobal import taskMgr
from direct.task import Task
from pandac.PandaModules import Vec3, Vec4, Point3, BitMask32, PNMImage, Camera, NodePath, WindowProperties, GeomVertexReader, ConfigVariableSearchPath, TransparencyAttrib, TransformState, VBase4
#noinspection PyUnresolvedReferences
from panda3d.bullet import BulletTriangleMesh, BulletTriangleMeshShape, BulletRigidBodyNode, BulletHeightfieldShape, BulletWorld, BulletDebugNode, BulletBoxShape, BulletVehicle, ZUp
# SNAP framework
from framework.latentmodule import LatentModule
from framework.ui_elements import ScrollPresenter, TextPresenter, EventWatcher
from framework.ui_elements.WorldspaceGizmos import *
from framework.basicstimuli import BasicStimuli
from framework.eventmarkers.eventmarkers import send_marker
import framework.navigation.navigation as navigation
import framework.tickmodule
import pylsl.pylsl as pylsl
import rpyc
# Python
import random, time, threading, math, traceback, itertools
# =======================
# === MAGIC CONSTANTS ===
# =======================
# NOTE(review): time units for max_duration are not specified here —
# presumably seconds; confirm against the task scheduling code.
server_version = '0.1' # displayed to the experimenter so he/she can keep track of versions
max_duration = 500000 # the maximum feasible duration (practically infinity)
max_agents = 20 # maximum number of simultaneous AI-controlled agents
screen_shuffle = [1,2,3] # the order of the screen indices from left to right (for handedness switch or random permutation)
screen_aspect = 1200/700.0 # aspect ratio that this should run on (note: this is the *client* aspect ratio)
# ========================
# === HELPER FUNCTIONS ===
# ========================
def livecoding(fn):
"""
A decorator that displays exceptions but keeps them from leaking out of a given function. Can be used to halt and
fix (i.e., redeclare) the function at run-time, re-invoke the corrected version, and continue.
"""
def wrapped(*args,**kwargs):
try:
# run the actual function
return fn(*args,**kwargs)
except LatentModule.ModuleCancelled:
# don't hickup if this exception is due to the experimenter cancelling the run
pass
except Exception as e:
# got a regular exception: display it, but eat it
print "Exception " + str(e) + " in " + fn.__name__
try:
send_marker('Experiment Control/Status/Error/%s' % (str(e),))
except:
pass
try:
traceback.print_exc()
except:
print "Traceback failed."
# allow the user to intervene and fix the code
# NOTE: If you get here you can fix fn and re-run it -- once it is fixed replace it by evaluating something like: Main.my_old_broken_function = fn
print "Ignoring / Breakpoint..."
return wrapped
def clamp(x, lo=0.0, hi=1.0):
    """Restrict x to [lo, hi] (upper bound wins if the bounds cross)."""
    bounded_below = max(x, lo)
    return min(bounded_below, hi)
def smoothstep(x, edge0=0.0, edge1=1.0):
    """ Sigmoidal interpolation between two values. """
    # Normalize x into [0, 1] relative to the two edges (clamp inlined),
    # then apply the cubic Hermite polynomial 3t^2 - 2t^3.
    t = min(max((x - edge0) / (edge1 - edge0), 0.0), 1.0)
    return t * t * (3.0 - 2.0 * t)
def rect(tl, br):
    """ Turns a pair of top/left, bottom/right coordinates into a rect (which is left,right,top,bottom). """
    left, top = tl[0], tl[1]
    right, bottom = br[0], br[1]
    return (left, right, top, bottom)
@livecoding
def line_of_sight(physics,           # bullet physics world
                  src_pos,           # position of the source object (the viewer), as Point3
                  dst_pos,           # position of the destination object, as Point3
                  src_dir=None,      # view direction of the source object, as Vec3
                  dst_dir=None,      # view direction of the destination object, as Vec3
                  src_maxsight=50,   # maximum view distance
                  src_fov=90,        # total field of view of the source object (in degrees)
                  dst_fov=90,        # total field of view of the destination object (in degrees); this is for advanced classification of the constellation between both objects
                  src_margin=1.5,    # maximum bounds of the source object's geometry
                  dst_margin=1.5     # maximum bounds of the destination object's geometry
                  ):
    """
    Do a line-of-sight check between a source position and destination position (optionally including view direction(s)).
    This function returns one of the following values:
    * None if there is no line of sight, otherwise a string
    * 'front' if both objects are facing each other
    * 'side' if the source object views the destination object from the side
    * 'behind' if the source object views the destination object from behind
    * 'undetermined' if the source object views the destination object, but the angle of incidence is undetermined (e.g. if both objects have the same position, or if the destination orientation is not known)
    """
    ray = dst_pos - src_pos
    distance = ray.length()
    # NOTE(review): presumably Panda3D's normalize() returns False for a
    # zero-length vector — these checks double as degeneracy guards; confirm.
    if not ray.normalize():
        # source and destination coincide: relation is undetermined
        return "undetermined"
    if (src_dir is not None) and not src_dir.normalize():
        # degenerate source view direction: treat as "cannot see"
        return None
    if dst_dir is not None:
        if not dst_dir.normalize():
            # degenerate destination direction: fall back to unknown facing
            dst_dir = None
    if distance < src_maxsight and distance > 0 and (src_dir is None or abs(src_dir.angleDeg(Vec3(ray))) < src_fov/2):
        # with line-of-sight?
        hittest = physics.rayTestAll(src_pos,dst_pos)
        has_los = True
        for k in range(hittest.getNumHits()):
            hit = hittest.getHit(k)
            # make sure that the hit is not within the bounds of the two objects
            if (hit.getHitFraction() < 1.0) and (hit.getHitFraction()*distance > src_margin) and (abs(distance - hit.getHitFraction()*distance) > dst_margin):
                has_los = False # found a regular world intersection
                break
        if has_los:
            # src has a line-of-sight to dst; classify what type of sighting it is
            if dst_dir is None:
                return "undetermined"
            else:
                # angle between dst's facing and the direction back towards src
                angle = abs(dst_dir.angleDeg(-Vec3(ray)))
                if angle < dst_fov/2:
                    return "front"
                elif angle < 135:
                    return "side"
                else:
                    return "behind"
    else:
        return None
@livecoding
def generate_positions(scenegraph, # the scene graph for which the positions shall be generated. Positions will be relative to the root node.
navmesh=None, # optionally a navmesh on the scene graph to enforce reachability constraints
physics=None, # optionally a bullet physics world to enforce line-of-sight constraints
# placement parameters
objectnames=None, # the names of objects to whose surfaces the points should be constrained
num_positions=1, # the number of positions to generate
# position constraints (lists of points)
reachable_from=None, # optionally a set of positions from which the generated positions shall be reachable
invisible_from=None, # optionally a set of positions from which the generated positions shall be invisible
away_from=None, # optionally a set of positions from which the generated positions should be distanced by at least some radius
nearby_to=None, # optionally a set of positions from which the generated positions should be distanced by at most some radius
within_cone=None, # list of conic constraints (each is a tuple/list of (origin, direction)
# extra parameters
nearby_radius=500, # points may be at most this many meters away from any position in nearby_to (can be a scalar or a 3-tuple of numbers for a scaled ellipsoid range)
away_radius=75, # points have to be at least this many meters away from any position in away_from (can be a scalar or a 3-tuple of numbers for a scaled ellipsoid range)
within_cone_angle = 90, # angle (e.g., fov) of the conic constraints
visibility_params=None, # optional parameters to override in the visibility check
reachability_param='all', # if 'all', the position needs to be reachable from all points in reachable_from, if 'any' it suffices
# if the position is reachable from a single point in reachable_from
nearby_param='all', # if 'all', the position must be within radius for all points in nearby_to, if 'any' it's enough if a single position is within range
within_cone_param='all', # if 'all', the position must be within the cone for all constraints in within_cone, if 'any' it's enough if a single constraint is satisfied
snap_to_navmesh_radius=1, # if there is a discrepancy between scene graph geometry and navmesh, this is the radius (in meters) within which to snap positions to the navmesh
output_coord_sys='panda', # the coordinate system of the output points; can be 'panda', i.e., Point3(x,y,z), or 'detour', yielding [pyrecast.uintp,pyrecast.floatp]
max_retries=3000, # maximum number of retries per position (returns one less if not satisfiable)
snap_to_navmesh=True # whether to snap the positions to the navmesh; note that the NM is a bit coarse in some areas...
):
"""
Generate a list of world-space positions for an existing scene graph that satisfy a number of criteria, such as being reachable from
a collection of points, being invisible from a collection of points, being on the surface of an object with a particular name, or being
within a given radius around a particular object.
"""
if not visibility_params:
visibility_params = {}
# find all scene nodes with the desired name
if not (type(objectnames) is list or type(objectnames) is tuple):
objectnames = [objectnames]
nodes = []
for n in objectnames:
nodes += scenegraph.findAllMatches('**/' + n + '/-GeomNode')
# reformat into lists
if reachable_from is not None and type(reachable_from) is not list and type(reachable_from) is not tuple:
reachable_from = [reachable_from]
if invisible_from is not None | |
import pyredner
import torch
import math
import redner
from typing import Optional
def compute_vertex_normal(vertices: torch.Tensor,
                          indices: torch.Tensor,
                          weighting_scheme: str = 'max'):
    """
    Compute vertex normal by weighted average of nearby face normals.

    Args
    ====
    vertices: torch.Tensor
        3D position of vertices.
        float32 tensor with size num_vertices x 3
    indices: torch.Tensor
        Vertex indices of triangle faces.
        int32 tensor with size num_triangles x 3
    weighting_scheme: str
        How do we compute the weighting. Currently we support two weighting methods:
        'max' and 'cotangent'.
        'max' corresponds to Nelson Max's algorithm that uses the inverse length and sine of the angle as the weight
        (see `Weights for Computing Vertex Normals from Facet Vectors <https://escholarship.org/content/qt7657d8h3/qt7657d8h3.pdf?t=ptt283>`_),
        'cotangent' corresponds to weights derived through a discretization of the gradient of triangle area
        (see, e.g., "Implicit Fairing of Irregular Meshes using Diffusion and Curvature Flow" from Desbrun et al.)

    Returns
    =======
    torch.Tensor
        float32 Tensor with size num_vertices x 3 representing vertex normal
    """
    def dot(v1, v2):
        # Row-wise dot product of two (N, 3) tensors -> (N,)
        return torch.sum(v1 * v2, dim = 1)
    def squared_length(v):
        return torch.sum(v * v, dim = 1)
    def length(v):
        return torch.sqrt(squared_length(v))
    def safe_asin(v):
        # Hack: asin(1)' is infinite, so we want to clamp the contribution
        return torch.asin(v.clamp(0, 1-1e-6))
    # XXX: This whole thing is inefficient but it's PyTorch's limitation
    normals = torch.zeros(vertices.shape, dtype = torch.float32, device = vertices.device)
    # v[k] holds the positions of the k-th corner of every triangle.
    v = [vertices[indices[:, 0].long(), :],
         vertices[indices[:, 1].long(), :],
         vertices[indices[:, 2].long(), :]]
    if weighting_scheme == 'max':
        for i in range(3):
            # Edges of each triangle pivoting at corner i.
            v0 = v[i]
            v1 = v[(i + 1) % 3]
            v2 = v[(i + 2) % 3]
            e1 = v1 - v0
            e2 = v2 - v0
            e1_len = length(e1)
            e2_len = length(e2)
            side_a = e1 / torch.reshape(e1_len, [-1, 1])
            side_b = e2 / torch.reshape(e2_len, [-1, 1])
            if i == 0:
                # The normalized face normal is the same whichever corner
                # is the pivot, so it only needs to be computed once.
                n = torch.cross(side_a, side_b)
                n = torch.where(length(n).reshape(-1, 1).expand(-1, 3) > 0,
                                n / torch.reshape(length(n), [-1, 1]),
                                torch.zeros(n.shape, dtype=n.dtype, device=n.device))
            # numerically stable angle between two unit direction vectors
            # http://www.plunk.org/~hatch/rightway.php
            angle = torch.where(dot(side_a, side_b) < 0,
                                torch.tensor(math.pi) - 2.0 * safe_asin(0.5 * length(side_a + side_b)),
                                2.0 * safe_asin(0.5 * length(side_b - side_a)))
            sin_angle = torch.sin(angle)
            e1e2 = e1_len * e2_len
            # contrib is 0 when e1e2 is 0
            contrib = torch.where(e1e2.reshape(-1, 1).expand(-1, 3) > 0,
                                  n * (sin_angle / e1e2).reshape(-1, 1).expand(-1, 3),
                                  torch.zeros(n.shape, dtype=torch.float32, device=vertices.device))
            index = indices[:, i].long().reshape(-1, 1).expand(-1, 3)
            # Accumulate each triangle's weighted normal onto its i-th vertex.
            normals.scatter_add_(0, index, contrib)
        # Assign 0, 0, 1 to degenerate faces
        degenerate_normals = torch.zeros(normals.shape, dtype = torch.float32, device = vertices.device)
        degenerate_normals[:, 2] = 1.0
        normals = torch.where(length(normals).reshape(-1, 1).expand(-1, 3) > 0,
                              normals / torch.reshape(length(normals), [-1, 1]),
                              degenerate_normals)
    elif weighting_scheme == 'cotangent':
        # Cotangent weighting generates 0-length normal when
        # the local surface is planar. Prepare weighted average normal
        # computed using Nelson Max's algorithm for those cases.
        max_normal = compute_vertex_normal(vertices, indices, 'max')
        for i in range(3):
            # Loop over each pair of edges sharing the same vertex,
            # compute the cotangent and contribute to the third edge.
            v0 = v[i]
            v1 = v[(i + 1) % 3]
            v2 = v[(i + 2) % 3]
            e1 = v1 - v0
            e2 = v2 - v0
            e1_len = length(e1)
            e2_len = length(e2)
            side_a = e1 / torch.reshape(e1_len, [-1, 1])
            side_b = e2 / torch.reshape(e2_len, [-1, 1])
            if i == 0:
                n = torch.cross(side_a, side_b)
                n = torch.where(length(n).reshape(-1, 1).expand(-1, 3) > 0,
                                n / torch.reshape(length(n), [-1, 1]),
                                torch.zeros(n.shape, dtype=n.dtype, device=n.device))
            # numerically stable angle between two unit direction vectors
            # http://www.plunk.org/~hatch/rightway.php
            angle = torch.where(dot(side_a, side_b) < 0,
                                torch.tensor(math.pi) - 2.0 * safe_asin(0.5 * length(side_a + side_b)),
                                2.0 * safe_asin(0.5 * length(side_b - side_a)))
            cotangent = torch.tensor(1.0) / torch.tan(angle)
            v1_index = indices[:, (i + 1) % 3].long().reshape(-1, 1).expand(-1, 3)
            v2_index = indices[:, (i + 2) % 3].long().reshape(-1, 1).expand(-1, 3)
            # The cotangent at the pivot weights the opposite edge,
            # contributing with opposite signs to its two endpoints.
            contrib = (v2 - v1) * cotangent.reshape([-1, 1])
            normals.scatter_add_(0, v1_index, contrib)
            normals.scatter_add_(0, v2_index, -contrib)
        # Make sure the normals are pointing at the right direction
        normals = torch.where(dot(normals, max_normal).reshape(-1, 1).expand(-1, 3) > 0, normals, -normals)
        # Fall back to the 'max' normals where the cotangent normal is too
        # short (near-planar neighborhood).
        normals = torch.where(length(normals).reshape(-1, 1).expand(-1, 3) > 0.05,
                              normals / torch.reshape(length(normals), [-1, 1]),
                              max_normal)
    else:
        assert False, 'Unknown weighting scheme: {}'.format(weighting_scheme)
    assert(torch.isfinite(normals).all())
    return normals.contiguous()
def bound_vertices(vertices: torch.Tensor, indices: torch.Tensor):
    """
    Calculate the indices of boundary vertices of a mesh
    and express it in Tensor form.

    Args
    ====
    vertices: torch.Tensor
        3D position of vertices.
        float32 tensor with size num_vertices x 3
    indices: torch.Tensor
        Vertex indices of triangle faces.
        int32 tensor with size num_triangles x 3

    Returns
    =======
    bound: torch.Tensor
        float32 Tensor with size num_vertices
        bound[i] = 0. if i-th vertex lies on the mesh boundary; else 1.
    """
    # For every corner of every triangle, accumulate the signed difference
    # (next neighbor index - previous neighbor index) onto that corner.
    # Around the closed one-ring of an interior vertex these differences
    # cancel to exactly zero; an open (boundary) ring leaves a nonzero sum.
    num_vertices = vertices.size(0)
    accum = torch.zeros(num_vertices, device=vertices.device)
    for corner in range(3):
        nxt = indices[:, (corner + 2) % 3]
        prv = indices[:, (corner + 1) % 3]
        accum.scatter_add_(0, indices[:, corner].long(), (nxt - prv).float())
    # 1.0 marks interior vertices, 0.0 marks boundary vertices.
    return torch.where(accum == 0,
                       torch.ones(num_vertices, device=vertices.device),
                       torch.zeros(num_vertices, device=vertices.device))
def smooth(vertices: torch.Tensor,
           indices: torch.Tensor,
           lmd: torch.float32,
           weighting_scheme: str = 'reciprocal',
           control: torch.Tensor = None):
    r"""
    Update positions of vertices in a mesh **in place** (the function returns
    None and mutates :param vertices:). The shift amount of a vertex equals
    lmd times the weighted sum of all edges to neighbors:
    $v_i += lmd * \frac {\sum_{j \in neighbors(i)} w_{ij}(v_j - v_i)} {\sum_{j \in neighbors(i)} w_{ij}}$

    Args
    ====
    vertices: torch.Tensor
        3D position of vertices. Modified in place.
        float32 tensor with size num_vertices x 3
    indices: torch.Tensor
        Vertex indices of triangle faces.
        int32 tensor with size num_triangles x 3
    lmd: torch.float32
        step length coefficient
    weighting_scheme: str = 'reciprocal'
        Different weighting schemes:
        'reciprocal': (default)
            w[i][j] = 1 / len(v[j] - v[i])
        'uniform':
            w[i][j] = 1
        'cotangent':
            w[i][j] = cot(angle(i-m-j)) + cot(angle(i-n-j))
            m and n are vertices that form triangles with i and j
    control: torch.Tensor
        extra per-vertex coefficient deciding which vertices to update.
        In the default case, boundary vertices of the mesh are not updated:
        control (default) = bound_vertices(vertices, indices)
        type help(pyredner.bound_vertices)
    """
    if control is None:
        control = bound_vertices(vertices, indices)
    else:
        assert control.numel() == vertices.size(0), 'Size of control tensor inconsistent with number of vertices'

    def dot(v1, v2):
        return torch.sum(v1 * v2, dim=1)
    def squared_length(v):
        return torch.sum(v * v, dim=1)
    def length(v):
        return torch.sqrt(squared_length(v))
    def safe_asin(v):
        # Hack: asin(1)' is infinite, so we want to clamp the contribution
        return torch.asin(v.clamp(0, 1 - 1e-6))

    total_contrib = torch.zeros(vertices.shape, dtype=torch.float32, device=vertices.device)
    total_weight_contrib = torch.zeros(vertices.shape, dtype=torch.float32, device=vertices.device)
    # Per-corner views of the triangle vertices.
    v = [vertices[indices[:, 0].long(), :],
         vertices[indices[:, 1].long(), :],
         vertices[indices[:, 2].long(), :]]
    for i in range(3):
        v0 = v[i]
        v1 = v[(i + 1) % 3]
        v2 = v[(i + 2) % 3]
        e1 = v1 - v0
        e2 = v2 - v0
        e1_len = length(e1)
        e2_len = length(e2)
        # XXX: Inefficient but it's PyTorch's limitation
        e1e2 = e1_len * e2_len
        # contrib is 0 when e1e2 is 0 (degenerate triangle)
        if weighting_scheme == 'reciprocal':
            contrib = torch.where(e1e2.reshape(-1, 1).expand(-1, 3) > 0,
                e1 / e1_len.reshape(-1, 1).expand(-1, 3) +
                e2 / e2_len.reshape(-1, 1).expand(-1, 3),
                torch.zeros(v0.shape, dtype=torch.float32, device=vertices.device))
            weight_contrib = torch.where(e1e2.reshape(-1, 1).expand(-1, 3) > 0,
                torch.tensor(1.) / e1_len.reshape(-1, 1).expand(-1, 3) +
                torch.tensor(1.) / e2_len.reshape(-1, 1).expand(-1, 3),
                torch.zeros(v0.shape, dtype=torch.float32, device=vertices.device))
            index = indices[:, i].long().reshape(-1, 1).expand(-1, 3)
            total_contrib.scatter_add_(0, index, contrib)
            total_weight_contrib.scatter_add_(0, index, weight_contrib)
        elif weighting_scheme == 'uniform':
            contrib = torch.where(e1e2.reshape(-1, 1).expand(-1, 3) > 0,
                e1 + e2,
                torch.zeros(v0.shape, dtype=torch.float32, device=vertices.device))
            weight_contrib = torch.where(e1e2.reshape(-1, 1).expand(-1, 3) > 0,
                2 * torch.ones(v0.shape, dtype=torch.float32, device=vertices.device),
                torch.zeros(v0.shape, dtype=torch.float32, device=vertices.device))
            index = indices[:, i].long().reshape(-1, 1).expand(-1, 3)
            total_contrib.scatter_add_(0, index, contrib)
            total_weight_contrib.scatter_add_(0, index, weight_contrib)
        elif weighting_scheme == 'cotangent':
            side_a = e1 / torch.reshape(e1_len, [-1, 1])
            side_b = e2 / torch.reshape(e2_len, [-1, 1])
            # numerically stable angle between two unit direction vectors
            # http://www.plunk.org/~hatch/rightway.php
            angle = torch.where(dot(side_a, side_b) < 0,
                torch.tensor(math.pi) - 2.0 * safe_asin(0.5 * length(side_a + side_b)),
                2.0 * safe_asin(0.5 * length(side_b - side_a)))
            cotangent = torch.tensor(1.0) / torch.tan(angle)
            # The cotangent at corner v0 weights the opposite edge (v1, v2).
            v1_index = indices[:, (i + 1) % 3].long().reshape(-1, 1).expand(-1, 3)
            v2_index = indices[:, (i + 2) % 3].long().reshape(-1, 1).expand(-1, 3)
            contrib = (v2 - v1) * cotangent.reshape([-1, 1])
            weight_contrib = cotangent.reshape([-1, 1]).expand(-1, 3)
            total_contrib.scatter_add_(0, v1_index, contrib)
            total_contrib.scatter_add_(0, v2_index, -contrib)
            total_weight_contrib.scatter_add_(0, v1_index, weight_contrib)
            total_weight_contrib.scatter_add_(0, v2_index, weight_contrib)
        else:
            assert False, 'Unknown weighting_scheme: {}'.format(weighting_scheme)

    # Only move vertices selected by control (by default: interior vertices).
    # NOTE(review): an isolated vertex has zero accumulated weight and would
    # produce NaN here -- presumed not to occur in valid input meshes; confirm.
    shift = total_contrib / total_weight_contrib * control.reshape(-1, 1)
    vertices.data += shift * lmd
    return
def compute_uvs(vertices, indices, print_progress = True):
"""
Compute | |
from collections import defaultdict
from fnmatch import filter
import warnings
import numpy as np
import openmdao.api as om
from openmdao.utils.general_utils import simple_warning
from .pseudospectral_base import PseudospectralBase
from ..common import RadauPSContinuityComp
from ...utils.misc import get_rate_units, get_source_metadata
from ...utils.introspection import get_targets
from ...utils.indexing import get_src_indices_by_row
from ..grid_data import GridData
class Radau(PseudospectralBase):
"""
Radau Pseudospectral Method Transcription.
Parameters
----------
**kwargs : dict
Dictionary of optional arguments.
References
----------
Garg, Divya et al. "Direct Trajectory Optimization and Costate Estimation of General Optimal
Control Problems Using a Radau Pseudospectral Method." American Institute of Aeronautics
and Astronautics, 2009.
"""
    def __init__(self, **kwargs):
        """
        Initialize the Radau transcription.

        Parameters
        ----------
        **kwargs : dict
            Dictionary of optional arguments passed through to PseudospectralBase.
        """
        super(Radau, self).__init__(**kwargs)
        # In the Radau transcription the ODE is evaluated at all nodes by a
        # single component named 'rhs_all'.
        self._rhs_source = 'rhs_all'
def init_grid(self):
"""
Setup the GridData object for the Transcription.
"""
self.grid_data = GridData(num_segments=self.options['num_segments'],
transcription='radau-ps',
transcription_order=self.options['order'],
segment_ends=self.options['segment_ends'],
compressed=self.options['compressed'])
def configure_time(self, phase):
"""
Configure the inputs/outputs on the time component.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
super(Radau, self).configure_time(phase)
options = phase.time_options
# The tuples here are (name, user_specified_targets, dynamic)
for name, usr_tgts, dynamic in [('time', options['targets'], True),
('time_phase', options['time_phase_targets'], True),
('t_initial', options['t_initial_targets'], False),
('t_duration', options['t_duration_targets'], False)]:
targets = get_targets(phase.rhs_all, name=name, user_targets=usr_tgts)
if targets:
src_idxs = self.grid_data.subset_node_indices['all'] if dynamic else None
phase.connect(name, [f'rhs_all.{t}' for t in targets], src_indices=src_idxs)
def configure_controls(self, phase):
"""
Configure the inputs/outputs for the controls.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
super(Radau, self).configure_controls(phase)
if phase.control_options:
for name, options in phase.control_options.items():
targets = get_targets(ode=phase.rhs_all, name=name,
user_targets=options['targets'])
if targets:
phase.connect(f'control_values:{name}',
[f'rhs_all.{t}' for t in targets])
targets = get_targets(ode=phase.rhs_all, name=f'{name}_rate',
user_targets=options['rate_targets'])
if targets:
phase.connect(f'control_rates:{name}_rate',
[f'rhs_all.{t}' for t in targets])
targets = get_targets(ode=phase.rhs_all, name=f'{name}_rate2',
user_targets=options['rate2_targets'])
if targets:
phase.connect(f'control_rates:{name}_rate2',
[f'rhs_all.{t}' for t in targets])
def configure_polynomial_controls(self, phase):
"""
Configure the inputs/outputs for the polynomial controls.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
super(Radau, self).configure_polynomial_controls(phase)
for name, options in phase.polynomial_control_options.items():
targets = get_targets(ode=phase.rhs_all, name=name, user_targets=options['targets'])
if targets:
phase.connect(f'polynomial_control_values:{name}',
[f'rhs_all.{t}' for t in targets])
targets = get_targets(ode=phase.rhs_all, name=f'{name}_rate',
user_targets=options['rate_targets'])
if targets:
phase.connect(f'polynomial_control_rates:{name}_rate',
[f'rhs_all.{t}' for t in targets])
targets = get_targets(ode=phase.rhs_all, name=f'{name}_rate2',
user_targets=options['rate2_targets'])
if targets:
phase.connect(f'polynomial_control_rates:{name}_rate2',
[f'rhs_all.{t}' for t in targets])
def setup_ode(self, phase):
"""
Setup the ode for this transcription.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
super(Radau, self).setup_ode(phase)
ODEClass = phase.options['ode_class']
grid_data = self.grid_data
kwargs = phase.options['ode_init_kwargs']
phase.add_subsystem('rhs_all',
subsys=ODEClass(num_nodes=grid_data.subset_num_nodes['all'],
**kwargs))
def configure_ode(self, phase):
"""
Create connections to the introspected states.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
super(Radau, self).configure_ode(phase)
grid_data = self.grid_data
map_input_indices_to_disc = grid_data.input_maps['state_input_to_disc']
for name, options in phase.state_options.items():
targets = get_targets(ode=phase.rhs_all, name=name, user_targets=options['targets'])
if targets:
phase.connect('states:{0}'.format(name),
['rhs_all.{0}'.format(tgt) for tgt in targets],
src_indices=om.slicer[map_input_indices_to_disc, ...])
def setup_defects(self, phase):
"""
Create the continuity_comp to house the defects.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
super(Radau, self).setup_defects(phase)
grid_data = self.grid_data
if grid_data.num_segments > 1:
phase.add_subsystem('continuity_comp',
RadauPSContinuityComp(grid_data=grid_data,
state_options=phase.state_options,
control_options=phase.control_options,
time_units=phase.time_options['units']),
promotes_inputs=['t_duration'])
def configure_defects(self, phase):
"""
Configure the continuity_comp and connect the collocation constraints.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
super(Radau, self).configure_defects(phase)
grid_data = self.grid_data
if grid_data.num_segments > 1:
phase.continuity_comp.configure_io()
for name, options in phase.state_options.items():
phase.connect('state_interp.staterate_col:{0}'.format(name),
'collocation_constraint.f_approx:{0}'.format(name))
rate_src = options['rate_source']
if rate_src in phase.parameter_options:
# If the rate source is a parameter, which is an input, we need to promote
# f_computed to the parameter name instead of connecting to it.
shape = phase.parameter_options[rate_src]['shape']
param_size = np.prod(shape)
ncn = self.grid_data.subset_num_nodes['col']
src_idxs = np.tile(np.arange(0, param_size, dtype=int), ncn)
src_idxs = np.reshape(src_idxs, (ncn,) + shape)
phase.promotes('collocation_constraint', inputs=[(f'f_computed:{name}', f'parameters:{rate_src}')],
src_indices=src_idxs, flat_src_indices=True, src_shape=shape)
else:
rate_src_path, src_idxs = self.get_rate_source_path(name, 'col', phase)
phase.connect(rate_src_path,
'collocation_constraint.f_computed:{0}'.format(name),
src_indices=src_idxs)
def configure_path_constraints(self, phase):
"""
Handle the common operations for configuration of the path constraints.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
super(Radau, self).configure_path_constraints(phase)
gd = self.grid_data
for var, options in phase._path_constraints.items():
constraint_kwargs = options.copy()
con_name = constraint_kwargs.pop('constraint_name')
src_idxs = None
flat_src_idxs = False
# Determine the path to the variable which we will be constraining
# This is more complicated for path constraints since, for instance,
# a single state variable has two sources which must be connected to
# the path component.
var_type = phase.classify_var(var)
if var_type == 'time':
src = 'time'
tgt = f'path_constraints.all_values:{con_name}'
elif var_type == 'time_phase':
src = 'time_phase'
tgt = f'path_constraints.all_values:{con_name}'
elif var_type == 'state':
state_shape = phase.state_options[var]['shape']
src_idxs = get_src_indices_by_row(gd.input_maps['state_input_to_disc'], state_shape)
flat_src_idxs = True
src = f'states:{var}'
tgt = f'path_constraints.all_values:{con_name}'
elif var_type == 'indep_control':
src = f'control_values:{var}'
tgt = f'path_constraints.all_values:{con_name}'
elif var_type == 'input_control':
src = 'control_values:{0}'.format(var)
tgt = f'path_constraints.all_values:{con_name}'
elif var_type == 'indep_polynomial_control':
src = f'polynomial_control_values:{var}'
tgt = f'path_constraints.all_values:{con_name}'
elif var_type == 'input_polynomial_control':
src = f'polynomial_control_values:{var}'
tgt = f'path_constraints.all_values:{con_name}'
elif var_type == 'control_rate':
control_name = var[:-5]
src = f'control_rates:{control_name}_rate'
tgt = f'path_constraints.all_values:{con_name}'
elif var_type == 'control_rate2':
control_name = var[:-6]
src = f'control_rates:{control_name}_rate2'
tgt = f'path_constraints.all_values:{con_name}'
elif var_type == 'polynomial_control_rate':
control_name = var[:-5]
src = f'polynomial_control_rates:{control_name}_rate'
tgt = f'path_constraints.all_values:{con_name}'
elif var_type == 'polynomial_control_rate2':
control_name = var[:-6]
src = f'polynomial_control_rates:{control_name}_rate2'
tgt = f'path_constraints.all_values:{con_name}'
else:
# Failed to find variable, assume it is in the ODE
src = f'rhs_all.{var}'
tgt = f'path_constraints.all_values:{con_name}'
phase.connect(src_name=src, tgt_name=tgt,
src_indices=src_idxs, flat_src_indices=flat_src_idxs)
def configure_timeseries_outputs(self, phase):
"""
Create connections from time series to all post-introspection sources.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
gd = self.grid_data
time_units = phase.time_options['units']
for timeseries_name in phase._timeseries:
timeseries_comp = phase._get_subsystem(timeseries_name)
timeseries_comp._add_output_configure('time',
shape=(1,),
units=time_units,
desc='',
src='time')
timeseries_comp._add_output_configure('time_phase',
shape=(1,),
units=time_units,
desc='',
src='time_pahse')
phase.connect(src_name='time', tgt_name=f'{timeseries_name}.input_values:time')
phase.connect(src_name='time_phase', tgt_name=f'{timeseries_name}.input_values:time_phase')
for state_name, options in phase.state_options.items():
added_src = timeseries_comp._add_output_configure(f'states:{state_name}',
shape=options['shape'],
units=options['units'],
desc=options['desc'],
src=f'states:{state_name}')
if added_src:
src_rows = gd.input_maps['state_input_to_disc']
phase.connect(src_name=f'states:{state_name}',
tgt_name=f'{timeseries_name}.input_values:states:{state_name}',
src_indices=om.slicer[src_rows, ...])
rate_src = options['rate_source']
if rate_src in phase.parameter_options:
# If the rate source is a parameter, which is an input, we need to promote
# the state rates input value to the parameter name instead of connecting to it.
nn = self.grid_data.subset_num_nodes['all']
shape = phase.parameter_options[rate_src]['shape']
param_size = np.prod(shape)
src_idxs = np.tile(np.arange(0, param_size, dtype=int), nn)
src_idxs = np.reshape(src_idxs, (nn,) + shape)
rate_src_path = f'parameters:{rate_src}'
added_src = timeseries_comp._add_output_configure(f'state_rates:{state_name}',
shape=options['shape'],
units=get_rate_units(
options['units'],
time_units),
desc=f'rate of state {state_name}',
src=rate_src_path)
if added_src:
phase.promotes(f'{timeseries_name}',
inputs=[(f'input_values:state_rates:{state_name}',
f'parameters:{rate_src}')],
src_indices=src_idxs, flat_src_indices=True, src_shape=shape)
else:
rate_src_path, src_idxs = self.get_rate_source_path(state_name, 'all', phase)
added_src = timeseries_comp._add_output_configure(f'state_rates:{state_name}',
shape=options['shape'],
units=get_rate_units(
options['units'],
time_units),
desc=f'rate of state {state_name}',
src=rate_src_path)
if added_src:
phase.connect(src_name=rate_src_path,
tgt_name=f'{timeseries_name}.input_values:state_rates:{state_name}',
src_indices=src_idxs)
for control_name, options in phase.control_options.items():
control_units = options['units']
src_rows = gd.subset_node_indices['all']
src_idxs = get_src_indices_by_row(src_rows, options['shape'])
added_src = timeseries_comp._add_output_configure(f'controls:{control_name}',
shape=options['shape'],
units=control_units,
desc=options['desc'],
src=f'control_values:{control_name}')
if added_src:
phase.connect(src_name=f'control_values:{control_name}',
tgt_name=f'{timeseries_name}.input_values:controls:{control_name}',
src_indices=src_idxs, flat_src_indices=True)
# Control rates
added_src = timeseries_comp._add_output_configure(f'control_rates:{control_name}_rate',
shape=options['shape'],
units=get_rate_units(control_units,
time_units, deriv=1),
desc=f'first time-derivative of {control_name}',
src=f'control_rates:{control_name}_rate')
if added_src:
phase.connect(src_name=f'control_rates:{control_name}_rate',
tgt_name=f'{timeseries_name}.input_values:control_rates:{control_name}_rate')
# Control second derivatives
added_src = timeseries_comp._add_output_configure(f'control_rates:{control_name}_rate2',
shape=options['shape'],
units=get_rate_units(control_units,
time_units, deriv=2),
desc=f'second time-derivative of {control_name}',
src=f'control_rates:{control_name}_rate2')
if added_src:
phase.connect(src_name=f'control_rates:{control_name}_rate2',
tgt_name=f'{timeseries_name}.input_values:control_rates:{control_name}_rate2')
for control_name, options in phase.polynomial_control_options.items():
src_rows = gd.subset_node_indices['all']
src_idxs = get_src_indices_by_row(src_rows, options['shape'])
control_units = options['units']
# Control values
added_src = timeseries_comp._add_output_configure(f'polynomial_controls:{control_name}',
shape=options['shape'],
units=control_units,
desc=options['desc'],
src=f'polynomial_control_values:{control_name}')
if added_src:
phase.connect(src_name=f'polynomial_control_values:{control_name}',
tgt_name=f'{timeseries_name}.input_values:polynomial_controls:{control_name}',
src_indices=src_idxs, flat_src_indices=True)
# Control rates
added_src = timeseries_comp._add_output_configure(f'polynomial_control_rates:{control_name}_rate',
shape=options['shape'],
units=get_rate_units(control_units,
time_units, deriv=1),
desc=f'first time-derivative of {control_name}',
src=f'polynomial_control_rates:{control_name}_rate')
if added_src:
phase.connect(src_name=f'polynomial_control_rates:{control_name}_rate',
tgt_name=f'{timeseries_name}.input_values:polynomial_control_rates:{control_name}_rate')
# Control second derivatives
added_src = timeseries_comp._add_output_configure(f'polynomial_control_rates:{control_name}_rate2',
shape=options['shape'],
units=get_rate_units(control_units,
time_units, deriv=2),
desc=f'second time-derivative of {control_name}',
src=f'polynomial_control_rates:{control_name}_rate2')
if added_src:
phase.connect(src_name=f'polynomial_control_rates:{control_name}_rate2',
tgt_name=f'{timeseries_name}.input_values:polynomial_control_rates:{control_name}_rate2')
for param_name, options in phase.parameter_options.items():
if options['include_timeseries']:
prom_name = f'parameters:{param_name}'
tgt_name = f'input_values:parameters:{param_name}'
# Add output.
timeseries_comp = phase._get_subsystem(timeseries_name)
added_src = timeseries_comp._add_output_configure(prom_name,
desc='',
shape=options['shape'],
units=options['units'],
src=prom_name)
if added_src:
src_idxs_raw = np.zeros(gd.subset_num_nodes['all'], dtype=int)
src_idxs = get_src_indices_by_row(src_idxs_raw, options['shape'])
phase.promotes(timeseries_name, inputs=[(tgt_name, prom_name)],
src_indices=src_idxs, flat_src_indices=True)
for var, options in phase._timeseries[timeseries_name]['outputs'].items():
output_name = options['output_name']
units = options.get('units', None)
wildcard_units = options.get('wildcard_units', None)
if '*' in var: # match outputs from the ODE
ode_outputs = {opts['prom_name']: opts for (k, opts) in
phase.rhs_all.get_io_metadata(iotypes=('output',)).items()}
matches = filter(list(ode_outputs.keys()), var)
# A nested ODE can have multiple outputs at different levels that share
# the same name.
# If the user does not use the output_name option to add_timeseries_output
# to disambiguate the variables with the same name, only one of the
# variables will be added. This code warns the user if that is happening.
| |
# GNN/composite_graph_class.py
# coding=utf-8
import sys
import numpy as np
import tensorflow as tf
from scipy.sparse import coo_matrix
from GNN.graph_class import GraphObject, GraphTensor
#######################################################################################################################
## COMPOSITE GRAPH OBJECT CLASS #######################################################################################
#######################################################################################################################
class CompositeGraphObject(GraphObject):
    """ Heterogeneous Graph data representation. Composite GNNs are based on this class. """

    ## CONSTRUCTORS METHODs ###########################################################################################
    def __init__(self, nodes, arcs, targets, type_mask, dim_node_label, *args, **kwargs):
        """ CONSTRUCTOR METHOD

        :param nodes: Ordered Nodes Matrix X where nodes[i, :] = [i-th node Label].
        :param arcs: Ordered Arcs Matrix E where arcs[i, :] = [From ID Node | To ID Node | i-th arc Label].
        :param targets: Targets Matrix T with shape (Num of arcs/node targeted example or 1, dim_target example).
        :param type_mask: boolean np.array with shape (Num of nodes, Num of node's types). type_mask[:,i] refers to dim_node_label[i].
        :param dim_node_label: (list/tuple) with len == Num of node's types. i-th element defines label dimension of nodes of type i.
        :param focus: (str) The problem on which graph is used: 'a' arcs-focused, 'g' graph-focused, 'n' node-focused.
        :param set_mask: Array of boolean {0,1} to define arcs/nodes belonging to a set, when dataset == single GraphObject.
        :param output_mask: Array of boolean {0,1} to define the sub-set of arcs/nodes whose target is known.
        :param sample_weight: target sample weight for loss computation. It can be int, float or numpy.array of ints or floats:
            > If int or float, all targets are weighted as sample_weight * ones.
            > If numpy.array, len(sample_weight) and targets.shape[0] must agree.
        :param ArcNode: Sparse matrix of shape (num_of_arcs, num_of_nodes) s.t. A[i,j]=value if arc[i,2]==node[j].
        :param NodeGraph: Sparse matrix in coo format of shape (nodes.shape[0], {Num graphs or 1}) used only when focus=='g'.
        :param aggregation_mode: (str) The aggregation mode for the incoming message based on ArcNode and Adjacency matrices:
            ---> elem(matrix)={0-1};
            > 'average': A'X gives the average of incoming messages, s.t. sum(A[:,i])==1;
            > 'normalized': A'X gives the normalized message wrt the total number of g.nodes, s.t. sum(A)==1;
            > 'sum': A'X gives the total sum of incoming messages, s.t. A={0,1}.
            > 'composite_average': A'X gives the average of incoming messages wrt node's type, s.t. sum(A[:,i])>=1. """
        # type_mask[:,i] refers to nodes with DIM_NODE_LABEL[i] label dimension.
        # Be careful when initializing a new graph!
        self.type_mask = type_mask.astype(bool)

        # type_mask must be set BEFORE calling the parent constructor: the
        # parent initialization relies on self.buildAdjacency (overridden
        # behavior depends on the type mask).
        super().__init__(nodes, arcs, targets, *args, **kwargs)

        # store dimensions: first two columns of arcs contain nodes indices.
        self.DIM_NODE_LABEL = np.array(dim_node_label, ndmin=1, dtype=int)

        # build Composite Adjacency Matrices. It is a list of Adjacency Matrix as long as the number of nodes' types.
        # i-th element corresponds to a composite matrix where only nodes' type 'i' is considered.
        # ADJ[k][i,j]=value if and only if an edge (i,j) exists AND node_type(i) == k.
        self.CompositeAdjacencies = self.buildCompositeAdjacency()

    # -----------------------------------------------------------------------------------------------------------------
    def buildCompositeAdjacency(self):
        """ Build a list ADJ of Composite Aggregated Adjacency Matrices,
        s.t. ADJ[t][i,j]=value if an edge (i,j) exists AND type(i)==k.

        :return: list of sparse Matrices in coo format, for memory efficiency. One for each node's type. """
        composite_adjacencies = [self.Adjacency.copy() for _ in range(len(self.DIM_NODE_LABEL))]

        # set to 0 rows of nodes of incorrect type.
        # NOTE(review): assumes entry k of Adjacency.data corresponds to row k of
        # self.arcs (same ordering) -- confirm against GraphObject.buildAdjacency.
        for t, a in zip(self.type_mask.transpose(), composite_adjacencies):
            not_type_node_mask = np.in1d(self.arcs[:, 0], np.argwhere(t), invert=True)
            a.data[not_type_node_mask] = 0
            a.eliminate_zeros()

        return composite_adjacencies

    # -----------------------------------------------------------------------------------------------------------------
    def buildArcNode(self, aggregation_mode):
        """ Build ArcNode Matrix A of shape (number_of_arcs, number_of_nodes) where A[i,j]=value if arc[i,2]==node[j].
        Compute the matmul(m:=message,A) to get the incoming message on each node, composed of nodes' states and arcs' labels.

        :return: sparse ArcNode Matrix in coo format, for memory efficiency.
        :raise: Error if <aggregation_mode> is not in ['average', 'sum', 'normalized', 'composite_average']."""
        if aggregation_mode not in ['normalized', 'average', 'sum', 'composite_average']: raise ValueError("ERROR: Unknown aggregation mode")

        # initialize matrix. It's useless, just for not having any warning message at the end of the method.
        matrix = None

        # exploit super function.
        if aggregation_mode in ['normalized', 'average', 'sum']:
            matrix = super().buildArcNode(aggregation_mode)

        # composite average node aggregation - incoming message as sum of averaged type-focused neighbors state,
        # e.g. if a node i has 3 neighbors (2 of them belonging to a type k1, the other to a type k2):
        # the message coming from k1's nodes is divided by 2,
        # while the message coming from k2's node is taken as is, being that the only one neighbor belonging to k2.
        elif aggregation_mode == 'composite_average':
            # sum node aggregation - incoming message as sum of neighbors states and labels, then process composite average.
            matrix = super().buildArcNode('sum')

            # set to 0 rows of nodes of incorrect type.
            for t in self.type_mask.transpose():
                if not np.any(t): continue
                type_node_mask = np.in1d(self.arcs[:, 0], np.argwhere(t), invert=False)
                # Group the masked entries by their node column; each entry is
                # divided by the size of its group, so messages from type-t
                # sources are averaged per receiving node rather than summed.
                val, col_index, destination_node_counts = np.unique(matrix.col[type_node_mask], return_inverse=True, return_counts=True)
                matrix.data[type_node_mask] /= destination_node_counts[col_index]

        return matrix

    # -----------------------------------------------------------------------------------------------------------------
    def copy(self):
        """ COPY METHOD

        :return: a Deep Copy of the GraphObject instance. """
        return CompositeGraphObject(arcs=self.getArcs(), nodes=self.getNodes(), targets=self.getTargets(),
                                    set_mask=self.getSetMask(), output_mask=self.getOutputMask(),
                                    sample_weight=self.getSampleWeights(), NodeGraph=self.getNodeGraph(),
                                    aggregation_mode=self.aggregation_mode, dim_node_label=self.DIM_NODE_LABEL,
                                    type_mask=self.getTypeMask())

    ## REPRESENTATION METHODs #########################################################################################
    def __repr__(self):
        """ Representation string of the instance of CompositeGraphObject. """
        return f"composite_{super().__repr__()}"

    ## SETTERS ########################################################################################################
    def setAggregation(self, aggregation_mode: str):
        """ Set ArcNode values for the specified :param aggregation_mode:
        and rebuild the per-type adjacency list accordingly. """
        super().setAggregation(aggregation_mode)
        self.CompositeAdjacencies = self.buildCompositeAdjacency()

    ## GETTERS ########################################################################################################
    # ALL return a deep copy of the corresponding element.
    def getTypeMask(self):
        """ :return: a copy of the boolean (num nodes, num types) type mask. """
        return self.type_mask.copy()

    ## SAVER METHODs ##################################################################################################
    def get_dict_data(self):
        """ Return all useful elements for storing a graph :param g:, in a dict format. """
        data = super().get_dict_data()
        data['type_mask'] = self.type_mask
        data['dim_node_label'] = self.DIM_NODE_LABEL
        return data

    ## CLASS METHODs ### MERGER #######################################################################################
    @classmethod
    def merge(cls, glist, focus: str, aggregation_mode: str, dtype='float32'):
        """ Method to merge a list of CompositeGraphObject elements in a single GraphObject element.

        :param glist: list of CompositeGraphObject elements to be merged.
            > NOTE if focus=='g', new NodeGraph will have dimension (Num nodes, Num graphs).
        :param focus: (str) 'a' arcs-focused, 'g' graph-focused, 'n' node-focused. See __init__ for details.
        :param aggregation_mode: (str) incoming message aggregation mode. See BuildArcNode for details.
        :param dtype: dtype of elements of new arrays after merging procedure.
        :return: a new CompositeGraphObject containing all the information (nodes, arcs, targets, ...) in glist. """
        # get new GraphObject, then convert to CompositeGraphObject.
        # The parent merge runs with 'sum'; the requested aggregation_mode is
        # applied by the CompositeGraphObject constructor below.
        g = super().merge(glist, focus, 'sum', dtype)

        dim_node_label, type_mask = zip(*[(i.DIM_NODE_LABEL, i.getTypeMask()) for i in glist])

        # check if every graph has the same DIM_NODE_LABEL attribute.
        dim_node_label = set(tuple(i) for i in dim_node_label)
        assert len(dim_node_label) == 1, "DIM_NODE_LABEL not unique among graphs in :param glist:"

        # get single matrices for new graph.
        type_mask = np.concatenate(type_mask, axis=0, dtype=bool)

        # resulting CompositeGraphObject.
        return CompositeGraphObject(arcs=g.arcs, nodes=g.nodes, targets=g.targets, type_mask=type_mask,
                                    dim_node_label=dim_node_label.pop(), focus=focus,
                                    set_mask=g.set_mask, output_mask=g.output_mask, sample_weight=g.sample_weight,
                                    NodeGraph=g.NodeGraph, aggregation_mode=aggregation_mode)

    ## CLASS METHODs ### UTILS ########################################################################################
    @classmethod
    def fromGraphTensor(cls, g, focus: str):
        """ Create CompositeGraphObject from CompositeGraphTensor.

        :param g: a CompositeGraphTensor element to be translated into a CompositeGraphObject element.
        :param focus: (str) 'n' node-focused; 'a' arc-focused; 'g' graph-focused. See __init__ for details.
        :return: a CompositeGraphObject element whose tensor representation is g.
        """
        # NodeGraph is only meaningful for graph-focused problems.
        nodegraph = coo_matrix((g.NodeGraph.values, tf.transpose(g.NodeGraph.indices))) if focus == 'g' else None
        return cls(arcs=g.arcs.numpy(), nodes=g.nodes.numpy(), targets=g.targets.numpy(),
                   dim_node_label=g.DIM_NODE_LABEL.numpy(), type_mask=g.type_mask, set_mask=g.set_mask.numpy(),
                   output_mask=g.output_mask.numpy(), sample_weight=g.sample_weight.numpy(), NodeGraph=nodegraph,
                   aggregation_mode=g.aggregation_mode, focus=focus)
#######################################################################################################################
## COMPOSITE GRAPH TENSOR CLASS #######################################################################################
#######################################################################################################################
class CompositeGraphTensor(GraphTensor):
""" Tensor version of a CompositeGraphObject. Useful to speed up learning processes. """
## CONSTRUCTORS METHODs ###########################################################################################
    def __init__(self, *args, type_mask, CompositeAdjacencies, **kwargs):
        """ It contains all information to be passed to GNN model,
        but described with tensorflow dense/sparse tensors.

        :param type_mask: boolean mask stored as a tf constant (presumably one row per node
            marking its type — confirm against CompositeGraphObject).
        :param CompositeAdjacencies: iterable of tf.SparseTensor values, re-wrapped here;
            presumably one adjacency matrix per node type — confirm.
        Remaining positional/keyword arguments are forwarded to GraphTensor.__init__. """
        super().__init__(*args, **kwargs)
        # constant tensors + sparse tensors.
        self.type_mask = tf.constant(type_mask, dtype=bool)
        self.CompositeAdjacencies = [tf.sparse.SparseTensor.from_value(i) for i in CompositeAdjacencies]
# -----------------------------------------------------------------------------------------------------------------
def copy(self):
""" COPY METHOD
:return: a Deep Copy of the CompositeGraphTensor instance. """
return CompositeGraphTensor(nodes=self.nodes, dim_node_label=self.DIM_NODE_LABEL, arcs=self.arcs,
targets=self.targets, set_mask=self.set_mask, output_mask=self.output_mask,
sample_weight=self.sample_weight, Adjacency=self.Adjacency, ArcNode=self.ArcNode,
NodeGraph=self.NodeGraph, aggregation_mode=self.aggregation_mode,
type_mask=self.type_mask, CompositeAdjacencies=self.CompositeAdjacencies)
## REPRESENTATION METHODs #########################################################################################
def __repr__(self):
""" Representation string for the instance of CompositeGraphTensor. """
return f"composite_{super().__repr__()}"
## STATIC METHODs ### SAVER #######################################################################################
@staticmethod
def save_graph(graph_path: str, g, compressed: bool = False, **kwargs) -> None:
""" Save a graph in a .npz compressed/uncompressed archive.
:param graph_npz_path: path where a single .npz file will be stored, for saving the graph.
:param g: graph of type GraphObject to be saved.
:param compressed: bool, if True graph will be stored in a compressed npz file, npz uncompressed otherwise.
:param kwargs: kwargs | |
'google.cm',
'google.co.bw',
'google.co.cr',
'google.co.ke',
'google.co.ls',
'google.co.mz',
'google.co.tz',
'google.co.ug',
'google.co.uz',
'google.co.zm',
'google.co.zw',
'google.cv',
'google.dj',
'google.dm',
'google.ee',
'google.ga',
'google.ge',
'google.gg',
'google.gl',
'google.gm',
'google.gp',
'google.gy',
'google.hn',
'google.ht',
'google.im',
'google.in',
'google.io',
'google.iq',
'google.is',
'google.je',
'google.jo',
'google.jp',
'google.kg',
'google.la',
'google.li',
'google.lu',
'google.lv',
'google.md',
'google.me',
'google.mg',
'google.mk',
'google.ml',
'google.mn',
'google.mu',
'google.mv',
'google.mw',
'google.ne',
'google.ng',
'google.org',
'google.ps',
'google.rw',
'google.sc',
'google.si',
'google.sm',
'google.sn',
'google.so',
'google.sr',
'google.st',
'google.td',
'google.tg',
'google.tk',
'google.tl',
'google.tm',
'google.tn',
'google.to',
'google.tt',
'google.tw',
'google.vg',
'google.ws',
'hao123.cn',
'imdb.cn',
'imdb.in.net',
'imdb.yt',
'instagram.fr',
'instagram.org.kz',
'instagram.ru',
'jd.co.th',
'jd.hk',
'jd.id',
'jd.ru',
'linkedin.cn',
'linkedin.github.io',
'linkedin.ie',
'live.ci',
'live.digital',
'live.ge',
'live.is',
'live.ne',
'login.az',
'login.bio',
'login.com.br',
'login.gov',
'login.gov.il',
'login.gov.mo',
'login.gov.pl',
'login.gs',
'login.hu',
'login.org',
'login.squarespace.com',
'mail.3dcartstores.com',
'mail.az',
'mail.be',
'mail.bg',
'mail.ch',
'mail.cloudaccess.net',
'mail.codes',
'mail.com.tr',
'mail.de',
'mail.edu.az',
'mail.edu.tw',
'mail.ee',
'mail.fr',
'mail.go.th',
'mail.gouv.sn',
'mail.gov.af',
'mail.gov.az',
'mail.gov.in',
'mail.gov.me',
'mail.gov.mg',
'mail.gov.ua',
'mail.kz',
'mail.mil',
'mail.nic.in',
'mail.pt',
'mail.uol.com.br',
'microsoft.co.il',
'microsoft.github.io',
'microsoft.net',
'msn.cn',
'msn.de',
'msn.it',
'msn.net',
'msn.se',
'naver.me',
'naver.net',
'office.info.pl',
'paypal.github.io',
'paypal.it',
'paypal.me',
'pinterest.at',
'pinterest.ch',
'pinterest.cl',
'pinterest.co.kr',
'pinterest.dk',
'pinterest.ie',
'pinterest.it',
'pinterest.nz',
'pinterest.ph',
'pinterest.pt',
'pinterest.se',
'pixnet.cc',
'pixnet.io',
'pixnet.systems',
'pixnet.tw',
'pixnet.work',
'qq.by',
'qq.co.kr',
'qq.com.tr',
'reddit.guide',
'salesforce.org',
'sina.cn',
'sina.com.hk',
'sina.com.tw',
'sina.lt',
'sina.net',
'stackoverflow.blog',
't.edu.pk',
't.ks.ua',
't.mk',
't.tools',
'taobao.cn',
'taobao.ge',
'taobao.net',
'taobao.org',
'taobao.ua',
'tmall.com.pl',
'tmall.hk',
'tokopedia.id',
'twitch.center',
'twitter.com.br',
'twitter.github.io',
'twitter.it',
'whatsapp.net',
'wikipedia.de',
'wikipedia.it',
'wikipedia.ma',
'wikipedia.nl',
'wikipedia.nom.al',
'wikipedia.nom.cl',
'wikipedia.nom.si',
'wikipedia.readthedocs.io',
'wikipedia.ru',
'wordpress.co.kr',
'wordpress.net',
'wordpress.tv',
'wordpress.video.hu',
'yahoo.co.uk',
'yahoo.net',
'yandex.az',
'yandex.com.ge',
'yandex.ee',
'yandex.fr',
'yandex.md',
'yandex.net.cn',
'yandex.org.kz',
'yandex.tm',
'yandex.uk.com',
'yandex.uz',
'youtube.be',
'youtube.co.kr',
'youtube.nl',
'1stbk.com',
'1stfed.com',
'2mm.com',
'aab.de',
'aba.ad',
'abanka.si',
'abb.com.lb',
'abbey.com',
'abc.com.jo',
'abnamro.cl',
'abnamro.dk',
'abnamro.se',
'abocn.com',
'absa.co.za',
'acba.am',
'accbank.ie',
'adb.org',
'aeb.am',
'aebank.com',
'aekthun.ch',
'ahli.com',
'aib.ie',
'aibiom.com',
'aibny.com',
'ajb.tel.hr',
'ajib.com',
'akb.ch',
'aktia.fi',
'aku.lt',
'alahli.com',
'alal.ru',
'albank.com',
'allbank.de',
'alpha.gr',
'amb.co.za',
'amb.kz',
'ambkct.com',
'ambro.it',
'amcore.com',
'amibank.ru',
'anb.com',
'anb.com.sa',
'anbnet.com',
'anelik.am',
'anhyp.be',
'anz.co.jp',
'anz.com',
'anz.com.au',
'aob.ag',
'aob.hr',
'apbank.com',
'apobank.de',
'arab.net',
'arcanet.it',
'arcc.or.ke',
'arctic.net',
'armdb.com',
'arsenal.ru',
'arvest.com',
'asb.am',
'ashib.am',
'ask.at',
'atb.com',
'ate.gr',
'aval.dp.ua',
'aval.lg.ua',
'ba-ca.com',
'ba-ca.pl',
'baca.sk',
'bacob.be',
'banco7.pt',
'banesto.es',
'banif.pt',
'bank24.de',
'bank.lv',
'banka.hr',
'bankev.com',
'bankov.com',
'bankri.com',
'banksf.com',
'bankwi.com',
'barcap.com',
'bas.com.br',
'basl.sk',
'bawag.com',
'bay.co.th',
'bbandt.com',
'bbbank.de',
'bbk.es',
'bbl.be',
'bbl.co.th',
'bbr.ro',
'bbsas.no',
'bbt.com',
'bbv.es',
'bbva.es',
'bca.co.id',
'bcb.gov.bo',
'bcb.gov.br',
'bcc.cd',
'bccr.fi.cr',
'bce.fin.ec',
'bceao.int',
'bcee.lu',
'bch.hn',
'bci.ao',
'bci.cl',
'bci.it',
'bci.nc',
'bcibank.ca',
'bcie.hn',
'bcl.lu',
'bcn.gob.ni',
'bconce.cl',
'bcp.com.pe',
'bcp.gov.py',
'bcp.pt',
'bcr.gob.sv',
'bcr.ro',
'bct.gov.tn',
'bcu.gub.uy',
'bcv.ch',
'bcv.org.ve',
'bcz.com.pl',
'bdc.ca',
'bde.es',
'bdf.com.ni',
'bdk.pl',
'bdl.dz',
'bdl.gov.lb',
'bdn.co.id',
'bec.ch',
'bec.com.br',
'becu.org',
'bekbnet.ch',
'ben.com.pl',
'bes.pt',
'besa.ao',
'bfa.ao',
'bfcc.fr',
'bfg.de',
'bfg.fr',
'bgk.com.pl',
'bgl.lu',
'bh.com.pl',
'bhbauer.de',
'bhe.ch',
'bhif.cl',
'bhu.net',
'bhw.de',
'bibank.com',
'bibm.ad',
'bic.com.co',
'bic.pt',
'bice.cl',
'bicis.sn',
'big.com.pl',
'bil.lu',
'bipan.com',
'birel.ro',
'bis.org',
'bise.pl',
'biv.com.ve',
'bkam.ma',
'bkb-br.com',
'bkbank.com',
'bkm.de',
'bkme.com',
'bkme.net',
'bks.at',
'blb.de',
'blf.com.lb',
'blx.com',
'bma.bm',
'bma.gov.bh',
'bmb.co.th',
'bmi.gob.sv',
'bmwbank.de',
'bna.ao',
'bna.com.ar',
'bnb.be',
'bnb.bg',
'bnb.com.bo',
'bnb.gov.br',
'bnbank.com',
'bnbank.no',
'bnc.ca',
'bncr.fi.cr',
'bng.nl',
'bni.co.id',
'bnl.com.ar',
'bnl.it',
'bnm.gov.my',
'bnm.org',
'bnp.ch',
'bnp.com.ar',
'bnp.fr',
'bnro.ro',
'bnt.com',
'bnu.com.mo',
'bnu.pt',
'bnz.co.nz',
'boc.cn',
'bof.fi',
'bofm.com',
'bofp.com',
'bog.gov.gh',
'boh.cl',
'boh.com',
'boh.nc',
'boi.ie',
'boj.com.jo',
'boj.or.jp',
'boj.org.jm',
'bok.or.kr',
'bokf.com',
'bomcm.com',
'bon.com.na',
'boq.com.au',
'borel.com',
'bos.pol.pl',
'bot-tz.org',
'bot.com.tw',
'bot.or.th',
'botc.com',
'bov.com',
'bowc.com',
'boy.co.jp',
'boz.zm',
'bpa.ad',
'bpam.it',
'bpb.it',
'bpba.com',
'bpc.ao',
'bpel.it',
'bph.pl',
'bpi.com.ph',
'bpn.com.ar',
'bpnord.fr',
'bpv.it',
'brb.com.lb',
'brd.ro',
'bred.fr',
'brunet.bn',
'bsa.ad',
'bsa.cl',
'bsch.es',
'bsct.ch',
'bsi.ch',
'bsi.si',
'bsk.com.pl',
'bsn.com.my',
'bsnb.com',
'bsp.gov.ph',
'bsvnet.com',
'bta.am',
'bta.pt',
'btm.co.jp',
'btr.ro',
'btrl.ro',
'bulbank.bg',
'busey.com',
'bvcc.com',
'bve.ch',
'bve.li',
'bw-bank.de',
'byd.top.pl',
'calfed.com',
'calyon.com',
'capfed.com',
'carime.it',
'cariplo.it',
'cashbox.de',
'cba.am',
'cbc.gov.tw',
'cbd.co.ae',
'cbe.be',
'cbg.gm',
'cbj.gov.jo',
'cbk.co.kr',
'cbk.gov.kw',
'cbnk.com',
'cbnv.com',
'cbny.com',
'cbq.com.qa',
'cbr.ru',
'cbs.gov.ws',
'cbsl.lk',
'cbtco.com',
'cbtks.com',
'cbtn.com',
'cbu.uz',
'cc-bank.de',
'ccb.ai',
'ccb.com',
'ccbanc.com',
'ccbg.com',
'ccbt.com',
'ccf.com.lb',
'ccf.fr',
'ccow.com',
'cdm.co.ma',
'cdn.fr',
'ceca.es',
'cef.gov.br',
'cfa.com.ar',
'cfb.com',
'cfb.com.hk',
'cfbx.com',
'cfs1.com',
'cgd.pt',
'chas.ru',
'chat.ru',
'chb.co.kr',
'ci.re.ru',
'cibc.com',
'cibeg.com',
'cisf.pt',
'citynb.com',
'citynj.com',
'ckbanka.hr',
'clf.fr',
'cmb.mc',
'cmbnv.com',
'cnb-ok.com',
'cnb.com',
'cnb.cz',
'cnbtxk.com',
'cnma.dz',
'consors.de',
'conto.ru',
'coopcb.com',
'cpb.net',
'cpbank.com',
'cpbi.com',
'cpp.pt',
'cpr.fr',
'crcarpi.it',
'creberg.it',
'crimola.it',
'crup.it',
'csbchx.com',
'csbi.com',
'csbpal.com',
'csbtc.com',
'csbth.com',
'csbtx.com',
'csfb.com',
'csob.cz',
'csob.sk',
'ctbbank.de',
'ctbi.com',
'ctbnk.com',
'cua.com.au',
'cwbank.com',
'db-nm.si',
'db.com',
'dbs.com.sg',
'dbsa.org',
'dbtc.ch',
'dbz.co.zm',
'dbz.hr',
'dcbank.com',
'dcbl.com',
'ddb.dk',
'deanza.com',
'depfa.de',
'dexia.com',
'dfsc.com',
'dgb.com',
'dgbank.de',
'diel.it',
'diraba.de',
'dirs.com',
'djem.com',
'dkb.co.jp',
'dkbca.com',
'dnb.nl',
'dnb.no',
'dnib.com',
'domus.us',
'dopb.sk',
'dta.de',
'dwinc.com',
'dzbank.de',
'eab.com',
'ebil.co.ae',
'ebrd.com',
'ebs.ie',
'ebsb.com',
'ebtc.com',
'ecb.eu',
'ecb.int',
'ecitic.com',
'eeib.ru',
'efsb.com',
'egs.com.tr',
'eib.org',
'enbpb.com',
'eqsb.com',
'esb.ee',
'esva.net',
'etb.com.ng',
'etba.gr',
'ethniki.gr',
'evb.ee',
'evbank.com',
'exim.go.th',
'exim.gov',
'eyp.ee',
'factorb.si',
'fanb.com',
'fbandt.com',
'fbbh.com',
'fbic.com',
'fbnmb.com',
'fboc.com',
'fcb.co.jp',
'fcbcf.com',
'fcbi.com',
'fcbinc.com',
'fcbok.com',
'fcbsc.com',
'fcnb.com',
'fctc.com',
'febtc.com',
'fedone.com',
'ffb1.com',
'ffb.com',
'ffbh.com',
'fibanc.es',
'fibank.bg',
'fibi.co.il',
'ficohsa.hn',
'fidfed.com',
'fitd.it',
'fjsb.com',
'fleet.com',
'fmb.net',
'bancoamazonia.com.br',
'bradesconetempresa.b.br',
'bradesconetempresa.com.br',
'santandernet.com.br',
'grupobci.com.br',
'hsbc.com.br',
'fmbank.com',
'fnb-sf.com',
'fnb-tc.com',
'fnb.net',
'fnbada.com',
'fnbb.com',
'fnbba.com',
'fnbbh.com',
'fnbfs.com',
'fnbgar.com',
'fnbimk.com',
'fnblg.com',
'fnbm.com',
'fnbmag.com',
'fnbmbg.com',
'fnbmc.com',
'fnbmd.com',
'fnbmwc.com',
'fnbn.com',
'fnbnc.com',
'fnbnd.com',
'fnbneg.com',
'fnbnet.com',
'fnbnet.net',
'fnbnow.com',
'fnboa.com',
'fnbrf.com',
'fnbsf.com',
'fnbt.com',
'fnbtc.com',
'fnbweb.com',
'fncb.com',
'fnfg.com',
'fokus.no',
'forex.ee',
'fortis.com',
'frbchi.org',
'frbsf.org',
'friba.nl',
'frs-l.com',
'fsbfsb.com',
'fsbme.com',
'fsbnh.com',
'fsbot.com',
'fsbw.com',
'fsla.com',
'fsnh.com',
'ftb.com',
'ftbni.com',
'fuib.com',
'fult.com',
'fvnb.com',
'fwbi.com',
'gbank.be',
'gbg.com.pl',
'gbkr.si',
'gdvs.com',
'gemala.com',
'gfbank.com',
'ghb.co.th',
'giza.co.il',
'glsb.com',
'grafic.com',
'grbank.gl',
'grsb.com',
'gsb.or.th',
'gsha.com',
'gtb.com.ng',
'guh.de',
'guta.ru',
'gwf.com',
'hambank.de',
'hanmo.com',
'hansa.ee',
'hba.gr',
'hbl.lv',
'hcb.co.kr',
'hdb.co.uk',
'hebros.bg',
'hei.com',
'helaba.de',
'hermis.lt',
'hgb.hr',
'hhb.com.my',
'hib.li',
'himb.co.kr',
'hkbc.com',
'hkbea.com',
'hlb.com.my',
'hnb.hr',
'hnb.net',
'hsbc.am',
'hsbc.co.uk',
'hsbc.com',
'iadb.org',
'iba.com.hk',
'ibat.com',
'ibc.com',
'ibk.co.kr',
'ibw.com.ni',
'icbny.com',
'icc.ie',
'icon.co.za',
'ics.sm',
'idbi.com',
'idc.co.za',
'ieb.hu',
'ifor.com',
'ifsb.com',
'igb.co.il',
'ikb.de',
'imbplc.com',
'imul.com',
'ing.com',
'ingbank.bg',
'ingbank.nl',
'inkom.ru',
'innet.net',
'iob.com',
'ippa.lu',
'isbank.is',
'isbt.com',
'isdb.org',
'itb.am',
'itbank.com',
'itiltd.com',
'iworld.net',
'ixe.com.mx',
'jak.se',
'jbpb.com',
'jcbank.com',
'jetco.com',
'jncb.com',
'jpbank.se',
'jri.co.jp',
'jsbf.com',
'jtnb.com',
'kb.com.mk',
'kbc.com',
'kbl.lu',
'kbstar.com',
'kbz.hr',
'kc.frb.org',
'kcb.co.ke',
'kdb.co.kr',
'keb.co.kr',
'key.com',
'keyb-t.com',
'keyfin.com',
'kfb.co.kr',
'kfh.com',
'kfw.de',
'kibs.org',
'kimb.co.kr',
'kit.kz',
'kmbc.co.kr',
'koi.aha.ru',
'kookmin.lu',
'kpbank.ru',
'ksklb.de',
'ktb.co.th',
'kutxa.es',
'kuwait.net',
'labank.com',
'lacaixa.es',
'lanb.com',
'land.lv',
'lanka.net',
'lanta.ru',
'lateko.lv',
'lb-sbv.si',
'lbank.lt',
'lbb.de',
'lcbank.com',
'lehman.com',
'leonia.fi',
'leu.com',
'lgbank.de',
'lgt.com',
'lhb.de',
'licb.com',
'llb.li',
'lmbnig.com',
'lnbank.com',
'lnbky.com',
'lrp.de',
'lsb.dk',
'ltb.tdd.lt',
'ltcb.co.jp',
'luba.sk',
'm-banka.si',
'maffei.de',
'mas.gov.sg',
'mba.co.jp',
'mbanx.com',
'mbca.co.zw',
'mbczh.ch',
'mbna.com',
'mbxxs.ru',
'mcb.com.pk',
'mcbank.com',
'mcbf.com',
'mcc.it',
'mchbank.ru',
'mdfed.com',
'mdm.ru',
'mdm.spb.ru',
'meda.it',
'medbank.lt',
'meib.com',
'mellon.com',
'merita.ee',
'merita.fi',
'mfb.hu',
'mfbank.com',
'mfcb.com',
'mhbs.co.uk',
'mibank.com',
'mkb.hu',
'mkn.co.uk',
'mmbank.com',
'mmbank.ru',
'mnb1.com',
'mnb.hu',
'mnb.lv',
'mol.mn',
'most.ru',
'mps.it',
'mpt.com.mk',
'msdx.com',
'msfc.net',
'msnb.com',
'mwbi.com',
'mybank.com',
'mynycb.com',
'n-lb.si',
'nabard.org',
'nacf.co.kr',
'natbk.com',
'natixis.fr',
'nba.ai',
'nba.az',
'nbad.co.ae',
'nbb.be',
'nbcal.com',
'nbcok.com',
'nbctkb.it',
'nbd.co.ae',
'nbd.com',
'nbe.com.eg',
'nbf.co.ae',
'nbg.gov.ge',
'nbg.gr',
'nbg.ro',
'nbk.com',
'nbne.com',
'nbnz.co.nz',
'nbo.co.om',
'nbp.com.pk',
'nbp.pl',
'nbrb.by',
'nbs.co.za',
'nbs.sk',
'nbscnj.com',
'ncb.com',
'ncb.com.sa',
'ncbt.com',
'ncsecu.org',
'ndb.org',
'ndbt.com',
'nebat.com',
'netbank.de',
'netoil.ru',
'netway.at',
'nf-bank.de',
'nhb.ru',
'nimb.com',
'nisc.net',
'nkbm.si',
'nmb.ru',
'nmbt.com',
'nor24.no',
'nor.no',
'nordea.com',
'nordea.dk',
'nordlb.de',
'noroco.com',
'noumea.com',
'novit.no',
'nrb.co.za',
'nrb.org.np',
'ntb.co.th',
'ntrs.com',
'nvbank.com',
'nvtb.ru',
'ny.frb.org',
'nypbt.com',
'nz.anz.com',
'nzco.com',
'nzmbank.ru',
'o-n-b.com',
'ocsb.com',
'oeb.se',
'oekb.co.at',
'oenb.at',
'ohra.nl',
'okb.co.jp',
'olb.de',
'ols.net',
'optiva.ee',
'otpbank.hu',
'oub.com.sg',
'oxbc.com',
'pabk.sk',
'parex.lv',
'pb100.com',
'pbc.gov.cn',
'pbg.pl',
'pbk.pl',
'pbko.sk',
'pbnk.com',
'pbs.si',
'pbz.hr',
'pcib.com',
'penfed.org',
'pfnb.com',
'pgbk.com',
'pibank.com',
'pictet.com',
'pkb.sk',
'pkobp.pl',
'pnm.de',
'pobank.com',
'polbox.com',
'pomi.fi',
'popso.it',
'promdei.hr',
'ptbank.com',
'qcb.gov.qa',
'qcsb.com',
'qnb.com',
'rba.gov.au',
'rba.tel.hr',
'rbankt.com',
'rbi.org.in',
'rbm.mw',
'rbmi.com',
'rbos.co.uk',
'rbs.co.at',
'rbs.co.uk',
'rbs.com',
'rbtt.com',
'rbv.gov.vu',
'rbz.co.zw',
'rcbank.com',
'rhbank.com',
'rietumu.lv',
'rjbank.com',
'rmb.co.za',
'rmbank.com',
'rnb.ch',
'rnbmia.com',
'robeco.fr',
'robeco.lu',
'rosbank.ru',
'rpfb.ru',
'rrsb.com',
'rusgen.ru',
'rzb.at',
'sabb.com',
'salin.com',
'sampath.lk',
'sb.co.za',
'sbbank.com',
'sbbt.com',
'sbhc.com',
'sbi.co.in',
'sbic.co.za',
'sbil.co.uk',
'sbmct.com',
'sbp.org.pk',
'sbrf.ru',
'sbsu.com',
'sbtc.com',
'sbtr.com',
'scb-bc.com',
'scb.co.th',
'scb.net',
'schwab.com',
'scib.co.th',
'scmb.co.za',
'scnb.com',
'scsbank.ch',
'sdccu.com',
'sdnb.com',
'sebank.se',
'secbd.org',
'sella.it',
'sgkb.ch',
'shb.com.sa',
'shbank.com',
'sibank.hr',
'sibfair.ru',
'sjbank.com',
'slbo.hr',
'slsp.sk',
'smbank.com',
'smbc.co.jp',
'snoras.com',
'soba.ch',
'socgen.com',
'socgen.de',
'spabe.ch',
'spar.is',
'sparda.de',
'spron.is',
'sry.com',
'ssbank.com',
'ssbnd.com',
'ssbt.com',
'ssbwa.com',
'ssfcu.org',
'sskba.de',
'ssnb.com',
'start.it',
'stb.com.mk',
'svb.com',
'svpant.se',
'syb.com.tr',
'szkb.ch',
't-i-b.com',
'taib.com',
'tavrich.ru',
'tcac.com',
'tcbank.ru',
'tcbk.com',
'tdbank.ca',
'tdbank.com',
'teche.com',
'tfb.co.th',
'tfnb.com',
'tg.com.pl',
'tgkb.ch',
'the1st.com',
'thefsb.com',
'tiac.net',
'tib.com.ye',
'tinet.ch',
'tokai.com',
'tsb.co.uk',
'tsbbank.ie',
'turtas.lt',
'txbank.com',
'ubaplc.com',
'ubat.com',
'ubdc.com',
'ubfs.com',
'uboc.com',
'ucbh.com',
'ucbi.com',
'ucpb.com',
'unb.co.ae',
'unexim.ru',
'unibank.dk',
'unibank.lu',
'union.cz',
'uob.com.sg',
'upbeat.com',
'urkb.ch',
'usbank.com',
'usit.net',
'utb.ru',
'val.it',
'valcea.ro',
'var.no',
'vbhan.de',
'vibank.com',
'visa.co.at',
'visa.com',
'vmbs.com',
'volfed.com',
'vpbank.com',
'wamu.com',
'wbk.com.pl',
'wbpr.com',
'wegelin.ch',
'westlb.de',
'whbhk.com',
'wnsb.com',
'wvweb.com',
'yanb.com',
'ycbank.com',
'ykb.com.tr',
'zenit.ru',
'zhkb.ch',
'zugerkb.ch',
'1natbanker.com',
'1st-lincolnwood.com',
'1st-of-pryor.com',
'1stamericanbank.com',
'1stbank.com',
'1stcapbank.com',
'1stcapitalbk.com',
'1stfederalbank.com',
'1stfedvw.com',
'1stfidelity.com',
'1stnationalbank.com',
'1stofminden.com',
'1stsource.com',
'1stsummit.com',
'333pine.com',
'356bank.com',
'4thebank.com',
'82bank.co.jp',
'aachener-bank.de',
'aacreditunion.org',
'abanker.com',
'abbeynational.co.uk',
'abbybank.com',
'abcbancorp.com',
'abchina.com',
'abifinancial.com',
'abnamro.com',
'abnamro.com.pk',
'abramsbank.com',
'absolutbank.com',
'abtbank.com',
'acbonline.com',
'accessanb.com',
'acommunitybk.com',
'adamsbank.com',
'adbvrm.org.vn',
'adcbindia.com',
'adcbuae.com',
'adelaidebank.com.au',
'adirondacktrust.com',
'advance-bank.de',
'advancebank.com',
'advancefinancial.com',
'advantanb.com',
'affinbank.com.my',
'afirme.com.mx',
'afribank.net',
'africa.barclays.com',
'ag-banka.sk',
'agrolink.moa.my',
'aibgroup.com',
'aigprivatebank.com',
'ajsmithbank.com',
'akb-bank.de',
'akbank.com.tr',
'akibabank.com',
'akkobank.ru',
'aktienbank.de',
'alandsbanken.fi',
'albaraka-bank.com',
'alexbank.com',
'algerie.bnpparibas.com',
'allahabadbank.com',
'allbank.com',
'allfinanz.co.za',
'alliance-leicester.co.uk',
'alliancebank.com',
'alliancebank.com.my',
'alliantcreditunion.org',
'allied.co.za',
'allonge.com.ua',
'allstates.com.ng',
'almawarid.com.lb',
'alpinebank.com',
'alrajhibank.com.sa',
'alrajhitrading.com.sa',
'altabank.com',
'alwatany.com.eg',
'amagerbanken.dk',
'amarbank.com',
'ambankonline.com',
'ambg.com.my',
'amcm.gov.mo',
'amercombank.com',
'americafirst.com',
'american-bank.com',
'americanbankandtrust.com',
'americanfederalbank.com',
'americanmerchant.com',
'americansavingsbank.com',
'americanstate.com',
'americenbank.com',
'americom-bank.com',
'amexbank.de',
'ampbanking.com.au',
'amsouth.com',
'amsterdamsb.com',
'amtrust.com',
'anbchicago.com',
'anbcorp.com',
'anbfinancial.com',
'anchorbank.com',
'andbanc.com',
'andhratoday.com',
'andinet.lat.net',
'anet.donetsk.ua',
'ankerbank.ch',
'antonveneta.it',
'anytimebank.com',
'apacbank.com.tw',
'apsbank.com.mt',
'arabank.com',
'arabbank.com',
'arabbanking.com',
'arabbanking.com.dz',
'arapturkbank.com.tr',
'arcadian-invest.com',
'areximbank.am',
'argentaria.es',
'argonet.com',
'arknatl.com',
'arkobank.com',
'arminvestbank.am',
'arquired.es',
'artsakhbank.am',
'arubabank.com',
'asahibank.co.jp',
'asbbank.co.nz',
| |
target update period.
"""
# Run a train op at the rate of self.update_period if enough training steps
# have been run. This matches the Nature DQN behaviour.
if self._replay.memory.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
if 'iqn' in self._runtype:
self._sess.run(self._train_op)
else:
q_sup = None; a_sup = None
pv, pa = None, None
a_origin, Ea = None, None
if 'rainbow' in self._runtype or 'c51' in self._runtype:
_, loss, prob, targ, states, allQ = self._sess.run(self._train_op)
if self.training_steps % 100000 == 0:
print (np.sum(prob, -1))
print (loss[0])
tmp = [targ[0], prob[0], allQ[0]]
if pv is not None:
tmp.extend(pv)
tmp.extend(self.v_sup_)
#print (self.training_steps, states.shape, prob.shape, pv.shape, pa.shape, allQ.shape)
if (self.summary_writer is not None and
self.training_steps > 0 and
self.training_steps % self.summary_writing_frequency == 0):
summary = self._sess.run(self._merged_summaries)
self.summary_writer.add_summary(summary, self.training_steps)
if self.training_steps % self.target_update_period == 0:
self._sess.run(self._sync_qt_ops)
self.training_steps += 1
def _select_action(self):
"""Select an action from the set of available actions.
Chooses an action randomly with probability self._calculate_epsilon(), and
otherwise acts greedily according to the current Q-value estimates.
Returns:
int, the selected action.
"""
if self.eval_mode:
epsilon = self.epsilon_eval
else:
epsilon = self.epsilon_fn(
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_train)
# Choose the action with highest Q-value at the current state.
#q_argmax, p = self._sess.run([self._q_argmax, self._net_outputs.probabilities], {self.state_ph: self.state})
if self.testing:
#print (self.subcontrol)
if 'iqn' not in self._runtype:
q_argmax, v_ = self._sess.run([self._q_argmax, self._net_outputs.probabilities], {self.state_ph: self.state})
v_ = v_[0, q_argmax, :]
self.vis['v'].append(v_)
else:
q_argmax = self._sess.run(self._q_argmax, {self.state_ph: self.state})
else:
q_argmax = self._sess.run(self._q_argmax, {self.state_ph: self.state})
if random.random() <= epsilon:
# Choose a random action with probability epsilon.
return random.randint(0, self.num_actions - 1)
else:
return q_argmax
def _build_target_distribution(self, q_support=None):
"""Builds the C51 target distribution as per Bellemare et al. (2017).
First, we compute the support of the Bellman target, r + gamma Z'. Where Z'
is the support of the next state distribution:
* Evenly spaced in [-vmax, vmax] if the current state is nonterminal;
* 0 otherwise (duplicated num_atoms times).
Second, we compute the next-state probabilities, corresponding to the action
with highest expected value.
Finally we project the Bellman target (support + probabilities) onto the
original support.
Returns:
target_distribution: tf.tensor, the target distribution from the replay.
"""
if q_support is not None:
_support = q_support
batch_size = self._replay.batch_size
# size of rewards: batch_size x 1
rewards = self._replay.rewards[:, None]
# size of tiled_support: batch_size x num_atoms
#tiled_support = tf.tile(_support, [batch_size])
#tiled_support = tf.reshape(tiled_support, [batch_size, self._num_atoms])
tiled_support = _support
# size of target_support: batch_size x num_atoms
is_terminal_multiplier = 1. - tf.cast(self._replay.terminals, tf.float32)
# Incorporate terminal state to discount factor.
# size of gamma_with_terminal: batch_size x 1
gamma_with_terminal = self.cumulative_gamma * is_terminal_multiplier
gamma_with_terminal = gamma_with_terminal[:, None]
target_support = rewards + gamma_with_terminal * tiled_support
# size of next_qt_argmax: 1 x batch_size
next_qt_argmax = tf.argmax(
self._replay_next_target_net_outputs.q_values, axis=1)[:, None]
batch_indices = tf.range(tf.to_int64(batch_size))[:, None]
# size of next_qt_argmax: batch_size x 2
batch_indexed_next_qt_argmax = tf.concat(
[batch_indices, next_qt_argmax], axis=1)
# size of next_probabilities: batch_size x num_atoms
next_probabilities = tf.gather_nd(
self._replay_next_target_net_outputs.probabilities,
batch_indexed_next_qt_argmax)
return project_distribution_1(target_support, next_probabilities,
_support)
else:
_support = self._support
batch_size = self._replay.batch_size
# size of rewards: batch_size x 1
rewards = self._replay.rewards[:, None]
# size of tiled_support: batch_size x num_atoms
tiled_support = tf.tile(_support, [batch_size])
tiled_support = tf.reshape(tiled_support, [batch_size, self._num_atoms])
# size of target_support: batch_size x num_atoms
is_terminal_multiplier = 1. - tf.cast(self._replay.terminals, tf.float32)
# Incorporate terminal state to discount factor.
# size of gamma_with_terminal: batch_size x 1
gamma_with_terminal = self.cumulative_gamma * is_terminal_multiplier
gamma_with_terminal = gamma_with_terminal[:, None]
target_support = rewards + gamma_with_terminal * tiled_support
# size of next_qt_argmax: 1 x batch_size
next_qt_argmax = tf.argmax(
self._replay_next_target_net_outputs.q_values, axis=1)[:, None]
batch_indices = tf.range(tf.to_int64(batch_size))[:, None]
# size of next_qt_argmax: batch_size x 2
batch_indexed_next_qt_argmax = tf.concat(
[batch_indices, next_qt_argmax], axis=1)
# size of next_probabilities: batch_size x num_atoms
next_probabilities = tf.gather_nd(
self._replay_next_target_net_outputs.probabilities,
batch_indexed_next_qt_argmax)
return project_distribution(target_support, next_probabilities,
_support)
def _build_train_op(self):
"""Builds a training op.
Returns:
train_op: An op performing one step of training from replay data.
"""
target_distribution = tf.stop_gradient(self._build_target_distribution(self._replay_net_outputs.q_support))
# size of indices: batch_size x 1.
indices = tf.range(tf.shape(self._replay_net_outputs.probabilities)[0])[:, None]
# size of reshaped_actions: batch_size x 2.
print ("replay_action.shape, ", self._replay.actions.shape)
reshaped_actions = tf.concat([indices, self._replay.actions[:, None]], 1)
# For each element of the batch, fetch the logits for its selected action.
chosen_action_probabilities = tf.gather_nd(self._replay_net_outputs.probabilities,
reshaped_actions)
print ("----------------------------------------------------------")
print (self._replay_net_outputs.probabilities.shape, reshaped_actions.shape, chosen_action_probabilities.shape)
all_action_probabilities = self._replay_net_outputs.probabilities
cross_entropy = -1 * target_distribution * tf.log(chosen_action_probabilities + 1e-8)
loss = tf.reduce_sum(cross_entropy, axis=-1)
original_loss = loss
#loss = tf.reduce_mean(loss, axis=-1)
print (">>>>>>>>>>>>>>loss-prob:", loss.shape)
print (self._replay_net_outputs.a)
if self._replay_net_outputs.a is not None:
chosen_pa = tf.gather_nd(self._replay_net_outputs.a, reshaped_actions)
pa = self._replay_net_outputs.a
'''
# size of indices: batch_size x 1.
indices = tf.range(tf.shape(self._replay_net_outputs.logits)[0])[:, None]
# size of reshaped_actions: batch_size x 2.
reshaped_actions = tf.concat([indices, self._replay.actions[:, None]], 1)
# For each element of the batch, fetch the logits for its selected action.
chosen_action_logits = tf.gather_nd(self._replay_net_outputs.logits,
reshaped_actions)
loss1 = tf.nn.softmax_cross_entropy_with_logits(
labels=target_distribution,
logits=chosen_action_logits)
print (">>>>>>>>>>>>>>loss-logits:", loss1.shape)
'''
if self._replay_scheme == 'prioritized':
# The original prioritized experience replay uses a linear exponent
# schedule 0.4 -> 1.0. Comparing the schedule to a fixed exponent of 0.5
# on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders) suggested
# a fixed exponent actually performs better, except on Pong.
probs = self._replay.transition['sampling_probabilities']
loss_weights = 1.0 / tf.sqrt(probs + 1e-10)
loss_weights /= tf.reduce_max(loss_weights)
# Rainbow and prioritized replay are parametrized by an exponent alpha,
# but in both cases it is set to 0.5 - for simplicity's sake we leave it
# as is here, using the more direct tf.sqrt(). Taking the square root
# "makes sense", as we are dealing with a squared loss.
# Add a small nonzero value to the loss to avoid 0 priority items. While
# technically this may be okay, setting all items to 0 priority will cause
# troubles, and also result in 1.0 / 0.0 = NaN correction terms.
update_priorities_op = self._replay.tf_set_priority(
self._replay.indices, tf.sqrt(loss + 1e-10))
# Weight the loss by the inverse priorities.
loss = loss_weights * loss
else:
update_priorities_op = tf.no_op()
with tf.control_dependencies([update_priorities_op]):
if self.summary_writer is not None:
with tf.variable_scope('Losses'):
tf.summary.scalar('CrossEntropyLoss', tf.reduce_mean(loss))
# Schaul et al. reports a slightly different rule, where 1/N is also
# exponentiated by beta. Not doing so seems more reasonable, and did not
# impact performance in our experiments.
var = tf.trainable_variables()
print ("all trainable var ----------------------", var)
return self.optimizer.minimize(tf.reduce_mean(loss)), loss, chosen_action_probabilities,\
target_distribution, self._replay.states, all_action_probabilities
def _store_transition(self,
last_observation,
action,
reward,
is_terminal,
priority=None):
"""Stores a transition when in training mode.
Executes a tf session and executes replay buffer ops in order to store the
following tuple in the replay buffer (last_observation, action, reward,
is_terminal, priority).
Args:
last_observation: Last observation, type determined via observation_type
parameter in the replay_memory constructor.
action: An integer, the action taken.
reward: A float, the reward.
is_terminal: Boolean indicating if the current state is a terminal state.
priority: Float. Priority of sampling the transition. If None, the default
priority will be used. If replay scheme is uniform, the default priority
is 1. If the replay scheme is prioritized, the default priority is the
maximum ever seen [Schaul et al., 2015].
"""
if priority is None:
if self._replay_scheme == 'uniform':
priority = 1.
else:
priority = self._replay.memory.sum_tree.max_recorded_priority
if not self.eval_mode:
self._replay.add(last_observation, action, reward, is_terminal, priority)
def project_distribution(supports, weights, target_support,
validate_args=False):
"""Projects a batch of (support, weights) onto target_support.
Based on equation (7) in (Bellemare et al., 2017):
https://arxiv.org/abs/1707.06887
In the rest of the comments we will refer to this equation simply as Eq7.
This code is not easy to digest, so we will use a running example to clarify
what is going on, with the following sample inputs:
* supports = [[0, 2, 4, 6, 8],
[1, 3, 4, 5, 6]]
* weights = [[0.1, 0.6, 0.1, 0.1, 0.1],
[0.1, 0.2, 0.5, 0.1, 0.1]]
* target_support = [4, 5, 6, 7, 8]
In the code below, comments preceded with 'Ex:' will be referencing the above
values.
Args:
supports: Tensor of shape (batch_size, num_dims) defining supports for the
distribution.
weights: Tensor of shape (batch_size, num_dims) defining weights on the
original | |
# Author: <NAME>, <EMAIL>
import numpy as np
import pandas as pd
import random
import math
import matplotlib.pyplot as plt
# Load the housing dataset: the 'Class' column is the label, the first 13
# columns are the features used for splitting.
dataset = pd.read_csv('housing3.csv')
Y, X = dataset.Class, dataset.iloc[:, 0:13]
col_names = list(dataset.columns)
# Distinct label values, in order of first appearance; used as the class index space.
unique_class = list(Y.unique())
# Raw numpy views of the features and labels.
data_values, class_values = X.values, Y.values
def split_data(X, Y, train_size=400):
    """Split features and labels into leading train and trailing test parts.

    The original code hard-coded the split index 400 while documenting an
    "80% / 20%" split; the index is now a parameter (default 400 preserves
    the previous behaviour, ~80/20 for the 506-row housing data).

    Args:
        X: indexable feature rows (e.g. numpy array or list).
        Y: indexable labels, same length as X.
        train_size: number of leading rows assigned to the training set.

    Returns:
        (X_train, X_test, Y_train, Y_test).
    """
    return X[:train_size], X[train_size:], Y[:train_size], Y[train_size:]
def get_classes(labels, class_list=None):
    """Map each label to its integer index in the class list.

    Replaces the original O(len(labels) * len(classes)) nested scan with a
    dict lookup. Labels absent from the class list are silently skipped,
    mirroring the original behaviour (the result may then be shorter than
    `labels`).

    Args:
        labels: iterable of class labels.
        class_list: ordered collection defining the label -> index mapping;
            defaults to the module-level `unique_class` (backward compatible).

    Returns:
        np.ndarray of integer class indices.
    """
    if class_list is None:
        class_list = unique_class
    index_of = {cls: i for i, cls in enumerate(class_list)}
    return np.array([index_of[v] for v in labels if v in index_of])
# Build the train/test splits and encode the labels as integer class indices.
X_train, X_test, Y_train, Y_test = split_data(data_values, class_values)
Y_train, Y_test = get_classes(Y_train), get_classes(Y_test)
class Node:
    """A node of a binary decision tree.

    An internal node holds a split: samples with x[column_idx] <= boundary go
    left, the rest go right. A leaf (see define_leaf) holds a class prediction
    instead.
    """

    def __init__(self, boundary, column_idx):
        self.right = None         # subtree for x[column_idx] > boundary
        self.left = None          # subtree for x[column_idx] <= boundary
        self.prediction = None    # class returned when this node is a leaf
        self.node_type = 'Node'   # 'Node' (internal) or 'Leaf'
        self.boundary = boundary
        self.column_idx = column_idx

    def build_left(self, left):
        """Attach the left subtree."""
        self.left = left

    def build_right(self, right):
        """Attach the right subtree."""
        self.right = right

    def define_leaf(self, prediction):
        """Turn this node into a leaf and store its prediction."""
        self.node_type = 'Leaf'
        self.prediction = prediction

    def predict_single(self, x):
        """Predict the class of a single data point by descending the tree."""
        if self.node_type != 'Node':
            return self.prediction
        # Both comparisons kept separate on purpose: a value that satisfies
        # neither (e.g. NaN) falls through and returns None, as before.
        if x[self.column_idx] <= self.boundary:
            return self.left.predict_single(x)
        if x[self.column_idx] > self.boundary:
            return self.right.predict_single(x)

    def predict(self, all_data):
        """Return a list with one prediction per data point in all_data."""
        return [self.predict_single(x) for x in all_data]
class Tree:
    """Binary (two-class, labels 0/1) decision tree split on the Gini index.

    `rand` is a random.Random used to break majority ties,
    `get_candidate_columns(x_data, rand)` selects which columns a split may
    use, and `min_samples` is the node size below which a node becomes a leaf.
    """

    def __init__(self, rand, get_candidate_columns, min_samples):
        self.rand = rand
        self.get_candidate_columns = get_candidate_columns
        self.min_samples = min_samples

    def build_tree(self, x_data, y_data, row_position, begin_classes):
        """Builds a tree and returns a model as an object. It uses Gini Index for splitting"""
        # get the splitting boundary, column index for splitting and gini index;
        # consider only columns chosen by the get_candidate_columns function
        splitting_boundary, column_idx, gini = \
            self.get_optimal_column_and_value(x_data, y_data, self.get_candidate_columns(x_data, self.rand))
        # make sure it is numpy array
        x_data, y_data = np.array(x_data), np.array(y_data)
        # separate data points by the splitting boundary
        left_idx, right_idx = self.seperate_data(x_data, column_idx, splitting_boundary)
        # save splitting boundary and column into a node
        current_node = Node(splitting_boundary, column_idx)
        # original-row indexes of data points on the left
        end_class_left = row_position[left_idx[0]]
        # if there is only one class in the left node, it becomes a leaf
        only_one_class_left, left_prediction = self.find_majority_class(begin_classes[end_class_left])
        # original-row indexes of data points on the right
        end_class_right = row_position[right_idx[0]]
        # if there is only one class in the right node, it becomes a leaf
        only_one_class_right, right_prediction = self.find_majority_class(begin_classes[end_class_right])
        # make the left child a leaf if:
        # 1) it holds fewer data points than min_samples (but is not empty)
        # OR 2) it holds only one class
        if ((len(left_idx[0]) < self.min_samples) and (len(left_idx[0]) > 0)) or only_one_class_left:
            left_node = Node(None, None)
            left_node.define_leaf(left_prediction)
            current_node.build_left(left_node)
        # otherwise recurse into the left partition
        else:
            current_node.build_left(
                self.build_tree(x_data[left_idx[0]], y_data[left_idx[0]], row_position[left_idx[0]], begin_classes))
        # make the right child a leaf if:
        # 1) it holds fewer data points than min_samples (but is not empty)
        # OR 2) it holds only one class
        if ((len(right_idx[0]) < self.min_samples) and (len(right_idx[0]) > 0)) or only_one_class_right:
            right_node = Node(None, None)
            right_node.define_leaf(right_prediction)
            current_node.build_right(right_node)
        # otherwise recurse into the right partition
        else:
            current_node.build_right(
                self.build_tree(x_data[right_idx[0]], y_data[right_idx[0]], row_position[right_idx[0]], begin_classes))
        return current_node

    def build(self, x_data, y_data):
        """Builds a tree and satisfies unit test"""
        return self.build_tree(x_data, y_data, np.array(list(range(len(x_data)))), y_data)

    def get_column_values_and_set(self, data, col):
        """For each data point, returns its values and middle points for boundary decisions"""
        column, boundary_values = list(), list()
        for i in range(len(data)):
            column.append(data[i][col])
        boundary_values_tmp = sorted(set(column))
        # midpoints of consecutive unique values are the candidate boundaries;
        # iterate to len-1 instead of silencing the IndexError with a bare except
        for j in range(len(boundary_values_tmp) - 1):
            boundary_values.append((boundary_values_tmp[j + 1] + boundary_values_tmp[j]) / 2)
        return column, sorted(boundary_values)

    def get_gini_index(self, column, boundary, classes):
        """returns gini index for selected column and boundary (binary classes 0/1)"""
        freq_c1, freq_c2, freq_c3, freq_c4 = 0, 0, 0, 0
        row_number_c1, row_number_c2 = 0, 0
        for i, elem in enumerate(column):
            if elem <= boundary:
                row_number_c1 += 1
                if classes[i] == 0:
                    freq_c1 += 1
                else:
                    freq_c3 += 1
            else:
                row_number_c2 += 1
                if classes[i] == 1:
                    freq_c2 += 1
                else:
                    freq_c4 += 1
        if row_number_c1 == 0:
            g1 = 0
        else:
            p1 = freq_c1 / row_number_c1
            p2 = freq_c3 / row_number_c1
            g1 = p1 * (1-p1) + p2 * (1 - p2)
        if row_number_c2 == 0:
            g2 = 0
        else:
            p3 = freq_c2 / row_number_c2
            p4 = freq_c4 / row_number_c2
            g2 = p3 * (1-p3) + p4 * (1-p4)
        # weighted average of the two sides' impurities
        return (row_number_c1 * g1 + row_number_c2 * g2) / (row_number_c1 + row_number_c2)

    def get_optimal_split(self, column, column_set, data_y):
        """Returns the boundary (and its gini index) that splits this column best"""
        gini_index_g = 2  # sentinel larger than any possible gini (max is 1)
        col_set = [0]
        for position, bound in enumerate(column_set):
            gini_index_l = self.get_gini_index(column, bound, data_y)
            if gini_index_l < gini_index_g:
                gini_index_g = gini_index_l
                position_g = position
                col_set = column_set[position_g]
        return col_set, gini_index_g

    def get_optimal_column_and_value(self, data_x, data_y, column_indexes):
        """Searches for the optimal column and boundary that splits the data best"""
        global_boundary = 0
        global_gini_index = 2
        position = -1
        for i in range(len(data_x[1])):
            if i in column_indexes:
                column_i, column_set_i = self.get_column_values_and_set(data_x, i)
                parameter_position, gini_index = self.get_optimal_split(column_i, column_set_i, data_y)
                if gini_index <= global_gini_index:
                    global_gini_index = gini_index
                    global_boundary = parameter_position
                    position = i
        return global_boundary, position, global_gini_index

    def get_row_index(self, x, y, list_boolean):
        """Returns the rows of x/y selected by a 0/1 mask"""
        lst_x, lst_y = list(), list()
        y = list(y)
        if len(x) == 0:
            return [], []
        for j, elem in enumerate(list_boolean):
            # bug fix: compare each mask element; the original compared the
            # whole list to 1 (`if list_boolean == 1`), so nothing was selected
            if elem == 1:
                lst_x.append(x[j])
                lst_y.append(y[j])
        return lst_x, lst_y

    def seperate_data(self, data, column_idx, boundary):
        """Separates data to the left and right side by the selected boundary"""
        some = data[:, column_idx] <= boundary
        other = data[:, column_idx] > boundary
        left_dataset = np.where(some)
        right_dataset = np.where(other)
        return left_dataset, right_dataset

    def find_majority_class(self, lst):
        """Returns (only_one_class, majority class). Ties are broken randomly"""
        first_class, second_class = 0, 0
        for elem in lst:
            if elem == 0:
                first_class += 1
            elif elem == 1:
                second_class += 1
        if first_class == 0:
            return True, 1
        elif second_class == 0:
            return True, 0
        elif first_class > second_class:
            return False, 0
        elif second_class > first_class:
            return False, 1
        else:
            return False, self.rand.choice(lst)
class EndModel:
    """Ensemble wrapper: majority-votes the predictions of several trees."""

    def __init__(self, list_of_trees):
        self.list_of_trees = list_of_trees

    def predict(self, x_data):
        """Predicts the most common class"""
        votes = [tree.predict(x_data) for tree in self.list_of_trees]
        return self.find_majority_classes(votes)

    def predict_k(self, x_data, k):
        """Predicts the most common class for first k < len(list_of_trees) trees"""
        votes = []
        for position, tree in enumerate(self.list_of_trees):
            if position >= k:
                break
            votes.append(tree.predict(x_data))
        return self.find_majority_classes(votes)

    def find_majority_classes(self, classes_list):
        """Finds the most common class in list of classes"""
        n_samples = len(classes_list[0])
        n_models = len(classes_list)
        totals = [0] * n_samples
        for votes in classes_list:
            for pos in range(n_samples):
                totals[pos] += votes[pos]
        # average vote rounded to the nearest class (0 or 1)
        return [int(round(total / n_models)) for total in totals]
class Bagging:
    """Bootstrap aggregation: trains `n` trees, each on a resampled dataset."""

    def __init__(self, rand, tree_builder, n):
        self.rand = rand
        self.tree_builder = tree_builder
        self.n = n

    def build(self, x_data, y_data):
        """Builds trees and returns an object"""
        trained_trees = []
        for _ in range(self.n):
            sample_x, sample_y = self.resample_data(x_data, y_data)
            trained_trees.append(self.tree_builder.build(sample_x, sample_y))
        return EndModel(trained_trees)

    def resample_data(self, X_data, Y_data):
        """Resample data with replacement"""
        n_sample = round(len(X_data))
        rows_x, rows_y = [], []
        for _ in range(n_sample):
            pick = self.rand.randrange(len(X_data))
            rows_x.append(X_data[pick].tolist())
            rows_y.append(Y_data[pick].tolist())
        return np.array(rows_x), np.array(rows_y)
class RandomForest:
    """Random forest: bagging over trees that split on a random sqrt-sized column subset."""

    def __init__(self, rand, n, min_samples):
        self.rand = rand
        self.n = n
        self.min_samples = min_samples

    def build(self, x_data, y_data):
        """Train `n` randomized trees via bagging and return the ensemble."""
        base_tree = Tree(rand=self.rand,
                         get_candidate_columns=self.get_random_feature,
                         min_samples=self.min_samples)
        bagger = Bagging(rand=self.rand, tree_builder=base_tree, n=self.n)
        return bagger.build(x_data, y_data)

    def get_random_feature(self, x_data, rnd):
        """Pick a sorted random subset of ~sqrt(#columns) column indexes."""
        n_columns = len(x_data[0])
        subset_size = round(math.sqrt(n_columns))
        return sorted(rnd.sample(range(n_columns), subset_size))
def misclassification_rate_tree(predicted_class_list, true_class_list):
    """Fraction of predictions differing from the true classes, rounded to 4 decimals.

    Args:
        predicted_class_list: predicted class labels.
        true_class_list: ground-truth labels, aligned with the predictions.

    Returns:
        The misclassification rate; 0.0 for empty input (the original raised
        ZeroDivisionError on an empty prediction list).
    """
    if not predicted_class_list:  # guard the empty-input edge case
        return 0.0
    wrong = sum(1 for pred, act in zip(predicted_class_list, true_class_list)
                if pred != act)
    return round(wrong / len(predicted_class_list), 4)
def return_same_idx(X, rand):
    """Candidate-column getter that keeps every column (no random subsampling)."""
    column_count = len(X[0])
    return [column for column in range(column_count)]
def hw_tree_full(train, test):
    """Calculates misclassification rates for tree"""
    print('---CLASSIFICATION TREE---')
    features_train, labels_train = train
    features_test, labels_test = test
    print('Constructing classification tree!')
    # full tree (all columns available at every split), fixed seed for ties
    model = Tree(rand=random.Random(1), get_candidate_columns=return_same_idx, min_samples=2)
    print('Building a model')
    fitted = model.build(features_train, labels_train)
    print('Predicting train classes')
    predicted_train = fitted.predict(features_train)
    print('Predicted! ... Predicting test classes')
    predicted_test = fitted.predict(features_test)
    error_train = misclassification_rate_tree(predicted_train, list(labels_train))
    error_test = misclassification_rate_tree(predicted_test, list(labels_test))
    print('(Train error, Test error) = ')
    return error_train, error_test
def split_index(x_data, k):
"""Splits data into k folds"""
folds = list()
indexes = list(range(len(x_data)))
for j in range(k):
fold = random.Random(0).sample(indexes, round(len(x_data) | |
# gh_stars: 1-10
#%%
# --- Environment and data setup ---------------------------------------------
import os
import sys
os.chdir(os.path.dirname(os.getcwd())) # make directory one step up the current directory
# CATMAID credentials live in a local, uncommitted module.
from pymaid_creds import url, name, password, token
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)  # global CATMAID connection used by the pymaid calls below
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import networkx as nx
import connectome_tools.process_matrix as pm
import connectome_tools.process_graph as pg
import connectome_tools.cascade_analysis as casc
import connectome_tools.celltype as ct
import connectome_tools.cluster_analysis as clust
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
# Load the axo-dendritic ('ad') adjacency for brain + accessory neurons and the
# thresholded paired edge list; build a pair-collapsed graph from those edges.
adj = pm.Promat.pull_adj('ad', subgraph='brain and accessory')
ad_edges = pd.read_csv('data/edges_threshold/ad_all-paired-edges.csv', index_col=0)
graph = pg.Analyze_Nx_G(ad_edges, split_pairs=False)
pairs = pm.Promat.get_pairs()  # left/right skeleton-id pairings (has leftid/rightid columns)
dVNCs = pymaid.get_skids_by_annotation('mw dVNC')  # descending neurons to the VNC
# %%
# connection probability between ipsi/bilateral/contra
# Resolve pair ids (left side) for every output class and the A1 ascendings.
dVNC_pairs = pm.Promat.load_pairs_from_annotation('dVNCs', pairs, return_type='all_pair_ids_bothsides', skids=dVNCs, use_skids=True)
dSEZ_pairs = pm.Promat.load_pairs_from_annotation('mw dSEZ', pairs, return_type='all_pair_ids_bothsides')
RGN_pairs = pm.Promat.load_pairs_from_annotation('mw RGN', pairs, return_type='all_pair_ids_bothsides')
ascendings_all = ct.Celltype_Analyzer.get_skids_from_meta_annotation('mw A1 ascending')
asc_pairs = pm.Promat.load_pairs_from_annotation('ascendings', pairs, return_type='all_pair_ids_bothsides', skids=ascendings_all, use_skids=True)
# brain neurons that are not outputs: clustered brain neurons minus all output types
non_outputs_brain = np.intersect1d(pymaid.get_skids_by_annotation('mw brain paper clustered neurons'), pymaid.get_skids_by_annotation('mw brain neurons'))
non_outputs_brain = np.setdiff1d(non_outputs_brain, ct.Celltype_Analyzer.get_skids_from_meta_annotation('mw brain outputs'))
non_outputs_brain_pairs = pm.Promat.load_pairs_from_annotation('non-outputs', pairs, return_type='all_pair_ids_bothsides', skids=non_outputs_brain, use_skids=True)
# ascendings/dVNCs to outputs and ascendings
data_adj = ad_edges.set_index(['upstream_pair_id', 'downstream_pair_id'])
celltypes_pre = [list(asc_pairs.leftid), list(dVNC_pairs.leftid)]
celltypes_post = [list(dVNC_pairs.leftid), list(dSEZ_pairs.leftid), list(RGN_pairs.leftid), list(non_outputs_brain_pairs.leftid), list(asc_pairs.leftid)]
# connection probability = fraction of (pre, post) combinations with an edge in the pair graph
mat = np.zeros(shape=(len(celltypes_pre), len(celltypes_post)))
for i, pair_type1 in enumerate(celltypes_pre):
    for j, pair_type2 in enumerate(celltypes_post):
        connection = []
        for skid1 in pair_type1:
            for skid2 in pair_type2:
                if((skid1, skid2) in graph.G.edges): connection.append(1)
                if((skid1, skid2) not in graph.G.edges): connection.append(0)
        mat[i, j] = sum(connection)/len(connection)
df = pd.DataFrame(mat, columns = ['dVNC', 'dSEZ', 'RGN', 'brain-non-outputs', 'A1-ascending'],
                    index = ['A1-ascending', 'dVNC'])
fig, ax = plt.subplots(1,1, figsize=(2,2))
sns.heatmap(df, square=True, cmap='Blues', vmax=0.007)
plt.savefig(f'VNC_interaction/plots/connection-probability_brain-outputs_ascendings.pdf', format='pdf', bbox_inches='tight')
# ascendings to brain
# Connection probability from A1 ascendings onto every standard brain celltype
# (with the dVNC-to-A1 subset split out into its own celltype).
_, celltypes = ct.Celltype_Analyzer.default_celltypes(exclude=pymaid.get_skids_by_annotation('mw dVNC to A1'))
celltypes = celltypes + [ct.Celltype(name='dVNCs-A1', skids=pymaid.get_skids_by_annotation('mw dVNC to A1'))]
celltypes_pairs = [pm.Promat.load_pairs_from_annotation('', pairs, return_type='all_pair_ids_bothsides', skids=celltype.get_skids(), use_skids=True) for celltype in celltypes]
celltypes_pre = [list(asc_pairs.leftid)]
celltypes_post = [list(pairs_from_list.leftid) for pairs_from_list in celltypes_pairs]
# connection probability matrix, same recipe as the previous chunk
mat = np.zeros(shape=(len(celltypes_pre), len(celltypes_post)))
for i, pair_type1 in enumerate(celltypes_pre):
    for j, pair_type2 in enumerate(celltypes_post):
        connection = []
        for skid1 in pair_type1:
            for skid2 in pair_type2:
                if((skid1, skid2) in graph.G.edges): connection.append(1)
                if((skid1, skid2) not in graph.G.edges): connection.append(0)
        mat[i, j] = sum(connection)/len(connection)
df = pd.DataFrame(mat, columns = [celltype.get_name() for celltype in celltypes],
                    index = ['A1-ascending'])
# modify 'Blues' cmap to have a white background
cmap = plt.cm.get_cmap('Blues')
blue_cmap = cmap(np.linspace(0, 1, 20))
blue_cmap[0] = np.array([1, 1, 1, 1])  # force the lowest bin to pure white
blue_cmap = mpl.colors.LinearSegmentedColormap.from_list(name='New_Blues', colors=blue_cmap)
cmap = blue_cmap
vmax = 0.02
fig, ax = plt.subplots(1,1, figsize=(5,2))
sns.heatmap(df, square=True, cmap=cmap, vmax=vmax, annot=True, fmt='.3f')
plt.savefig(f'VNC_interaction/plots/connection-probability_ascendings_all-brain-celltypes.pdf', format='pdf', bbox_inches='tight')
# dVNCs to A1
motorneuron_pairs = pm.Promat.load_pairs_from_annotation('mw A1 MN', pairs)
# pair ids directly presynaptic to A1 motorneurons (from the thresholded edge list)
pre_motorneuron_pairids = ad_edges.set_index('downstream_pair_id').loc[np.intersect1d(motorneuron_pairs.leftid, ad_edges.downstream_pair_id), 'upstream_pair_id']
pre_motorneuron_pairids = list(np.unique(pre_motorneuron_pairids))
pre_motorneurons = pre_motorneuron_pairids + list(pairs.set_index('leftid').loc[pre_motorneuron_pairids, 'rightid'])
# remaining A1 interneurons: paired A1 cells minus MNs, pre-MNs and ascendings
A1_cells = np.setdiff1d(pymaid.get_skids_by_annotation('mw A1 neurons paired'), pymaid.get_skids_by_annotation('mw A1 MN') + pre_motorneurons + ascendings_all)
A1_pairs = pm.Promat.load_pairs_from_annotation('A1', pairs, return_type='all_pair_ids_bothsides', skids=A1_cells, use_skids=True)
dVNC_A1_pairs = pm.Promat.load_pairs_from_annotation('mw dVNC to A1', pairs, return_type='all_pair_ids_bothsides')
dVNC_nonA1 = np.setdiff1d(pymaid.get_skids_by_annotation('mw dVNC'), pymaid.get_skids_by_annotation('mw dVNC to A1'))
dVNC_nonA1_pairs = pm.Promat.load_pairs_from_annotation('mw dVNC not to A1', pairs, return_type='all_pair_ids_bothsides', skids=dVNC_nonA1, use_skids=True)
celltypes_pre = [list(dVNC_A1_pairs.leftid), list(dVNC_nonA1_pairs.leftid)]
celltypes_post = [list(asc_pairs.leftid), list(A1_pairs.leftid), pre_motorneuron_pairids, list(motorneuron_pairs.leftid)]
# connection probability matrix, same recipe as above
mat = np.zeros(shape=(len(celltypes_pre), len(celltypes_post)))
for i, pair_type1 in enumerate(celltypes_pre):
    for j, pair_type2 in enumerate(celltypes_post):
        connection = []
        for skid1 in pair_type1:
            for skid2 in pair_type2:
                if((skid1, skid2) in graph.G.edges): connection.append(1)
                if((skid1, skid2) not in graph.G.edges): connection.append(0)
        mat[i, j] = sum(connection)/len(connection)
df = pd.DataFrame(mat, columns = ['A1-ascending', 'A1-interneuron', 'A1-pre-motorneuron', 'A1-motorneuron'],
                    index = ['dVNC to A1', 'dVNC not to A1'])
cmap = blue_cmap
vmax = 0.02
fig, ax = plt.subplots(1,1, figsize=(2,2))
sns.heatmap(df, square=True, cmap=cmap, vmax=vmax)
plt.savefig(f'VNC_interaction/plots/connection-probability_dVNCs_A1cells.pdf', format='pdf', bbox_inches='tight')
# summary connectivity probability plot
# All-vs-all connection probability across brain output types and A1 cell classes.
celltypes_pre = [list(dVNC_A1_pairs.leftid), list(dVNC_nonA1_pairs.leftid), list(dSEZ_pairs.leftid), list(RGN_pairs.leftid), list(asc_pairs.leftid), list(A1_pairs.leftid), pre_motorneuron_pairids, list(motorneuron_pairs.leftid)]
celltypes_post = [list(dVNC_A1_pairs.leftid), list(dVNC_nonA1_pairs.leftid), list(dSEZ_pairs.leftid), list(RGN_pairs.leftid), list(asc_pairs.leftid), list(A1_pairs.leftid), pre_motorneuron_pairids, list(motorneuron_pairs.leftid)]
mat = np.zeros(shape=(len(celltypes_pre), len(celltypes_post)))
for i, pair_type1 in enumerate(celltypes_pre):
    for j, pair_type2 in enumerate(celltypes_post):
        connection = []
        for skid1 in pair_type1:
            for skid2 in pair_type2:
                if((skid1, skid2) in graph.G.edges): connection.append(1)
                if((skid1, skid2) not in graph.G.edges): connection.append(0)
        mat[i, j] = sum(connection)/len(connection)
df = pd.DataFrame(mat, columns = ['dVNC to A1', 'dVNC not to A1', 'dSEZ', 'RGN', 'A1-ascending', 'A1-interneuron', 'A1-pre-motorneuron', 'A1-motorneuron'],
                    index = ['dVNC to A1', 'dVNC not to A1', 'dSEZ', 'RGN', 'A1-ascending', 'A1-interneuron', 'A1-pre-motorneuron', 'A1-motorneuron'])
cmap = blue_cmap
vmax = 0.04
fig, ax = plt.subplots(1,1, figsize=(3,3))
sns.heatmap(df, square=True, cmap=cmap, vmax=vmax, annot=True, fmt='.3f')
plt.savefig(f'VNC_interaction/plots/connection-probability_brain-A1_summary.pdf', format='pdf', bbox_inches='tight')
# %%
# connection probability of self loop (dVNC<->ascending-A1) vs zigzag motif (dVNC1->ascending-A1->dVNC2)
# generate graph for dVNCs and A1
def dVNC_asc_loop_probability(graph, pairs, length, pre=None, use_pre=False):
    # requires Analyze_Nx_G(..., split_pairs=True)
    """Fraction of `pairs` whose leftid sits on a self-loop of exactly `length`
    hops that passes through an A1 ascending neuron.

    Args:
        graph: Analyze_Nx_G wrapper exposing .G (networkx graph) and
            all_simple_self_loop_paths().
        pairs: DataFrame with a .leftid column of pair ids to test.
        length: loop length in hops; must be >= 2.
        pre: optional skids that may not appear in the middle of a loop
            (only checked when use_pre=True).
        use_pre: whether to apply the `pre` exclusion.

    Returns:
        (probability, all_paths, loop_paths): per-pair loop probability, every
        length-matched candidate path per pair, and the candidate paths of
        pairs that qualified.
    """
    if pre is None:  # avoid a shared mutable default argument
        pre = []
    ascs = ct.Celltype_Analyzer.get_skids_from_meta_annotation('mw A1 ascending')
    if(length<2):
        print('length must be 2 or greater!')
        return
    dVNC_ascending_loop = []
    all_paths = []
    loop_paths = []
    for i in pairs.index:
        leftid = pairs.loc[i].leftid
        if(leftid in graph.G.nodes):
            paths = graph.all_simple_self_loop_paths(source = leftid, cutoff=length)
            paths = [path for path in paths if len(path)==(length+1)]
            all_paths.append(paths)
            # when loops exist
            if(len(paths)>0):
                loop_partners = [path[1:length] for path in paths] # collect all partners that mediate loops
                if(type(loop_partners[0])==list): loop_partners = [x for sublist in loop_partners for x in sublist]
                loop_partners = list(np.unique(loop_partners))
                if(use_pre):
                    asc_present = sum([1 for x in loop_partners if x in ascs])>0
                    pre_not_in_middle = sum([1 for x in loop_partners[0:(len(loop_partners)-1)] if x in pre])==0
                    if(asc_present & pre_not_in_middle):
                        dVNC_ascending_loop.append(1)
                        # bug fix: the original did loop_paths.append(path), but
                        # `path` is undefined here (py3 comprehension variables
                        # don't leak) and raised NameError; record this pair's
                        # qualifying candidate paths instead
                        loop_paths += paths
                    if((asc_present==False) | (pre_not_in_middle==False)): dVNC_ascending_loop.append(0)
                if(use_pre==False):
                    asc_present = sum([1 for x in loop_partners if x in ascs])>0
                    if(asc_present): dVNC_ascending_loop.append(1)
                    if(asc_present==False): dVNC_ascending_loop.append(0)
            # when loops don't exist
            if(len(paths)==0): dVNC_ascending_loop.append(0)
        if(leftid not in graph.G.nodes):
            dVNC_ascending_loop.append(0)
    prob_dVNC_ascending_loop = sum(dVNC_ascending_loop)/len(dVNC_ascending_loop)
    return(prob_dVNC_ascending_loop, all_paths, loop_paths)
def dVNC_asc_zigzag_probability(graph, pairs, targets, length, exclude_from_path=None, pre=None, use_pre=False):
    # requires Analyze_Nx_G(..., split_pairs=True)
    """Probability that a pair reaches a `targets` neuron through a zigzag motif
    (source -> A1 ascending -> different target) of exactly `length` hops.

    Args:
        graph: Analyze_Nx_G wrapper exposing .G (networkx graph).
        pairs: DataFrame with a .leftid column of source pair ids.
        targets: pair leftids of the desired output type (not mutated).
        length: path length in hops; must be >= 2.
        exclude_from_path: skids that may not appear mid-path.
        pre: skids that may not appear in the middle (only when use_pre=True
            and length >= 4).
        use_pre: whether to apply the `pre` exclusion.

    Returns:
        (probability, all_paths, zigzag_paths)
    """
    if exclude_from_path is None:  # avoid shared mutable default arguments
        exclude_from_path = []
    if pre is None:
        pre = []
    # bug fix: work on a local copy — the original removed visited leftids from
    # the caller's `targets` list, mutating it as a side effect
    targets = list(targets)
    ascending = ct.Celltype_Analyzer.get_skids_from_meta_annotation('mw A1 ascending')
    brain_neurons = pymaid.get_skids_by_annotation('mw brain neurons')
    if(length<2):
        print('length must be 2 or greater!')
        return
    # generate appropriate paths
    all_paths = []
    for i in pairs.index:
        leftid = pairs.loc[i].leftid
        if(leftid in graph.G.nodes):
            if(leftid in targets): targets.remove(leftid) # remove the current neuron from the list of targets (not looking for self loops)
            paths = nx.all_simple_paths(G=graph.G, source = leftid, target = targets, cutoff=length)
            paths = [path for path in paths if len(path)==(length+1)]
            all_paths.append(paths)
    # check how many paths exist with ascending and dVNCs present
    zigzag_paths = []
    dVNC_ascending_zigzag = []
    for paths in all_paths:
        path_exists = []
        for path in paths:
            # must talk to A1 neuron in first hop
            if(path[1] in brain_neurons):
                continue
            # celltypes to compare against
            ascs = ascending
            # are there ascending and dVNCs present in each path?
            asc_present = sum([1 for x in path[1:len(path)] if x in ascs])>0
            target_present = sum([1 for x in path[1:(len(path)-1)] if x in (targets+exclude_from_path)])==0 # should only be the target type at the end of the path
            if(use_pre):
                if(length>=4):
                    pre_not_in_middle = sum([1 for x in path[1:(len(path)-2)] if x in pre])==0
                    if((asc_present) & (target_present) & (pre_not_in_middle)):
                        path_exists.append(1)
                        zigzag_paths.append(path)
                if(length<4):
                    if((asc_present) & (target_present)):
                        path_exists.append(1)
                        zigzag_paths.append(path)
            if(use_pre==False):
                if((asc_present) & (target_present)):
                    path_exists.append(1)
                    zigzag_paths.append(path)
        if(sum(path_exists)>0): dVNC_ascending_zigzag.append(1)
    dVNC_ascending_zigzag_prob = sum(dVNC_ascending_zigzag)/len(pairs.leftid)
    return(dVNC_ascending_zigzag_prob, all_paths, zigzag_paths)
# Build subgraphs restricted to the relevant neuron sets, then measure loop and
# zigzag motif probabilities in parallel for path lengths 2-5.
A1 = pymaid.get_skids_by_annotation('mw A1 neurons paired')
pre_dVNC = pymaid.get_skids_by_annotation('mw pre-dVNC')
graph_dVNC_A1 = pg.Analyze_Nx_G(ad_edges, split_pairs=False, select_neurons = list(dVNCs) + A1 + pre_dVNC) # dVNCs includes the one skid that needs to be manually added (in chunk #1)
from joblib import Parallel, delayed
from tqdm import tqdm
sources = dVNC_A1_pairs
targets = dVNC_pairs.leftid.to_list()
lengths = [2,3,4,5]
# self loops: dVNC -> ... -> same dVNC via an A1 ascending
loops = Parallel(n_jobs=-1)(delayed(dVNC_asc_loop_probability)(graph_dVNC_A1, sources, length=lengths[i], pre=pre_dVNC, use_pre=True) for i in tqdm(range(len(lengths))))
loop_probs = [x[0] for x in loops]
loop_all_paths = [x[1] for x in loops]
loop_paths = [x[2] for x in loops]
# zigzags: dVNC1 -> A1 ascending -> different dVNC2
zigzag_dVNC = Parallel(n_jobs=-1)(delayed(dVNC_asc_zigzag_probability)(graph_dVNC_A1, sources, targets, length=lengths[i], pre=pre_dVNC, use_pre=True) for i in tqdm(range(len(lengths))))
zigzag_dVNC_probs = [x[0] for x in zigzag_dVNC]
zigzag_all_dVNC_paths = [x[1] for x in zigzag_dVNC]
zigzag_dVNC_paths = [x[2] for x in zigzag_dVNC[0:3]]
# zigzags to other output types
dSEZs = pymaid.get_skids_by_annotation('mw dSEZ')
RGNs = pymaid.get_skids_by_annotation('mw RGN')
pre_dSEZ = pymaid.get_skids_by_annotation('mw pre-dSEZ')
graph_outputs_A1 = pg.Analyze_Nx_G(ad_edges, split_pairs=False, select_neurons = np.unique(list(dVNCs) + dSEZs + pre_dSEZ + A1)) # dVNCs includes the one skid that needs to be manually added (in chunk #1)
targets = dSEZ_pairs.leftid.to_list()
zigzag_dSEZ = Parallel(n_jobs=-1)(delayed(dVNC_asc_zigzag_probability)(graph_outputs_A1, sources, targets, length=lengths[i], pre=pre_dSEZ, use_pre=True) for i in tqdm(range(len(lengths))))
zigzag_dSEZ_probs = [x[0] for x in zigzag_dSEZ]
zigzag_all_dSEZ_paths = [x[1] for x in zigzag_dSEZ]
zigzag_dSEZ_paths = [x[2] for x in zigzag_dSEZ[0:3]]
pre_RGN = pymaid.get_skids_by_annotation('mw pre-RGN')
graph_outputs_A1 = pg.Analyze_Nx_G(ad_edges, split_pairs=False, select_neurons = np.unique(list(dVNCs) + RGNs + pre_RGN + A1)) # dVNCs includes the one skid that needs to be manually added (in chunk #1)
targets = RGN_pairs.leftid.to_list()
zigzag_RGN = Parallel(n_jobs=-1)(delayed(dVNC_asc_zigzag_probability)(graph_outputs_A1, sources, targets, length=lengths[i], pre=pre_RGN, use_pre=True) for i in tqdm(range(len(lengths))))
zigzag_RGN_probs = [x[0] for x in zigzag_RGN]
zigzag_all_RGN_paths = [x[1] for x in zigzag_RGN]
zigzag_RGN_paths = [x[2] for x in zigzag_RGN[0:3]]
# zigzags to any output type at once
graph_outputs_A1 = pg.Analyze_Nx_G(ad_edges, split_pairs=False, select_neurons = np.unique(list(dVNCs) + dSEZs + RGNs + pre_RGN + pre_dSEZ + pre_RGN + A1)) # dVNCs includes the one skid that needs to be manually added (in chunk #1)
targets = list(np.unique(RGN_pairs.leftid.to_list() + dSEZ_pairs.leftid.to_list() + dVNC_pairs.leftid.to_list()))
zigzag_outputs = Parallel(n_jobs=-1)(delayed(dVNC_asc_zigzag_probability)(graph_outputs_A1, sources, targets, length=lengths[i], pre=pre_RGN + pre_dSEZ + pre_dVNC, use_pre=True) for i in tqdm(range(len(lengths))))
zigzag_outputs_probs = [x[0] for x in zigzag_outputs]
zigzag_all_outputs_paths = [x[1] | |
# Repository: kevinykuo/CTGAN
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
import torch.utils.data
from sklearn.mixture import BayesianGaussianMixture
from torch.nn import (
BatchNorm1d, Dropout, LeakyReLU, Linear, Module, ReLU, Sequential)
from torch.nn import functional as F
# Column-type tags used throughout the transformer metadata.
CONTINUOUS = "continuous"
CATEGORICAL = "categorical"
ORDINAL = "ordinal"
class DataTransformer(object):
    """Model continuous columns with a BayesianGMM and normalized to a scalar
    [0, 1] and a vector.
    Discrete and ordinal columns are converted to a one-hot vector.
    """

    def __init__(self, n_clusters=10, eps=0.005):
        """n_cluster is the upper bound of modes."""
        self.meta = None
        self.n_clusters = n_clusters  # max number of GMM modes per continuous column
        self.eps = eps  # minimum mode weight for a GMM component to be kept

    @staticmethod
    def get_metadata(data, categorical_columns=tuple(), ordinal_columns=tuple()):
        """Build a per-column description: categorical/ordinal columns record a
        value-to-index mapping ('i2s'); continuous columns record min/max."""
        meta = []
        df = pd.DataFrame(data)
        for index in df:
            column = df[index]
            if index in categorical_columns:
                mapper = column.value_counts().index.tolist()
                meta.append({
                    "name": index,
                    "type": CATEGORICAL,
                    "size": len(mapper),
                    "i2s": mapper
                })
            elif index in ordinal_columns:
                # ordinal values are ordered by descending frequency
                value_count = list(dict(column.value_counts()).items())
                value_count = sorted(value_count, key=lambda x: -x[1])
                mapper = list(map(lambda x: x[0], value_count))
                meta.append({
                    "name": index,
                    "type": ORDINAL,
                    "size": len(mapper),
                    "i2s": mapper
                })
            else:
                meta.append({
                    "name": index,
                    "type": CONTINUOUS,
                    "min": column.min(),
                    "max": column.max(),
                })
        return meta

    def fit(self, data, categorical_columns=tuple(), ordinal_columns=tuple()):
        """Fit per-column transforms.

        Continuous columns get a Bayesian GMM whose modes with weight > eps
        are kept; discrete/ordinal columns are set up for one-hot encoding.
        Populates self.meta, self.model, self.components, self.output_info
        and self.output_dim.
        """
        self.meta = self.get_metadata(
            data, categorical_columns, ordinal_columns)
        model = []
        self.output_info = []
        self.output_dim = 0
        self.components = []
        for id_, info in enumerate(self.meta):
            if info['type'] == CONTINUOUS:
                gm = BayesianGaussianMixture(
                    self.n_clusters,
                    weight_concentration_prior_type='dirichlet_process',
                    weight_concentration_prior=0.001,
                    n_init=1)
                gm.fit(data[:, id_].reshape([-1, 1]))
                model.append(gm)
                comp = gm.weights_ > self.eps  # keep only modes with non-negligible weight
                self.components.append(comp)
                # one tanh scalar (normalized value) + one softmax block (mode indicator)
                self.output_info += [(1, 'tanh'), (np.sum(comp), 'softmax')]
                self.output_dim += 1 + np.sum(comp)
            else:
                model.append(None)
                self.components.append(None)
                self.output_info += [(info['size'], 'softmax')]
                self.output_dim += info['size']
        self.model = model

    def transform(self, data):
        """Encode `data` column by column.

        Continuous: ((x - mode_mean) / (4 * mode_std), one-hot of the sampled
        mode). Discrete/ordinal: plain one-hot. Mode selection is random
        (responsibility-weighted), so repeated calls can differ.
        """
        values = []
        for id_, info in enumerate(self.meta):
            current = data[:, id_]
            if info['type'] == CONTINUOUS:
                current = current.reshape([-1, 1])
                means = self.model[id_].means_.reshape((1, self.n_clusters))
                stds = np.sqrt(self.model[id_].covariances_).reshape(
                    (1, self.n_clusters))
                features = (current - means) / (4 * stds)
                probs = self.model[id_].predict_proba(current.reshape([-1, 1]))
                n_opts = sum(self.components[id_])
                features = features[:, self.components[id_]]
                probs = probs[:, self.components[id_]]
                # sample one kept mode per row, proportional to its responsibility
                opt_sel = np.zeros(len(data), dtype='int')
                for i in range(len(data)):
                    pp = probs[i] + 1e-6
                    pp = pp / sum(pp)
                    opt_sel[i] = np.random.choice(np.arange(n_opts), p=pp)
                idx = np.arange((len(features)))
                features = features[idx, opt_sel].reshape([-1, 1])
                features = np.clip(features, -.99, .99)
                probs_onehot = np.zeros_like(probs)
                probs_onehot[np.arange(len(probs)), opt_sel] = 1
                values += [features, probs_onehot]
            else:
                col_t = np.zeros([len(data), info['size']])
                col_t[np.arange(len(data)), current.astype('int32')] = 1
                values.append(col_t)
        return np.concatenate(values, axis=1)

    def inverse_transform(self, data, sigmas):
        """Decode transformed rows back to the original column space.

        `sigmas` (or None) gives per-output noise scales; when provided, the
        continuous scalar is perturbed with Gaussian noise before decoding.
        """
        data_t = np.zeros([len(data), len(self.meta)])
        st = 0
        for id_, info in enumerate(self.meta):
            if info['type'] == CONTINUOUS:
                u = data[:, st]  # normalized scalar
                v = data[:, st + 1:st + 1 + np.sum(self.components[id_])]  # mode one-hot
                if sigmas is not None:
                    sig = sigmas[st]
                    u = np.random.normal(u, sig)
                u = np.clip(u, -1, 1)
                # re-expand the mode vector to all n_clusters slots (-100 is never argmax)
                v_t = np.ones((data.shape[0], self.n_clusters)) * -100
                v_t[:, self.components[id_]] = v
                v = v_t
                st += 1 + np.sum(self.components[id_])
                means = self.model[id_].means_.reshape([-1])
                stds = np.sqrt(self.model[id_].covariances_).reshape([-1])
                p_argmax = np.argmax(v, axis=1)
                std_t = stds[p_argmax]
                mean_t = means[p_argmax]
                tmp = u * 4 * std_t + mean_t  # invert (x - mean) / (4 * std)
                data_t[:, id_] = tmp
            else:
                current = data[:, st:st + info['size']]
                st += info['size']
                data_t[:, id_] = np.argmax(current, axis=1)
        return data_t
class Discriminator(Module):
    """PacGAN-style critic: scores `pack` samples jointly as one flattened row."""

    def __init__(self, input_dim, dis_dims, pack=10):
        super(Discriminator, self).__init__()
        packed_dim = input_dim * pack
        self.pack = pack
        self.packdim = packed_dim
        layers = []
        in_features = packed_dim
        for out_features in list(dis_dims):
            layers.extend([
                Linear(in_features, out_features),
                LeakyReLU(0.2),
                Dropout(0.5),
            ])
            in_features = out_features
        layers.append(Linear(in_features, 1))  # final scalar score
        self.seq = Sequential(*layers)

    def forward(self, input):
        # the batch must be divisible by `pack` so rows can be grouped
        assert input.size()[0] % self.pack == 0
        return self.seq(input.view(-1, self.packdim))
class Residual(Module):
    """Fully-connected block whose output is concatenated with its input
    (DenseNet-style skip connection)."""

    def __init__(self, i, o):
        super(Residual, self).__init__()
        self.fc = Linear(i, o)
        self.bn = BatchNorm1d(o)
        self.relu = ReLU()

    def forward(self, input):
        hidden = self.relu(self.bn(self.fc(input)))
        return torch.cat([hidden, input], dim=1)
class Generator(Module):
    """Maps latent noise to data space through a stack of Residual blocks."""

    def __init__(self, embedding_dim, gen_dims, data_dim):
        super(Generator, self).__init__()
        layers = []
        width = embedding_dim
        for out_width in list(gen_dims):
            layers.append(Residual(width, out_width))
            width += out_width  # Residual concatenates its input, widening the features
        layers.append(Linear(width, data_dim))
        self.seq = Sequential(*layers)

    def forward(self, input):
        return self.seq(input)
def apply_activate(data, output_info):
    """Apply the per-span output activations: tanh for continuous scalars,
    gumbel-softmax (tau=0.2) for one-hot spans."""
    pieces = []
    start = 0
    for width, activation in output_info:
        end = start + width
        if activation == 'tanh':
            pieces.append(torch.tanh(data[:, start:end]))
        elif activation == 'softmax':
            pieces.append(F.gumbel_softmax(data[:, start:end], tau=0.2))
        else:
            assert 0
        start = end
    return torch.cat(pieces, dim=1)
def random_choice_prob_index(a, axis=1):
    """Vectorized weighted sampling: for each row of `a`, pick one index with
    probability proportional to its weight (inverse-CDF via cumsum)."""
    draws = np.random.rand(a.shape[1 - axis])
    thresholds = np.expand_dims(draws, axis=axis)
    cumulative = a.cumsum(axis=axis)
    return (cumulative > thresholds).argmax(axis=axis)
class Cond(object):
    """Conditional-vector generator over the discrete columns of the
    transformed data (CTGAN's training-by-sampling trick).

    Softmax spans that directly follow a tanh span are mode indicators of
    continuous columns and are skipped; only genuine discrete columns count.
    """

    def __init__(self, data, output_info):
        # self.n_col = self.n_opt = 0
        # return
        self.model = []
        st = 0
        skip = False
        max_interval = 0
        counter = 0
        # First pass: for every genuine discrete span, record each row's
        # category choice (argmax of its one-hot block).
        for item in output_info:
            if item[1] == 'tanh':
                st += item[0]
                skip = True  # the next softmax belongs to this continuous column
                continue
            elif item[1] == 'softmax':
                if skip:
                    skip = False
                    st += item[0]
                    continue
                ed = st + item[0]
                max_interval = max(max_interval, ed - st)
                counter += 1
                self.model.append(np.argmax(data[:, st:ed], axis=-1))
                st = ed
            else:
                assert 0
        assert st == data.shape[1]
        self.interval = []
        self.n_col = 0
        self.n_opt = 0
        skip = False
        st = 0
        self.p = np.zeros((counter, max_interval))
        # Second pass: per-column log-frequency-smoothed sampling distribution,
        # plus each column's (offset, width) interval in the condition vector.
        for item in output_info:
            if item[1] == 'tanh':
                skip = True
                st += item[0]
                continue
            elif item[1] == 'softmax':
                if skip:
                    st += item[0]
                    skip = False
                    continue
                ed = st + item[0]
                tmp = np.sum(data[:, st:ed], axis=0)
                tmp = np.log(tmp + 1)  # log damping so rare categories still get sampled
                tmp = tmp / np.sum(tmp)
                self.p[self.n_col, :item[0]] = tmp
                self.interval.append((self.n_opt, item[0]))
                self.n_opt += item[0]
                self.n_col += 1
                st = ed
            else:
                assert 0
        self.interval = np.asarray(self.interval)

    def sample(self, batch):
        """Sample `batch` conditions from the log-smoothed distribution.

        Returns (condition one-hot vec, column mask, column indexes,
        chosen category per row), or None if there are no discrete columns.
        """
        if self.n_col == 0:
            return None
        batch = batch
        idx = np.random.choice(np.arange(self.n_col), batch)
        vec1 = np.zeros((batch, self.n_opt), dtype='float32')
        mask1 = np.zeros((batch, self.n_col), dtype='float32')
        mask1[np.arange(batch), idx] = 1
        opt1prime = random_choice_prob_index(self.p[idx])
        opt1 = self.interval[idx, 0] + opt1prime
        vec1[np.arange(batch), opt1] = 1
        return vec1, mask1, idx, opt1prime

    def sample_zero(self, batch):
        """Sample conditions from the raw empirical category distribution
        (by drawing a real row's category); None if no discrete columns."""
        if self.n_col == 0:
            return None
        vec = np.zeros((batch, self.n_opt), dtype='float32')
        idx = np.random.choice(np.arange(self.n_col), batch)
        for i in range(batch):
            col = idx[i]
            pick = int(np.random.choice(self.model[col]))
            vec[i, pick + self.interval[col, 0]] = 1
        return vec
def cond_loss(data, output_info, c, m):
    """Cross-entropy between the generated discrete columns and the
    conditional vector, masked so that only the conditioned column of
    each sample contributes.

    ``data`` holds raw generator logits, ``c`` the conditional one-hot
    vector, ``m`` the per-sample column mask.  Returns the mean masked
    loss over the batch.
    """
    per_column = []
    st = 0
    st_c = 0
    skip = False
    for item in output_info:
        width, kind = item[0], item[1]
        if kind == 'tanh':
            st += width
            skip = True
        elif kind == 'softmax':
            if skip:
                # mode indicator of a continuous column: present in data
                # but absent from the conditional vector
                skip = False
                st += width
                continue
            ed = st + width
            ed_c = st_c + width
            target = torch.argmax(c[:, st_c:ed_c], dim=1)
            per_column.append(
                F.cross_entropy(data[:, st:ed], target, reduction='none'))
            st = ed
            st_c = ed_c
        else:
            assert 0
    stacked = torch.stack(per_column, dim=1)
    return (stacked * m).sum() / data.size()[0]
class Sampler(object):
    """Indexes the one-hot training matrix so that rows can be drawn either
    uniformly or conditioned on a (discrete column, category) pair."""
    def __init__(self, data, output_info):
        super(Sampler, self).__init__()
        self.data = data
        self.n = len(data)
        # For every genuine discrete column keep, per category, the row
        # indices of the training samples falling in that category.
        self.model = []
        position = 0
        skip_next_softmax = False
        for item in output_info:
            width, kind = item[0], item[1]
            if kind == 'tanh':
                position += width
                skip_next_softmax = True
            elif kind == 'softmax':
                if skip_next_softmax:
                    # mode indicator of a continuous column -> not sampled on
                    skip_next_softmax = False
                    position += width
                    continue
                rows_per_category = [
                    np.nonzero(data[:, position + j])[0] for j in range(width)
                ]
                self.model.append(rows_per_category)
                position += width
            else:
                assert 0
        assert position == data.shape[1]
    def sample(self, n, col, opt):
        """Return data rows: ``n`` uniform rows when ``col`` is None,
        otherwise one matching row per (column, category) pair in
        ``zip(col, opt)``."""
        if col is None:
            chosen = np.random.choice(np.arange(self.n), n)
            return self.data[chosen]
        chosen = [np.random.choice(self.model[c][o]) for c, o in zip(col, opt)]
        return self.data[chosen]
def calc_gradient_penalty(netD, real_data, fake_data, device='cpu',
pac=10, lambda_=10):
alpha = torch.rand(real_data.size(0) // pac, 1, 1, device=device)
alpha = alpha.repeat(1, pac, real_data.size(1))
alpha = alpha.view(-1, real_data.size(1))
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
disc_interpolates = netD(interpolates)
gradients = torch.autograd.grad(
outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size(), device=device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.view(-1, pac * real_data.size(1))
.norm(2, dim=1) - 1) ** 2).mean() * lambda_
return gradient_penalty
class CTGANSynthesizer(object):
"""docstring for IdentitySynthesizer."""
def __init__(self,
embedding_dim=128,
gen_dim=(256, 256),
dis_dim=(256, 256),
l2scale=1e-6,
batch_size=500,
epochs=300):
self.embedding_dim = embedding_dim
self.gen_dim = gen_dim
self.dis_dim = dis_dim
self.l2scale = l2scale
self.batch_size = batch_size
self.epochs = epochs
self.device = torch.device(
"cuda:0" if torch.cuda.is_available() else "cpu")
def fit(self,
train_data,
categorical_columns=tuple(),
ordinal_columns=tuple()):
self.transformer = DataTransformer()
self.transformer.fit(train_data, categorical_columns, ordinal_columns)
train_data = self.transformer.transform(train_data)
data_sampler = Sampler(train_data, self.transformer.output_info)
data_dim = self.transformer.output_dim
self.cond_generator = Cond(train_data, self.transformer.output_info)
self.generator = Generator(
self.embedding_dim + self.cond_generator.n_opt,
self.gen_dim,
data_dim).to(self.device)
discriminator = Discriminator(
data_dim + self.cond_generator.n_opt,
self.dis_dim).to(self.device)
optimizerG = optim.Adam(
self.generator.parameters(), lr=2e-4, betas=(0.5, 0.9),
weight_decay=self.l2scale)
optimizerD = optim.Adam(
discriminator.parameters(), lr=2e-4, betas=(0.5, 0.9))
assert self.batch_size % 2 == 0
mean = torch.zeros(
self.batch_size, self.embedding_dim, device=self.device)
std = mean + 1
steps_per_epoch = len(train_data) // self.batch_size
for i in | |
# File: scripts/automation/trex_control_plane/interactive/trex/common/services/trex_service_ap.py
from trex.stl.api import *
from trex.utils.text_opts import *
from trex.utils.common import natural_sorted_key
from .trex_service import Service, ServiceFilter
from .trex_service_int import ServiceCtx, simpy, TXBuffer
import time
from collections import deque
from scapy.all import *
from scapy.contrib.capwap import *
from trex_openssl import *
import threading
import struct
import sys
import time
import base64
'''
FSMs for AP:
* Discover WLC
* Establish DTLS session
* Join WLC
* Add client (station)
* Shutdown DTLS session
* Maintenance (arp, ping, capwap echo request, fetches rx and dispatches to rx_buffer of APs)
'''
class ServiceBufferedCtx(ServiceCtx):
    ''' Same as parent, but does not use capture to get packets, uses AP's rx_buffer '''
    def _run(self, services):
        # Reset internal state and register the requested services.
        self._reset()
        self._add(services)
        # All services must share one filter object exposing services_per_ap;
        # packets are dispatched through that mapping in _tick_process.
        if len(self.filters) > 1:
            raise Exception('Services here should have one common filter per AP')
        self.filter = list(self.filters.values())[0]['inst']
        if not hasattr(self.filter, 'services_per_ap'):
            raise Exception('Services here should have filter with attribute services_per_ap, got %s, type: %s' % (self.filter, type(self.filter)))
        # create an environment
        self.env = simpy.rt.RealtimeEnvironment(factor = 1, strict = False)
        self.tx_buffer = TXBuffer(self.env, self.client, self.port, 99, 1)
        # create processes
        for service in self.services:
            pipe = self._pipe()
            self.services[service]['pipe'] = pipe
            p = self.env.process(service.run(pipe))
            self._on_process_create(p)
        try:
            # The tick process drives TX and polls the AP rx_buffers; the
            # simulation ends when it returns (all services finished).
            tick_process = self.env.process(self._tick_process())
            self.env.run(until = tick_process)
        finally:
            self._reset()
    def _tick_process (self):
        # SimPy generator: flush pending TX, then drain every AP's rx_buffer
        # and feed each packet to all services bound to that AP.
        while True:
            self.tx_buffer.send_all()
            for ap, services in self.filter.services_per_ap.items():
                # pop at most the packets queued right now (len snapshot)
                for _ in range(len(ap.rx_buffer)):
                    try:
                        scapy_pkt = ap.rx_buffer.popleft()
                    except IndexError:
                        # buffer drained concurrently by another consumer
                        break
                    for service in services:
                        self.services[service]['pipe']._on_rx_pkt(scapy_pkt, None)
            # if no other process exists - exit
            if self.is_done():
                return
            else:
                # backoff
                yield self.env.timeout(0.05)
'''
Just assign services to AP, it will get packets from AP's rx_buffer
'''
class ServiceFilterPerAp(ServiceFilter):
    """Groups services by their AP; packets are pulled from each AP's
    rx_buffer rather than captured, so no real filtering happens here."""
    def __init__(self):
        self.services_per_ap = {}
    def add(self, service):
        """Register a service under its AP."""
        self.services_per_ap.setdefault(service.ap, []).append(service)
'''
Used to fetch RX packets for all APs
Decrypts them if possible
Sends echo request (control keep alive)
Answers to async config changes
Does not use SimPy
'''
class ServiceApBgMaintenance:
    """Background maintenance worker for all simulated APs on one port.

    Fetches RX packets, decrypts CAPWAP control traffic when possible,
    answers ARP/ICMP, sends CAPWAP echo requests and reacts to async
    configuration changes from the WLC.  Runs in a plain daemon thread
    (no SimPy).
    """
    # BPF filter: ARP, ping and CAPWAP control, plus the few CAPWAP data
    # packets the clients must react to (keep-alive, assoc resp, ARP, ping).
    bpf_filter = ('arp or (ip and (icmp or udp src port 5246 or ' # arp, ping, capwap control
            + '(udp src port 5247 and (udp[11] & 8 == 8 or ' # capwap data keep-alive
            + 'udp[16:2] == 16 or ' # client assoc. resp
            + 'udp[48:2] == 2054 or ' # client arp
            + '(udp[48:2] == 2048 and udp[59] == 1)))))') # client ping
    # Raw big-endian byte constants for fast comparisons against packet
    # bytes without building scapy objects.
    ARP_ETHTYPE = b'\x08\x06'
    IP_ETHTYPE = b'\x08\x00'
    ICMP_PROTO = b'\x01'
    UDP_PROTO = b'\x11'
    CAPWAP_CTRL_PORT = b'\x14\x7e'  # 5246
    CAPWAP_DATA_PORT = b'\x14\x7f'  # 5247
    WLAN_ASSOC_RESP = b'\x00\x10'
    ARP_REQ = b'\x00\x01'  # ARP opcode 'who-has'
    ARP_REP = b'\x00\x02'  # ARP opcode 'is-at'
    ICMP_REQ = b'\x08'     # ICMP echo-request type
def __init__(self, ap_mngr, port_id):
self.ap_mngr = ap_mngr
self.port_id = port_id
self.ap_per_ip = {}
self.client_per_mac = {}
self.client_per_ip = {}
self.bg_client = self.ap_mngr.bg_client
self.port = self.bg_client.ports[port_id]
self.capture_id = None
self.bg_thread = None
self.send_pkts = []
################
# API #
################
def run(self):
self.bg_thread = threading.Thread(target = self.main_loop_wrapper)
self.bg_thread.name = 'BG Thread (port %s)' % self.port_id
self.bg_thread.daemon = True
self.bg_thread.start()
def is_running(self):
return self.bg_thread and self.bg_thread.is_alive()
def stop(self):
capture_id = self.capture_id
self.capture_id = None
try:
self.bg_client.stop_capture(capture_id)
except:
pass
##################
# INTERNAL #
##################
def AP_ARP_RESP_TEMPLATE(self, src_mac, dst_mac, src_ip, dst_ip):
return (
dst_mac + src_mac + self.ARP_ETHTYPE + # Ethernet
b'\x00\x01\x08\x00\x06\x04\x00\x02' + src_mac + src_ip + dst_mac + dst_ip # ARP
)
def log(self, msg, level = Logger.VERBOSES['warning']):
if not msg.startswith('(WLC) '):
msg = '(WLC) %s' % msg
self.ap_mngr.trex_client.logger.async_log('\n' + bold(msg), level)
def err(self, msg):
self.log(msg, Logger.VERBOSES['error'])
def fatal(self, msg):
self.log(msg, Logger.VERBOSES['critical'])
self.stop()
def send(self, pkts):
assert type(pkts) is list
push_pkts = [{'binary': base64.b64encode(bytes(p) if isinstance(p, Ether) else p).decode(),
'use_port_dst_mac': False,
'use_port_src_mac': False} for p in pkts]
rc = self.port.push_packets(push_pkts, False, ipg_usec = 1)
#if not rc:
# self.err(rc.err())
def recv(self):
pkts = []
self.bg_client.fetch_capture_packets(self.capture_id, pkts, 10000)
if len(pkts) > 9995:
self.err('Too much packets in rx queue (%s)' % len(pkts))
return pkts
    def shutdown_ap(self, ap):
        """Disconnect all clients of *ap*, close its DTLS session (sending
        the close-notify to the WLC) and always reset the AP state."""
        try:
            for client in ap.clients:
                client.disconnect()
            if ap.is_dtls_established:
                # Serialise SSL_shutdown's close-notify and ship it inside a
                # CAPWAP DTLS header (0x01 0x00 0x00 0x00).
                with ap.ssl_lock:
                    libssl.SSL_shutdown(ap.ssl)
                    tx_pkt = ap.wrap_capwap_pkt(b'\1\0\0\0' + ap.ssl_read())
                self.send([tx_pkt])
        finally:
            # Clear AP state even if the DTLS teardown failed.
            ap.reset_vars()
    def main_loop_wrapper(self):
        """Thread entry point: start the capture, run main_loop and, on
        unexpected death, report and disconnect all known APs."""
        err_msg = ''
        self.capture_id = self.bg_client.start_capture(rx_ports = self.port_id, bpf_filter = self.bpf_filter, limit = 10000)['id']
        try:
            #with Profiler_Context(20):
            self.main_loop()
        except KeyboardInterrupt:
            pass
        except Exception as e:
            if self.capture_id: # if no id -> got stop()
                # STLError is expected operational noise; anything else gets
                # a full traceback for debugging.
                if not isinstance(e, STLError):
                    import traceback
                    traceback.print_exc()
                err_msg = ' (Exception: %s)' % e
        finally:
            if not self.capture_id:
                # stop() was called deliberately -> silent exit
                return
            try:
                self.bg_client.stop_capture(self.capture_id)
            except:
                pass
            # Only report / tear down if the service context still exists.
            if self.port_id in self.ap_mngr.service_ctx:
                if self.ap_per_ip:
                    self.err('Background thread on port %s died%s. Disconnecting APs.' % (self.port_id, err_msg))
                else:
                    self.err('Background thread on port %s died%s.' % (self.port_id, err_msg))
                for ap in self.ap_per_ip.values():
                    self.shutdown_ap(ap)
    def handle_ap_arp(self, rx_bytes):
        """React to an ARP frame addressed to one of our APs: answer
        'who-has' requests, and learn the WLC MAC from 'is-at' replies.

        Operates on raw bytes with fixed Ethernet/ARP offsets for speed.
        """
        src_ip = rx_bytes[28:32]  # ARP sender protocol address
        dst_ip = rx_bytes[38:42]  # ARP target protocol address
        if src_ip == dst_ip: # GARP
            return
        if dst_ip not in self.ap_per_ip: # check IP
            return
        ap = self.ap_per_ip[dst_ip]
        src_mac = rx_bytes[6:12]
        dst_mac = rx_bytes[:6]
        # accept only broadcast or the AP's own MAC
        if dst_mac not in (b'\xff\xff\xff\xff\xff\xff', ap.mac_bytes): # check MAC
            ap.err('Bad MAC (%s) of AP %s' % (str2mac(dst_mac), ap.name))
            return
        if ap.is_debug:
            ap.debug('AP %s got ARP' % ap.name)
            Ether(rx_bytes).show2()
        if rx_bytes[20:22] == self.ARP_REQ: # 'who-has'
            tx_pkt = self.AP_ARP_RESP_TEMPLATE(
                src_mac = ap.mac_bytes,
                dst_mac = src_mac,
                src_ip = dst_ip,
                dst_ip = src_ip,
            )
            #Ether(tx_pkt).show2()
            self.send_pkts.append(tx_pkt)
        elif rx_bytes[20:22] == self.ARP_REP: # 'is-at'
            # ap.rx_buffer.append(Ether(rx_bytes))
            if src_ip == ap.wlc_ip_bytes:
                # learn/update the WLC's MAC address
                ap.mac_dst_bytes = src_mac
                ap.mac_dst = str2mac(src_mac)
    def handle_ap_icmp(self, rx_bytes, ap):
        """Answer ICMP echo-requests addressed to the AP's own IP by
        mirroring the packet (swapped MACs/IPs, recomputed checksum)."""
        rx_pkt = Ether(rx_bytes)
        icmp_pkt = rx_pkt[ICMP]
        if icmp_pkt.type == 8: # echo-request
            #print 'Ping to AP!'
            #rx_pkt.show2()
            if rx_pkt[IP].dst == ap.ip: # ping to AP
                # build the reply by mirroring the request
                tx_pkt = rx_pkt.copy()
                tx_pkt.src, tx_pkt.dst = tx_pkt.dst, tx_pkt.src
                tx_pkt[IP].src, tx_pkt[IP].dst = tx_pkt[IP].dst, tx_pkt[IP].src
                tx_pkt[ICMP].type = 'echo-reply'
                # let scapy recompute the ICMP checksum on build
                del tx_pkt[ICMP].chksum
                #tx_pkt.show2()
                self.send_pkts.append(tx_pkt)
        #elif icmp_pkt.type == 0: # echo-reply
        #    ap.rx_buffer.append(rx_pkt)
def process_capwap_ctrl(self, rx_bytes, ap):
ap.info('Got CAPWAP CTRL at AP %s' % ap.name)
if ap.is_debug:
rx_pkt = Ether(rx_bytes)
rx_pkt.show2()
rx_pkt.dump_offsets_tree()
if not ap.is_dtls_established:
if rx_bytes[42:43] == b'\0': # discovery response
capwap_bytes = rx_bytes[42:]
capwap_hlen = (struct.unpack('!B', capwap_bytes[1:2])[0] & 0b11111000) >> 1
ctrl_header_type = struct.unpack('!B', capwap_bytes[capwap_hlen+3:capwap_hlen+4])[0]
if ctrl_header_type != 2:
return
ap.mac_dst_bytes = rx_bytes[6:12]
ap.mac_dst = str2mac(ap.mac_dst_bytes)
ap.wlc_ip_bytes = rx_bytes[26:30]
ap.ip_dst = str2ip(ap.wlc_ip_bytes)
result_code = CAPWAP_PKTS.parse_message_elements(capwap_bytes, capwap_hlen, ap, self.ap_mngr)
ap.rx_responses[2] = result_code
elif rx_bytes[42:43] == b'\1': # dtls handshake
ap.rx_buffer.append(rx_bytes)
return
is_dtls = struct.unpack('?', rx_bytes[42:43])[0]
if not is_dtls: # dtls is established, ctrl should be encrypted
return
if (rx_bytes[46:47] == b'\x15'): # DTLS alert
ap.is_dtls_closed = True
ap.is_connected = False
self.err("Server sent DTLS alert to AP '%s'." % ap.name)
rx_pkt_buf = ap.decrypt(rx_bytes[46:])
if not rx_pkt_buf:
return
if rx_pkt_buf[0:1] not in (b'\0', b'\1'): # definitely not CAPWAP... should we debug it?
ap.debug('Not CAPWAP, skipping: %s' % hex(rx_pkt_buf))
return
#rx_pkt = CAPWAP_CTRL(rx_pkt_buf)
ap.last_recv_ts = time.time()
if ap.is_debug:
rx_pkt.show2()
capwap_assemble = ap.capwap_assemble
if struct.unpack('!B', rx_pkt_buf[3:4])[0] & 0x80: # is_fragment
rx_pkt = CAPWAP_CTRL(rx_pkt_buf)
if capwap_assemble:
assert ap.capwap_assemble['header'].fragment_id == rx_pkt.header.fragment_id, 'Got CAPWAP fragments with out of order (different fragment ids)'
control_str = bytes(rx_pkt[CAPWAP_Control_Header_Fragment])
if rx_pkt.header.fragment_offset * 8 != len(capwap_assemble['buf']):
self.err('Fragment offset and data length mismatch')
capwap_assemble.clear()
return
#if rx_pkt.header.fragment_offset * 8 > len(capwap_assemble['buf']):
# print('Fragment offset: %s, data so far length: %s (not enough data)' % (rx_pkt.header.fragment_offset, len(capwap_assemble['buf'])))
#elif rx_pkt.header.fragment_offset * 8 < len(capwap_assemble['buf']):
# capwap_assemble['buf'] = capwap_assemble['buf'][:rx_pkt.header.fragment_offset * 8]
capwap_assemble['buf'] += control_str
if rx_pkt.is_last_fragment():
capwap_assemble['assembled'] = CAPWAP_CTRL(
header = capwap_assemble['header'],
control_header = CAPWAP_Control_Header(capwap_assemble['buf'])
)
else:
if rx_pkt.is_last_fragment():
self.err('Got CAPWAP first fragment that is also last fragment!')
return
if rx_pkt.header.fragment_offset != 0:
rx_pkt.show2()
self.err('Got out of order CAPWAP fragment, does not start with zero offset')
return
capwap_assemble['header'] = rx_pkt.header
capwap_assemble['header'].flags &= ~0b11000
capwap_assemble['buf'] = bytes(rx_pkt[CAPWAP_Control_Header_Fragment])
capwap_assemble['ap'] = ap
elif capwap_assemble:
self.err('Got not fragment in middle of assemble of fragments (OOO).')
capwap_assemble.clear()
else:
capwap_assemble['assembled'] = rx_pkt_buf
rx_pkt_buf = capwap_assemble.get('assembled')
if not rx_pkt_buf or rx_pkt_buf[0:1] != b'\0':
return
capwap_assemble.clear()
#rx_pkt = CAPWAP_CTRL(rx_pkt_buf)
#rx_pkt.show2()
#rx_pkt.dump_offsets_tree()
if ap.is_debug:
CAPWAP_CTRL(rx_pkt_buf).show2()
capwap_hlen = (struct.unpack('!B', rx_pkt_buf[1:2])[0] & 0b11111000) >> 1
ctrl_header_type = struct.unpack('!B', rx_pkt_buf[capwap_hlen+3:capwap_hlen+4])[0]
if ctrl_header_type == 7: # Configuration Update Request
#rx_pkt.show2()
CAPWAP_PKTS.parse_message_elements(rx_pkt_buf, capwap_hlen, ap, self.ap_mngr) # get info from incoming packet
seq = struct.unpack('!B', rx_pkt_buf[capwap_hlen+4:capwap_hlen+5])[0]
tx_pkt = ap.get_config_update_capwap(seq)
if ap.is_debug:
CAPWAP_CTRL(tx_pkt.value).show2()
self.send_pkts.append(ap.wrap_capwap_pkt(b'\1\0\0\0' + ap.encrypt(tx_pkt)))
elif ctrl_header_type == 14: # Echo Response
ap.echo_resp_timer = None
elif ctrl_header_type == 17: # Reset Request
self.err('AP %s got Reset request, shutting down' % ap.name)
#self.send_pkts.append(ap.wrap_capwap_pkt(b'\1\0\0\0' + ap.encrypt(tx_pkt)))
self.shutdown_ap(ap)
elif ctrl_header_type in (4, 6, 12):
result_code = CAPWAP_PKTS.parse_message_elements(rx_pkt_buf, capwap_hlen, ap, self.ap_mngr)
ap.rx_responses[ctrl_header_type] = result_code
else:
rx_pkt.show2()
ap.err('Got unhandled capwap header type: %s' % ctrl_header_type)
def handle_client_arp(self, dot11_bytes, ap):
ip = dot11_bytes[58:62]
client = self.client_per_ip.get(ip)
if not client:
return
if client.ap is not | |
labels = list()
preds_model = list()
preds_enr = list()
for batch_idx, (input_rss, slf_enr, target_slf) in enumerate(test_loader):
input_rss = input_rss.to(self.device)
slf_enr = slf_enr.to(self.device)
target_slf = target_slf.to(self.device)
slf_recon = model(input_rss)
# test SLF image RMSE
slf_rmse_model += F.mse_loss(slf_recon, target_slf, reduction='sum').item() / (self.args.K0 * self.args.K1)
slf_rmse_enr += F.mse_loss(slf_enr, target_slf, reduction='sum').item() / (self.args.K0 * self.args.K1)
# test SLF image MAE
slf_mae_model += F.l1_loss(slf_recon, target_slf, reduction='sum').item() / (self.args.K0 * self.args.K1)
slf_mae_enr += F.l1_loss(slf_enr, target_slf, reduction='sum').item() / (self.args.K0 * self.args.K1)
labels.append(target_slf.detach().cpu().numpy())
preds_model.append(slf_recon.detach().cpu().numpy())
preds_enr.append(slf_enr.detach().cpu().numpy())
slf_rmse_model /= len(test_loader.dataset)
slf_rmse_model = np.sqrt(slf_rmse_model)
slf_mae_model /= len(test_loader.dataset)
slf_rmse_enr /= len(test_loader.dataset)
slf_rmse_enr = np.sqrt(slf_rmse_enr)
slf_mae_enr /= len(test_loader.dataset)
print('====> Test Model:')
print('====> Noise Level: ' + noise_level)
print('====> Recon SLF rmse: {:.6f}'.format(slf_rmse_model))
print('====> ENR SLF rmse: {:.6f}'.format(slf_rmse_enr))
print('====> Recon SLF mae: {:.6f}'.format(slf_mae_model))
print('====> ENR SLF mae: {:.6f}'.format(slf_mae_enr))
## revise part for manual ploting roc curve
labels = np.concatenate(labels, axis=0)
np.save('results/compare/ROC_data/labels_' + noise_level + '.npy', labels)
preds_model = np.concatenate(preds_model, axis=0)
np.save('results/compare/ROC_data/slf_estimator_preds_'+noise_level+'.npy', preds_model)
preds_enr = np.concatenate(preds_enr, axis=0)
np.save('results/compare/ROC_data/enr_preds_' + noise_level + '.npy', preds_enr)
# # code for auto ploting roc curve
# labels = np.concatenate(labels, axis=0).reshape(-1)
# preds1 = np.concatenate(preds1, axis=0).reshape(-1)
# preds2 = np.concatenate(preds2, axis=0).reshape(-1)
# labels[labels > 0] = 1
# save_path1 = self.args.run + 'ENR/' + 'roc_' + noise_level + '.npz'
# save_path2 = self.path + 'roc_' + noise_level + '.npz'
# fpr, tpr, thresholds = roc_curve(labels, preds1, pos_label=1)
# np.savez(save_path1, fpr=fpr, tpr=tpr, thresholds=thresholds)
# fpr, tpr, thresholds = roc_curve(labels, preds2, pos_label=1)
# np.savez(save_path2, fpr=fpr, tpr=tpr, thresholds=thresholds)
def estimate(self, noise_level='low'):
if noise_level != 'low' and noise_level != 'mid' and noise_level != 'high' :
raise NotImplementedError('Noise level {} not understood.'.format(noise_level))
model = SLF_Estimator(M=self.args.M, P=self.args.P, K=(self.args.K0, self.args.K1))
model = model.to(self.device)
model_path = self.path + 'model_' + str(self.args.n_epochs) + '.pth'
model.load_state_dict(torch.load(model_path, map_location=self.device))
model.eval()
test_loader = self.load_data_metrics(noise_level)
path_clean = 'results/compare/YOLO/Origin/images/clean/'+noise_level+'/'
path_enr = 'results/compare/YOLO/Origin/images/enr/' + noise_level + '/'
path_model = 'results/compare/YOLO/Origin/images/estimator/' + noise_level + '/'
if not os.path.exists(path_clean):
os.makedirs(path_clean)
if not os.path.exists(path_enr):
os.makedirs(path_enr)
if not os.path.exists(path_model):
os.makedirs(path_model)
print('====> Test Model with estimation:')
print('====> Noise Level: ' + noise_level)
thr = 0.4
# test model
for batch_idx, (input_rss, slf_enr, target_slf) in enumerate(test_loader):
input_rss = input_rss.to(self.device)
slf_enr = slf_enr.to(self.device)
target_slf = target_slf.to(self.device)
slf_recon = model(input_rss)
target_slf = target_slf.detach().cpu().numpy()
slf_enr = slf_enr.detach().cpu().numpy()
slf_recon = slf_recon.detach().cpu().numpy()
for idx in range(len(target_slf)):
clean_img = target_slf[idx, 0, :, :]
enr_img = slf_enr[idx, 0, :, :]
# enr_img[enr_img > thr] = 1
# enr_img[enr_img <= thr] = 0
estimator_img = slf_recon[idx, 0, :, :]
# estimator_img[estimator_img > thr] = 1
# estimator_img[estimator_img <= thr] = 0
clean_img = cv2.resize(clean_img, (320, 320), interpolation=cv2.INTER_NEAREST)*255
enr_img = cv2.resize(enr_img, (320, 320), interpolation=cv2.INTER_NEAREST)*255
estimator_img = cv2.resize(estimator_img, (320, 320), interpolation=cv2.INTER_NEAREST)*255
cv2.imwrite(path_clean + 'im'+str(idx+batch_idx*self.args.batch_size+1) + '.jpg', clean_img)
cv2.imwrite(path_enr + 'im' + str(idx+batch_idx*self.args.batch_size+1) +'.jpg', enr_img)
cv2.imwrite(path_model + 'im' + str(idx+batch_idx*self.args.batch_size+1) +'.jpg', estimator_img)
# image = cv2.imread(path_enr + 'im' + str(1) +'.jpg', cv2.IMREAD_GRAYSCALE)
# print(np.amax(image))
# print(np.shape(image))
# break
# runner for SLF-VAE
class SLF_VAE_runner():
def __init__(self, args):
self.args = args
cuda_flag = not self.args.no_cuda and torch.cuda.is_available()
self.device = torch.device("cuda" if cuda_flag else "cpu")
torch.manual_seed(self.args.seed)
random.seed(self.args.seed)
self.path = self.args.run + 'SLF_VAE/'
if not os.path.exists(self.path):
os.makedirs(self.path)
def get_optimizer(self, parameters):
if self.args.optimizer == 'Adam':
return optim.Adam(parameters, lr=self.args.lr, weight_decay=self.args.weight_decay, betas=(0.9, 0.999))
elif self.args.optimizer == 'RMSProp':
return optim.RMSprop(parameters, lr=self.args.lr, weight_decay=self.args.weight_decay)
elif self.args.optimizer == 'SGD':
return optim.SGD(parameters, lr=self.args.lr, momentum=0.9)
else:
raise NotImplementedError('Optimizer {} not understood.'.format(self.args.optimizer))
    def load_data(self, noise_level='all'):
        """Build the train/val/test DataLoaders for the SLF-VAE.

        Training RSS is min-max normalised per feature and the SAME
        min/max are reused for the test set.  ``noise_level`` selects
        which test samples to keep ('all', 'low', 'mid' or 'high').
        Returns (train_loader, val_loader, test_loader).
        """
        # Load training data
        training_data = sio.loadmat('./data/training_data.mat')
        # normalized noisy RSS measurement input (shape: [num_sample, NxPxP])
        train_RSS = training_data['RSS']
        RSS_min = np.amin(train_RSS, axis=0, keepdims=True)
        RSS_max = np.amax(train_RSS, axis=0, keepdims=True)
        train_RSS = (train_RSS - RSS_min) / (RSS_max - RSS_min)
        train_RSS = train_RSS.reshape(-1, self.args.N, self.args.P, self.args.P)
        # SLF image (shape: [num_sample, K0, K1])
        train_slf_img = training_data['slf_img']
        train_slf_img = train_slf_img.reshape(-1, 1, self.args.K0, self.args.K1)
        # SLF image ENR (shape: [num_sample, K0, K1])
        train_slf_img_ENR = training_data['slf_img_ENR']
        train_slf_img_ENR = train_slf_img_ENR.reshape(-1, 1, self.args.K0, self.args.K1)
        train_RSS = torch.from_numpy(train_RSS).float()
        train_slf_img = torch.from_numpy(train_slf_img).float()
        train_slf_img_ENR = torch.from_numpy(train_slf_img_ENR).float()
        dataset = TensorDataset(train_RSS, train_slf_img_ENR, train_slf_img)
        # 90/10 train/validation split, reproducible via the run seed.
        train_size = int(len(dataset) * 0.9)
        val_size = int(len(dataset) * 0.1)
        dataset_train, dataset_val = random_split(dataset, [train_size, val_size],
                                                  generator=torch.Generator().manual_seed(self.args.seed))
        # Load testing data
        testing_data = sio.loadmat('./data/testing_data.mat')
        # noise level classes (shape: [num_sample, 1]) (classes: 0, 1, 2)
        noise_class = testing_data['sig_epsilon_class']
        noise_class = np.squeeze(noise_class)
        # default mask keeps every class; narrowed below when requested
        idx = (noise_class == 0) | (noise_class == 1) | (noise_class == 2)
        if noise_level == 'all':
            pass
        elif noise_level == 'low':
            idx = (noise_class == 0)
        elif noise_level == 'mid':
            idx = (noise_class == 1)
        elif noise_level == 'high':
            idx = (noise_class == 2)
        else:
            raise NotImplementedError('Noise level {} not understood.'.format(noise_level))
        # normalized noisy RSS measurement input (shape: [num_sample, NxPxP])
        # NOTE: normalised with the *training* min/max on purpose.
        test_RSS = testing_data['RSS'][idx]
        test_RSS = (test_RSS - RSS_min) / (RSS_max - RSS_min)
        test_RSS = test_RSS.reshape(-1, self.args.N, self.args.P, self.args.P)
        # SLF image (shape: [num_sample, K0, K1])
        test_slf_img = testing_data['slf_img'][idx]
        test_slf_img = test_slf_img.reshape(-1, 1, self.args.K0, self.args.K1)
        # SLF image ENR (shape: [num_sample, K0, K1])
        test_slf_img_ENR = testing_data['slf_img_ENR'][idx]
        test_slf_img_ENR = test_slf_img_ENR.reshape(-1, 1, self.args.K0, self.args.K1)
        test_RSS = torch.from_numpy(test_RSS).float()
        test_slf_img = torch.from_numpy(test_slf_img).float()
        test_slf_img_ENR = torch.from_numpy(test_slf_img_ENR).float()
        dataset_test = TensorDataset(test_RSS, test_slf_img_ENR, test_slf_img)
        train_loader = DataLoader(
            dataset_train,
            batch_size=self.args.batch_size, shuffle=True, num_workers=2)
        val_loader = DataLoader(
            dataset_val,
            batch_size=self.args.batch_size, shuffle=True, num_workers=2)
        test_loader = DataLoader(
            dataset_test,
            batch_size=self.args.batch_size, shuffle=False, num_workers=2)
        print("Data Loaded!")
        return train_loader, val_loader, test_loader
    def train(self, model, train_loader, optimizer, epoch):
        """Run one training epoch of the SLF-VAE.

        Accumulates total / reconstruction / KLD losses plus the RMSE of
        the ENR baseline and of the model, printing a progress line every
        ``log_interval`` batches.  Returns the epoch-averaged metrics
        (loss, kld, recon, rmse_enr, rmse_model).
        """
        model.train()
        train_loss = 0  # total loss
        train_recon_loss = 0  # recon loss
        train_kld = 0  # kld loss
        train_rmse1 = 0  # rmse for enr
        train_rmse2 = 0  # rmse for our model
        for batch_idx, (input_rss, slf_coarse, target_slf) in enumerate(train_loader):
            input_rss = input_rss.to(self.device)
            slf_coarse = slf_coarse.to(self.device)
            target_slf = target_slf.to(self.device)
            optimizer.zero_grad()
            mu, logvar, slf_recon = model.train_model(input_rss, target_slf, slf_coarse)
            recon_loss, kld = model.train_loss(mu, logvar, slf_recon, target_slf)
            # weighted ELBO: lambda0 * KLD + lambda1 * reconstruction
            loss = self.args.lambda0 * kld + self.args.lambda1 * recon_loss
            train_loss += loss.item()
            train_recon_loss += recon_loss.item()
            train_kld += kld.item()
            # per-pixel MSE of the ENR baseline (mse1) and of the model (mse2)
            mse1 = F.mse_loss(slf_coarse, target_slf, reduction='sum')/(self.args.K0*self.args.K1)
            mse2 = F.mse_loss(slf_recon, target_slf, reduction='sum') / (self.args.K0 * self.args.K1)
            train_rmse1 += mse1.item()
            train_rmse2 += mse2.item()
            # optimise on the per-sample mean loss
            loss /= input_rss.size(0)
            loss.backward()
            optimizer.step()
            if batch_idx % self.args.log_interval == 0:
                print(
                    'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tRecon_Loss: {:.6f}\tKLD: {:.6f}'.format(
                        epoch, batch_idx * len(input_rss), len(train_loader.dataset),
                        100. * batch_idx / len(train_loader),
                        loss.item(), recon_loss.item() / len(input_rss),
                        kld.item() / len(input_rss)))
        # average over the dataset; RMSEs need a final square root
        train_loss /= len(train_loader.dataset)
        train_recon_loss /= len(train_loader.dataset)
        train_kld /= len(train_loader.dataset)
        train_rmse1 /= len(train_loader.dataset)
        train_rmse1 = np.sqrt(train_rmse1)
        train_rmse2 /= len(train_loader.dataset)
        train_rmse2 = np.sqrt(train_rmse2)
        print('====> Epoch: {} Average loss: {:.6f}'.format(epoch, train_loss))
        print('====> Recon loss: {:.6f}'.format(train_recon_loss))
        print('====> KLD: {:.6f}'.format(train_kld))
        print('====> RMSE for ENR: {:.6f}'.format(train_rmse1))
        print('====> RMSE for our model: {:.6f}'.format(train_rmse2))
        return train_loss, train_kld, train_recon_loss, train_rmse1, train_rmse2
def validate(self, model, val_loader):
model.eval()
val_recon_loss = 0 # recon loss
val_rmse1 = 0 # rmse for ENR
val_rmse2 = 0 # rmse for our model
for batch_idx, (input_rss, slf_coarse, target_slf) in enumerate(val_loader):
input_rss = input_rss.to(self.device)
slf_coarse = slf_coarse.to(self.device)
target_slf = target_slf.to(self.device)
slf_recon = model.test_model(input_rss, slf_coarse)
recon_loss = model.test_loss(slf_recon, target_slf)
val_recon_loss += recon_loss.item()
mse1 = F.mse_loss(slf_coarse, target_slf, reduction='sum')/(self.args.K0*self.args.K1)
mse2 = F.mse_loss(slf_recon, target_slf, reduction='sum')/(self.args.K0*self.args.K1)
val_rmse1 += mse1.item()
val_rmse2 += mse2.item()
val_recon_loss /= len(val_loader.dataset)
val_rmse1 /= len(val_loader.dataset)
val_rmse1 = np.sqrt(val_rmse1)
val_rmse2 /= len(val_loader.dataset)
val_rmse2 = np.sqrt(val_rmse2)
print('====> Recon loss: {:.6f}'.format(val_recon_loss))
print('====> RMSE for ENR: {:.6f}'.format(val_rmse1))
print('====> RMSE for our model: {:.6f}'.format(val_rmse2))
return val_recon_loss, val_rmse1, val_rmse2
def test(self, model, test_loader):
model.eval()
test_recon_loss = 0 # recon loss
test_rmse1 = 0 # rmse for ENR
test_rmse2 = 0 # rmse for our model
for batch_idx, (input_rss, slf_coarse, target_slf) in enumerate(test_loader):
input_rss = input_rss.to(self.device)
slf_coarse = slf_coarse.to(self.device)
target_slf = target_slf.to(self.device)
slf_recon = model.test_model(input_rss, slf_coarse)
recon_loss = model.test_loss(slf_recon, target_slf)
test_recon_loss += recon_loss.item()
mse1 = F.mse_loss(slf_coarse, target_slf, reduction='sum') / (self.args.K0 * self.args.K1)
mse2 = F.mse_loss(slf_recon, target_slf, reduction='sum') / (self.args.K0 * self.args.K1)
test_rmse1 += mse1.item()
test_rmse2 += mse2.item()
test_recon_loss /= len(test_loader.dataset)
test_rmse1 /= len(test_loader.dataset)
test_rmse1 = np.sqrt(test_rmse1)
test_rmse2 /= len(test_loader.dataset)
test_rmse2 = np.sqrt(test_rmse2)
print('====> Recon loss: {:.6f}'.format(test_recon_loss))
print('====> RMSE for ENR: {:.6f}'.format(test_rmse1))
print('====> RMSE for our model: {:.6f}'.format(test_rmse2))
return test_recon_loss, test_rmse1, test_rmse2
def train_save(self):
model = SLF_VAE(M=self.args.M, P=self.args.P, K=(self.args.K0, self.args.K1))
model = model.to(self.device)
train_loader, val_loader, test_loader = self.load_data()
optimizer = self.get_optimizer(model.parameters())
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.2, patience=8, min_lr=1e-5)
train_set = []
val_set = []
test_set = []
train_loss_path = self.path + 'train_loss_' + str(self.args.n_epochs) + '.npy'
val_loss_path = self.path + 'val_loss_' + str(self.args.n_epochs) + '.npy'
test_loss_path = self.path | |
# Repository: TuxML/tuxml
#!/usr/bin/python3
import argparse
import os
import shutil
import subprocess
import bz2
import json
from compilation.apiManager import APIManager
from compilation.environment import get_environment_details, print_environment_details
from compilation.configuration import create_configuration, print_configuration
from compilation.package_manager import PackageManager
from compilation.logger import Logger, COLOR_SUCCESS, COLOR_ERROR
from compilation.compiler import Compiler
from compilation.boot_checker import BootChecker
from compilation.database_management import fetch_connection_to_database, insert_if_not_exist_and_fetch_hardware, insert_if_not_exist_and_fetch_software, insert_and_fetch_compilation, insert_incrementals_compilation, insert_boot_result, insert_sizes
import compilation.settings as settings
## parser
# @author <NAME>
# @version 1
# @brief Parse the commandline and return the parsed argument.
def parser():
    """Parse the command-line arguments of the compilation script.

    :return: parsed arguments namespace; each attribute is one argument.
        See `argparse <https://docs.python.org/3/library/argparse.html>`_.
    :rtype: argparse.Namespace
    """
    arg_parser = argparse.ArgumentParser(
        description=""  # TODO: Fill the description
    )
    # positional: number of extra incremental compilations
    arg_parser.add_argument(
        "incremental",
        type=int,
        help="Optional. Provide the number of additional incremental "
             "compilation. Have to be 0 or over.",
        nargs='?',
        default=0
    )
    arg_parser.add_argument(
        "-s", "--silent",
        action="store_true",
        help="Prevent printing on standard output when compiling."
    )
    # --tiny and --config are mutually exclusive by convention (not enforced here)
    arg_parser.add_argument(
        "--tiny",
        action="store_true",
        help="Use Linux tiny configuration. Incompatible with --config "
             "argument."
    )
    arg_parser.add_argument(
        "--config",
        help="Give a path to specific configuration file. Incompatible with "
             "--tiny argument."
    )
    arg_parser.add_argument(
        "--clang_version",
        type=int,
        help="clang version to use. Only versions 9 and 11 are supported. "
             "May not work with all images (right now works only for Docker image with gcc10). 0 to use GCC, which is default.",
        default=0
    )
    arg_parser.add_argument(
        "--cpu_cores",
        help="Give the number of cpu cores to use. Default to 0, which mean all"
             " the cores.",
        default=0
    )
    arg_parser.add_argument(
        "--boot",
        action="store_true",
        help="Optional. Try to boot the kernel after compilation if the compilation "
             "has been successful."
    )
    arg_parser.add_argument(
        "--check_size",
        action="store_true",
        help="Optional. Compute additional size measurements on the kernel and send "
             "the results to the 'sizes' table (can be heavy)."
    )
    arg_parser.add_argument(
        "--json",
        action="store_true",
        help="Serialize into a JSON file with informations about the build."
    )
    arg_parser.add_argument(
        "--mount_host_dev",
        action="store_true",
        help="Should be used for development only. Enables to use local source code without regenerating Docker images."
    )
    arg_parser.add_argument(
        "--tagbuild",
        type=str,
        nargs="*",
        default=None,
        help="Optional. Enables to tag a compilation or a set of compilations (with a string)"
    )
    return arg_parser.parse_args()
## create_logger
# @author <NAME>
# @version 1
# @brief Create the logger object and return it.
def create_logger(silent):
    """Create and return the application logger.

    :param silent: prevent printing on standard output while compiling
    :type silent: bool
    :return: the created logger object
    :rtype: `Logger`_

    .. _Logger: logger.html
    """
    # The four log-file paths come from the project settings module; the order
    # matches the Logger constructor (output, stdout, stderr, boot).
    log_files = (
        settings.OUTPUT_FILE,
        settings.STDOUT_FILE,
        settings.STDERR_FILE,
        settings.BOOT_FILE,
    )
    return Logger(*log_files, silent)
## retrieve_and_display_environment
# @author <NAME>
# @version 1
# @brief Retrieve and display the environment dictionary.
def retrieve_and_display_environment(logger, clang_version=0):
    """Retrieve the environment details, print them through *logger*, return them.

    :param logger: the logger
    :type logger: `Logger`_
    :param clang_version: clang compiler version, if any (0 means gcc / no
        clang; only 9 and 11 are supported right now)
    :type clang_version: int
    :return: the environment details
    :rtype: dict
    """
    logger.timed_print_output("Getting environment details.")
    env = get_environment_details(clang_version)
    print_environment_details(env, logger.print_output)
    return env
## retrieve_and_display_configuration
# @author <NAME>
# @version 1
# @brief Retrieve and display the configuration dictionary.
def retrieve_and_display_configuration(logger, args):
    """Retrieve the machine configuration, print it through *logger*, return it.

    :param logger: the logger
    :type logger: `Logger`_
    :param args: parsed arguments
    :type args: `argparse.Namespace`_
    :return: configuration info
    :rtype: dict
    """
    logger.timed_print_output("Getting configuration details.")
    # A non-zero "incremental" argument flags this run as an incremental build.
    is_incremental = args.incremental != 0
    config = create_configuration(int(args.cpu_cores), is_incremental)
    print_configuration(config, logger.print_output)
    return config
## retrieve_sizes
# @author <NAME>
# @version 1
# @brief Retrieve the additional sizes with more specific commands
def retrieve_sizes(path, kernel_version):
    """Retrieve additional size measurements of a compiled kernel.

    :param path: path to the compiled Linux kernel tree
    :type path: str
    :param kernel_version: version of the compiled Linux kernel to retrieve
        the size from (e.g. ``"4.16"``)
    :type kernel_version: str
    :return: info about the retrieved sizes (raw command outputs)
    :rtype: dict
    """
    sizes_result = {}
    sizes_result['size_vmlinux'] = subprocess.run(
        ['size {}/vmlinux'.format(path)], shell=True,
        stdout=subprocess.PIPE).stdout.decode('utf-8')
    # Parse "major.minor"; e.g. "4.16" gives (4, 16). A missing minor part
    # defaults to 0. Assumes the leading components are plain integers.
    kversion = kernel_version.split(".")
    major = int(kversion[0])
    minor = int(kversion[1]) if len(kversion) >= 2 else 0
    # Kernels >= 4.17 renamed built-in.o to built-in.a, see
    # https://github.com/TuxML/ProjetIrma/issues/180. Use a tuple comparison so
    # every later version (5.x, 6.x, ...) also picks the new name; the previous
    # check only matched major == 5 exactly and would misclassify 6.x kernels.
    if (major, minor) >= (4, 17):
        builtin = "built-in.a"
    else:
        builtin = "built-in.o"
    # size_report should be preferred (some measurements might be deprecated in
    # the future). The bash scripts take two arguments: the kernel path and the
    # kind of built-in file (.a or .o).
    sizes_result['size_report_builtin'] = subprocess.run(
        ['bash {} {} {}'.format(settings.SIZE_REPORT_FILE, path, builtin)],
        shell=True, stdout=subprocess.PIPE).stdout.decode('utf-8')  # full report
    sizes_result['size_report_builtin_coarse'] = subprocess.run(
        ['bash {} {} {}'.format(settings.SIZE_REPORT_COARSE_FILE, path, builtin)],
        shell=True, stdout=subprocess.PIPE).stdout.decode('utf-8')  # coarse-grained summary
    return sizes_result
## run
# @author <NAME>
# @version 1
# @brief Do all the test, from compilation to sending the result to the database
# @details It does all the job, but for one and only one compilation. Therefore,
# it should be called multiple time for multiple compilation.
def run(boot, check_size, logger, configuration, environment,
        package_manager, tiny=False, config_file=None,
        cid_before=None, json_bool=False, clang_version=0, tagbuild=None):
    """Do all the tests, from compilation to sending the results to the
    database.

    It does all the job, but for one and only one compilation. Therefore,
    it should be called multiple times for multiple compilations.

    :param boot: boot the compiled kernel
    :type boot: bool
    :param check_size: check the size of the compiled kernel
    :type check_size: bool
    :param logger: logger
    :type logger: `Logger`_
    :param configuration: configuration info (See
        :py:func:`retrieve_and_display_configuration`)
    :type configuration: dict
    :param environment: environment info (See
        :py:func:`retrieve_and_display_environment`)
    :type environment: dict
    :param package_manager: package manager
    :type package_manager: `PackageManager <package_manager.html>`_
    :param tiny: use a tiny configuration or not
    :type tiny: bool
    :param config_file: path to a configuration file
    :type config_file: str
    :param cid_before: cid of a base compilation for incremental builds
        (currently unused inside this function)
    :param json_bool: also serialize the result into a local JSON file
    :type json_bool: bool
    :param clang_version: Clang version to use. 0 to use GCC. Only 9 and 11
        are supported on Debian 11.
    :type clang_version: int
    :param tagbuild: optional list of tag strings attached to this build
    :type tagbuild: list
    :return: the cid returned by the TuxML API, or 0 on failure
    :rtype: int
    """
    # Map the requested clang version to the compiler executable; gcc is the
    # default when clang_version is 0 (or any unsupported value).
    compiler_exec = 'gcc'
    if clang_version == 9:
        compiler_exec = 'clang-9'
    elif clang_version == 11:
        compiler_exec = 'clang'
    compiler = Compiler(
        logger=logger,
        package_manager=package_manager,
        nb_core=configuration['core_used'],
        kernel_path=configuration['kernel_path'],
        kernel_version=configuration['kernel_version_compilation'],
        tiny=tiny,
        config_file=config_file,
        compiler_exec=compiler_exec
    )
    compiler.run()
    compilation_result = compiler.get_compilation_dictionary()
    environmenthard = environment['hardware']
    environmentsoft = environment["software"]
    # NOTE(review): boot_result is collected but never included in json_data
    # below -- confirm whether boot data should be sent to the API.
    boot_result = None
    # By default the size report is not performed.
    sizes_result = {'size_vmlinux': -2, 'size_report_builtin': None,
                    'size_report_builtin_coarse': None}
    if compiler.is_successful():
        if check_size:
            sizes_result = retrieve_sizes(configuration['kernel_path'],
                                          configuration['kernel_version_compilation'])
        if boot:
            boot_checker = BootChecker(logger, configuration['kernel_path'])
            boot_checker.run()
            boot_result = boot_checker.get_boot_dictionary()
        else:
            logger.reset_boot_pipe()
    cid = 0
    tagbuild_str = ' '.join(tagbuild) if tagbuild else ""
    # Read the generated .config and the log files with context managers so the
    # handles are closed deterministically; the previous open(...).read() calls
    # leaked file descriptors until garbage collection.
    with open("{}/.config".format(compiler.get_kernel_path()), "r") as config_fd:
        configfile = config_fd.read()
    with open(logger.get_stdout_file(), "r") as stdout_fd:
        stdout_log = stdout_fd.read()
    with open(logger.get_stderr_file(), "r") as stderr_fd:
        stderr_log = stderr_fd.read()
    with open(logger.get_user_output_file(), "r") as user_output_fd:
        user_output_log = user_output_fd.read()
    json_data = {
        'cid': 0,
        'compilation_date': compilation_result['compilation_date'],
        'compilation_time': compilation_result['compilation_time'],
        'compiled_kernel_size': compilation_result['compiled_kernel_size'],
        'compiled_kernel_version': compilation_result['compiled_kernel_version'],
        'dependencies': compilation_result['dependencies'],
        'number_cpu_core_used': compilation_result['number_cpu_core_used'],
        'compressed_compiled_kernel_size': compilation_result['compressed_compiled_kernel_size'],
        'stdout_log_file': stdout_log,
        'stderr_log_file': stderr_log,
        'user_output_file': user_output_log,
        'compiler_version': environmentsoft["compiler_version"],
        'tiny': tiny,
        'config_file': configfile,
        'boot': boot,
        'cpu_brand_name': environmenthard['cpu_brand_name'],
        'cpu_max_frequency': environmenthard['cpu_max_frequency'],
        'ram_size': environmenthard['ram_size'],
        'architecture': environmenthard['architecture'],
        'number_cpu_core': environmenthard['number_cpu_core'],
        'mechanical_disk': environmenthard['mechanical_disk'],
        'libc_version': environmentsoft['libc_version'],
        'tuxml_version': environmentsoft['tuxml_version'],
        'system_kernel': environmentsoft['system_kernel'],
        'linux_distribution': environmentsoft['linux_distribution'],
        'linux_distribution_version': environmentsoft['linux_distribution_version'],
        'system_kernel_version': environmentsoft['system_kernel_version'],
        'tagbuild': tagbuild_str,
        # TODO: same key, refactor code
        'size_vmlinux': sizes_result['size_vmlinux'],
        'size_report_builtin': sizes_result['size_report_builtin'],
        'size_report_builtin_coarse': sizes_result['size_report_builtin_coarse']
    }
    apiManager = APIManager()
    response = apiManager.sendPost(json_data)
    # 201 Created means the API accepted the compilation and returned its cid.
    if response.status_code == 201:
        cid = response.json()
        logger.timed_print_output(
            "Compilation send to TuxML API.",
            color=COLOR_SUCCESS
        )
        logger.timed_print_output(
            "CID received from database : " + str(cid),
            color=COLOR_SUCCESS
        )
    else:
        logger.timed_print_output(
            "Error received from TuxML API when sending compilation.",
            color=COLOR_ERROR
        )
        logger.timed_print_output(
            "Status code : " + str(response.status_code),
            color=COLOR_ERROR
        )
        logger.timed_print_output(
            "Error message : " + response.text,
            color=COLOR_ERROR
        )
    if json_bool:
        create_json_file(cid, json_data)
    return cid
def create_json_file(cid, json_data):
    """Attach *cid* to *json_data* and dump it to the internal JSON file.

    :param cid: compilation id returned by the database/API
    :param json_data: the compilation result dictionary (mutated in place)
    :type json_data: dict
    """
    json_data["cid"] = cid
    with open(settings._JSON_INTERNAL_FILENAME, 'w') as out:
        json.dump(json_data, out)
## archive_log
# @author <NAME>
# @version 1
# @brief Retrieve the logs file, create a directory named <cid>, and put the log
# in the created directory.
def archive_log(cid):
    """Archive the current log files under ``LOG_DIRECTORY/<cid>``.

    Creates a directory named after *cid* inside the log directory and copies
    every regular file found directly in the log directory into it, preserving
    metadata (``shutil.copy2``).

    :param cid: compilation id used to name the archive directory
    """
    directory = "{}/{}".format(settings.LOG_DIRECTORY, cid)
    # exist_ok avoids crashing when the same cid is archived twice (e.g. cid 0
    # after a failed database insert); previously this raised FileExistsError.
    os.makedirs(directory, exist_ok=True)
    file_list = [file for file in os.listdir(settings.LOG_DIRECTORY)
                 if os.path.isfile(os.path.join(settings.LOG_DIRECTORY, file))]
    for file in file_list:
        shutil.copy2(
            os.path.join(settings.LOG_DIRECTORY, file),
            os.path.join(directory, file))
## insert_result_into_database
# @author <NAME>
# @version 1
# @brief Send the sample result onto the data.
def insert_result_into_database(logger, compilation, hardware, software,
sizes=None, cid_incremental=None, boot=None):
logger.timed_print_output("Sending result to database.")
connection = fetch_connection_to_database(
settings.IP_BDD,
settings.USERNAME_BDD,
settings.PASSWORD_USERNAME_BDD,
settings.NAME_BDD)
cursor = connection.cursor()
hid = insert_if_not_exist_and_fetch_hardware(connection, cursor, hardware)
sid = insert_if_not_exist_and_fetch_software(connection, cursor, software)
compilation['hid'] = hid
compilation['sid'] = sid
cid = insert_and_fetch_compilation(connection, cursor, compilation)
if cid_incremental is not None:
insert_incrementals_compilation(
connection, cursor,
{'cid': cid, 'cid_base': cid_incremental, 'incremental_level': 1})
if boot is | |
The value for which to search.
- *(string) --*
- **Type** *(string) --*
The type of comparison that should be performed for the value: Equal, NotEqual, BeginWith, LessThan, or GreaterThan.
:type NextToken: string
:param NextToken:
A token to start the list. Use this token to get the next set of results.
:type MaxResults: integer
:param MaxResults:
The maximum number of items to return for this call. Currently, you can specify null or 50. The call also returns a token that you can specify in a subsequent call to get the next set of results.
:rtype: dict
:returns:
"""
pass
def list_document_versions(self, Name: str, MaxResults: int = None, NextToken: str = None) -> Dict:
    """
    List all versions for a document.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListDocumentVersions>`_
    **Request Syntax**
    ::
        response = client.list_document_versions(
            Name='string',
            MaxResults=123,
            NextToken='string'
        )
    **Response Syntax**
    ::
        {
            'DocumentVersions': [
                {
                    'Name': 'string',
                    'DocumentVersion': 'string',
                    'VersionName': 'string',
                    'CreatedDate': datetime(2015, 1, 1),
                    'IsDefaultVersion': True|False,
                    'DocumentFormat': 'YAML'|'JSON',
                    'Status': 'Creating'|'Active'|'Updating'|'Deleting'|'Failed',
                    'StatusInformation': 'string'
                },
            ],
            'NextToken': 'string'
        }
    **Response Structure**
    - *(dict) --*
      - **DocumentVersions** *(list) --*
        The document versions.
        - *(dict) --*
          Version information about the document.
          - **Name** *(string) --*
            The document name.
          - **DocumentVersion** *(string) --*
            The document version.
          - **VersionName** *(string) --*
            The version of the artifact associated with the document. For example, "Release 12, Update 6". This value is unique across all versions of a document, and cannot be changed.
          - **CreatedDate** *(datetime) --*
            The date the document was created.
          - **IsDefaultVersion** *(boolean) --*
            An identifier for the default version of the document.
          - **DocumentFormat** *(string) --*
            The document format, either JSON or YAML.
          - **Status** *(string) --*
            The status of the Systems Manager document, such as ``Creating`` , ``Active`` , ``Failed`` , and ``Deleting`` .
          - **StatusInformation** *(string) --*
            A message returned by AWS Systems Manager that explains the ``Status`` value. For example, a ``Failed`` status might be explained by the ``StatusInformation`` message, "The specified S3 bucket does not exist. Verify that the URL of the S3 bucket is correct."
      - **NextToken** *(string) --*
        The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.
    :type Name: string
    :param Name: **[REQUIRED]**
      The name of the document about which you want version information.
    :type MaxResults: integer
    :param MaxResults:
      The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.
    :type NextToken: string
    :param NextToken:
      The token for the next set of items to return. (You received this token from a previous call.)
    :rtype: dict
    :returns:
    """
    # Stub: the docstring above documents the AWS SSM API call; the concrete
    # client method is presumably generated at runtime by botocore -- this
    # file only provides the interface/type hints.
    pass
def list_documents(self, DocumentFilterList: List = None, Filters: List = None, MaxResults: int = None, NextToken: str = None) -> Dict:
    """
    Describes one or more of your Systems Manager documents.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListDocuments>`_
    **Request Syntax**
    ::
        response = client.list_documents(
            DocumentFilterList=[
                {
                    'key': 'Name'|'Owner'|'PlatformTypes'|'DocumentType',
                    'value': 'string'
                },
            ],
            Filters=[
                {
                    'Key': 'string',
                    'Values': [
                        'string',
                    ]
                },
            ],
            MaxResults=123,
            NextToken='string'
        )
    **Response Syntax**
    ::
        {
            'DocumentIdentifiers': [
                {
                    'Name': 'string',
                    'Owner': 'string',
                    'VersionName': 'string',
                    'PlatformTypes': [
                        'Windows'|'Linux',
                    ],
                    'DocumentVersion': 'string',
                    'DocumentType': 'Command'|'Policy'|'Automation'|'Session'|'Package',
                    'SchemaVersion': 'string',
                    'DocumentFormat': 'YAML'|'JSON',
                    'TargetType': 'string',
                    'Tags': [
                        {
                            'Key': 'string',
                            'Value': 'string'
                        },
                    ]
                },
            ],
            'NextToken': 'string'
        }
    **Response Structure**
    - *(dict) --*
      - **DocumentIdentifiers** *(list) --*
        The names of the Systems Manager documents.
        - *(dict) --*
          Describes the name of a Systems Manager document.
          - **Name** *(string) --*
            The name of the Systems Manager document.
          - **Owner** *(string) --*
            The AWS user account that created the document.
          - **VersionName** *(string) --*
            An optional field specifying the version of the artifact associated with the document. For example, "Release 12, Update 6". This value is unique across all versions of a document, and cannot be changed.
          - **PlatformTypes** *(list) --*
            The operating system platform.
            - *(string) --*
          - **DocumentVersion** *(string) --*
            The document version.
          - **DocumentType** *(string) --*
            The document type.
          - **SchemaVersion** *(string) --*
            The schema version.
          - **DocumentFormat** *(string) --*
            The document format, either JSON or YAML.
          - **TargetType** *(string) --*
            The target type which defines the kinds of resources the document can run on. For example, /AWS::EC2::Instance. For a list of valid resource types, see `AWS Resource Types Reference <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html>`__ in the *AWS CloudFormation User Guide* .
          - **Tags** *(list) --*
            The tags, or metadata, that have been applied to the document.
            - *(dict) --*
              Metadata that you assign to your AWS resources. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. In Systems Manager, you can apply tags to documents, managed instances, Maintenance Windows, Parameter Store parameters, and patch baselines.
              - **Key** *(string) --*
                The name of the tag.
              - **Value** *(string) --*
                The value of the tag.
      - **NextToken** *(string) --*
        The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.
    :type DocumentFilterList: list
    :param DocumentFilterList:
      One or more filters. Use a filter to return a more specific list of results.
      - *(dict) --*
        Describes a filter.
        - **key** *(string) --* **[REQUIRED]**
          The name of the filter.
        - **value** *(string) --* **[REQUIRED]**
          The value of the filter.
    :type Filters: list
    :param Filters:
      One or more filters. Use a filter to return a more specific list of results.
      - *(dict) --*
        One or more filters. Use a filter to return a more specific list of documents.
        For keys, you can specify one or more tags that have been applied to a document.
        Other valid values include Owner, Name, PlatformTypes, and DocumentType.
        Note that only one Owner can be specified in a request. For example: ``Key=Owner,Values=Self`` .
        If you use Name as a key, you can use a name prefix to return a list of documents. For example, in the AWS CLI, to return a list of all documents that begin with ``Te`` , run the following command:
        ``aws ssm list-documents --filters Key=Name,Values=Te``
        If you specify more than two keys, only documents that are identified by all the tags are returned in the results. If you specify more than two values for a key, documents that are identified by any of the values are returned in the results.
        To specify a custom key and value pair, use the format ``Key=tag:[tagName],Values=[valueName]`` .
        For example, if you created a Key called region and are using the AWS CLI to call the ``list-documents`` command:
        ``aws ssm list-documents --filters Key=tag:region,Values=east,west Key=Owner,Values=Self``
        - **Key** *(string) --*
          The name of the filter key.
        - **Values** *(list) --*
          The value for the filter key.
          - *(string) --*
    :type MaxResults: integer
    :param MaxResults:
      The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.
    :type NextToken: string
    :param NextToken:
      The token for the next set of items to return. (You received this token from a previous call.)
    :rtype: dict
    :returns:
    """
    # Stub: the docstring above documents the AWS SSM API call; the concrete
    # client method is presumably generated at runtime by botocore -- this
    # file only provides the interface/type hints.
    pass
def list_inventory_entries(self, InstanceId: str, TypeName: str, Filters: List = None, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
A list of inventory items returned by the request.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListInventoryEntries>`_
**Request Syntax**
::
response = client.list_inventory_entries(
InstanceId='string',
TypeName='string',
Filters=[
{
'Key': 'string',
'Values': [
'string',
],
'Type': 'Equal'|'NotEqual'|'BeginWith'|'LessThan'|'GreaterThan'|'Exists'
},
],
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'TypeName': 'string',
'InstanceId': 'string',
'SchemaVersion': 'string',
'CaptureTime': 'string',
'Entries': [
{
'string': | |
# coding=utf-8
# Copyright 2019 team Purifier
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License")
from __future__ import absolute_import, division, print_function
import argparse
import csv
import logging
import os
import random
import sys
import numpy as np
import math
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from torch.nn import CrossEntropyLoss, MSELoss
from sklearn.metrics import matthews_corrcoef, f1_score
from file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
from modeling_purifier import BertForSequenceClassification, BertConfig
from mask_tokenizer import BertTokenizer, BasicTokenizer
from optimization import BertAdam, WarmupLinearSchedule
"""====== set parameter (변수설정) =======
gradient_accumulation_steps = 1
train_batch_size = 32
seed = 42
local_rank = -1
no_cuda = True
fp16 = False
do_train = False
do_eval = True
output_dir = './data/ouput/'
vocab_file = './data/vocab_korea.txt'
task_name = 'Puri'
do_lower_case = True
data_dir = './data/'
max_seq_length = 128
# num_train_epochs = 0.1
# warmup_proportion = 0.1
# learning_rate = 5e-5
# ====================================="""
class InputExample(object):
    """A single training/test example for simple sequence classification.

    Attributes mirror the constructor arguments:
    guid   -- unique identifier for the example
    text_a -- first (untokenized) text sequence
    text_b -- optional second sequence, for sequence-pair tasks only
    label  -- optional label (None for prediction-only examples)
    """

    def __init__(self, guid, text_a, text_b=None, label=None):
        self.guid, self.label = guid, label
        self.text_a, self.text_b = text_a, text_b
class InputFeatures(object):
    """A single set of features of data (model-ready representation).

    Holds the padded token ids, attention mask, segment ids and the label id
    produced from one ``InputExample``.
    """

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        self.input_ids, self.input_mask = input_ids, input_mask
        self.segment_ids, self.label_id = segment_ids, label_id
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated-values file into a list of row lists."""
        with open(input_file, "r", encoding="utf-8") as handle:
            rows = csv.reader(handle, delimiter="\t", quotechar=quotechar)
            if sys.version_info[0] == 2:
                # Python 2 csv yields byte strings; decode each cell to unicode.
                return [[unicode(cell, 'utf-8') for cell in row] for row in rows]
            return list(rows)
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer, output_mode):
    """Load a data file into a list of ``InputFeatures``.

    :param examples: iterable of ``InputExample``
    :param label_list: ordered list of label strings; indices become label ids
    :param max_seq_length: fixed length every feature is padded/truncated to
    :param tokenizer: project tokenizer whose ``tokenize`` returns a
        ``(tokens, puri_ids)`` pair (puri_ids flag toxic expressions)
    :param output_mode: "classification" (label mapped to an index) or
        "regression" (label cast to float)
    :return: ``(features, puri_ids_list)`` -- the features and, per example,
        the puri ids produced by the tokenizer
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    puri_ids_list = []
    for (ex_index, example) in enumerate(examples):
        tokens_a, puri_ids = tokenizer.tokenize(example.text_a)
        puri_ids_list.append(puri_ids)
        tokens_b = None
        if example.text_b:
            # tokenize() returns a (tokens, puri_ids) pair; only the tokens are
            # needed for the second segment. The previous code kept the whole
            # tuple, which broke _truncate_seq_pair (tuples have no pop()).
            tokens_b, _ = tokenizer.tokenize(example.text_b)
            # Account for [CLS], [SEP], [SEP] with "- 3".
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)
        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            segment_ids += [1] * (len(tokens_b) + 1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to. The trailing SEP token is also masked since
        # next-sentence classification is not needed here.
        input_mask = [1] * (len(input_ids) - 1)
        input_mask += [0]
        # Zero-pad up to the sequence length.
        padding = [0] * (max_seq_length - len(input_ids))
        input_ids += padding
        input_mask += padding
        segment_ids += padding
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        if output_mode == "classification":
            label_id = label_map[example.label]
        elif output_mode == "regression":
            label_id = float(example.label)
        else:
            raise KeyError(output_mode)
        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_id))
    return features, puri_ids_list
def convert_single_example_to_feature(example, max_seq_length, tokenizer, output_mode):
    '''Convert a single InputExample to an InputFeatures for prediction.

    Returns ``(feature, puri_ids_list)`` where ``puri_ids_list`` holds the
    tokenizer's ids used later for masking toxic expressions. ``output_mode``
    is accepted for interface symmetry with ``convert_examples_to_features``
    but is not used here (prediction has no label).
    '''
    # tokenize() returns the tokens and the puri ids used for masking.
    tokens_a, puri_ids_list = tokenizer.tokenize(example.text_a)
    # Reserve room for the [CLS] and [SEP] special tokens ("- 2").
    if len(tokens_a) > max_seq_length - 2:
        tokens_a = tokens_a[:(max_seq_length - 2)]
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
    segment_ids = [0] * len(tokens)
    # Convert tokens to vocabulary indices.
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # Attend to every real token except the trailing SEP (next-sentence
    # classification is not needed), then zero-pad up to max_seq_length.
    input_mask = [1] * (len(input_ids) - 1) + [0]
    padding = [0] * (max_seq_length - len(input_ids))
    input_ids += padding
    input_mask += padding
    segment_ids += padding
    # Sanity-check each element's length.
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    # Prediction only: no label is attached.
    feature = InputFeatures(input_ids=input_ids,
                            input_mask=input_mask,
                            segment_ids=segment_ids,
                            label_id=None)
    return feature, puri_ids_list
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class PuriProcessor(DataProcessor):
    """Processor for the Puri data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        train_path = os.path.join(data_dir, "train.tsv")
        logger.info("LOOKING AT {}".format(train_path))
        return self._create_examples(self._read_tsv(train_path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        dev_path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(dev_path), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Create train/dev examples; row 0 is the header and is skipped."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            # Column 2 holds the text, column 0 the label.
            examples.append(InputExample(guid="%s-%s" % (set_type, i),
                                         text_a=line[2],
                                         text_b=None,
                                         label=line[0]))
        return examples

    def create_single_example(self, text):
        """Creates a single example for predicting a single sentence."""
        return InputExample(guid=0, text_a=text, text_b=None, label=None)

    def create_list_example(self, text_list):
        """Creates examples from a list of raw sentences (label fixed to '0')."""
        return [InputExample(guid="%s-%s" % ("ltest", i),
                             text_a=text, text_b=None, label='0')
                for i, text in enumerate(text_list)]
def single_sentence_masking_percent(text, model):
"""Run text predict model and toxic text masking by loop.
Inputs:
`text`: user string input
`model`: fine-tunned model. This case we use purifier model
Model params:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary. Items in the batch should begin with the special "CLS" token. (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_labels].
`front_pooler` : choose to apply tanh activation function on encoder process . Default: `True`.
`query`, `key`, `value` : select layers for input of puri attention. selected_layers is indices of index which 0 index means embedding_output.
`query_att` :True or False
`key_att` : True or False
`multi_head` : choose to apply multi-head attention. Default: `True`.
`dropout` : choose to apply dropout to attention_probs. Default: `False`.
`back_pooler` : choose to apply tanh activation function after puri layer . Default: `True`.
Outputs:
`final_result[0]` : predict result of model
0 means not toxic text
1 means toxic text
if `final_result[0]` is 0:
`text` : no maksing sentence
if `final_result[0]` is 1:
`text` : maksing sentence
```
"""
# set parameter
# 변수설정
seed = 42
local_rank = -1
no_cuda = True
fp16 = False
do_train = False
do_eval = True
output_dir = './data/ouput/'
vocab_file = './data/vocab_korea.txt'
task_name = 'Puri'
do_lower_case = True
data_dir = './data/'
max_seq_length = 128
# set parameter related with processor
# processor 관련
processors = {"puri": PuriProcessor}
output_modes = {"puri": "classification"}
task_name = task_name.lower()
processor = processors[task_name]()
output_mode = output_modes[task_name]
label_list | |
you are missing the `MANAGE_WEBHOOKS` permission.
hikari.errors.UnauthorizedError
If you are unauthorized to make the request (invalid/missing token).
hikari.errors.NotFoundError
If the channel is not found.
hikari.errors.RateLimitTooLongError
Raised in the event that a rate limit occurs that is
longer than `max_rate_limit` when making a request.
hikari.errors.RateLimitedError
Usually, Hikari will handle and retry on hitting
rate-limits automatically. This includes most bucket-specific
rate-limits and global rate-limits. In some rare edge cases,
however, Discord implements other undocumented rules for
rate-limiting, such as limits per attribute. These cannot be
detected or handled normally by Hikari due to their undocumented
nature, and will trigger this exception if they occur.
hikari.errors.InternalServerError
If an internal error occurs on Discord while handling the request.
"""
@abc.abstractmethod
async def fetch_webhook(
    self,
    webhook: snowflakes.SnowflakeishOr[webhooks.Webhook],
    *,
    token: undefined.UndefinedOr[str] = undefined.UNDEFINED,
) -> webhooks.Webhook:
    """Fetch an existing webhook.

    Parameters
    ----------
    webhook : hikari.snowflakes.SnowflakeishOr[hikari.webhooks.Webhook]
        The webhook to fetch. This may be the object or the ID
        of an existing webhook.

    Other Parameters
    ----------------
    token : hikari.undefined.UndefinedOr[builtins.str]
        If provided, the webhook token that will be used to fetch
        the webhook instead of the token the client was initialized with.

    Returns
    -------
    hikari.webhooks.Webhook
        The requested webhook.

    Raises
    ------
    hikari.errors.ForbiddenError
        If you are missing the `MANAGE_WEBHOOKS` permission when not
        using a token.
    hikari.errors.UnauthorizedError
        If you are unauthorized to make the request (invalid/missing token).
    hikari.errors.NotFoundError
        If the webhook is not found.
    hikari.errors.RateLimitTooLongError
        Raised in the event that a rate limit occurs that is
        longer than `max_rate_limit` when making a request.
    hikari.errors.RateLimitedError
        Usually, Hikari will handle and retry on hitting
        rate-limits automatically. This includes most bucket-specific
        rate-limits and global rate-limits. In some rare edge cases,
        however, Discord implements other undocumented rules for
        rate-limiting, such as limits per attribute. These cannot be
        detected or handled normally by Hikari due to their undocumented
        nature, and will trigger this exception if they occur.
    hikari.errors.InternalServerError
        If an internal error occurs on Discord while handling the request.
    """
@abc.abstractmethod
async def fetch_channel_webhooks(
    self,
    channel: snowflakes.SnowflakeishOr[channels.TextChannel],
) -> typing.Sequence[webhooks.Webhook]:
    """Fetch all webhooks for a channel.

    Parameters
    ----------
    channel : hikari.snowflakes.SnowflakeishOr[hikari.channels.TextChannel]
        The channel to fetch the webhooks for. This
        may be a `hikari.channels.TextChannel` or the ID of an
        existing channel.

    Returns
    -------
    typing.Sequence[hikari.webhooks.Webhook]
        The fetched webhooks.

    Raises
    ------
    hikari.errors.ForbiddenError
        If you are missing the `MANAGE_WEBHOOKS` permission.
    hikari.errors.UnauthorizedError
        If you are unauthorized to make the request (invalid/missing token).
    hikari.errors.NotFoundError
        If the channel is not found.
    hikari.errors.RateLimitTooLongError
        Raised in the event that a rate limit occurs that is
        longer than `max_rate_limit` when making a request.
    hikari.errors.RateLimitedError
        Usually, Hikari will handle and retry on hitting
        rate-limits automatically. This includes most bucket-specific
        rate-limits and global rate-limits. In some rare edge cases,
        however, Discord implements other undocumented rules for
        rate-limiting, such as limits per attribute. These cannot be
        detected or handled normally by Hikari due to their undocumented
        nature, and will trigger this exception if they occur.
    hikari.errors.InternalServerError
        If an internal error occurs on Discord while handling the request.
    """
@abc.abstractmethod
async def fetch_guild_webhooks(
    self,
    guild: snowflakes.SnowflakeishOr[guilds.PartialGuild],
) -> typing.Sequence[webhooks.Webhook]:
    """Fetch all guild webhooks.

    Parameters
    ----------
    guild : hikari.snowflakes.SnowflakeishOr[hikari.guilds.PartialGuild]
        The guild to fetch the webhooks for. This may be the object
        or the ID of an existing guild.

    Returns
    -------
    typing.Sequence[hikari.webhooks.Webhook]
        The fetched webhooks.

    Raises
    ------
    hikari.errors.ForbiddenError
        If you are missing the `MANAGE_WEBHOOKS` permission.
    hikari.errors.UnauthorizedError
        If you are unauthorized to make the request (invalid/missing token).
    hikari.errors.NotFoundError
        If the guild is not found.
    hikari.errors.RateLimitTooLongError
        Raised in the event that a rate limit occurs that is
        longer than `max_rate_limit` when making a request.
    hikari.errors.RateLimitedError
        Usually, Hikari will handle and retry on hitting
        rate-limits automatically. This includes most bucket-specific
        rate-limits and global rate-limits. In some rare edge cases,
        however, Discord implements other undocumented rules for
        rate-limiting, such as limits per attribute. These cannot be
        detected or handled normally by Hikari due to their undocumented
        nature, and will trigger this exception if they occur.
    hikari.errors.InternalServerError
        If an internal error occurs on Discord while handling the request.
    """
@abc.abstractmethod
async def edit_webhook(
    self,
    webhook: snowflakes.SnowflakeishOr[webhooks.Webhook],
    *,
    token: undefined.UndefinedOr[str] = undefined.UNDEFINED,
    name: undefined.UndefinedOr[str] = undefined.UNDEFINED,
    avatar: undefined.UndefinedNoneOr[files.Resourceish] = undefined.UNDEFINED,
    channel: undefined.UndefinedOr[snowflakes.SnowflakeishOr[channels.TextChannel]] = undefined.UNDEFINED,
    reason: undefined.UndefinedOr[str] = undefined.UNDEFINED,
) -> webhooks.Webhook:
    """Edit a webhook.

    Parameters
    ----------
    webhook : hikari.snowflakes.SnowflakeishOr[hikari.webhooks.Webhook]
        The webhook to edit. This may be the object or the
        ID of an existing webhook.

    Other Parameters
    ----------------
    token : hikari.undefined.UndefinedOr[builtins.str]
        If provided, the webhook token that will be used to edit
        the webhook instead of the token the client was initialized with.
    name : hikari.undefined.UndefinedOr[builtins.str]
        If provided, the new webhook name.
    avatar : hikari.undefined.UndefinedNoneOr[hikari.files.Resourceish]
        If provided, the new webhook avatar. If `builtins.None`, will
        remove the webhook avatar.
    channel : hikari.undefined.UndefinedOr[hikari.snowflakes.SnowflakeishOr[hikari.channels.TextChannel]]
        If provided, the text channel to move the webhook to.
    reason : hikari.undefined.UndefinedOr[builtins.str]
        If provided, the reason that will be recorded in the audit logs.
        Maximum of 512 characters.

    Returns
    -------
    hikari.webhooks.Webhook
        The edited webhook.

    Raises
    ------
    hikari.errors.ForbiddenError
        If you are missing the `MANAGE_WEBHOOKS` permission when not
        using a token.
    hikari.errors.UnauthorizedError
        If you are unauthorized to make the request (invalid/missing token).
    hikari.errors.NotFoundError
        If the webhook is not found.
    hikari.errors.RateLimitTooLongError
        Raised in the event that a rate limit occurs that is
        longer than `max_rate_limit` when making a request.
    hikari.errors.RateLimitedError
        Usually, Hikari will handle and retry on hitting
        rate-limits automatically. This includes most bucket-specific
        rate-limits and global rate-limits. In some rare edge cases,
        however, Discord implements other undocumented rules for
        rate-limiting, such as limits per attribute. These cannot be
        detected or handled normally by Hikari due to their undocumented
        nature, and will trigger this exception if they occur.
    hikari.errors.InternalServerError
        If an internal error occurs on Discord while handling the request.
    """
@abc.abstractmethod
async def delete_webhook(
    self,
    webhook: snowflakes.SnowflakeishOr[webhooks.Webhook],
    *,
    token: undefined.UndefinedOr[str] = undefined.UNDEFINED,
) -> None:
    """Delete a webhook.

    Parameters
    ----------
    webhook : hikari.snowflakes.SnowflakeishOr[hikari.webhooks.Webhook]
        The webhook to delete. This may be the object or the
        ID of an existing webhook.

    Other Parameters
    ----------------
    token : hikari.undefined.UndefinedOr[builtins.str]
        If provided, the webhook token that will be used to delete
        the webhook instead of the token the client was initialized with.

    Raises
    ------
    hikari.errors.ForbiddenError
        If you are missing the `MANAGE_WEBHOOKS` permission when not
        using a token.
    hikari.errors.UnauthorizedError
        If you are unauthorized to make the request (invalid/missing token).
    hikari.errors.NotFoundError
        If the webhook is not found.
    hikari.errors.RateLimitTooLongError
        Raised in the event that a rate limit occurs that is
        longer than `max_rate_limit` when making a request.
    hikari.errors.RateLimitedError
        Usually, Hikari will handle and retry on hitting
        rate-limits automatically. This includes most bucket-specific
        rate-limits and global rate-limits. In some rare edge cases,
        however, Discord implements other undocumented rules for
        rate-limiting, such as limits per attribute. These cannot be
        detected or handled normally by Hikari due to their undocumented
        nature, and will trigger this exception if they occur.
    hikari.errors.InternalServerError
        If an internal error occurs on Discord while handling the request.
    """
@abc.abstractmethod
async def execute_webhook(
self,
webhook: snowflakes.SnowflakeishOr[webhooks.Webhook],
token: str,
content: undefined.UndefinedOr[typing.Any] = undefined.UNDEFINED,
*,
username: undefined.UndefinedOr[str] = undefined.UNDEFINED,
avatar_url: undefined.UndefinedOr[str] = undefined.UNDEFINED,
embed: undefined.UndefinedOr[embeds_.Embed] = undefined.UNDEFINED,
embeds: undefined.UndefinedOr[typing.Sequence[embeds_.Embed]] = undefined.UNDEFINED,
attachment: undefined.UndefinedOr[files.Resourceish] = undefined.UNDEFINED,
attachments: undefined.UndefinedOr[typing.Sequence[files.Resourceish]] = undefined.UNDEFINED,
tts: undefined.UndefinedOr[bool] = undefined.UNDEFINED,
mentions_everyone: undefined.UndefinedOr[bool] = undefined.UNDEFINED,
user_mentions: undefined.UndefinedOr[
typing.Union[typing.Collection[snowflakes.SnowflakeishOr[users.PartialUser]], bool]
] = undefined.UNDEFINED,
role_mentions: undefined.UndefinedOr[
typing.Union[typing.Collection[snowflakes.SnowflakeishOr[guilds.PartialRole]], bool]
] = undefined.UNDEFINED,
) -> messages_.Message:
"""Execute a webhook.
Parameters
----------
webhook : hikari.snowflakes.SnowflakeishOr[hikari.webhooks.Webhook]
The webhook to execute. This may be the object
or the ID of an existing webhook
token: builtins.str
The webhook token.
content : hikari.undefined.UndefinedOr[typing.Any]
If provided, the message contents. If
`hikari.undefined.UNDEFINED`, then nothing will be sent
in the content. Any other value here will be cast to a
`builtins.str`.
If this is a `hikari.embeds.Embed` and no `embed` nor
no `embeds` kwarg is provided, then this will instead
update the embed. This allows for simpler syntax when
sending an embed alone.
Likewise, if this is a `hikari.files.Resource`, then the
content is instead treated as an attachment if no `attachment` and
no `attachments` kwargs are provided.
Other Parameters
----------------
embed : | |
# Source file: CV_adv/distillation_training.py
import os
import os.path as osp
import sys
import time
import argparse
from pdb import set_trace as st
import json
import random
from functools import partial
import torch
import numpy as np
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchcontrib
from torchvision import transforms
from advertorch.attacks import LinfPGDAttack
from dataset.cub200 import CUB200Data
from dataset.mit67 import MIT67Data
from dataset.stanford_dog import SDog120Data
from dataset.caltech256 import Caltech257Data
from dataset.stanford_40 import Stanford40Data
from dataset.flower102 import Flower102Data
from model.fe_resnet import resnet18_dropout, resnet50_dropout, resnet101_dropout, resnet34_dropout
from model.fe_resnet import feresnet18, feresnet50, feresnet101, feresnet34
from eval_robustness import advtest, myloss
from utils import *
def linear_l2(model):
    """L2 penalty on all `nn.Linear` layers of `model`.

    Returns
    -------
    (scaled, raw) : tuple
        ``raw`` is the sum of squared weights and biases over every Linear
        layer; ``scaled`` is ``0.5 * args.beta * raw`` (reads the module-level
        ``args`` namespace for the strength).
    """
    beta_loss = 0
    for m in model.modules():
        if isinstance(m, nn.Linear):
            beta_loss += (m.weight).pow(2).sum()
            # BUGFIX: nn.Linear(bias=False) stores bias=None, which used to
            # crash on `.pow`; skip the bias term for bias-free layers.
            if m.bias is not None:
                beta_loss += (m.bias).pow(2).sum()
    return 0.5*beta_loss*args.beta, beta_loss
def l2sp(model, reg):
    """Starting-point (L2-SP) regularizer.

    Accumulates the squared deviation of each module's current ``weight`` /
    ``bias`` from the snapshot copies stored on the module as ``old_weight`` /
    ``old_bias``. Modules without a snapshot contribute nothing.

    Returns
    -------
    (loss, distance) : tuple
        ``loss`` is ``reg`` times the squared-deviation sum; ``distance`` is
        its Euclidean (square-root) form. Both are plain ``0`` when no module
        carries snapshot attributes.
    """
    penalty = 0
    for module in model.modules():
        # Check the weight pair first, then the bias pair, mirroring the
        # per-module accumulation order.
        for attr, snapshot in (("weight", "old_weight"), ("bias", "old_bias")):
            if hasattr(module, attr) and hasattr(module, snapshot):
                delta = (getattr(module, attr) - getattr(module, snapshot)).pow(2).sum()
                penalty = penalty + delta
    distance = penalty
    if distance > 0:
        distance = distance.sqrt()
    return reg * penalty, distance
def test(model, teacher, loader, loss=False):
    """Evaluate `model` on `loader`, optionally measuring distillation losses.

    Parameters
    ----------
    model : nn.Module       student network (switched to eval mode).
    teacher : nn.Module     teacher whose hooked activations feed the feature
                            regularizer; may be None to skip that term.
    loader : DataLoader
    loss : bool             when True, also accumulate CE / feature / L2SP losses.

    Returns
    -------
    (top1_percent, mean_ce, mean_total_feat_reg, mean_l2sp_reg, per_layer_feat_reg)
        Loss terms stay at their zero initializers when ``loss`` is False.

    Relies on module globals ``args``, ``reg_layers``,
    ``CrossEntropyLabelSmooth`` and ``l2sp``.
    """
    with torch.no_grad():
        model.eval()
        if loss:
            teacher.eval()
            ce = CrossEntropyLabelSmooth(loader.dataset.num_classes, args.label_smoothing)
            featloss = torch.nn.MSELoss(reduction='none')
        total_ce = 0
        total_feat_reg = np.zeros(len(reg_layers))
        total_l2sp_reg = 0
        total = 0
        top1 = 0
        for i, (batch, label) in enumerate(loader):
            total += batch.size(0)
            out = model(batch)
            _, pred = out.max(dim=1)
            top1 += int(pred.eq(label).sum().item())
            if loss:
                total_ce += ce(out, label).item()
                if teacher is not None:
                    # The teacher forward refreshes the `.out` activations that
                    # the record_act forward hooks store on both hooked modules.
                    teacher(batch)
                    # BUGFIX: this inner enumerate previously reused `i`,
                    # clobbering the batch counter that serves as the averaging
                    # denominator after the loop.
                    for li, key in enumerate(reg_layers):
                        src_x = reg_layers[key][0].out
                        tgt_x = reg_layers[key][1].out
                        regloss = featloss(src_x, tgt_x.detach()).mean()
                        total_feat_reg[li] += regloss.item()
                # NOTE(review): assumes the model carries old_weight/old_bias
                # snapshots so l2sp returns a tensor (otherwise .item() fails).
                _, unweighted = l2sp(model, 0)
                total_l2sp_reg += unweighted.item()
    # `i` is the index of the last batch, so i+1 == number of batches.
    # NOTE(review): an empty loader would leave `i` unbound — assumed non-empty.
    return float(top1)/total*100, total_ce/(i+1), np.sum(total_feat_reg)/(i+1), total_l2sp_reg/(i+1), total_feat_reg/(i+1)
def train(
    model,
    train_loader,
    val_loader,
    adv_eval_fn,
    iterations=9000,
    lr=1e-2,
    output_dir='results',
    l2sp_lmda=1e-2,
    teacher=None,
    reg_layers=None,
):
    """Distill `teacher` into `model` with optional feature/L2SP regularizers.

    Runs ``iterations`` SGD steps over ``train_loader``, periodically
    evaluating on both splits, logging to TSV files under ``output_dir``,
    optionally running an adversarial evaluation, and checkpointing to
    ``args.output_dir``. With ``args.swa`` the SWA-averaged weights are
    swapped in at the end and BatchNorm statistics are recomputed.

    Parameters
    ----------
    model : nn.Module        student network; trained in place and returned.
    train_loader, val_loader : DataLoader
    adv_eval_fn : callable   model -> (clean_top1, adv_top1, attack_success_rate).
    iterations : int         number of optimizer steps.
    lr : float               SGD learning rate.
    output_dir : str         directory receiving train/test/adv TSV logs.
    l2sp_lmda : float        L2-SP strength; 0 switches to plain weight decay.
    teacher : nn.Module      frozen teacher. NOTE(review): despite the None
                             default, a teacher is required (eval() is called
                             on it unconditionally).
    reg_layers : dict | None (student_module, teacher_module) pairs whose
                             hook-recorded `.out` features are matched.

    Relies on module globals: ``args``, ``test``, ``linear_l2``, ``l2sp``,
    ``CrossEntropyLabelSmooth``, ``MovingAverageMeter``, ``ProgressMeter``.
    """
    # BUGFIX: the default used to be a shared mutable dict (`reg_layers={}`).
    if reg_layers is None:
        reg_layers = {}

    # L2-SP replaces classic weight decay, so the two are mutually exclusive.
    if l2sp_lmda == 0:
        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=args.momentum, weight_decay=args.weight_decay)
    else:
        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=args.momentum, weight_decay=0)
    if args.swa:
        optimizer = torchcontrib.optim.SWA(optimizer, swa_start=args.swa_start, swa_freq=args.swa_freq)

    teacher.eval()
    ce = CrossEntropyLabelSmooth(train_loader.dataset.num_classes, args.label_smoothing)
    featloss = torch.nn.MSELoss()

    batch_time = MovingAverageMeter('Time', ':6.3f')
    data_time = MovingAverageMeter('Data', ':6.3f')
    ce_loss_meter = MovingAverageMeter('CE Loss', ':6.3f')
    feat_loss_meter = MovingAverageMeter('Feat. Loss', ':6.3f')
    l2sp_loss_meter = MovingAverageMeter('L2SP Loss', ':6.3f')
    linear_loss_meter = MovingAverageMeter('LinearL2 Loss', ':6.3f')
    total_loss_meter = MovingAverageMeter('Total Loss', ':6.3f')
    top1_meter = MovingAverageMeter('Acc@1', ':6.2f')

    def _init_tsv(path, columns):
        # Start a fresh TSV log with a header row.
        with open(path, 'w') as wf:
            wf.write('\t'.join(columns) + '\n')

    def _append_tsv(path, cells):
        # Append one row of already-formatted values.
        with open(path, 'a') as af:
            af.write('\t'.join([str(c) for c in cells]) + '\n')

    def _timestamp():
        # e.g. 'Oct  1 12:34:56' — asctime with weekday/year stripped.
        return time.asctime(time.localtime(time.time()))[4:-6]

    train_path = osp.join(output_dir, "train.tsv")
    test_path = osp.join(output_dir, "test.tsv")
    adv_path = osp.join(output_dir, "adv.tsv")
    _init_tsv(train_path, ['time', 'iter', 'Acc', 'celoss', 'featloss', 'l2sp'])
    _init_tsv(test_path, ['time', 'iter', 'Acc', 'celoss', 'featloss', 'l2sp'])
    _init_tsv(adv_path, ['time', 'iter', 'Acc', 'AdvAcc', 'ASR'])

    def _eval_and_log(step):
        # Evaluate on both splits, print, and append one row per TSV log.
        test_top1, test_ce_loss, test_feat_loss, test_weight_loss, _ = test(
            model, teacher, val_loader, loss=True
        )
        train_top1, train_ce_loss, train_feat_loss, train_weight_loss, _ = test(
            model, teacher, train_loader, loss=True
        )
        print('Eval Train | Iteration {}/{} | Top-1: {:.2f} | CE Loss: {:.3f} | Feat Reg Loss: {:.6f} | L2SP Reg Loss: {:.3f}'.format(step+1, iterations, train_top1, train_ce_loss, train_feat_loss, train_weight_loss))
        print('Eval Test | Iteration {}/{} | Top-1: {:.2f} | CE Loss: {:.3f} | Feat Reg Loss: {:.6f} | L2SP Reg Loss: {:.3f}'.format(step+1, iterations, test_top1, test_ce_loss, test_feat_loss, test_weight_loss))
        localtime = _timestamp()
        _append_tsv(train_path, [localtime, step, round(train_top1, 2), round(train_ce_loss, 2), round(train_feat_loss, 2), round(train_weight_loss, 2)])
        _append_tsv(test_path, [localtime, step, round(test_top1, 2), round(test_ce_loss, 2), round(test_feat_loss, 2), round(test_weight_loss, 2)])

    def _save_ckpt():
        # Overwrites a single rolling checkpoint under args.output_dir.
        if not args.no_save:
            ckpt_path = osp.join(args.output_dir, "ckpt.pth")
            torch.save({'state_dict': model.state_dict()}, ckpt_path)

    dataloader_iterator = iter(train_loader)
    for i in range(iterations):
        model.train()
        optimizer.zero_grad()
        end = time.time()
        try:
            batch, label = next(dataloader_iterator)
        # BUGFIX: was a bare `except:`; only a loader exhaustion should
        # trigger the restart (a bare except would also mask KeyboardInterrupt).
        except StopIteration:
            dataloader_iterator = iter(train_loader)
            batch, label = next(dataloader_iterator)
        data_time.update(time.time() - end)

        out = model(batch)
        _, pred = out.max(dim=1)
        top1_meter.update(float(pred.eq(label).sum().item()) / label.shape[0] * 100.)

        loss = ce(out, label)
        ce_loss_meter.update(loss.item())

        # The teacher forward is run (without grad) so the forward hooks on
        # reg_layers refresh the recorded `.out` activations used below.
        with torch.no_grad():
            teacher(batch)

        # Feature-distillation term, only when enabled.
        if args.feat_lmda != 0:
            regloss = 0
            for key in reg_layers:
                src_x = reg_layers[key][0].out
                tgt_x = reg_layers[key][1].out
                regloss += featloss(src_x, tgt_x.detach())
            regloss = args.feat_lmda * regloss
            loss += regloss
            feat_loss_meter.update(regloss.item())

        beta_loss, linear_norm = linear_l2(model)
        loss = loss + beta_loss
        linear_loss_meter.update(beta_loss.item())

        if l2sp_lmda != 0:
            reg, _ = l2sp(model, l2sp_lmda)
            l2sp_loss_meter.update(reg.item())
            loss = loss + reg

        total_loss_meter.update(loss.item())
        loss.backward()
        optimizer.step()

        # Report the last param group's learning rate.
        for param_group in optimizer.param_groups:
            current_lr = param_group['lr']
        batch_time.update(time.time() - end)

        if (i % args.print_freq == 0) or (i == iterations-1):
            progress = ProgressMeter(
                iterations,
                [batch_time, data_time, top1_meter, total_loss_meter, ce_loss_meter, feat_loss_meter, l2sp_loss_meter, linear_loss_meter],
                prefix="LR: {:6.3f}".format(current_lr),
                output_dir=output_dir,
            )
            progress.display(i)

        if ((i+1) % args.test_interval == 0) or (i == iterations-1):
            _eval_and_log(i)
            _save_ckpt()

        if args.adv_test_interval > 0 and ((i+1) % args.adv_test_interval == 0):
            clean_top1, adv_top1, adv_sr = adv_eval_fn(model)
            _append_tsv(adv_path, [_timestamp(), i, round(clean_top1, 2), round(adv_top1, 2), round(adv_sr, 2)])

    if args.swa:
        # Swap the SWA-averaged weights in, then recompute BatchNorm running
        # statistics with one full pass over the training data
        # (momentum=None -> cumulative moving average).
        optimizer.swap_swa_sgd()
        for m in model.modules():
            if hasattr(m, 'running_mean'):
                m.reset_running_stats()
                m.momentum = None
        with torch.no_grad():
            model.train()
            for x, y in train_loader:
                model(x)

    # Final evaluation/log/checkpoint after the last step (step index i ==
    # iterations-1, matching the in-loop logging convention).
    _eval_and_log(iterations - 1)
    _save_ckpt()
    return model
def get_args(argv=None):
    """Parse command-line options for distillation training.

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse; ``None`` (the default) parses ``sys.argv``,
        so the original call form ``get_args()`` is unchanged.

    Returns
    -------
    argparse.Namespace
        Parsed options, with ``adv_data_dir`` rewritten to the
        dataset/network-specific adversarial-data file path.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--datapath", type=str, default='/data', help='path to the dataset')
    # BUGFIX: the brace lists below were written as "\{...\}" — an invalid
    # escape sequence that rendered literal backslashes in --help output.
    parser.add_argument("--dataset", type=str, default='CUB200Data', help='Target dataset. Currently support: {SDog120Data, CUB200Data, Stanford40Data, MIT67Data, Flower102Data}')
    parser.add_argument("--iterations", type=int, default=30000, help='Iterations to train')
    parser.add_argument("--print_freq", type=int, default=100, help='Frequency of printing training logs')
    parser.add_argument("--test_interval", type=int, default=1000, help='Frequency of testing')
    parser.add_argument("--adv_test_interval", type=int, default=1000)
    parser.add_argument("--name", type=str, default='test', help='Name for the checkpoint')
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--lr", type=float, default=1e-2)
    parser.add_argument("--weight_decay", type=float, default=0)
    parser.add_argument("--momentum", type=float, default=0.9)
    parser.add_argument("--beta", type=float, default=1e-2, help='The strength of the L2 regularization on the last linear layer')
    parser.add_argument("--dropout", type=float, default=0, help='Dropout rate for spatial dropout')
    parser.add_argument("--l2sp_lmda", type=float, default=0)
    parser.add_argument("--feat_lmda", type=float, default=0)
    parser.add_argument("--feat_layers", type=str, default='1234', help='Used for DELTA (which layers or stages to match), ResNets should be 1234 and MobileNetV2 should be 12345')
    parser.add_argument("--reinit", action='store_true', default=False, help='Reinitialize before training')
    parser.add_argument("--no_save", action='store_true', default=False, help='Do not save checkpoints')
    parser.add_argument("--swa", action='store_true', default=False, help='Use SWA')
    parser.add_argument("--swa_freq", type=int, default=500, help='Frequency of averaging models in SWA')
    parser.add_argument("--swa_start", type=int, default=0, help='Start SWA since which iterations')
    parser.add_argument("--label_smoothing", type=float, default=0)
    parser.add_argument("--checkpoint", type=str, default='', help='Load a previously trained checkpoint')
    parser.add_argument("--network", type=str, default='resnet18', help='Network architecture. Currently support: {resnet18, resnet50, resnet101, mbnetv2}')
    parser.add_argument("--shot", type=int, default=-1, help='Number of training samples per class for the training dataset. -1 indicates using the full dataset.')
    parser.add_argument("--log", action='store_true', default=False, help='Redirect the output to log/args.name.log')
    parser.add_argument("--output_dir", default="results")
    parser.add_argument("--B", type=float, default=0.1, help='Attack budget')
    parser.add_argument("--m", type=float, default=1000, help='Hyper-parameter for task-agnostic attack')
    parser.add_argument("--pgd_iter", type=int, default=40)
    parser.add_argument("--adv_data_dir", default="results/advdata")
    parser.add_argument("--seed", type=int, default=98)
    args = parser.parse_args(argv)
    # Resolve the adversarial-data file for this dataset/network pair.
    args.adv_data_dir = osp.join(
        args.adv_data_dir, f"{args.dataset}_{args.network}.pt"
    )
    return args
# Forward hook used to record activations for feature matching/distillation.
def record_act(self, input, output):
    # `self` is the hooked nn.Module (hooks are called with (module, input,
    # output)); the activation is exposed to the losses as `module.out`.
    # Note: `input` shadows the builtin, but the hook signature is fixed.
    self.out = output
if __name__ == '__main__':
args = | |
# --- SWIG-generated proxy code (interior of the Display class; the class
# --- header lies outside this chunk). Do not hand-edit; regenerate instead.
def __init__(self, *args, **kwargs):
    # Abstract on the Python side: instances are obtained via Create.
    raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
# Display output-type constants mirrored from the native module.
HDMI = _hilens_internal.Display_HDMI
RTMP = _hilens_internal.Display_RTMP
H264_FILE = _hilens_internal.Display_H264_FILE
if _newclass:
    Create = staticmethod(_hilens_internal.Display_Create)
else:
    Create = _hilens_internal.Display_Create
def Show(self, frame):
    # Delegates to the native wrapper.
    return _hilens_internal.Display_Show(self, frame)
__swig_destroy__ = _hilens_internal.delete_Display
__del__ = lambda self: None
# SWIG-generated module-level registration and flat wrapper functions.
# Each `def X(...)` is immediately shadowed by `X = _hilens_internal.X`
# (standard SWIG shadow-function pattern); the defs exist for introspection.
Display_swigregister = _hilens_internal.Display_swigregister
Display_swigregister(Display)

def Display_Create(type, path=None):
    return _hilens_internal.Display_Create(type, path)
Display_Create = _hilens_internal.Display_Create

def POST(url, body, httpcode, response=None, headers=None):
    return _hilens_internal.POST(url, body, httpcode, response, headers)
POST = _hilens_internal.POST

def GetWorkspacePath():
    return _hilens_internal.GetWorkspacePath()
GetWorkspacePath = _hilens_internal.GetWorkspacePath

def GetModelDirPath():
    return _hilens_internal.GetModelDirPath()
GetModelDirPath = _hilens_internal.GetModelDirPath

def GetSkillConfig():
    return _hilens_internal.GetSkillConfig()
GetSkillConfig = _hilens_internal.GetSkillConfig

def GetHardSampleConfig():
    return _hilens_internal.GetHardSampleConfig()
GetHardSampleConfig = _hilens_internal.GetHardSampleConfig

def SetHardSampleConfig(confStr):
    return _hilens_internal.SetHardSampleConfig(confStr)
SetHardSampleConfig = _hilens_internal.SetHardSampleConfig

def MD5ofFile(filepath):
    return _hilens_internal.MD5ofFile(filepath)
MD5ofFile = _hilens_internal.MD5ofFile

def DownloadFileFromOBS(url, downloadTo):
    return _hilens_internal.DownloadFileFromOBS(url, downloadTo)
DownloadFileFromOBS = _hilens_internal.DownloadFileFromOBS
# SWIG-generated proxy for the native VideoCapture type. Do not hand-edit.
class VideoCapture(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, VideoCapture, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, VideoCapture, name)
    def __init__(self, *args, **kwargs):
        # Abstract on the Python side: instances come from VideoCapture_Create.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    if _newclass:
        Create = staticmethod(_hilens_internal.VideoCapture_Create)
    else:
        Create = _hilens_internal.VideoCapture_Create
    __swig_destroy__ = _hilens_internal.delete_VideoCapture
    __del__ = lambda self: None
    def Read(self):
        return _hilens_internal.VideoCapture_Read(self)
    def Width(self):
        return _hilens_internal.VideoCapture_Width(self)
    def Height(self):
        return _hilens_internal.VideoCapture_Height(self)
VideoCapture_swigregister = _hilens_internal.VideoCapture_swigregister
VideoCapture_swigregister(VideoCapture)

def VideoCapture_Create(*args):
    # Shadowed by the rebinding below (SWIG shadow-function pattern).
    return _hilens_internal.VideoCapture_Create(*args)
VideoCapture_Create = _hilens_internal.VideoCapture_Create

MAX_FRAME_NUM_ONCE = _hilens_internal.MAX_FRAME_NUM_ONCE
# SWIG-generated proxy for the native AudioFrame struct (fields: data, size).
class AudioFrame(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, AudioFrame, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, AudioFrame, name)
    __repr__ = _swig_repr
    __swig_setmethods__["data"] = _hilens_internal.AudioFrame_data_set
    __swig_getmethods__["data"] = _hilens_internal.AudioFrame_data_get
    if _newclass:
        data = _swig_property(_hilens_internal.AudioFrame_data_get, _hilens_internal.AudioFrame_data_set)
    __swig_setmethods__["size"] = _hilens_internal.AudioFrame_size_set
    __swig_getmethods__["size"] = _hilens_internal.AudioFrame_size_get
    if _newclass:
        size = _swig_property(_hilens_internal.AudioFrame_size_get, _hilens_internal.AudioFrame_size_set)
    def __init__(self):
        this = _hilens_internal.new_AudioFrame()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _hilens_internal.delete_AudioFrame
    __del__ = lambda self: None
AudioFrame_swigregister = _hilens_internal.AudioFrame_swigregister
AudioFrame_swigregister(AudioFrame)
# SWIG-generated proxy for the native AudioProperties struct.
class AudioProperties(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, AudioProperties, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, AudioProperties, name)
    __repr__ = _swig_repr
    __swig_setmethods__["enSamplerate"] = _hilens_internal.AudioProperties_enSamplerate_set
    __swig_getmethods__["enSamplerate"] = _hilens_internal.AudioProperties_enSamplerate_get
    if _newclass:
        enSamplerate = _swig_property(_hilens_internal.AudioProperties_enSamplerate_get, _hilens_internal.AudioProperties_enSamplerate_set)
    __swig_setmethods__["enBitwidth"] = _hilens_internal.AudioProperties_enBitwidth_set
    __swig_getmethods__["enBitwidth"] = _hilens_internal.AudioProperties_enBitwidth_get
    if _newclass:
        enBitwidth = _swig_property(_hilens_internal.AudioProperties_enBitwidth_get, _hilens_internal.AudioProperties_enBitwidth_set)
    __swig_setmethods__["u32PtNumPerFrm"] = _hilens_internal.AudioProperties_u32PtNumPerFrm_set
    __swig_getmethods__["u32PtNumPerFrm"] = _hilens_internal.AudioProperties_u32PtNumPerFrm_get
    if _newclass:
        u32PtNumPerFrm = _swig_property(_hilens_internal.AudioProperties_u32PtNumPerFrm_get, _hilens_internal.AudioProperties_u32PtNumPerFrm_set)
    __swig_setmethods__["soundMode"] = _hilens_internal.AudioProperties_soundMode_set
    __swig_getmethods__["soundMode"] = _hilens_internal.AudioProperties_soundMode_get
    if _newclass:
        soundMode = _swig_property(_hilens_internal.AudioProperties_soundMode_get, _hilens_internal.AudioProperties_soundMode_set)
    def __init__(self):
        this = _hilens_internal.new_AudioProperties()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _hilens_internal.delete_AudioProperties
    __del__ = lambda self: None
AudioProperties_swigregister = _hilens_internal.AudioProperties_swigregister
AudioProperties_swigregister(AudioProperties)

# Audio source constants mirrored from the native enum.
AUDIO_FROM_MIC = _hilens_internal.AUDIO_FROM_MIC
AUDIO_FROM_FILE = _hilens_internal.AUDIO_FROM_FILE
AUDIO_FROM_BUTT = _hilens_internal.AUDIO_FROM_BUTT
# SWIG-generated proxy for the native AudioCapture type. Do not hand-edit.
class AudioCapture(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, AudioCapture, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, AudioCapture, name)
    def __init__(self, *args, **kwargs):
        # Abstract on the Python side: instances come from AudioCapture_Create.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    if _newclass:
        Create = staticmethod(_hilens_internal.AudioCapture_Create)
    else:
        Create = _hilens_internal.AudioCapture_Create
    __swig_destroy__ = _hilens_internal.delete_AudioCapture
    __del__ = lambda self: None
    def Read(self, frames, arg3=1):
        return _hilens_internal.AudioCapture_Read(self, frames, arg3)
    def SetProperty(self, properties):
        return _hilens_internal.AudioCapture_SetProperty(self, properties)
    def GetProperty(self, properties):
        return _hilens_internal.AudioCapture_GetProperty(self, properties)
    def SetVolume(self, volume):
        return _hilens_internal.AudioCapture_SetVolume(self, volume)
    def GetVolume(self):
        return _hilens_internal.AudioCapture_GetVolume(self)
AudioCapture_swigregister = _hilens_internal.AudioCapture_swigregister
AudioCapture_swigregister(AudioCapture)

def AudioCapture_Create(*args):
    # Shadowed by the rebinding below (SWIG shadow-function pattern).
    return _hilens_internal.AudioCapture_Create(*args)
AudioCapture_Create = _hilens_internal.AudioCapture_Create

def PlayAacFile(filePath, vol):
    return _hilens_internal.PlayAacFile(filePath, vol)
PlayAacFile = _hilens_internal.PlayAacFile

def PlayHandleSigno(signo):
    return _hilens_internal.PlayHandleSigno(signo)
PlayHandleSigno = _hilens_internal.PlayHandleSigno
# SWIG-generated proxy for the native AudioOutput type. Do not hand-edit.
class AudioOutput(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, AudioOutput, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, AudioOutput, name)
    def __init__(self, *args, **kwargs):
        # Abstract on the Python side: instances come from AudioOutput_Create.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    if _newclass:
        Create = staticmethod(_hilens_internal.AudioOutput_Create)
    else:
        Create = _hilens_internal.AudioOutput_Create
    __swig_destroy__ = _hilens_internal.delete_AudioOutput
    __del__ = lambda self: None
    def Play(self):
        return _hilens_internal.AudioOutput_Play(self)
    def SetProperty(self, properties):
        return _hilens_internal.AudioOutput_SetProperty(self, properties)
    def GetProperty(self, properties):
        return _hilens_internal.AudioOutput_GetProperty(self, properties)
    def SetVolume(self, volume):
        return _hilens_internal.AudioOutput_SetVolume(self, volume)
    def GetVolume(self):
        return _hilens_internal.AudioOutput_GetVolume(self)
AudioOutput_swigregister = _hilens_internal.AudioOutput_swigregister
AudioOutput_swigregister(AudioOutput)

def AudioOutput_Create(*args):
    # Shadowed by the rebinding below (SWIG shadow-function pattern).
    return _hilens_internal.AudioOutput_Create(*args)
AudioOutput_Create = _hilens_internal.AudioOutput_Create
# SWIG-generated proxy for the native VideoCaptureWrapper type. Do not hand-edit.
class VideoCaptureWrapper(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, VideoCaptureWrapper, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, VideoCaptureWrapper, name)
    __repr__ = _swig_repr
    def __init__(self):
        this = _hilens_internal.new_VideoCaptureWrapper()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _hilens_internal.delete_VideoCaptureWrapper
    __del__ = lambda self: None
    def Init(self, *args):
        return _hilens_internal.VideoCaptureWrapper_Init(self, *args)
    def ReadArray(self, data):
        return _hilens_internal.VideoCaptureWrapper_ReadArray(self, data)
    def ReadError(self):
        return _hilens_internal.VideoCaptureWrapper_ReadError(self)
    def Width(self):
        return _hilens_internal.VideoCaptureWrapper_Width(self)
    def Height(self):
        return _hilens_internal.VideoCaptureWrapper_Height(self)
VideoCaptureWrapper_swigregister = _hilens_internal.VideoCaptureWrapper_swigregister
VideoCaptureWrapper_swigregister(VideoCaptureWrapper)

def CvtColorWrapper(srcData, dstData, rows, cols, code):
    # Shadowed by the rebinding below (SWIG shadow-function pattern).
    return _hilens_internal.CvtColorWrapper(srcData, dstData, rows, cols, code)
CvtColorWrapper = _hilens_internal.CvtColorWrapper
# SWIG-generated proxy extending Preprocessor; inherits its SWIG accessor maps.
class PreprocessorWrapper(Preprocessor):
    __swig_setmethods__ = {}
    for _s in [Preprocessor]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, PreprocessorWrapper, name, value)
    __swig_getmethods__ = {}
    for _s in [Preprocessor]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, PreprocessorWrapper, name)
    __repr__ = _swig_repr
    def __init__(self):
        this = _hilens_internal.new_PreprocessorWrapper()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _hilens_internal.delete_PreprocessorWrapper
    __del__ = lambda self: None
    def ResizeArray(self, srcData, dstData, cols, rows, w, h, type=0):
        return _hilens_internal.PreprocessorWrapper_ResizeArray(self, srcData, dstData, cols, rows, w, h, type)
    def CropArray(self, srcData, dstData, cols, rows, x, y, w, h, type=0):
        return _hilens_internal.PreprocessorWrapper_CropArray(self, srcData, dstData, cols, rows, x, y, w, h, type)
    def Init(self):
        return _hilens_internal.PreprocessorWrapper_Init(self)
PreprocessorWrapper_swigregister = _hilens_internal.PreprocessorWrapper_swigregister
PreprocessorWrapper_swigregister(PreprocessorWrapper)
class InferDataWrapper(InferData):
    """SWIG-generated proxy for the native InferDataWrapper.

    All methods delegate directly to the ``_hilens_internal`` C extension.
    """
    __swig_setmethods__ = {}
    # Merge the attribute setter/getter tables inherited from the base proxy.
    for _s in [InferData]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, InferDataWrapper, name, value)
    __swig_getmethods__ = {}
    for _s in [InferData]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, InferDataWrapper, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """Allocate the underlying native object (args forwarded to the ctor)."""
        this = _hilens_internal.new_InferDataWrapper(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _hilens_internal.delete_InferDataWrapper
    __del__ = lambda self: None
    def ToArrayUint8(self, data):
        """Delegate to native InferDataWrapper_ToArrayUint8."""
        return _hilens_internal.InferDataWrapper_ToArrayUint8(self, data)
    def ToArrayFloat(self, data):
        """Delegate to native InferDataWrapper_ToArrayFloat."""
        return _hilens_internal.InferDataWrapper_ToArrayFloat(self, data)
# SWIG boilerplate: register the proxy with the native module.
InferDataWrapper_swigregister = _hilens_internal.InferDataWrapper_swigregister
InferDataWrapper_swigregister(InferDataWrapper)
class ModelWrapper(_object):
    """SWIG-generated proxy for the native ModelWrapper.

    All methods delegate directly to the ``_hilens_internal`` C extension.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, ModelWrapper, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, ModelWrapper, name)
    __repr__ = _swig_repr
    def __init__(self):
        """Allocate the underlying native object and bind it to this proxy."""
        this = _hilens_internal.new_ModelWrapper()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _hilens_internal.delete_ModelWrapper
    __del__ = lambda self: None
    def Init(self, filename):
        """Delegate to native ModelWrapper_Init with the given *filename*."""
        return _hilens_internal.ModelWrapper_Init(self, filename)
    def InferWrapper(self, inputs, outputs):
        """Delegate to native ModelWrapper_InferWrapper."""
        return _hilens_internal.ModelWrapper_InferWrapper(self, inputs, outputs)
# SWIG boilerplate: register the proxy with the native module.
ModelWrapper_swigregister = _hilens_internal.ModelWrapper_swigregister
ModelWrapper_swigregister(ModelWrapper)
class DisplayWrapper(_object):
    """SWIG-generated proxy for the native DisplayWrapper.

    All methods delegate directly to the ``_hilens_internal`` C extension.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, DisplayWrapper, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, DisplayWrapper, name)
    __repr__ = _swig_repr
    def __init__(self):
        """Allocate the underlying native object and bind it to this proxy."""
        this = _hilens_internal.new_DisplayWrapper()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _hilens_internal.delete_DisplayWrapper
    __del__ = lambda self: None
    def Init(self, type, path=None):
        """Delegate to native DisplayWrapper_Init (path is optional)."""
        return _hilens_internal.DisplayWrapper_Init(self, type, path)
    def ShowArray(self, srcData, cols, rows):
        """Delegate to native DisplayWrapper_ShowArray."""
        return _hilens_internal.DisplayWrapper_ShowArray(self, srcData, cols, rows)
# SWIG boilerplate: register the proxy with the native module.
DisplayWrapper_swigregister = _hilens_internal.DisplayWrapper_swigregister
DisplayWrapper_swigregister(DisplayWrapper)
def GetSkillConfigText():
    """Delegate to native GetSkillConfigText."""
    return _hilens_internal.GetSkillConfigText()
# Rebind to the raw C entry point so calls skip the Python shim above.
GetSkillConfigText = _hilens_internal.GetSkillConfigText
def GetHardSampleConfigText():
    """Delegate to native GetHardSampleConfigText."""
    return _hilens_internal.GetHardSampleConfigText()
GetHardSampleConfigText = _hilens_internal.GetHardSampleConfigText
class AudioCaptureWrapper(_object):
    """SWIG-generated proxy for the native AudioCaptureWrapper.

    All methods delegate directly to the ``_hilens_internal`` C extension;
    the SAMPLE_RATE_*/BIT_WIDTH_*/SOUND_MODE_* attributes re-export native
    enum constants on the proxy class.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, AudioCaptureWrapper, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, AudioCaptureWrapper, name)
    __repr__ = _swig_repr
    def __init__(self):
        """Allocate the underlying native object and bind it to this proxy."""
        this = _hilens_internal.new_AudioCaptureWrapper()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _hilens_internal.delete_AudioCaptureWrapper
    __del__ = lambda self: None
    def Init(self, *args):
        """Delegate to native AudioCaptureWrapper_Init (args forwarded as-is)."""
        return _hilens_internal.AudioCaptureWrapper_Init(self, *args)
    def SetVolume(self, vol):
        """Delegate to native AudioCaptureWrapper_SetVolume."""
        return _hilens_internal.AudioCaptureWrapper_SetVolume(self, vol)
    def GetVolume(self):
        """Delegate to native AudioCaptureWrapper_GetVolume."""
        return _hilens_internal.AudioCaptureWrapper_GetVolume(self)
    def ReadArray(self, numFrames):
        """Delegate to native AudioCaptureWrapper_ReadArray."""
        return _hilens_internal.AudioCaptureWrapper_ReadArray(self, numFrames)
    def ToNumpyArray(self, data):
        """Delegate to native AudioCaptureWrapper_ToNumpyArray."""
        return _hilens_internal.AudioCaptureWrapper_ToNumpyArray(self, data)
    # totalSize is exposed as a SWIG property backed by native get/set.
    __swig_setmethods__["totalSize"] = _hilens_internal.AudioCaptureWrapper_totalSize_set
    __swig_getmethods__["totalSize"] = _hilens_internal.AudioCaptureWrapper_totalSize_get
    if _newclass:
        totalSize = _swig_property(_hilens_internal.AudioCaptureWrapper_totalSize_get, _hilens_internal.AudioCaptureWrapper_totalSize_set)
    # Native enum constants re-exported on the proxy class:
    SAMPLE_RATE_AUDIO_SAMPLE_RATE_8000 = _hilens_internal.AudioCaptureWrapper_SAMPLE_RATE_AUDIO_SAMPLE_RATE_8000
    SAMPLE_RATE_AUDIO_SAMPLE_RATE_12000 = _hilens_internal.AudioCaptureWrapper_SAMPLE_RATE_AUDIO_SAMPLE_RATE_12000
    SAMPLE_RATE_AUDIO_SAMPLE_RATE_11025 = _hilens_internal.AudioCaptureWrapper_SAMPLE_RATE_AUDIO_SAMPLE_RATE_11025
    SAMPLE_RATE_AUDIO_SAMPLE_RATE_16000 = _hilens_internal.AudioCaptureWrapper_SAMPLE_RATE_AUDIO_SAMPLE_RATE_16000
    SAMPLE_RATE_AUDIO_SAMPLE_RATE_22050 = _hilens_internal.AudioCaptureWrapper_SAMPLE_RATE_AUDIO_SAMPLE_RATE_22050
    SAMPLE_RATE_AUDIO_SAMPLE_RATE_24000 = _hilens_internal.AudioCaptureWrapper_SAMPLE_RATE_AUDIO_SAMPLE_RATE_24000
    SAMPLE_RATE_AUDIO_SAMPLE_RATE_32000 = _hilens_internal.AudioCaptureWrapper_SAMPLE_RATE_AUDIO_SAMPLE_RATE_32000
    SAMPLE_RATE_AUDIO_SAMPLE_RATE_44100 = _hilens_internal.AudioCaptureWrapper_SAMPLE_RATE_AUDIO_SAMPLE_RATE_44100
    SAMPLE_RATE_AUDIO_SAMPLE_RATE_48000 = _hilens_internal.AudioCaptureWrapper_SAMPLE_RATE_AUDIO_SAMPLE_RATE_48000
    SAMPLE_RATE_AUDIO_SAMPLE_RATE_64000 = _hilens_internal.AudioCaptureWrapper_SAMPLE_RATE_AUDIO_SAMPLE_RATE_64000
    SAMPLE_RATE_AUDIO_SAMPLE_RATE_96000 = _hilens_internal.AudioCaptureWrapper_SAMPLE_RATE_AUDIO_SAMPLE_RATE_96000
    BIT_WIDTH_AUDIO_BIT_WIDTH_16 = _hilens_internal.AudioCaptureWrapper_BIT_WIDTH_AUDIO_BIT_WIDTH_16
    NUM_SAMPLES_PER_FRAME_MIN_SAMPLES = _hilens_internal.AudioCaptureWrapper_NUM_SAMPLES_PER_FRAME_MIN_SAMPLES
    NUM_SAMPLES_PER_FRAME_MAX_SAMPLES = _hilens_internal.AudioCaptureWrapper_NUM_SAMPLES_PER_FRAME_MAX_SAMPLES
    SOUND_MODE_AUDIO_SOUND_MODE_MONO = _hilens_internal.AudioCaptureWrapper_SOUND_MODE_AUDIO_SOUND_MODE_MONO
    SOUND_MODE_AUDIO_SOUND_MODE_STEREO = _hilens_internal.AudioCaptureWrapper_SOUND_MODE_AUDIO_SOUND_MODE_STEREO
# SWIG boilerplate: register the proxy with the native module.
AudioCaptureWrapper_swigregister = _hilens_internal.AudioCaptureWrapper_swigregister
AudioCaptureWrapper_swigregister(AudioCaptureWrapper)
class AudioOutputWrapper(_object):
    """SWIG-generated proxy for the native AudioOutputWrapper.

    All methods delegate directly to the ``_hilens_internal`` C extension.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, AudioOutputWrapper, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, AudioOutputWrapper, name)
    __repr__ = _swig_repr
    def __init__(self):
        """Allocate the underlying native object and bind it to this proxy."""
        this = _hilens_internal.new_AudioOutputWrapper()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _hilens_internal.delete_AudioOutputWrapper
    __del__ = lambda self: None
    def Init(self, filePath):
        """Delegate to native AudioOutputWrapper_Init with *filePath*."""
        return _hilens_internal.AudioOutputWrapper_Init(self, filePath)
    def Play(self):
        """Delegate to native AudioOutputWrapper_Play."""
        return _hilens_internal.AudioOutputWrapper_Play(self)
    def PlayAacFile(self, filePath, vol):
        """Delegate to native AudioOutputWrapper_PlayAacFile."""
        return _hilens_internal.AudioOutputWrapper_PlayAacFile(self, filePath, vol)
# SWIG boilerplate: register the proxy with the native module.
AudioOutputWrapper_swigregister = _hilens_internal.AudioOutputWrapper_swigregister
AudioOutputWrapper_swigregister(AudioOutputWrapper)
class BboxWrapper(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, BboxWrapper, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, BboxWrapper, name)
__repr__ = _swig_repr
__swig_setmethods__["bboxXmin"] = _hilens_internal.BboxWrapper_bboxXmin_set
__swig_getmethods__["bboxXmin"] = _hilens_internal.BboxWrapper_bboxXmin_get
if _newclass:
bboxXmin = _swig_property(_hilens_internal.BboxWrapper_bboxXmin_get, _hilens_internal.BboxWrapper_bboxXmin_set)
__swig_setmethods__["bboxYmin"] = _hilens_internal.BboxWrapper_bboxYmin_set
__swig_getmethods__["bboxYmin"] = _hilens_internal.BboxWrapper_bboxYmin_get
if _newclass:
bboxYmin = _swig_property(_hilens_internal.BboxWrapper_bboxYmin_get, _hilens_internal.BboxWrapper_bboxYmin_set)
__swig_setmethods__["bboxXmax"] = _hilens_internal.BboxWrapper_bboxXmax_set
__swig_getmethods__["bboxXmax"] = _hilens_internal.BboxWrapper_bboxXmax_get
if _newclass:
bboxXmax = _swig_property(_hilens_internal.BboxWrapper_bboxXmax_get, _hilens_internal.BboxWrapper_bboxXmax_set)
__swig_setmethods__["bboxYmax"] = _hilens_internal.BboxWrapper_bboxYmax_set
__swig_getmethods__["bboxYmax"] = _hilens_internal.BboxWrapper_bboxYmax_get
if _newclass:
bboxYmax = _swig_property(_hilens_internal.BboxWrapper_bboxYmax_get, _hilens_internal.BboxWrapper_bboxYmax_set)
__swig_setmethods__["bboxScore"] = _hilens_internal.BboxWrapper_bboxScore_set
__swig_getmethods__["bboxScore"] = _hilens_internal.BboxWrapper_bboxScore_get
if _newclass:
bboxScore = _swig_property(_hilens_internal.BboxWrapper_bboxScore_get, _hilens_internal.BboxWrapper_bboxScore_set)
__swig_setmethods__["bboxLabel"] = _hilens_internal.BboxWrapper_bboxLabel_set
__swig_getmethods__["bboxLabel"] = _hilens_internal.BboxWrapper_bboxLabel_get
if _newclass:
bboxLabel = _swig_property(_hilens_internal.BboxWrapper_bboxLabel_get, _hilens_internal.BboxWrapper_bboxLabel_set)
def __init__(self):
| |
from flask import render_template, flash, redirect, url_for, request
from app import app, db, scheduler
from app.forms import LoginForm, RegistrationForm, ResetPasswordRequestForm, ResetPasswordForm
from flask_login import current_user, login_user, logout_user, login_required
from app.models import User
from werkzeug.urls import url_parse
from app.email import send_password_reset_email
import sqlite3
import XIRR
import datetime
from scrape import look_for_data
from config import Config
def add_asset_units(calculation_date):
    """Return total units held per asset id as of *calculation_date*.

    Sums the ``unidades`` column of every ``movimiento_activo`` row with
    ``fecha <= calculation_date``.  Totals below 1e-6 (including float
    residue from a fully-sold position) are clamped to exactly 0.

    Args:
        calculation_date: ISO 'YYYY-MM-DD' string (or date) compared
            against the stored ``fecha`` column.

    Returns:
        dict: activo_id -> units held (0 for closed positions).
    """
    conn = sqlite3.connect('app.db')
    try:
        c = conn.cursor()
        c.execute('SELECT * FROM movimiento_activo WHERE fecha<=? ORDER BY fecha DESC',
                  (calculation_date,))
        rows = c.fetchall()
    finally:
        conn.close()  # original leaked the connection
    units = {}
    for row in rows:
        # row[4] = activo_id, row[2] = unidades
        units[row[4]] = units.get(row[4], 0) + row[2]
    for key, value in units.items():
        # clamp tiny (or negative) residue so sold-out positions compare equal to 0
        if value < 0.000001:
            units[key] = 0
    return units
def assets_with_units(calculation_date):
    """Return only the assets that still hold a non-zero number of units."""
    totals = add_asset_units(calculation_date)
    return {asset_id: qty for asset_id, qty in totals.items() if qty != 0}
def date_str_to_date(fecha):
    """Parse an ISO 'YYYY-MM-DD' string into a ``datetime.date``."""
    year = int(fecha[0:4])
    month = int(fecha[5:7])
    day = int(fecha[8:])
    return datetime.date(year, month, day)
def date_to_eu_format(fecha):
    """Reformat an ISO 'YYYY-MM-DD' string as European 'DD-MM-YYYY'."""
    parsed = date_str_to_date(fecha)
    return parsed.strftime("%d-%m-%Y")
def to_euros(value, date, currency):
    """Convert *value* from *currency* into euros using the stored FX quote.

    FX rates live in ``cotizacion`` as pseudo-assets: activo_id 11 holds the
    GBP rate and activo_id 10 the USD rate, so a foreign amount is divided by
    the most recent rate on or before *date*.  EUR (or any other currency)
    is returned unchanged.

    Args:
        value: amount in *currency*.
        date: ISO date string used as the quote cutoff.
        currency: 'GBP', 'USD' or anything else (treated as EUR).

    Returns:
        float: the amount in euros.
    """
    # activo_id of the FX-rate pseudo-asset for each supported currency
    fx_assets = {'GBP': 11, 'USD': 10}
    asset_id = fx_assets.get(currency)
    if asset_id is None:
        # already EUR (or unsupported currency): no conversion needed
        return value
    conn = sqlite3.connect('app.db')
    try:
        c = conn.cursor()
        c.execute('SELECT * FROM cotizacion WHERE activo_id=? and fecha<=? ORDER BY fecha DESC LIMIT 1',
                  (asset_id, date))
        quote = c.fetchone()
    finally:
        conn.close()  # original leaked the connection
    # quote[2] = rate (EUR -> currency); divide to get back to euros
    return value / quote[2]
def npv_calculation(calculation_date):
    """Compute the portfolio snapshot (per-asset rows + total NPV) at *calculation_date*.

    For every asset still holding units, gathers name, units, latest quote,
    currency, EUR value, XIRR rate and accumulated profit, and sums the
    per-asset EUR values into the portfolio NPV.

    Returns:
        tuple: (response, NPV) where *response* is a list of display rows
        [name, number, date, VL, currency, value, rate, activo_id, profit]
        (display fields already formatted as strings) and *NPV* is the total
        portfolio value in euros (float).
    """
    # Asset ids 15 and 37 are account-style special cases (CajaIngenieros, eToro):
    # their cash flows come from investment_movements rather than movimiento_activo.
    conn = sqlite3.connect('app.db')
    c = conn.cursor()
    units = assets_with_units(calculation_date)
    NPV = 0
    response = []
    for key in units:
        profit = 0
        # Asset master data: q[0]=id, q[2]=name, q[5]=currency.
        c.execute('SELECT * FROM activo WHERE id=?', (key,))
        query = c.fetchone()
        activo_id = query[0]
        name = query[2]
        number = units[key]
        currency = query[5]
        # Latest quote on or before the calculation date.
        c.execute('SELECT * FROM cotizacion WHERE activo_id=? and fecha<=? ORDER BY fecha DESC LIMIT 1', (key, calculation_date))
        query = c.fetchone()
        # If an asset has no quote on or before this date the lookup returns
        # nothing; fall back to the oldest quote available for the asset.
        if not query:
            c.execute('SELECT * FROM cotizacion WHERE activo_id=? ORDER BY fecha ASC', (key,))
            query_bis = c.fetchall()
            date = query_bis[0][1]
            VL = query_bis[0][2]
        else:
            date = query[1]
            VL = query[2]
        # XIRR
        # This branch handles deposit-like assets whose unit count never
        # changes (stored with exactly 1 unit).
        if number == 1:
            if key == 15:
                # CajaIngenieros account: cash flows from investment_movements.
                c.execute('SELECT * FROM investment_movements WHERE fecha<=? and cuenta=?', (calculation_date, "CajaIngenieros"))
                query = c.fetchall()
                values = []
                dates = []
                for q in query:
                    values.append(q[2])
                    dates.append(date_str_to_date(q[1]))
                # Final flow: the latest account valuation.
                c.execute('SELECT * FROM cotizacion WHERE activo_id=? and fecha<=? ORDER BY fecha DESC LIMIT 1', (key, calculation_date))
                query = c.fetchone()
                values.append(query[2])
                dates.append(date_str_to_date(query[1]))
                try:
                    rate = "{0:.2f}".format(XIRR.xirr(values, dates) * 100) + "%"
                except: # noqa
                    rate = "XIRR error"
                for v in values:
                    profit = profit + v
            elif key == 37:
                # eToro account: same scheme, but the valuation is in USD.
                c.execute('SELECT * FROM investment_movements WHERE fecha<=? and cuenta=?',
                          (calculation_date, "eToro"))
                query = c.fetchall()
                values = []
                dates = []
                for q in query:
                    values.append(q[2])
                    dates.append(date_str_to_date(q[1]))
                c.execute('SELECT * FROM cotizacion WHERE activo_id=? and fecha<=? ORDER BY fecha DESC LIMIT 1',
                          (key, calculation_date))
                query = c.fetchone()
                valor_final_en_euros = to_euros(query[2], calculation_date, 'USD')
                values.append(valor_final_en_euros)
                dates.append(date_str_to_date(query[1]))
                try:
                    rate = "{0:.2f}".format(XIRR.xirr(values, dates) * 100) + "%"
                except: # noqa
                    rate = "XIRR error"
                for v in values:
                    profit = profit + v
            else:
                rate = ""
        else:
            # Regular asset: cash flows are the purchases/sales themselves
            # (negated, since a purchase is money out) plus the current value.
            c.execute('SELECT * FROM movimiento_activo WHERE activo_id=? and fecha<=? ', (key, calculation_date))
            query = c.fetchall()
            values = []
            dates = []
            for q in query:
                number_2 = q[2] * (-1)
                price = q[3]
                date_2 = q[1]
                v = number_2 * price
                profit = profit + to_euros(v, date_2, currency)
                values.append(v)
                dates.append(date_str_to_date(date_2))
            values.append(number * VL)
            dates.append(date_str_to_date(date))
            try:
                rate = "{0:.2f}".format(XIRR.xirr(values, dates) * 100) + "%"
            except: # noqa
                rate = "XIRR error"
        # END XIRR
        # Convert the position value into euros using the FX pseudo-assets
        # (activo_id 11 = GBP rate, 10 = USD rate).
        if currency == 'EUR':
            value = units[key] * VL
        elif currency == 'GBP':
            c.execute('SELECT * FROM cotizacion WHERE activo_id=? and fecha<=? ORDER BY fecha DESC LIMIT 1', (11, calculation_date))
            query = c.fetchone()
            value_currency = query[2]
            value = units[key] * VL / value_currency
        elif currency == 'USD':
            c.execute('SELECT * FROM cotizacion WHERE activo_id=? and fecha<=? ORDER BY fecha DESC LIMIT 1', (10, calculation_date))
            query = c.fetchone()
            value_currency = query[2]
            value = number * VL / value_currency
        # For the account-style assets the valuation is already part of the
        # cash-flow sum; for the rest, add the current value to the flows.
        if key in [15, 37]:
            profit = profit
        else:
            profit = profit + value
        NPV = NPV + value
        # Format display fields as strings.
        number = "{0:.2f}".format(number)
        VL = "{0:.2f}".format(VL)
        profit = "{0:.2f}".format(profit) + "€"
        if number == "1.00":
            # Deposit-like assets: hide the meaningless unit count and VL,
            # and only show profit for the account-style special cases.
            number = ""
            VL = ""
            if activo_id in [15, 37]:
                profit = profit
            else:
                profit = ""
        value = "{0:.2f}".format(value) + "€"
        date = date_to_eu_format(date)
        response.append([name, number, date, VL, currency, value, rate, activo_id, profit])
    # NOTE(review): the connection is never closed; consider conn.close() here.
    return response, NPV
@app.route('/')
@app.route('/index', methods=['GET', 'POST'])
@login_required
def index():
    """Home page: latest two quotes per downloadable asset plus scrape timestamps.

    A POST triggers an immediate scrape and reschedules the recurring job.
    """
    conn = sqlite3.connect('app.db')
    c = conn.cursor()
    if request.method == 'POST':
        # Run the scraper now, then restart the recurring job from scratch so
        # the next automatic run is a full interval away.
        look_for_data()
        scheduler.delete_job('job1')
        scheduler.add_job('job1', look_for_data, trigger='interval', seconds=Config.JOBS[0]['seconds'])
    response = []
    # Only assets flagged for download (descargar=1).
    c.execute('SELECT * FROM activo WHERE descargar=? ORDER BY nombre', (1,))
    query = c.fetchall()
    for q in query:
        # Two most recent quotes: q[0]=id, q[1]=ticker, q[2]=name.
        c.execute('SELECT * FROM cotizacion WHERE activo_id=? ORDER BY fecha DESC LIMIT 2', (q[0],))
        data = c.fetchall()
        if len(data) == 2:
            ticker = q[1]
            nombre = q[2]
            fechaultima = date_to_eu_format(data[0][1])
            VLultimo = data[0][2]
            VLanterior = data[1][2]
            # Percent change between the two latest quotes.
            variation = (VLultimo - VLanterior) / VLanterior * 100
            VLultimo = "{0:.4f}".format(VLultimo)
            VLanterior = "{0:.4f}".format(VLanterior)
            fechaanterior = date_to_eu_format(data[1][1])
            variation = "{0:.2f}".format(variation)
            activo_id = q[0]
        elif len(data) == 1:
            # Only one quote yet: no previous value or variation to show.
            ticker = q[1]
            nombre = q[2]
            fechaultima = date_to_eu_format(data[0][1])
            VLultimo = data[0][2]
            VLanterior = ""
            variation = ""
            VLultimo = "{0:.4f}".format(VLultimo)
            fechaanterior = ""
            activo_id = q[0]
        # NOTE(review): if an asset has no quotes at all, this appends the
        # values left over from the previous iteration (or raises NameError on
        # the first asset) — confirm every downloadable asset has >= 1 quote.
        response.append([ticker, nombre, fechaultima, VLultimo, fechaanterior, VLanterior, variation, activo_id])
    # Last scrape time is persisted as an epoch string in the variables table.
    c.execute("SELECT * from variables WHERE name=?", ("last_scrape",))
    query = c.fetchone()
    if query is None:
        last_scrape = 0
    else:
        last_scrape = int(float(query[1]))
    next_run_time_epoch = scheduler.get_job('job1').next_run_time.timestamp()
    t_last = datetime.datetime.utcfromtimestamp(last_scrape)
    t_next = datetime.datetime.utcfromtimestamp(next_run_time_epoch)
    data = [t_last, t_next]
    return render_template('index.html', title='Home', table=response, data=data)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and redirect to the originally requested page."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if not form.validate_on_submit():
        return render_template('login.html', title='Sign In', form=form)
    user = User.query.filter_by(username=form.username.data).first()
    if user is None or not user.check_password(form.password.data):
        flash('Invalid username or password')
        return redirect(url_for('login'))
    login_user(user, remember=form.remember_me.data)
    target = request.args.get('next')
    # Reject absolute URLs so a crafted ?next= cannot redirect off-site.
    if not target or url_parse(target).netloc != '':
        target = url_for('index')
    return redirect(target)
@app.route('/logout')
def logout():
    """Log the current user out and send them back to the index page."""
    logout_user()
    return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new user account, then send the visitor to the login page."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = RegistrationForm()
    if not form.validate_on_submit():
        return render_template('register.html', title='Register', form=form)
    new_user = User(username=form.username.data, email=form.email.data)
    new_user.set_password(form.password.data)
    db.session.add(new_user)
    db.session.commit()
    flash('Congratulations, you are now a registered user!')
    return redirect(url_for('login'))
@app.route('/reset_password_request', methods=['GET', 'POST'])
def reset_password_request():
    """Email a password-reset token to the address supplied in the form."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = ResetPasswordRequestForm()
    if form.validate_on_submit():
        account = User.query.filter_by(email=form.email.data).first()
        # Flash the same message whether or not the address exists, so the
        # form does not leak which emails are registered.
        if account:
            send_password_reset_email(account)
        flash('Check your email for the instructions to reset your password')
        return redirect(url_for('login'))
    return render_template('reset_password_request.html',
                           title='Reset Password', form=form)
@app.route('/reset_password/<token>', methods=['GET', 'POST'])
def reset_password(token):
    """Let a user holding a valid reset *token* choose a new password."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    user = User.verify_reset_password_token(token)
    if not user:
        # Invalid or expired token: silently bounce to the index page.
        return redirect(url_for('index'))
    form = ResetPasswordForm()
    if not form.validate_on_submit():
        return render_template('reset_password.html', form=form)
    user.set_password(form.password.data)
    db.session.commit()
    flash('Your password has been reset.')
    return redirect(url_for('login'))
@app.route('/assets')
@login_required
def assets():
    """List all assets as [id, name] pairs sorted by name."""
    conn = sqlite3.connect('app.db')
    try:
        c = conn.cursor()
        c.execute('SELECT * FROM activo')
        rows = c.fetchall()
    finally:
        conn.close()  # original leaked the connection
    # q[0] = id, q[2] = nombre; sort case-sensitively by name as before
    response = sorted(([q[0], q[2]] for q in rows), key=lambda asset: asset[1])
    return render_template('assets.html', title='Assets', query=response)
@app.route('/asset/<id>')
@login_required
def asset(id):
    """Detail page for one asset: master row, units held today, last 5 quotes
    and last 5 movements.

    Args:
        id: asset id from the URL (string).
    """
    conn = sqlite3.connect('app.db')
    c = conn.cursor()
    c.execute('SELECT * FROM activo WHERE id=?', (id,))
    query = c.fetchone()
    # response_0: all master-table columns followed by the current unit count.
    response_0 = []
    for q in query:
        response_0.append(q)
    # NOTE(review): a date object is passed where other callers pass an ISO
    # string; sqlite3 adapts it to 'YYYY-MM-DD', so the comparison matches.
    units = add_asset_units(datetime.date.today())
    # The asset may have no movements at all: default its unit count to 0.
    try:
        units[int(id)]
    except KeyError:
        units[int(id)] = 0
    response_0.append(units[int(id)])
    # data_1: the five most recent quotes as [eu_date, VL].
    c.execute('SELECT * FROM cotizacion WHERE activo_id=? ORDER BY fecha DESC LIMIT 5', (id,))
    data_1 = c.fetchall()
    response_1 = []
    for d in data_1:
        line = []
        line.append(date_to_eu_format(d[1]))
        line.append(d[2])
        response_1.append(line)
    # data_2: the five most recent movements as [eu_date, units, price].
    c.execute('SELECT * FROM movimiento_activo WHERE activo_id=? ORDER BY fecha DESC LIMIT 5', (id,))
    data_2 = c.fetchall()
    response_2 = []
    for d in data_2:
        line = []
        line.append(date_to_eu_format(d[1]))
        line.append(d[2])
        line.append(d[3])
        response_2.append(line)
    return render_template('asset.html', title='Assets', query=response_0, data_1=response_1, data_2=response_2)
@app.route('/asset/VL/<id>', methods=['POST'])
@login_required
def asset_vl(id):
    """Insert or replace a quote (fecha, VL) for asset *id*, then return to its page."""
    fecha = request.form.get('fecha')
    VL = request.form.get('VL')
    conn = sqlite3.connect('app.db')
    try:
        c = conn.cursor()
        # Parameterized query: form values are never interpolated into SQL.
        c.execute("INSERT OR REPLACE INTO cotizacion (fecha, VL, activo_id) VALUES (?, ?, ?)",
                  (fecha, VL, id,))
        conn.commit()
    finally:
        conn.close()  # original leaked the connection
    return redirect(url_for('asset', id=id))
@app.route('/asset/movement/<id>', methods=['POST'])
@login_required
def asset_movement(id):
conn = sqlite3.connect('app.db')
c = conn.cursor()
fecha = request.form.get('fecha')
unidades = request.form.get('unidades')
precio = request.form.get('precio')
c.execute("INSERT OR REPLACE INTO movimiento_activo (fecha, unidades, precio, activo_id, user_id) VALUES (?, ?, ?, ?, ?)", (fecha, unidades, precio, id, 1,))
| |
# Repo: ckulal/cephci — file: tests/rados/stretch_cluster.py
import datetime
import json
import logging
import re
import time
from ceph.ceph_admin import CephAdmin
from ceph.parallel import parallel
from tests.rados.mute_alerts import get_alerts
from tests.rados.rados_prep import create_pool
from tests.rados.test_9281 import do_rados_get, do_rados_put
log = logging.getLogger(__name__)
def run(ceph_cluster, **kw):
    """
    enables connectivity mode and deploys stretch cluster with arbiter mon node
    Actions Performed:
    1. Disables the automatic crush map update
    2. Collects the OSD daemons in the cluster and split them into 2 sites.
    3. If add capacity is selected, only half of the OSD's will be added to various sites initially.
    4. Adds the stretch rule into crush map.
    5. Adding monitors into the 2 sites.
    6. Create a replicated pool and deploy stretch mode.
    7. Create a test pool, write some data and perform add capacity. ( add osd nodes into two sites )
    8. Check for the bump in election epochs throughout.
    9. Check the acting set in PG for 4 OSD's. 2 from each site.
    Args:
        ceph_cluster (ceph.ceph.Ceph): ceph cluster
    Returns:
        int: 0 on success, 1 on any failure (test-framework convention)
    """
    log.info("Deploying stretch cluster with arbiter mon node")
    log.info(run.__doc__)
    config = kw.get("config")
    cephadm = CephAdmin(cluster=ceph_cluster, **config)
    client_node = ceph_cluster.get_nodes(role="client")[0]
    tiebreaker_node = ceph_cluster.get_nodes(role="installer")[0]
    # NOTE(review): 'and' means this only fires when *both* nodes are missing;
    # confirm 'or' was not intended (indexing [0] above would already raise).
    if not client_node and not tiebreaker_node:
        log.error(
            "Admin client and tie breaker node not configured, Cannot modify crush rules for stretch cluster"
        )
        return 1
    mon_state = get_mon_details(node=cephadm)
    # A stretch cluster needs 2 mons per site plus the arbiter: 5 minimum.
    if len(list(mon_state["monitors"])) < 5:
        log.error(
            f"Minimum of 5 Mon daemons needed to deploy a stretch cluster, found : {len(mon_state['monitors'])}"
        )
        return 1
    osd_details = get_osd_details(node=cephadm)
    if len(osd_details.keys()) < 4:
        log.error(
            f"Minimum of 4 osd daemons needed to deploy a stretch cluster, found : {len(osd_details.keys())}"
        )
        return 1
    # disabling automatic crush update
    cmd = "ceph config set osd osd_crush_update_on_start false"
    cephadm.shell([cmd])
    # Collecting osd details and splitting them into Site A and Site B
    sorted_osds = sort_osd_sites(all_osd_details=osd_details)
    site_a_osds = sorted_osds[0]
    site_b_osds = sorted_osds[1]
    # With add-capacity, start with only half the OSDs; the rest join later.
    if config.get("perform_add_capacity"):
        site_a_osds = sorted_osds[0][: (len(sorted_osds[0]) // 2)]
        site_b_osds = sorted_osds[1][: (len(sorted_osds[1]) // 2)]
    if not set_osd_sites(
        node=cephadm,
        osds=site_a_osds,
        site=1,
        all_osd_details=osd_details,
    ):
        log.error("Failed to move the OSD's into sites")
        return 1
    if not set_osd_sites(
        node=cephadm,
        osds=site_b_osds,
        site=2,
        all_osd_details=osd_details,
    ):
        log.error("Failed to move the OSD's into sites")
        return 1
    # Install the stretch CRUSH rule used when stretch mode is enabled below.
    stretch_rule_name = "stretch_rule"
    if not setup_crush_rule(node=client_node, rule_name=stretch_rule_name):
        log.error("Failed to Add crush rules in the crush map")
        return 1
    # Setting the election strategy to connectivity mode
    cmd = "/bin/ceph mon set election_strategy connectivity"
    cephadm.shell([cmd])
    # Sleeping for 5 sec for the strategy to be active
    time.sleep(5)
    init_mon_state = get_mon_details(node=cephadm)
    # Checking if mon elections happened after changing election strategy.
    # NOTE(review): this errors only when the epoch went *down*; an unchanged
    # epoch passes — confirm that is the intended check.
    if mon_state["epoch"] > init_mon_state["epoch"]:
        log.error("Election epoch not bumped up after setting the connectivity mode.")
        return 1
    # Checking updated election strategy in mon map (3 == connectivity)
    if init_mon_state["election_strategy"] != 3:
        log.error(
            f"Election strategy is not connectivity mode.\n Currently set {mon_state['election_strategy']}"
        )
        return 1
    log.info("Enabled connectivity mode on the cluster")
    log.info(f"selecting mon : {tiebreaker_node} as tie breaker monitor on site 3")
    if not set_mon_sites(node=cephadm, tiebreaker_node=tiebreaker_node):
        log.error("Failed to ad monitors into respective sites")
        return 1
    # All the existing pools should be automatically changed with stretch rule. Creating a test pool
    pool_name = "test_pool_1"
    if not create_pool(
        node=cephadm, disable_pg_autoscale=True, pool_name=pool_name, pg_num=16
    ):
        log.error("Failed to create the replicated Pool")
        return 1
    log.info("Monitors added to respective sites. enabling stretch rule")
    cmd = f"/bin/ceph mon enable_stretch_mode {tiebreaker_node.hostname} {stretch_rule_name} datacenter"
    try:
        cephadm.shell([cmd])
    except Exception as err:
        log.error(
            f"Error while enabling stretch rule on the datacenter. Command : {cmd}"
        )
        log.error(err)
        return 1
    # Enabling stretch mode should trigger another election epoch bump.
    if get_mon_details(node=cephadm)["epoch"] < init_mon_state["epoch"]:
        log.error("Election epoch not bumped up after Enabling strech mode")
        return 1
    if config.get("perform_add_capacity"):
        pool_name = "test_stretch_pool"
        if not create_pool(
            node=cephadm,
            disable_pg_autoscale=True,
            pool_name=pool_name,
            crush_rule=stretch_rule_name,
        ):
            log.error("Failed to create the replicated Pool")
            return 1
        do_rados_put(mon=client_node, pool=pool_name, nobj=100)
        log.info("Performing add Capacity after the deployment of stretch cluster")
        # The remaining OSDs (held back earlier) now join their sites.
        site_a_osds = [osd for osd in sorted_osds[0] if osd not in site_a_osds]
        site_b_osds = [osd for osd in sorted_osds[1] if osd not in site_b_osds]
        if not set_osd_sites(
            node=cephadm,
            osds=site_a_osds,
            site=1,
            all_osd_details=osd_details,
        ):
            log.error("Failed to move the OSD's into sites")
            return 1
        if not set_osd_sites(
            node=cephadm,
            osds=site_b_osds,
            site=2,
            all_osd_details=osd_details,
        ):
            log.error("Failed to move the OSD's into sites")
            return 1
        # Sleeping for 10 seconds after adding OSD's for the PG re-balancing to start and begin rados get
        time.sleep(10)
        with parallel() as p:
            p.spawn(do_rados_get, client_node, pool_name, 10)
            for res in p:
                log.info(res)
    # Checking if the pools have been updated with the new crush rules:
    # a stretch PG must have 4 OSDs in its set, 2 from each site.
    acting_set = get_pg_acting_set(node=cephadm, pool_name=pool_name)
    if len(acting_set) != 4:
        log.error(
            f"There are {len(acting_set)} OSD's in PG. OSDs: {acting_set}. Stretch cluster requires 4"
        )
        return 1
    log.info(f"Acting set : {acting_set} Consists of 4 OSD's per PG")
    log.info("Stretch rule with arbiter monitor node set up successfully")
    return 0
def get_pg_acting_set(node: "CephAdmin", pool_name: str) -> list:
    """
    Fetches the PG details about the given pool and returns the OSD set of a
    sample PG (<pool_id>.0) of the pool.

    NOTE(review): despite the name, this returns the *up* set (``res["up"]``),
    not the acting set (``res["acting"]``); the two coincide only when no
    recovery/backfill is in progress — confirm which one is intended.

    Args:
        node: Cephadm node where the commands need to be executed
        pool_name: name of the pool whose one of the OSD sets is needed

    Returns: list osd's part of the set, eg : [3,15,20]

    Raises:
        ValueError: if *pool_name* is not present in the osd dump
        (the original fell through to an unbound-variable NameError here).
    """
    # Collecting details about the cluster
    cmd = "ceph osd dump --format=json"
    out, _ = node.shell([cmd])
    dump = json.loads(out)
    pool_id = None
    for pool in dump["pools"]:
        if pool["pool_name"] == pool_name:
            pool_id = pool["pool"]
            break
    if pool_id is None:
        raise ValueError(f"pool {pool_name!r} not found in osd dump")
    # Collecting the details of the 1st PG in the pool <ID>.0
    pg_num = f"{pool_id}.0"
    cmd = f"ceph pg map {pg_num} --format=json"
    out, _ = node.shell([cmd])
    res = json.loads(out)
    return res["up"]
def setup_crush_rule(node, rule_name: str) -> bool:
    """
    Adds the crush rule required for stretch cluster into crush map.

    The rule places 2 replicas on hosts in ``site1`` and 2 on hosts in
    ``site2`` (id 111 in the compiled map).

    Args:
        node: ceph client node where the commands need to be executed
        rule_name: Name of the crush rule to add

    Returns: True -> pass, False -> fail
    """
    rule = rule_name
    # Rule body appended verbatim to the decompiled crush map by add_crush_rules.
    rules = """id 111
type replicated
min_size 1
max_size 10
step take site1
step chooseleaf firstn 2 type host
step emit
step take site2
step chooseleaf firstn 2 type host
step emit"""
    if not add_crush_rules(node=node, rule_name=rule, rules=rules):
        log.error("Failed to add the new crush rule")
        return False
    return True
def add_crush_rules(node, rule_name: str, rules: str) -> bool:
    """
    Adds the given crush rules into the crush map.

    Decompiles the live crush map to text, appends ``rule <name> { <rules> }``,
    recompiles and injects it back into the cluster.

    Args:
        node: ceph client node where the commands need to be executed
        rule_name: Name of the crush rule to add
        rules: The rules for crush

    Returns: True -> pass, False -> fail
    """
    try:
        # Getting the crush map
        cmd = "/bin/ceph osd getcrushmap > /tmp/crush.map.bin"
        node.exec_command(cmd=cmd)
        # changing it to text for editing
        cmd = "/bin/crushtool -d /tmp/crush.map.bin -o /tmp/crush.map.txt"
        node.exec_command(cmd=cmd)
        # Adding the crush rules into the file (heredoc appends the rule block)
        cmd = f"""cat <<EOF >> /tmp/crush.map.txt
rule {rule_name} {"{"}
{rules}
{"}"}
EOF"""
        node.exec_command(cmd=cmd)
        # Changing back the text file into bin
        cmd = "/bin/crushtool -c /tmp/crush.map.txt -o /tmp/crush2.map.bin"
        node.exec_command(cmd=cmd)
        # Setting the new crush map
        cmd = "/bin/ceph osd setcrushmap -i /tmp/crush2.map.bin"
        node.exec_command(cmd=cmd)
        log.info(f"Crush rule : {rule_name} added successfully")
        return True
    except Exception as err:
        # Best-effort: any failure in the pipeline is logged and reported as False.
        log.error("Failed to set the crush rules")
        log.error(err)
        return False
def sort_osd_sites(all_osd_details: dict) -> tuple:
    """
    Sorts the OSD's present such that the weights on two sites remains the same.

    OSDs are paired by equal weight; one of each pair goes to site A, the
    other to site B.  Down OSDs are skipped (with an error logged).

    Args:
        all_osd_details: dictionary of OSD's containing the details
         eg : {'2': {'weight': 0.01459, 'state': 'up', 'name': 'osd.2'},
          '7': {'weight': 0.01459, 'state': 'up', 'name': 'osd.7'}}

    Returns: Tuple of lists, containing the OSD list for the 2 sites
        eg : ([1, 2, 3, 4, 5], [6, 7, 8, 9, 0])
    """
    site_a_osds = []
    site_b_osds = []
    osd_list = list(all_osd_details.keys())
    # distributing the OSD's into two sites such that both sites have equal weight
    while len(osd_list) > 1:
        candidate = osd_list.pop()
        if all_osd_details[candidate]["state"] != "up":
            log.error(f"OSD : {candidate} is not up")
            break
        paired = False
        # BUGFIX: iterate over a snapshot — the original mutated osd_list
        # (osd_list.remove) while iterating it, silently skipping elements.
        for peer in list(osd_list):
            if all_osd_details[peer]["state"] != "up":
                log.error(f"OSD : {peer} is not up")
                osd_list.remove(peer)
                continue
            if (
                all_osd_details[candidate]["weight"]
                == all_osd_details[peer]["weight"]
            ):
                osd_list.remove(peer)
                site_a_osds.append(candidate)
                site_b_osds.append(peer)
                paired = True
                break
        if not paired:
            log.error(f"no peer OSD for: {candidate} found")
    log.info(
        f"Proposed Site-A OSD's : {site_a_osds}\nProposed Site-B OSD's : {site_b_osds}"
    )
    return site_a_osds, site_b_osds
def set_osd_sites(
node: CephAdmin, osds: list, site: int, all_osd_details: | |
1. From a histogram (i.e. counts on a grid)::
h,edges = numpy.histogramdd(...)
D = Density(h, edges, parameters={'isDensity': False}, units={'length': 'A'})
D.make_density()
2. From a saved density file (e.g. in OpenDX format), where the lengths are
in Angstrom and the density in 1/A**3::
D = Density("density.dx")
3. From a saved density file (e.g. in OpenDX format), where the lengths are
in Angstrom and the density is measured relative to the density of water
at ambient conditions::
D = Density("density.dx", units={'density': 'water'})
4. From a saved *histogram* (less common, but in order to demonstrate the
*parameters* keyword) where the lengths are in nm::
D = Density("counts.dx", parameters={'isDensity': False}, units={'length': 'nm'})
D.make_density()
D.convert_length('Angstrom^{-3}')
D.convert_density('water')
After the final step, ``D`` will contain a density on a grid measured in
Ångstrom, with the density values itself measured relative to the
density of water.
:class:`Density` objects can be algebraically manipulated (added,
subtracted, multiplied, ...) but there are *no sanity checks* in place to
make sure that units, metadata, etc are compatible!
.. Note::
It is suggested to construct the Grid object from a histogram,
to supply the appropriate length unit, and to use
:meth:`Density.make_density` to obtain a density. This ensures
that the length- and the density unit correspond to each other.
"""
def __init__(self, *args, **kwargs):
    """Initialize the density grid, choosing sensible unit defaults.

    When constructed from a file name (a string first argument or a
    string ``grid`` keyword) the stored data are assumed to already be a
    density; otherwise they are treated as a histogram of counts.
    """
    default_length = MDAnalysis.core.flags['length_unit']
    parameters = kwargs.pop('parameters', {})

    # try to be smart: when reading from a file then it is likely that this
    # is a density
    from_file = (len(args) > 0 and isinstance(args[0], string_types)) \
        or isinstance(kwargs.get('grid', None), string_types)
    parameters.setdefault('isDensity', from_file)

    units = kwargs.pop('units', {})
    units.setdefault('length', default_length)
    units.setdefault('density',
                     default_length if parameters['isDensity'] else None)

    super(Density, self).__init__(*args, **kwargs)

    self.parameters = parameters  # isDensity: set by make_density()
    self.units = units
def _check_set_unit(self, u):
    """Check and set units.

    First check that all units and their values in the dict `u` are valid
    and then set the object's units attribute.

    Parameters
    ----------
    u : dict
        ``{unit_type : value, ...}``

    Raises
    ------
    ValueError
        if unit types or unit values are not recognized or if required
        unit types are not in :attr:`units`
    """
    # all this unit crap should be a class...
    try:
        for unit_type, value in u.items():
            if value is None:  # check here, too iffy to use dictionary[None]=None
                self.units[unit_type] = None
                continue
            try:
                units.conversion_factor[unit_type][value]
                self.units[unit_type] = value
            except KeyError:
                raise ValueError('Unit ' + str(value) + ' of type ' + str(unit_type) + ' is not recognized.')
    except AttributeError:
        # `u` had no .items(), i.e. it was not a mapping.
        # BUG FIX: the message previously ended with an unbalanced quote
        # ('"density.') -- now reads correctly.
        errmsg = '"unit" must be a dictionary with keys "length" and "density".'
        logger.fatal(errmsg)
        raise ValueError(errmsg)
    # need at least length and density (can be None)
    if 'length' not in self.units:
        raise ValueError('"unit" must contain a unit for "length".')
    if 'density' not in self.units:
        self.units['density'] = None
def make_density(self):
    """Convert the grid (a histogram, counts in a cell) to a density (counts/volume).

    This method changes the grid irrevocably.  For a probability density,
    manually divide by :meth:`grid.sum`.  If this is already a density, a
    warning is issued and nothing is done, so calling `make_density`
    multiple times does not do any harm.
    """
    if self.parameters['isDensity']:
        msg = "Running make_density() makes no sense: Grid is already a density. Nothing done."
        logger.warning(msg)
        warnings.warn(msg)
        return

    # Divide the counts by each cell's volume.  The volume is the outer
    # product of the bin widths along every axis; broadcasting one axis at
    # a time mirrors what numpy.histogramdd does for general n-D grids.
    bin_widths = [np.diff(edge_arr) for edge_arr in self.edges]
    ndim = len(self.edges)
    for axis, widths in enumerate(bin_widths):
        bcast_shape = np.ones(ndim, int)
        bcast_shape[axis] = len(widths)
        self.grid /= widths.reshape(bcast_shape)

    self.parameters['isDensity'] = True
    # see units.densityUnit_factor for units
    self.units['density'] = self.units['length'] + "^{-3}"
def convert_length(self, unit='Angstrom'):
    """Convert Grid object to the new `unit`.

    Parameters
    ----------
    unit : str (optional)
        unit that the grid should be converted to: one of
        "Angstrom", "nm"

    Notes
    -----
    This changes the edges but will not change the density; it is the
    user's responsibility to supply the appropriate unit if the Grid object
    is constructed from a density. It is suggested to start from a
    histogram and a length unit and use :meth:`make_density`.
    """
    if unit == self.units['length']:
        return  # already in the requested unit; nothing to do
    factor = units.get_conversion_factor('length', self.units['length'], unit)
    self.edges = [factor * edge_arr for edge_arr in self.edges]
    self.units['length'] = unit
    self._update()  # needed to recalculate midpoints and origin
def convert_density(self, unit='Angstrom'):
    """Convert the density to the physical units given by `unit`.

    Parameters
    ----------
    unit : str (optional)
        The target unit that the density should be converted to.
        `unit` can be one of the following:

        ============= ===============================================================
        name          description of the unit
        ============= ===============================================================
        Angstrom^{-3} particles/A**3
        nm^{-3}       particles/nm**3
        SPC           density of SPC water at standard conditions
        TIP3P         ... see :data:`MDAnalysis.units.water`
        TIP4P         ... see :data:`MDAnalysis.units.water`
        water         density of real water at standard conditions (0.997 g/cm**3)
        Molar         mol/l
        ============= ===============================================================

    Raises
    ------
    RuntimeError
        If the density does not have a unit associated with it to begin
        with (i.e., is not a density) then no conversion can take place.
    ValueError
        for unknown `unit`.

    Notes
    -----
    (1) This method only works if there is already a length unit associated with the
        density; otherwise raises :exc:`RuntimeError`
    (2) Conversions always go back to unity so there can be rounding
        and floating point artifacts for multiple conversions.
    """
    if not self.parameters['isDensity']:
        # BUG FIX: message used to read "converty_density()".
        errmsg = 'The grid is not a density so convert_density() makes no sense.'
        logger.fatal(errmsg)
        raise RuntimeError(errmsg)
    if unit == self.units['density']:
        return
    try:
        self.grid *= units.get_conversion_factor('density',
                                                 self.units['density'], unit)
    except KeyError:
        raise ValueError("The name of the unit ({0!r} supplied) must be one of:\n{1!r}".format(unit, units.conversion_factor['density'].keys()))
    self.units['density'] = unit
def __repr__(self):
    """Short description: kind of grid (density vs histogram) and bin shape."""
    kind = 'density' if self.parameters['isDensity'] else 'histogram'
    return '<Density ' + kind + ' with ' + str(self.grid.shape) + ' bins>'
def _set_user_grid(gridcenter, xdim, ydim, zdim, smin, smax):
"""Helper function to set the grid dimensions to user defined values
Parameters
----------
gridcenter : numpy ndarray, float32
3 element ndarray containing the x, y and z coordinates of the grid
box center
xdim : float
Box edge length in the x dimension
ydim : float
Box edge length in the y dimension
zdim : float
Box edge length in the y dimension
smin : numpy ndarray, float32
Minimum x,y,z coordinates for the input selection
smax : numpy ndarray, float32
Maximum x,y,z coordinates for the input selection
Returns
-------
umin : numpy ndarray, float32
Minimum x,y,z coordinates of the user defined grid
umax : numpy ndarray, float32
Maximum x,y,z coordinates of the user defined grid
"""
# Check user inputs
try:
gridcenter = np.asarray(gridcenter, dtype=np.float32)
except ValueError:
raise ValueError("Non-number values assigned to gridcenter")
if gridcenter.shape != (3,):
raise ValueError("gridcenter must be a 3D coordinate")
try:
xyzdim = np.array([xdim, ydim, zdim], dtype=np.float32)
except ValueError:
raise ValueError("xdim, ydim, and zdim must be numbers")
# Set min/max by shifting by half the edge length of each dimension
umin = gridcenter - xyzdim/2
umax = gridcenter + xyzdim/2
# Here we test if coords of selection fall outside of the defined grid
# if this happens, we warn users they may want to resize their grids
if any(smin < umin) or any(smax > umax):
msg = ("Atom selection does not fit grid --- "
"you may want to define a larger box")
warnings.warn(msg)
logger.warning(msg)
return umin, umax
def density_from_Universe(universe, delta=1.0, atomselection='name OH2',
start=None, stop=None, step=None,
metadata=None, padding=2.0, cutoff=0, soluteselection=None,
use_kdtree=True, update_selection=False,
verbose=False, interval=1, quiet=None,
parameters=None,
gridcenter=None, xdim=None, ydim=None, zdim=None):
"""Create a density grid from a :class:`MDAnalysis.Universe` object.
The trajectory is read, frame by frame, and the atoms selected with `atomselection` are
histogrammed on a grid with spacing `delta`.
Parameters
----------
universe : MDAnalysis.Universe
:class:`MDAnalysis.Universe` object with a trajectory
atomselection : str (optional)
selection string (MDAnalysis syntax) for the species to be analyzed
["name OH2"]
delta : float (optional)
bin size for the density grid in Angstroem (same in x,y,z) [1.0]
start : int (optional)
stop : int (optional)
step : int (optional)
Slice the trajectory as ``trajectory[start:stop:step]``; default
is to read the whole trajectory.
metadata : dict. optional
`dict` of additional data to | |
<filename>dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/transforms.py
"""
Implement transformation on Numba IR
"""
from __future__ import absolute_import, print_function
from collections import namedtuple, defaultdict
import logging
from numba.analysis import compute_cfg_from_blocks, find_top_level_loops
from numba import ir, errors, ir_utils
from numba.analysis import compute_use_defs
_logger = logging.getLogger(__name__)
def _extract_loop_lifting_candidates(cfg, blocks):
    """
    Returns a list of loops that are candidate for loop lifting

    A loop qualifies only if it is well formed: all exits lead to a single
    common target, it has a single entry block, and it contains no
    ``yield`` (generators cannot be looplifted).
    """
    # check well-formed-ness of the loop
    def same_exit_point(loop):
        "all exits must point to the same location"
        outedges = set()
        for k in loop.exits:
            succs = set(x for x, _ in cfg.successors(k))
            if not succs:
                # If the exit point has no successor, it contains an return
                # statement, which is not handled by the looplifting code.
                # Thus, this loop is not a candidate.
                _logger.debug("return-statement in loop.")
                return False
            outedges |= succs
        ok = len(outedges) == 1
        _logger.debug("same_exit_point=%s (%s)", ok, outedges)
        return ok

    def one_entry(loop):
        "there is one entry"
        ok = len(loop.entries) == 1
        _logger.debug("one_entry=%s", ok)
        return ok

    def cannot_yield(loop):
        "cannot have yield inside the loop"
        # scan every instruction of every block belonging to the loop
        insiders = set(loop.body) | set(loop.entries) | set(loop.exits)
        for blk in map(blocks.__getitem__, insiders):
            for inst in blk.body:
                if isinstance(inst, ir.Assign):
                    if isinstance(inst.value, ir.Yield):
                        _logger.debug("has yield")
                        return False
        _logger.debug("no yield")
        return True

    _logger.info('finding looplift candidates')
    # the check for cfg.entry_point in the loop.entries is to prevent a bad
    # rewrite where a prelude for a lifted loop would get written into block -1
    # if a loop entry were in block 0
    candidates = []
    for loop in find_top_level_loops(cfg):
        _logger.debug("top-level loop: %s", loop)
        if (same_exit_point(loop) and one_entry(loop) and cannot_yield(loop) and
                cfg.entry_point() not in loop.entries):
            candidates.append(loop)
            _logger.debug("add candidate: %s", loop)
    return candidates
def find_region_inout_vars(blocks, livemap, callfrom, returnto, body_block_ids):
    """Find input and output variables to a block region.

    Inputs are the variables live at ``callfrom``; outputs are those live
    at ``returnto``.  Both are restricted to variables actually used or
    defined inside the region, which saves having to create something
    valid to run through postproc to achieve similar.
    """
    region_blocks = {bid: blocks[bid] for bid in body_block_ids}
    usedefs = compute_use_defs(region_blocks)

    used_vars = set()
    for names in usedefs.usemap.values():
        used_vars |= names
    def_vars = set()
    for names in usedefs.defmap.values():
        def_vars |= names
    used_or_defined = used_vars | def_vars

    # note: sorted for stable ordering
    inputs = sorted(set(livemap[callfrom]) & used_or_defined)
    outputs = sorted(set(livemap[returnto]) & used_or_defined & def_vars)
    return inputs, outputs
_loop_lift_info = namedtuple('loop_lift_info',
'loop,inputs,outputs,callfrom,returnto')
def _loop_lift_get_candidate_infos(cfg, blocks, livemap):
    """
    Returns information on looplifting candidates.

    For each liftable loop (see ``_extract_loop_lifting_candidates``) this
    determines the calling block, the block to return to after the loop,
    and the live-in/live-out variables, packed into a ``_loop_lift_info``.
    """
    loops = _extract_loop_lifting_candidates(cfg, blocks)
    loopinfos = []
    for loop in loops:
        [callfrom] = loop.entries   # requirement checked earlier
        an_exit = next(iter(loop.exits))  # anyone of the exit block
        if len(loop.exits) > 1:
            # Pre-Py3.8 may have multiple exits
            [(returnto, _)] = cfg.successors(an_exit)  # requirement checked earlier
        else:
            # Post-Py3.8 DO NOT have multiple exits
            returnto = an_exit

        local_block_ids = set(loop.body) | set(loop.entries)
        inputs, outputs = find_region_inout_vars(
            blocks=blocks,
            livemap=livemap,
            callfrom=callfrom,
            returnto=returnto,
            body_block_ids=local_block_ids,
        )
        lli = _loop_lift_info(loop=loop, inputs=inputs, outputs=outputs,
                              callfrom=callfrom, returnto=returnto)
        loopinfos.append(lli)
    return loopinfos
def _loop_lift_modify_call_block(liftedloop, block, inputs, outputs, returnto):
    """
    Transform calling block from top-level function to call the lifted loop.

    Returns a fresh block (same scope/location as `block`) whose body is a
    call into `liftedloop` followed by a jump to `returnto`.
    """
    replacement = ir.Block(scope=block.scope, loc=block.loc)
    ir_utils.fill_block_with_call(
        newblock=replacement,
        callee=liftedloop,
        label_next=returnto,
        inputs=inputs,
        outputs=outputs,
    )
    return replacement
def _loop_lift_prepare_loop_func(loopinfo, blocks):
    """
    Inplace transform loop blocks for use as lifted loop.

    Adds a prologue block that unpacks the inputs and rewrites the
    return-to block into an epilogue that packs up the outputs.
    """
    entry = blocks[loopinfo.callfrom]
    scope = entry.scope
    loc = entry.loc

    # Lowering assumes the first block to be the one with the smallest offset
    prologue_label = min(blocks) - 1
    blocks[prologue_label] = ir_utils.fill_callee_prologue(
        block=ir.Block(scope=scope, loc=loc),
        inputs=loopinfo.inputs,
        label_next=loopinfo.callfrom,
    )
    blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue(
        block=ir.Block(scope=scope, loc=loc),
        outputs=loopinfo.outputs,
    )
def _loop_lift_modify_blocks(func_ir, loopinfo, blocks,
                             typingctx, targetctx, flags, locals):
    """
    Modify the block inplace to call to the lifted-loop.
    Returns a dictionary of blocks of the lifted-loop.
    """
    # imported locally -- presumably to avoid a circular import; confirm
    from numba.dispatcher import LiftedLoop

    # Copy loop blocks
    loop = loopinfo.loop
    loopblockkeys = set(loop.body) | set(loop.entries)
    if len(loop.exits) > 1:
        # Pre-Py3.8 may have multiple exits
        loopblockkeys |= loop.exits
    loopblocks = dict((k, blocks[k].copy()) for k in loopblockkeys)
    # Modify the loop blocks
    _loop_lift_prepare_loop_func(loopinfo, loopblocks)

    # Create a new IR for the lifted loop
    lifted_ir = func_ir.derive(blocks=loopblocks,
                               arg_names=tuple(loopinfo.inputs),
                               arg_count=len(loopinfo.inputs),
                               force_non_generator=True)
    liftedloop = LiftedLoop(lifted_ir,
                            typingctx, targetctx, flags, locals)

    # modify for calling into liftedloop
    callblock = _loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom],
                                             loopinfo.inputs, loopinfo.outputs,
                                             loopinfo.returnto)
    # remove blocks
    for k in loopblockkeys:
        del blocks[k]
    # update main interpreter callsite into the liftedloop
    blocks[loopinfo.callfrom] = callblock
    return liftedloop
def loop_lifting(func_ir, typingctx, targetctx, flags, locals):
    """
    Loop lifting transformation.
    Given a interpreter `func_ir` returns a 2 tuple of
    `(toplevel_interp, [loop0_interp, loop1_interp, ....])`
    """
    blocks = func_ir.blocks.copy()
    cfg = compute_cfg_from_blocks(blocks)
    loopinfos = _loop_lift_get_candidate_infos(
        cfg, blocks, func_ir.variable_lifetime.livemap)

    if loopinfos:
        _logger.debug('loop lifting this IR with %d candidates:\n%s',
                      len(loopinfos), func_ir.dump_to_string())

    # lift each candidate loop out of `blocks` (mutated in place)
    loops = [
        _loop_lift_modify_blocks(func_ir, loopinfo, blocks,
                                 typingctx, targetctx, flags, locals)
        for loopinfo in loopinfos
    ]

    # Make main IR
    main = func_ir.derive(blocks=blocks)
    return main, loops
def canonicalize_cfg_single_backedge(blocks):
    """
    Rewrite loops that have multiple backedges.

    Each such loop gets a fresh "tail" block; every backedge is redirected
    to the tail block, which in turn jumps to the loop header, leaving
    exactly one backedge per loop.  Returns a new dict of blocks; modified
    blocks are copies, the originals are left untouched.
    """
    cfg = compute_cfg_from_blocks(blocks)
    newblocks = blocks.copy()

    def new_block_id():
        # next unused block label
        return max(newblocks.keys()) + 1

    def has_multiple_backedges(loop):
        count = 0
        for k in loop.body:
            blk = blocks[k]
            edges = blk.terminator.get_targets()
            # is a backedge?
            if loop.header in edges:
                count += 1
                if count > 1:
                    # early exit
                    return True
        return False

    def yield_loops_with_multiple_backedges():
        for lp in cfg.loops().values():
            if has_multiple_backedges(lp):
                yield lp

    def replace_target(term, src, dst):
        # return a terminator equivalent to `term` with jump target `src`
        # replaced by `dst`
        def replace(target):
            return (dst if target == src else target)

        if isinstance(term, ir.Branch):
            return ir.Branch(cond=term.cond,
                             truebr=replace(term.truebr),
                             falsebr=replace(term.falsebr),
                             loc=term.loc)
        elif isinstance(term, ir.Jump):
            return ir.Jump(target=replace(term.target), loc=term.loc)
        else:
            # terminators without targets (e.g. return) have no backedge
            assert not term.get_targets()
            return term

    def rewrite_single_backedge(loop):
        """
        Add new tail block that gathers all the backedges
        """
        header = loop.header
        tailkey = new_block_id()
        for blkkey in loop.body:
            blk = newblocks[blkkey]
            if header in blk.terminator.get_targets():
                newblk = blk.copy()
                # rewrite backedge into jumps to new tail block
                newblk.body[-1] = replace_target(blk.terminator, header,
                                                 tailkey)
                newblocks[blkkey] = newblk
        # create new tail block
        entryblk = newblocks[header]
        tailblk = ir.Block(scope=entryblk.scope, loc=entryblk.loc)
        # add backedge
        tailblk.append(ir.Jump(target=header, loc=tailblk.loc))
        newblocks[tailkey] = tailblk

    for loop in yield_loops_with_multiple_backedges():
        rewrite_single_backedge(loop)

    return newblocks
def canonicalize_cfg(blocks):
    """
    Rewrite the given blocks to canonicalize the CFG.
    Returns a new dictionary of blocks.

    Currently the only canonicalization step is merging multiple loop
    backedges into one (see :func:`canonicalize_cfg_single_backedge`).
    """
    return canonicalize_cfg_single_backedge(blocks)
def with_lifting(func_ir, typingctx, targetctx, flags, locals):
    """With-lifting transformation

    Rewrite the IR to extract all withs.
    Only the top-level withs are extracted.
    Returns the (the_new_ir, the_lifted_with_ir)
    """
    from numba import postproc

    def dispatcher_factory(func_ir, objectmode=False, **kwargs):
        # Build the dispatcher for one lifted with-region; object-mode
        # regions are forced to pyobject mode and must not loop-lift.
        from numba.dispatcher import LiftedWith, ObjModeLiftedWith

        myflags = flags.copy()
        if objectmode:
            # Lifted with-block cannot looplift
            myflags.enable_looplift = False
            # Lifted with-block uses object mode
            myflags.enable_pyobject = True
            myflags.force_pyobject = True
            myflags.no_cpython_wrapper = False
            cls = ObjModeLiftedWith
        else:
            cls = LiftedWith
        return cls(func_ir, typingctx, targetctx, myflags, locals, **kwargs)

    postproc.PostProcessor(func_ir).run()  # ensure we have variable lifetime
    assert func_ir.variable_lifetime
    vlt = func_ir.variable_lifetime
    blocks = func_ir.blocks.copy()
    # find where with-contexts regions are
    withs = find_setupwiths(blocks)
    cfg = vlt.cfg
    _legalize_withs_cfg(withs, cfg, blocks)
    # For each with-regions, mutate them according to
    # the kind of contextmanager
    sub_irs = []
    for (blk_start, blk_end) in withs:
        body_blocks = []
        for node in _cfg_nodes_in_region(cfg, blk_start, blk_end):
            body_blocks.append(node)

        _legalize_with_head(blocks[blk_start])
        # Find the contextmanager
        cmkind, extra = _get_with_contextmanager(func_ir, blocks, blk_start)
        # Mutate the body and get new IR
        sub = cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end,
                                      body_blocks, dispatcher_factory,
                                      extra)
        sub_irs.append(sub)
    if not sub_irs:
        # Unchanged
        new_ir = func_ir
    else:
        new_ir = func_ir.derive(blocks)
    return new_ir, sub_irs
def _get_with_contextmanager(func_ir, blocks, blk_start):
"""Get the global object used for the context manager
"""
_illegal_cm_msg = "Illegal use of context-manager."
def get_var_dfn(var):
"""Get the definition given a variable"""
return func_ir.get_definition(var)
def get_ctxmgr_obj(var_ref):
"""Return the context-manager object and extra info.
The extra contains the arguments if the context-manager is used
as a call.
"""
# If the contextmanager used as a Call
dfn = func_ir.get_definition(var_ref)
if isinstance(dfn, ir.Expr) and dfn.op == 'call':
args = [get_var_dfn(x) for x in dfn.args]
kws = {k: get_var_dfn(v) for k, v in dfn.kws}
extra = {'args': args, 'kwargs': kws}
var_ref = dfn.func
else:
extra = None
ctxobj = ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref)
# check the contextmanager object
if ctxobj is ir.UNDEFINED:
raise errors.CompilerError(
"Undefined variable used as context manager",
loc=blocks[blk_start].loc,
)
if ctxobj is None:
raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc)
return ctxobj, extra
# Scan the start of the with-region for the contextmanager
for stmt in blocks[blk_start].body:
if isinstance(stmt, ir.EnterWith):
var_ref = stmt.contextmanager
ctxobj, extra = | |
from struct import pack, Struct
from collections import defaultdict
from typing import List, Dict, Tuple, Union, Any
from pyNastran.bdf.cards.collpase_card import collapse_thru_packs
from pyNastran.op2.errors import SixtyFourBitError
from .geom1_writer import write_geom_header, close_geom_table
def write_geom4(op2_file, op2_ascii, obj, endian: bytes=b'<', nastran_format: str='nx') -> None:
    """Writes the GEOM4 table (constraints, sets, SUPORTs, rigid elements).

    Parameters
    ----------
    op2_file : file
        binary OP2 output stream
    op2_ascii : file
        human-readable mirror of the binary output (debugging aid)
    obj : BDF-like object
        supplies ``rigid_elements``, the set/constraint containers and ``log``
    endian : bytes
        struct endianness prefix; ``b'<'`` is little-endian
    nastran_format : str
        'nx' or 'msc' -- some card layouts differ between the two
    """
    if not hasattr(obj, 'rigid_elements'):
        return
    # bucket every candidate card by its card-type name
    loads_by_type = defaultdict(list)  # type: Dict[str, Any]
    for unused_id, rigid_element in obj.rigid_elements.items():
        loads_by_type[rigid_element.type].append(rigid_element)
    for aset in obj.asets:
        assert not isinstance(aset, int), obj.asets
        # ASET
        loads_by_type[aset.type].append(aset)
    for bset in obj.bsets:
        assert not isinstance(bset, int), obj.bsets
        loads_by_type[bset.type].append(bset)
    for cset in obj.csets:
        assert not isinstance(cset, int), obj.csets
        loads_by_type[cset.type].append(cset)
    for qset in obj.qsets:
        assert not isinstance(qset, int), obj.qsets
        loads_by_type[qset.type].append(qset)
    for unused_name, usets in obj.usets.items():
        for uset in usets:
            loads_by_type[uset.type].append(uset)
    for omit in obj.omits:
        assert not isinstance(omit, int), obj.omits
        loads_by_type[omit.type].append(omit)
    for suport in obj.suport:
        loads_by_type[suport.type].append(suport)
    for unused_idi, suport in obj.suport1:
        loads_by_type[suport.type].append(suport)
    for unused_id, spcs in obj.spcs.items():
        for spc in spcs:
            loads_by_type[spc.type].append(spc)
    for unused_id, mpcs in obj.mpcs.items():
        for mpc in mpcs:
            loads_by_type[mpc.type].append(mpc)
    for unused_id, spcadds in obj.spcadds.items():
        for spcadd in spcadds:
            loads_by_type[spcadd.type].append(spcadd)
    for unused_id, mpcadds in obj.mpcadds.items():
        for mpcadd in mpcadds:
            loads_by_type[mpcadd.type].append(mpcadd)
    #for unused_load_id, load in obj.tempds.items():
        #loads_by_type[load.type].append(load)

    # return if no supported cards are found
    skip_cards = {
        #'SUPORT', 'SUPORT1', # suport
        # spcs
        'GMSPC',
        # rigid elements
        'RROD', 'RSSCON',
        # sets
        'CSET1',
        'USET', 'USET1',
    }
    # not defined in DMAP
    not_defined_cards = {'RBAR1'}
    supported_cards = {
        'SUPORT', 'SUPORT1', # suport
        # sets
        'ASET', 'BSET', 'CSET', 'QSET', 'OMIT',
        'ASET1', 'BSET1', 'QSET1', 'OMIT1',
        # rigid
        'RBAR', 'RBE1', 'RBE2', 'RBE3',
        # constraints
        'MPC', 'SPC', 'SPC1', 'SPCADD', 'MPCADD',
    }
    # first pass: decide whether anything writable is present at all
    is_constraints = False
    for card_type in sorted(loads_by_type.keys()):
        if card_type in skip_cards:
            obj.log.warning('skipping GEOM4-%s' % card_type)
            continue
        if card_type in not_defined_cards:
            continue
        if card_type in supported_cards:
            is_constraints = True
            continue
        #break
        obj.log.warning('skipping GEOM4-%s' % card_type)
    #else:
        #return
    if not is_constraints:
        return
    write_geom_header(b'GEOM4', op2_file, op2_ascii)
    # subtable counter; GEOM tables start at -3 by OP2 convention
    itable = -3
    for card_type, cards in sorted(loads_by_type.items()):
        #if card_type in ['SPCD']: # not a GEOM3 load
            #continue
        if card_type in skip_cards or card_type in not_defined_cards:
            continue
        try:
            nbytes = write_card(op2_file, op2_ascii, card_type, cards, endian,
                                nastran_format=nastran_format)
        except:  # pragma: no cover
            obj.log.error('failed GEOM4-%s' % card_type)
            raise
        op2_file.write(pack('i', nbytes))

        itable -= 1
        # subtable-closing record
        data = [
            4, itable, 4,
            4, 1, 4,
            4, 0, 4]
        op2_file.write(pack('9i', *data))
        op2_ascii.write(str(data) + '\n')
    #-------------------------------------
    #print('itable', itable)
    close_geom_table(op2_file, op2_ascii, itable)
    #-------------------------------------
def write_card(op2_file, op2_ascii, card_type: str, cards, endian: bytes,
               nastran_format: str='nx') -> int:
    """Writes one GEOM4 card block and returns the number of bytes written.

    Parameters
    ----------
    card_type : str
        Nastran card name (e.g. 'SPC1', 'RBE2')
    cards : list
        all card objects of this type
    endian : bytes
        struct endianness prefix
    nastran_format : str
        'nx' or 'msc'; the SPC/RBAR layouts differ

    Raises
    ------
    NotImplementedError
        for card types without a writer
    """
    ncards = len(cards)
    if card_type in ['ASET1', 'BSET1', 'CSET1', 'QSET1', 'OMIT1']:
        nbytes = _write_xset1(card_type, cards, ncards, op2_file, op2_ascii,
                              endian)
    elif card_type in ['ASET', 'BSET', 'CSET', 'OMIT', 'QSET']:
        nbytes = _write_xset(card_type, cards, ncards, op2_file, op2_ascii,
                             endian)
    elif card_type == 'SUPORT':
        # variable-length: (nid, component) pairs per card
        key = (5601, 56, 14)
        data = []
        fmt = endian
        for suport in cards:
            datai = []
            nnodes = len(suport.Cs)
            for nid, ci in zip(suport.node_ids, suport.Cs):
                assert isinstance(nid, int), suport.get_stats()
                assert isinstance(ci, str), suport.get_stats()
                datai.extend([nid, int(ci)])
            fmt += b'%ii' % (nnodes * 2)
            data.extend(datai)
            op2_ascii.write(' SUPORT data=%s\n' % str(datai))
        nfields = len(data)
        nbytes = write_header_nvalues(card_type, nfields, key, op2_file, op2_ascii)
        op2_file.write(pack(fmt, *data))
        del data, fmt
    elif card_type == 'SUPORT1':
        # variable-length: conid + (nid, component) pairs, -1 terminated
        key = (10100, 101, 472)
        data = []
        fmt = endian
        for suport1 in cards:
            suport1i = [suport1.conid]
            nnodes = len(suport1.Cs)
            for nid, ci in zip(suport1.node_ids, suport1.Cs):
                assert isinstance(nid, int), suport1.get_stats()
                assert isinstance(ci, int), suport1.get_stats()
                suport1i.extend([nid, ci])
            suport1i.append(-1)
            op2_ascii.write(' SUPORT1 data=%s\n' % str(suport1i))
            fmt += b'%ii' % (2 * nnodes + 2)
            data.extend(suport1i)
        nfields = len(data)
        nbytes = write_header_nvalues(card_type, nfields, key, op2_file, op2_ascii)
        op2_file.write(pack(fmt, *data))
        del data, fmt
    elif card_type == 'MPC':
        # conid + (nid, coeff, comp) triples, terminated by (-1,-1,-1)
        key = (4901, 49, 17)
        data = []
        fmt = endian
        for mpc in cards:
            datai = [mpc.conid, ]
            fmt += b'i' + b'ifi' * len(mpc.coefficients) + b'3i'
            for nid, coeff, comp in zip(mpc.node_ids, mpc.coefficients, mpc.components):
                datai += [nid, coeff, int(comp)]
            datai += [-1, -1, -1]
            op2_ascii.write(' MPC data=%s\n' % str(datai))
            data += datai
        nfields = len(data)
        nbytes = write_header_nvalues(card_type, nfields, key, op2_file, op2_ascii)
        op2_file.write(pack(fmt, *data))
        del data, fmt
    elif card_type == 'RBE1':
        nbytes = _write_rbe1(card_type, cards, ncards, op2_file, op2_ascii,
                             endian)
    elif card_type == 'RBE2':
        nbytes = _write_rbe2(card_type, cards, ncards, op2_file, op2_ascii,
                             endian)
    elif card_type == 'RBE3':
        nbytes = _write_rbe3(card_type, cards, ncards, op2_file, op2_ascii,
                             endian)
    elif card_type == 'RBAR':
        nbytes = _write_rbar(card_type, cards, ncards, op2_file, op2_ascii,
                             endian, nastran_format=nastran_format)
    elif card_type == 'SPC1':
        nbytes = _write_spc1(card_type, cards, ncards, op2_file, op2_ascii,
                             endian)
    elif card_type in ['SPCADD', 'MPCADD']:
        if card_type == 'SPCADD':
            key = (5491, 59, 13)
        elif card_type == 'MPCADD':
            key = (4891, 60, 83)
        else:  # pragma: no cover
            raise NotImplementedError(card_type)
        #SPCADD(5491,59,13)
        #MPCADD(4891,60,83)
        #[2 1 10 -1]
        #[3 1 -1]
        data = []
        for spcadd in cards:
            # conid + referenced set ids, -1 terminated
            datai = [spcadd.conid] + spcadd.ids + [-1]
            op2_ascii.write(' %s data=%s\n' % (card_type, str(datai)))
            data += datai
        nfields = len(data)
        nbytes = write_header_nvalues(card_type, nfields, key, op2_file, op2_ascii)
        spack = Struct(endian + b'%ii' % nfields)
        op2_file.write(spack.pack(*data))
    elif card_type == 'SPC':
        nbytes = _write_spc(card_type, cards, ncards, op2_file, op2_ascii, endian,
                            nastran_format=nastran_format)
    #elif card_type == 'TEMPD':
        #key = (5641, 65, 98)
        #nfields = 6
        #spack = Struct(endian + b'if')
        #nbytes = write_header(card_type, nfields, ncards, key, op2_file, op2_ascii)
        #for load in cards:
            #print(load.get_stats())
            ##sid, T = data
            #data = [load.sid, load.temperature]
            #op2_ascii.write(' TEMPD data=%s\n' % str(data))
            #op2_file.write(spack.pack(*data))
    else:  # pragma: no cover
        card0 = cards[0]
        raise NotImplementedError(card0)
    return nbytes
def write_header_nvalues(name: str, nvalues: int, key: Tuple[int, int, int], op2_file, op2_ascii):
    """a more precise version of write_header for when card lengths can vary

    Writes the marker/byte-count/key preamble for a card block and returns
    the byte count of the block payload.
    """
    total_values = nvalues + 3  # +3 comes from the keys
    nbytes = total_values * 4   # 4 bytes per 32-bit value
    op2_file.write(pack('3i', 4, total_values, 4))
    op2_file.write(pack('i', nbytes))
    op2_file.write(pack('3i', *key))
    op2_ascii.write('%s %s\n' % (name, str(key)))
    return nbytes
def write_header(name: str, nfields: int, ncards: int, key: Tuple[int, int, int],
                 op2_file, op2_ascii) -> int:
    """writes the op2 card header given the number of cards and the fields per card"""
    # every card has the same length, so the total is fields-per-card * cards
    return write_header_nvalues(name, nfields * ncards, key, op2_file, op2_ascii)
def _write_spc(card_type: str, cards, ncards: int, op2_file, op2_ascii,
               endian: bytes, nastran_format: str='nx') -> int:
    """writes an SPC

    The MSC layout has 5 fields per (node, component) entry, the NX layout
    has 4; both pack one record per constrained node.
    """
    key = (5501, 55, 16)
    #nastran_format = 'msc'
    max_spc_id = max([spc.conid for spc in cards])
    max_nid = max([max(spc.node_ids) for spc in cards])
    # OP2 integer fields are 32-bit; wider ids cannot be represented
    if max_spc_id > 99999999:
        raise SixtyFourBitError(f'64-bit OP2 writing is not supported; max spc_id={max_spc_id}')
    if max_nid > 99999999:
        raise SixtyFourBitError(f'64-bit OP2 writing is not supported; max SPC nid={max_nid}')

    data = []  # type: List[Union[int, float]]
    if nastran_format == 'msc':
        # MSC
        # SPC(5501,55,16) - Record 44
        #
        # 1 SID I Set identification number
        # 2 ID I Grid or scalar point identification number
        # 3 C I Component numbers
        # 4 UNDEF none Not used
        # 5 D RX Enforced displacement
        nfields = 5
        # NOTE(review): a header is written here AND again by
        # write_header_nvalues below, so the MSC branch appears to emit the
        # header twice -- confirm against a reference MSC-written OP2.
        nbytes = write_header(card_type, nfields, ncards, key, op2_file, op2_ascii)
        for spc in cards:
            node_ids = spc.node_ids
            for nid, comp, enforcedi in zip(node_ids, spc.components, spc.enforced):
                datai = [spc.conid, nid, int(comp), 0, enforcedi]
                op2_ascii.write(' SPC data=%s\n' % str(datai))
                data += datai
        nfields = len(data)
        nbytes = write_header_nvalues(card_type, nfields, key, op2_file, op2_ascii)
        fmt = endian + b'4if' * (nfields // 5)
        op2_file.write(pack(fmt, *data))
    elif nastran_format == 'nx':
        # NX
        # SPC(5501,55,16) - Record 44
        #
        # 1 SID I Set identification number
        # 2 ID I Grid or scalar point identification number
        # 3 C I Component numbers
        # 4 D RS Enforced displacement
        for spc in cards:
            node_ids = spc.node_ids
            #assert len(node_ids) == 1, spc.get_stats()
            datai = []
            for nid, comp, enforcedi in zip(node_ids, spc.components, spc.enforced):
                datai = [spc.conid, nid, int(comp), enforcedi]
                op2_ascii.write(' SPC data=%s\n' % str(datai))
                data += datai
        nfields = len(data)
        nbytes = write_header_nvalues(card_type, nfields, key, op2_file, op2_ascii)
        fmt = endian + b'3if' * (nfields // 4)
        op2_file.write(pack(fmt, *data))
    else:  # pragma: no cover
        raise RuntimeError(f'nastran_format={nastran_format} not msc, nx')
    return nbytes
def _write_rbe1(card_type: str, cards, unused_ncards: int, op2_file, op2_ascii,
endian: bytes) -> int:
"""
RBE1(6801,68,294) - Record 23
MSC/NX
Word Name Type Description
1 EID I Element identification number
2 GN I Grid point identification number for independent degrees-of-freedom
3 CN I Component numbers of independent degrees-of-freedom
Words 2 through 3 repeat until (-2,-2) occurs
4 GM I Grid point identification number for dependent degrees-of-freedom
5 | |
<reponame>quanpands/wflow
"""
wf_DynamicFramework
-------------------
This is a replacement for the standard pcraster/python DynamicFramework class.\
It provides extra functionality to simplify linking the models built in the framework
with other models. The provided functionality allows external programs to control
and interrogate the model.
"""
# TODO: Remove command-line options from models such as -F that is now in the ini
# TODO: Fix timestep not forewarding in BMI runs (for reading writing maps)
import calendar
import configparser
import csv
import datetime
import glob
import logging
import shutil
import traceback
from collections import namedtuple
from functools import reduce
import numpy as np
import pcraster as pcr
import pcraster.framework
from wflow import __version__
from wflow.wf_netcdfio import *
from . import pcrut
from . import wflow_adapt
from .wflow_lib import *
import time # last to prevent clobbering by *
def log_uncaught_exceptions(ex_cls, ex, tb):
    """Log an uncaught exception's traceback and message via the root logger.

    Installed as ``sys.excepthook`` so crashes end up in the log instead of
    only on stderr.

    :param ex_cls: exception class
    :param ex: exception instance
    :param tb: traceback object (may be ``None``)
    """
    # BUG FIX: dropped the pointless ``global logging`` declaration -- the
    # module-level ``logging`` import is only read here, never rebound.
    logging.error("".join(traceback.format_tb(tb)))
    logging.error("{0}: {1}".format(ex_cls, ex))
# Route any uncaught exception through the logging system.
sys.excepthook = log_uncaught_exceptions
# Attach a do-nothing handler so library logging does not emit
# "No handlers could be found" warnings before logging is configured.
logging.getLogger("foo").addHandler(logging.NullHandler())
class runDateTimeInfo:
"""
class to maintain and retrieve date/time info of the model run. In order to support
different views on date/time the class supports both a step method (each input time is a timestep) and
an interval-based method (each model timestep is the interval between two input timesteps)
"""
def __init__(
    self,
    datetimestart=dt.datetime(1990, 1, 1),
    datetimeend=dt.datetime(1990, 1, 5),
    timestepsecs=86400,
    mode="steps",
):
    """Initialize run date/time bookkeeping.

    :param datetimestart: datetime of the first input timestep
    :param datetimeend: datetime of the last input timestep
    :param timestepsecs: model timestep length in seconds
    :param mode: "steps" (each input time is a timestep) or an
        interval-based interpretation otherwise
    """
    self.runStartTime = datetimestart
    self.runEndTime = datetimeend
    self.timeStepSecs = timestepsecs
    self.currentTimeStep = 0
    self.lastTimeStep = 0
    self.startadjusted = 0
    self.startendadjusted = 0
    self.currentmode = mode
    self.callstopupdate = 0  # number of times update() has been called
    if mode == "steps":
        # in step mode the state time lies one timestep before the start
        self.runStateTime = self.runStartTime - datetime.timedelta(
            seconds=self.timeStepSecs
        )
    else:
        self.runStateTime = self.runStartTime
    self.setByBMI = False
    self.currentDateTime = self.runStateTime
    self.outPutStartTime = self.runStateTime + datetime.timedelta(
        seconds=self.timeStepSecs
    )
    # number of model timesteps between state time and end time,
    # computed via UTC epoch seconds
    self.runTimeSteps = (
        calendar.timegm(self.runEndTime.utctimetuple())
        - calendar.timegm(self.runStateTime.utctimetuple())
    ) / self.timeStepSecs
    self.currentMonth = self.currentDateTime.month
    self.currentYday = self.currentDateTime.timetuple().tm_yday
    self.currentHour = self.currentDateTime.hour
    self.nextDateTime = self.currentDateTime + datetime.timedelta(
        seconds=self.timeStepSecs
    )
    self.lastTimeStep = self.runTimeSteps + self.currentTimeStep
def __str__(self):
a = self.__dict__
return str(a)
def update(
self,
timestepsecs=None,
datetimestart=None,
datetimeend=None,
currentTimeStep=None,
currentDatetime=None,
runTimeSteps=None,
mode="steps",
incrementStep=False,
setByBMI=False,
):
"""
Updates the content of the framework date/time object. Use only one input parameter per call. or runTimeSteps and datatimestart at the same time
use the mode option to switch between steps and intervals ('steps' or 'intervals')
:param timestepsecs:
:param datetimestart: data time start of the input data
:param datetimeend:
:param currentTimeStep:
:param currentDatetime:
:return:
"""
self.currentmode = mode
self.callstopupdate = self.callstopupdate + 1
if setByBMI:
self.setByBMI = True
if timestepsecs and not runTimeSteps:
self.timeStepSecs = timestepsecs
self.runTimeSteps = (
calendar.timegm(self.runEndTime.utctimetuple())
- calendar.timegm(self.runStateTime.utctimetuple())
) / self.timeStepSecs
if self.currentmode == "steps":
self.runStateTime = self.runStartTime - datetime.timedelta(
seconds=self.timeStepSecs
)
self.outPutStartTime = self.runStateTime + datetime.timedelta(
seconds=self.timeStepSecs
)
elif timestepsecs and runTimeSteps:
self.timeStepSecs = timestepsecs
self.runTimeSteps = runTimeSteps
if datetimestart:
self.currentTimeStep = 1
# if self.startadjusted
if self.currentmode == "steps":
self.runStartTime = datetimestart
self.startadjusted = 0
self.runStateTime = self.runStartTime - datetime.timedelta(
seconds=self.timeStepSecs
)
else:
# self.runStartTime = datetimestart + datetime.timedelta(seconds=self.timeStepSecs)
self.runStartTime = (
datetimestart
) # + datetime.timedelta(seconds=self.timeStepSecs)
self.startadjusted = 1
self.runStateTime = (
self.runStartTime
) # - datetime.timedelta(seconds=self.timeStepSecs)
self.currentDateTime = self.runStateTime
self.outPutStartTime = self.currentDateTime + datetime.timedelta(
seconds=self.timeStepSecs
)
self.runTimeSteps = (
calendar.timegm(self.runEndTime.utctimetuple())
- calendar.timegm(self.runStateTime.utctimetuple())
) / self.timeStepSecs
if self.runTimeSteps < 1: # End time before start time
self.runTimeSteps = 1
self.runEndTime = self.runStateTime + datetime.timedelta(
seconds=self.timeStepSecs * self.runTimeSteps
)
if datetimestart and runTimeSteps:
self.currentTimeStep = 1
self.currentDateTime = self.runStartTime
if self.currentmode == "steps":
self.runStartTime = datetimestart
self.startadjusted = 0
self.runStateTime = self.runStartTime - datetime.timedelta(
seconds=self.timeStepSecs
)
else:
self.runStartTime = (
datetimestart
) # + datetime.timedelta(seconds=self.timeStepSecs)
self.startadjusted = 1
self.runStateTime = self.runStartTime
self.outPutStartTime = self.runStateTime + datetime.timedelta(
seconds=self.timeStepSecs
)
self.currentDateTime = self.runStartTime
self.runEndTime = self.runStateTime + datetime.timedelta(
seconds=self.timeStepSecs * runTimeSteps
)
if datetimeend:
self.runEndTime = datetimeend
self.runTimeSteps = (
calendar.timegm(self.runEndTime.utctimetuple())
- calendar.timegm(self.runStateTime.utctimetuple())
) / self.timeStepSecs
if self.runTimeSteps < 1: # End time before start time
self.runTimeSteps = 1
self.runStartTime = self.runEndTime - datetime.timedelta(
seconds=self.timeStepSecs * self.runTimeSteps
)
if currentTimeStep and currentTimeStep != self.currentTimeStep:
self.currentTimeStep = currentTimeStep
self.currentDateTime = self.runStateTime + datetime.timedelta(
seconds=self.timeStepSecs * (self.currentTimeStep - 1)
)
if incrementStep:
self.currentTimeStep = self.currentTimeStep + 1
self.currentDateTime = self.currentDateTime + datetime.timedelta(
seconds=self.timeStepSecs
)
if currentDatetime:
self.currentDateTime = currentDatetime
self.currentTimeStep = (
calendar.timegm(self.currentDateTime.utctimetuple())
- calendar.timegm(self.runStateTime.utctimetuple())
) / self.timeStepSecs + 1
self.nextDateTime = self.currentDateTime + datetime.timedelta(
seconds=self.timeStepSecs
)
self.lastTimeStep = self.runTimeSteps
self.currentMonth = self.currentDateTime.month
self.currentYday = self.currentDateTime.timetuple().tm_yday
self.currentHour = self.currentDateTime.hour
class wf_exchnageVariables:
    """
    List of exchange variables.

    The style determines how they are used:
    - 1: read from file like normal
    - 2: set by the api in mem (for consistency this is style 0 in the ini file)
    """

    # Mapping from ini unit codes to human-readable unit strings.
    # Unknown codes are stored unchanged.
    _UNIT_CODES = {
        "0": "mm/timestep",
        "1": "m^3/sec",
        "2": "ma",
        "3": "degree Celcius",
        "4": "mm",
        "5": "-",
    }

    def __init__(self):
        # Each entry is a [name, role, unit] list.
        self.vars = []

    def varexists(self, name):
        """Return 1 if a variable with this name is registered, else 0."""
        # Early return instead of scanning the whole list with a flag.
        for item in self.vars:
            if item[0] == name:
                return 1
        return 0

    def addvar(self, name, role, unit):
        """Register an exchange variable; ignored if the name already exists.

        :param name: variable name
        :param role: role code (0 means "set via the api", see getvarStyle)
        :param unit: unit code ("0".."5"), translated to a unit string
        """
        if not self.varexists(name):
            unit = self._UNIT_CODES.get(unit, unit)
            self.vars.append([name, role, unit])

    def getvars(self):
        """Return the full list of [name, role, unit] entries."""
        return self.vars

    def getvarStyle(self, name):
        """
        returns 2 if this is a input variable to be set from api otherwise 1
        ( in the ini 0 is for in memory variables)
        A bit confusing!!!
        """
        for xx in self.vars:
            # `name in xx` keeps the original `xx.__contains__(name)` semantics:
            # the membership test covers the whole [name, role, unit] entry.
            if name in xx:
                return 2 if xx[1] == 0 else 1
        return 1
class wf_online_stats:
    """Track simple online (streaming) statistics for named variables.

    All bookkeeping is kept in per-variable dictionaries keyed by name.
    """

    def __init__(self):
        self.count = {}
        self.rangecount = {}
        self.result = {}
        self.mode = {}
        self.points = {}
        self.filename = {}
        self.statvarname = {}

    def addstat(self, name, mode="mean", points=30, filename=None):
        """Register a statistic for variable `name`.

        :param name: variable name
        :param mode: statistic type (only "mean" is currently computed)
        :param points: window length used by the running mean
        :param filename: optional output filename
        """
        self.statvarname[name] = "{}_{}_{}".format(name, mode, points)
        self.mode[name] = mode
        self.points[name] = points
        self.count[name] = 0
        self.rangecount[name] = 0
        self.filename[name] = filename

    def getstat(self, data, name):
        """Feed one new map into the statistic and return the current value.

        :param data: new data (pcraster map)
        :param name: variable name previously registered via addstat
        :return: current statistic as a pcraster scalar map
        """
        if self.count[name] == 0:
            # First sample simply initialises the result.
            self.result[name] = data
        elif self.mode[name] == "mean":
            # Exponentially weighted running mean over `points` samples.
            window = self.points[name]
            self.result[name] = (
                self.result[name] * (window - 1) / window + data / window
            )
        self.count[name] += 1
        return pcr.scalar(self.result[name])
class wf_sumavg:
    """Accumulate a user-model variable over timesteps (sum/avg/min/max)."""

    def __init__(self, varname, mode="sum", filename=None):
        """
        Class to hold variable in the usermodel that must be averaged, summed etc.

        :param varname: name of the variable in the user model
        :param mode: one of "sum", "avg", "min", "max"
        :param filename: output filename; defaults to the variable name
        """
        if filename is None:  # idiom fix: `is None` instead of `== None`
            filename = varname
        self.mode = mode
        self.varname = varname
        self.filename = filename
        self.data = []
        self.count = 0
        self.result = []
        # Supported accumulation modes.
        self.availtypes = ["sum", "avg", "min", "max"]

    def add_one(self, data):
        """
        Add a map (timestep) to the accumulator.
        """
        if self.count == 0:
            self.data = data
        # elif chain: the mode checks are mutually exclusive, so avoid
        # re-testing every mode on each call as the original did.
        elif self.mode == "sum" or self.mode == "avg":
            self.data = self.data + data
        elif self.mode == "max":
            self.data = pcr.max(self.data, data)
        elif self.mode == "min":
            self.data = pcr.min(self.data, data)
        self.count = self.count + 1

    def finalise(self):
        """
        Perform final calculations if needed (average, etc) and assign to the
        result variable.
        """
        # Only spatial (pcraster) data is finalised; otherwise self.result
        # keeps its initial [] value.
        if hasattr(self.data, "isSpatial"):
            if self.mode == "sum" or self.mode == "min" or self.mode == "max":
                self.result = self.data
            elif self.mode == "avg":
                self.result = self.data / self.count
class wf_OutputTimeSeriesArea:
def __init__(self, area, oformat="csv", areafunction="average", tformat="steps"):
"""
Replacement timeseries output function for the pcraster framework
area - an area-map to average from
oformat - format of the output file (csv, txt, tss, only csv and tss at the moment)
tformat - steps of datetime (format of the timsteps/stamp)
Step 1: make average of variable using the areaverage function
Step 2: Sample the values from the areas (remember the index so we can do it faster lateron)
step 3: store them in order
"""
self.steps = 0
self.timeformat = tformat
self.area = area
self.areanp = pcr.pcr2numpy(area, 0).copy()
self.oformat = oformat
self.areafunction = areafunction
""" average, total, minimum, maximum, majority"""
self.flatarea, self.idx = np.unique(self.areanp, return_index=True)
# print self.flatarea
# self.flatarea = self.flatarea[np.isfinite(self.flatarea)]
# self.idx = self.idx[np.isfinite(self.flatarea)]
self.fnamelist = []
self.writer = []
self.ofile = []
def closeall(self):
"""
Close all open filepointers
"""
for fp in self.ofile:
fp.close()
self.fnamelist = []
self.writer = []
self.ofile = []
def writestep(self, variable, fname, timestep=None, dtobj=None):
"""
write a single timestep
variable - pcraster map to save to tss
fname - name of the timeseries file
"""
| |
<reponame>Open-EO/openeo-geopyspark-driver
import collections
import json
import logging
import math
import pathlib
import subprocess
import tempfile
from datetime import datetime, date
from functools import partial
from typing import Dict, List, Union, Tuple, Iterable, Callable
import geopyspark as gps
import numpy as np
import pandas as pd
import pyproj
import pytz
import xarray as xr
from geopyspark import TiledRasterLayer, Pyramid, Tile, SpaceTimeKey, SpatialKey, Metadata
from geopyspark.geotrellis import Extent, ResampleMethod
from geopyspark.geotrellis.constants import CellType
from pandas import Series
from py4j.java_gateway import JVMView
from shapely.geometry import Point, Polygon, MultiPolygon, GeometryCollection
import openeo.metadata
from openeo.internal.process_graph_visitor import ProcessGraphVisitor
from openeo.metadata import CollectionMetadata, Band, Dimension
from openeo.udf import UdfData, run_udf_code
from openeo.udf.xarraydatacube import XarrayDataCube, XarrayIO
from openeo_driver.datacube import DriverDataCube
from openeo_driver.datastructs import ResolutionMergeArgs
from openeo_driver.datastructs import SarBackscatterArgs
from openeo_driver.delayed_vector import DelayedVector
from openeo_driver.errors import FeatureUnsupportedException, OpenEOApiException, InternalException, \
ProcessParameterInvalidException
from openeo_driver.save_result import AggregatePolygonResult
from openeo_driver.utils import EvalEnv
from openeogeotrellis.configparams import ConfigParams
from openeogeotrellis.geotrellis_tile_processgraph_visitor import GeotrellisTileProcessGraphVisitor
from openeogeotrellis.utils import to_projected_polygons, log_memory
from openeogeotrellis._version import __version__ as softwareversion
_log = logging.getLogger(__name__)
class GeopysparkCubeMetadata(CollectionMetadata):
    """Collection metadata for a GeoPySpark data cube.

    Adds tracking of the cube's spatial and temporal extent on top of the
    generic :class:`CollectionMetadata`.
    """
    # TODO move to python driver?

    def __init__(
            self, metadata: dict, dimensions: List[Dimension] = None,
            spatial_extent: dict = None, temporal_extent: tuple = None
    ):
        super().__init__(metadata=metadata, dimensions=dimensions)
        self._spatial_extent = spatial_extent
        self._temporal_extent = temporal_extent
        # Keep the temporal dimension's extent in sync with our own.
        if self.has_temporal_dimension():
            self.temporal_dimension.extent = temporal_extent

    def _clone_and_update(
            self, metadata: dict = None, dimensions: List[Dimension] = None,
            spatial_extent: dict = None, temporal_extent: tuple = None, **kwargs
    ) -> 'GeopysparkCubeMetadata':
        """Copy helper: unspecified extents fall back to the current ones."""
        effective_spatial = spatial_extent or self._spatial_extent
        effective_temporal = temporal_extent or self._temporal_extent
        # noinspection PyTypeChecker
        return super()._clone_and_update(
            metadata=metadata, dimensions=dimensions,
            spatial_extent=effective_spatial,
            temporal_extent=effective_temporal,
            **kwargs
        )

    def filter_bbox(self, west, south, east, north, crs) -> 'GeopysparkCubeMetadata':
        """Create new metadata instance with the given spatial extent"""
        # TODO take intersection with existing extent
        extent = {"west": west, "south": south, "east": east, "north": north, "crs": crs}
        return self._clone_and_update(spatial_extent=extent)

    @property
    def spatial_extent(self) -> dict:
        return self._spatial_extent

    def filter_temporal(self, start, end) -> 'GeopysparkCubeMetadata':
        """Create new metadata instance with the given temporal extent"""
        # TODO take intersection with existing extent
        return self._clone_and_update(temporal_extent=(start, end))

    @property
    def temporal_extent(self) -> tuple:
        return self._temporal_extent

    @property
    def opensearch_link_titles(self) -> List[str]:
        """Per-band opensearch link title: first alias if present, else the band name."""
        titles = []
        for band_name, aliases in zip(self.band_dimension.band_names, self.band_dimension.band_aliases):
            titles.append(aliases[0] if aliases else band_name)
        return titles
SpatialExtent = collections.namedtuple("SpatialExtent", ["top", "bottom", "right", "left", "height", "width"])
class GeopysparkDataCube(DriverDataCube):
    # Cube metadata (spatial/temporal extent tracking); set per instance in __init__.
    metadata: GeopysparkCubeMetadata = None
def __init__(
self, pyramid: Pyramid,
metadata: GeopysparkCubeMetadata = None
):
super().__init__(metadata=metadata or GeopysparkCubeMetadata({}))
self.pyramid = pyramid
    def _get_jvm(self) -> JVMView:
        """Return the py4j JVM view of the active Spark context."""
        # TODO: cache this?
        return gps.get_spark_context()._gateway.jvm
def _is_spatial(self):
return self.pyramid.levels[self.pyramid.max_zoom].layer_type == gps.LayerType.SPATIAL
def apply_to_levels(self, func, metadata: GeopysparkCubeMetadata = None) -> 'GeopysparkDataCube':
"""
Applies a function to each level of the pyramid. The argument provided to the function is of type TiledRasterLayer
:param func:
:return:
"""
pyramid = Pyramid({k: func(l) for k, l in self.pyramid.levels.items()})
return GeopysparkDataCube(pyramid=pyramid, metadata=metadata or self.metadata)
def _create_tilelayer(self,contextrdd, layer_type, zoom_level):
jvm = self._get_jvm()
spatial_tiled_raster_layer = jvm.geopyspark.geotrellis.SpatialTiledRasterLayer
temporal_tiled_raster_layer = jvm.geopyspark.geotrellis.TemporalTiledRasterLayer
if layer_type == gps.LayerType.SPATIAL:
srdd = spatial_tiled_raster_layer.apply(jvm.scala.Option.apply(zoom_level),contextrdd)
else:
srdd = temporal_tiled_raster_layer.apply(jvm.scala.Option.apply(zoom_level),contextrdd)
return gps.TiledRasterLayer(layer_type, srdd)
def _apply_to_levels_geotrellis_rdd(self, func, metadata: GeopysparkCubeMetadata = None, target_type = None):
"""
Applies a function to each level of the pyramid. The argument provided to the function is the Geotrellis ContextRDD.
:param func:
:return:
"""
pyramid = Pyramid({
k: self._create_tilelayer(func(l.srdd.rdd(), k), l.layer_type if target_type==None else target_type , k)
for k, l in self.pyramid.levels.items()
})
return GeopysparkDataCube(pyramid=pyramid, metadata=metadata or self.metadata)
    def _data_source_type(self):
        """Return the configured data source type (defaults to "Accumulo")."""
        return self.metadata.get("_vito", "data_source", "type", default="Accumulo")
# TODO: deprecated
def date_range_filter(
self, start_date: Union[str, datetime, date], end_date: Union[str, datetime, date]
) -> 'GeopysparkDataCube':
return self.apply_to_levels(lambda rdd: rdd.filter_by_times([pd.to_datetime(start_date),pd.to_datetime(end_date)]))
def filter_temporal(self, start: str, end: str) -> 'GeopysparkDataCube':
# TODO: is this necessary? Temporal range is handled already at load_collection time
return self.apply_to_levels(
lambda rdd: rdd.filter_by_times([pd.to_datetime(start), pd.to_datetime(end)]),
metadata=self.metadata.filter_temporal(start, end)
)
def filter_bbox(self, west, east, north, south, crs=None, base=None, height=None) -> 'GeopysparkDataCube':
# Bbox is handled at load_collection time
return GeopysparkDataCube(
pyramid=self.pyramid,
metadata=self.metadata.filter_bbox(west=west, south=south, east=east, north=north, crs=crs)
)
    def filter_spatial(self, geometries) -> 'GeopysparkDataCube':
        """No-op: returns the cube unchanged (spatial filtering not implemented here)."""
        # not sure if we can update the metadata here
        return self
def filter_bands(self, bands) -> 'GeopysparkDataCube':
band_indices = [self.metadata.get_band_index(b) for b in bands]
_log.info("filter_bands({b!r}) -> indices {i!r}".format(b=bands, i=band_indices))
return self.apply_to_levels(lambda rdd: rdd.bands(band_indices), metadata=self.metadata.filter_bands(bands))
def rename_dimension(self, source: str, target: str) -> 'GeopysparkDataCube':
return GeopysparkDataCube(pyramid=self.pyramid, metadata=self.metadata.rename_dimension(source, target))
def apply(self, process: str, arguments: dict = {}) -> 'GeopysparkDataCube':
from openeogeotrellis.backend import SingleNodeUDFProcessGraphVisitor, GeoPySparkBackendImplementation
if isinstance(process, dict):
process = GeoPySparkBackendImplementation.accept_process_graph(process)
if isinstance(process, GeotrellisTileProcessGraphVisitor):
#apply should leave metadata intact, so can do a simple call?
# Note: It's not obvious from its name, but `reduce_bands` not only supports reduce operations,
# also `apply` style local unary mapping operations.
return self.reduce_bands(process)
if isinstance(process, SingleNodeUDFProcessGraphVisitor):
udf = process.udf_args.get('udf', None)
context = process.udf_args.get('context', {})
if not isinstance(udf, str):
raise ValueError(
"The 'run_udf' process requires at least a 'udf' string argument, but got: '%s'." % udf)
return self.apply_tiles(udf,context)
elif isinstance(process,str):
#old 04x code path
if 'y' in arguments:
raise NotImplementedError("Apply only supports unary operators,"
" but got {p!r} with {a!r}".format(p=process, a=arguments))
applyProcess = gps.get_spark_context()._jvm.org.openeo.geotrellis.OpenEOProcesses().applyProcess
return self._apply_to_levels_geotrellis_rdd(lambda rdd, k: applyProcess(rdd, process))
else:
raise FeatureUnsupportedException(f"Unsupported: apply with {process}")
    def apply_dimension(self, process, dimension: str, target_dimension: str = None,
                        context: dict = None, env: EvalEnv = None) -> 'DriverDataCube':
        """Apply a process along the given dimension (temporal or bands).

        :param process: process graph dict or process graph visitor
        :param dimension: name of the dimension to apply along
        :param target_dimension: optional dimension to place the result in; the
            band dimension is supported when applying along time
        :param context: additional data passed to the process
        :param env: evaluation environment (used to resolve `context`)
        :raises FeatureUnsupportedException: for unsupported dimensions/processes
        """
        from openeogeotrellis.backend import SingleNodeUDFProcessGraphVisitor, GeoPySparkBackendImplementation
        if isinstance(process, dict):
            process = GeoPySparkBackendImplementation.accept_process_graph(process)
        if isinstance(process, GeotrellisTileProcessGraphVisitor):
            if self.metadata.has_temporal_dimension() and dimension == self.metadata.temporal_dimension.name:
                from openeo_driver.ProcessGraphDeserializer import convert_node
                context = convert_node(context, env=env)
                pysc = gps.get_spark_context()
                if target_dimension == self.metadata.band_dimension.name:
                    # reduce the time dimension into the bands dimension
                    result_collection = self._apply_to_levels_geotrellis_rdd(
                        lambda rdd, level: pysc._jvm.org.openeo.geotrellis.OpenEOProcesses().applyTimeDimensionTargetBands(
                            rdd,
                            process.builder,
                            context if isinstance(context, dict) else {}),
                        target_type=gps.LayerType.SPATIAL)
                    # reduce_dimension returns a new metadata object; assign it back.
                    result_collection.metadata = result_collection.metadata.reduce_dimension(dimension)
                    return result_collection
                else:
                    return self._apply_to_levels_geotrellis_rdd(
                        lambda rdd, level: pysc._jvm.org.openeo.geotrellis.OpenEOProcesses().applyTimeDimension(
                            rdd, process.builder, context if isinstance(context, dict) else {}))
            elif self.metadata.has_band_dimension() and dimension == self.metadata.band_dimension.name:
                return self._apply_bands_dimension(process)
            else:
                raise FeatureUnsupportedException(f"apply_dimension along dimension {dimension} is not supported. These dimensions are available: " + str(self.metadata.dimension_names()))
        if isinstance(process, SingleNodeUDFProcessGraphVisitor):
            udf = process.udf_args.get('udf', None)
            return self._run_udf_dimension(udf, context, dimension, env)
        raise FeatureUnsupportedException(f"Unsupported: apply_dimension with {process}")
def reduce(self, reducer: str, dimension: str) -> 'GeopysparkDataCube':
# TODO: rename this to reduce_temporal (because it only supports temporal reduce)?
from .numpy_aggregators import var_composite, std_composite, min_composite, max_composite, sum_composite, median_composite
reducer = self._normalize_temporal_reducer(dimension, reducer)
if reducer == 'Variance':
return self._aggregate_over_time_numpy(var_composite)
elif reducer == 'StandardDeviation':
return self._aggregate_over_time_numpy(std_composite)
elif reducer == 'Min':
return self._aggregate_over_time_numpy(min_composite)
elif reducer == 'Max':
return self._aggregate_over_time_numpy(max_composite)
elif reducer == 'Sum':
return self._aggregate_over_time_numpy(sum_composite)
elif reducer == 'Median':
return self._aggregate_over_time_numpy(median_composite)
else:
return self.apply_to_levels(lambda layer: layer.to_spatial_layer().aggregate_by_cell(reducer))
def reduce_bands(self, pgVisitor: GeotrellisTileProcessGraphVisitor) -> 'GeopysparkDataCube':
"""
TODO Define in super class? API is not yet ready for client side...
:param pgVisitor:
:return:
"""
result = self._apply_bands_dimension(pgVisitor)
if result.metadata.has_band_dimension():
result.metadata.reduce_dimension(result.metadata.band_dimension.name)
return result
    def _apply_bands_dimension(self, pgVisitor):
        """Apply the band-wise process graph in pgVisitor to every pyramid level.

        The data is first converted to float32 before mapping the bands, then
        converted to the output cell type requested by the process graph builder.
        """
        pysc = gps.get_spark_context()
        float_datacube = self.apply_to_levels(lambda layer: layer.convert_data_type("float32"))
        result = float_datacube._apply_to_levels_geotrellis_rdd(
            lambda rdd, level: pysc._jvm.org.openeo.geotrellis.OpenEOProcesses().mapBands(rdd, pgVisitor.builder))
        # Convert back to the cell type the process graph declares as its output.
        target_cell_type = pgVisitor.builder.getOutputCellType().name()
        result = result.apply_to_levels(
            lambda layer: GeopysparkDataCube._transform_metadata(
                TiledRasterLayer(layer.layer_type,
                                 layer.srdd.convertDataType(target_cell_type)),
                cellType=target_cell_type))
        return result
def _normalize_temporal_reducer(self, dimension: str, reducer: str) -> str:
if dimension != self.metadata.temporal_dimension.name:
raise FeatureUnsupportedException('Reduce on dimension {d!r} not supported'.format(d=dimension))
if reducer.upper() in ["MIN", "MAX", "SUM", "MEAN", "VARIANCE", "MEDIAN", "FIRST", "LAST"]:
reducer = reducer.lower().capitalize()
elif reducer.upper() == "SD":
reducer = "StandardDeviation"
else:
raise FeatureUnsupportedException('Reducer {r!r} not supported'.format(r=reducer))
return reducer
def add_dimension(self, name: str, label: str, type: str = None):
return GeopysparkDataCube(
pyramid=self.pyramid,
metadata=self.metadata.add_dimension(name=name, label=label, type=type)
)
def rename_labels(self, dimension: str, target: list, source: list=None) -> 'GeopysparkDataCube':
""" Renames the labels of the specified dimension in the data cube from source to target.
:param dimension: Dimension name
:param target: The new names for the labels.
:param source: The names of the labels as they are currently in the data cube.
:return: An GeopysparkDataCube instance
"""
return GeopysparkDataCube(
pyramid=self.pyramid,
metadata=self.metadata.rename_labels(dimension,target,source)
)
    @classmethod
    def _mapTransform(cls, layoutDefinition, spatialKey) -> SpatialExtent:
        """Compute the spatial extent of the tile identified by spatialKey.

        Tile bounds are derived by dividing the layout extent into
        layoutCols x layoutRows tiles; rows count downward from ymax.
        """
        ex = layoutDefinition.extent
        x_range = ex.xmax - ex.xmin
        xinc = x_range / layoutDefinition.tileLayout.layoutCols
        yrange = ex.ymax - ex.ymin
        yinc = yrange / layoutDefinition.tileLayout.layoutRows
        # NOTE(review): height is taken from tileCols and width from tileRows,
        # which looks swapped (tileCols is conventionally the tile width in
        # pixels) - confirm against callers before changing.
        return SpatialExtent(
            top=ex.ymax - yinc * spatialKey.row,
            bottom=ex.ymax - yinc * (spatialKey.row + 1),
            right=ex.xmin + xinc * (spatialKey.col + 1),
            left=ex.xmin + xinc * spatialKey.col,
            height=layoutDefinition.tileLayout.tileCols,
            width=layoutDefinition.tileLayout.tileRows
        )
@classmethod
def _tile_to_datacube(
cls, bands_numpy: np.ndarray, extent: SpatialExtent,
band_dimension: openeo.metadata.BandDimension, start_times=None
) -> XarrayDataCube:
coords = {}
dims = ('bands','y', 'x')
# time coordinates if exists
if len(bands_numpy.shape) == 4:
#we have a temporal dimension
coords = {'t':start_times}
dims = ('t' ,'bands','y', 'x')
# band names if exists
if band_dimension:
# TODO: also use the band dimension name (`band_dimension.name`) instead of hardcoded "bands"?
coords['bands'] = band_dimension.band_names
band_count | |
<filename>sysinv/sysinv/sysinv/sysinv/conductor/manager.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2019 Wind River Systems, Inc.
#
"""Conduct all activity related system inventory.
A single instance of :py:class:`sysinv.conductor.manager.ConductorManager` is
created within the *sysinv-conductor* process, and is responsible for
performing all actions for hosts managed by system inventory.
Commands are received via RPC calls. The conductor service also performs
collection of inventory data for each host.
"""
import errno
import filecmp
import fnmatch
import glob
import hashlib
import math
import os
import re
import shutil
import socket
import subprocess
import tempfile
import time
import uuid
import xml.etree.ElementTree as ElementTree
from contextlib import contextmanager
from datetime import datetime
import tsconfig.tsconfig as tsc
from collections import namedtuple
from cgcs_patch.patch_verify import verify_files
from controllerconfig.upgrades import management as upgrades_management
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from eventlet import greenthread
from fm_api import constants as fm_constants
from fm_api import fm_api
from netaddr import IPAddress
from netaddr import IPNetwork
from oslo_config import cfg
from platform_util.license import license
from sqlalchemy.orm import exc
from six.moves import http_client as httplib
from sysinv.agent import rpcapi as agent_rpcapi
from sysinv.api.controllers.v1 import address_pool
from sysinv.api.controllers.v1 import cpu_utils
from sysinv.api.controllers.v1 import kube_app as kube_api
from sysinv.api.controllers.v1 import mtce_api
from sysinv.api.controllers.v1 import utils
from sysinv.api.controllers.v1 import vim_api
from sysinv.common import constants
from sysinv.common import ceph as cceph
from sysinv.common import exception
from sysinv.common import fm
from sysinv.common import fernet
from sysinv.common import health
from sysinv.common import kubernetes
from sysinv.common import retrying
from sysinv.common import service
from sysinv.common import utils as cutils
from sysinv.common.retrying import retry
from sysinv.common.storage_backend_conf import StorageBackendConfig
from cephclient import wrapper as ceph
from sysinv.conductor import ceph as iceph
from sysinv.conductor import kube_app
from sysinv.conductor import openstack
from sysinv.conductor import docker_registry
from sysinv.db import api as dbapi
from sysinv.objects import base as objects_base
from sysinv.objects import kube_app as kubeapp_obj
from sysinv.openstack.common import excutils
from sysinv.openstack.common import jsonutils
from sysinv.openstack.common import log
from sysinv.openstack.common import periodic_task
from sysinv.openstack.common import timeutils
from sysinv.openstack.common import uuidutils
from sysinv.openstack.common.gettextutils import _
from sysinv.puppet import common as puppet_common
from sysinv.puppet import puppet
from sysinv.helm import common as helm_common
from sysinv.helm import helm
from sysinv.helm import utils as helm_utils
# RPC topic the conductor service listens on.
MANAGER_TOPIC = 'sysinv.conductor_manager'

LOG = log.getLogger(__name__)

# oslo.config options for the [conductor] section.
conductor_opts = [
    cfg.StrOpt('api_url',
               default=None,
               help=('Url of SysInv API service. If not set SysInv can '
                     'get current value from Keystone service catalog.')),
    cfg.IntOpt('audit_interval',
               default=60,
               help='Interval to run conductor audit'),
    cfg.IntOpt('osd_remove_retry_count',
               default=11,
               help=('Maximum number of retries in case Ceph OSD remove '
                     'requests fail because OSD is still up.')),
    cfg.IntOpt('osd_remove_retry_interval',
               default=5,
               help='Interval in seconds between retries to remove Ceph OSD.'),
    cfg.IntOpt('managed_app_auto_recovery_interval',
               default=300,
               help='Interval to run managed app auto recovery'),
]

CONF = cfg.CONF
CONF.register_opts(conductor_opts, 'conductor')

# doesn't work otherwise for ceph-manager RPC calls; reply is lost
#
CONF.amqp_rpc_single_reply_queue = True

# configuration flags
# Flag file marking that drbdadm has been reconfigured on this controller.
CFS_DRBDADM_RECONFIGURED = os.path.join(
    tsc.PLATFORM_CONF_PATH, ".cfs_drbdadm_reconfigured")

# volatile flags (live under the volatile path, cleared on reboot)
CONFIG_CONTROLLER_ACTIVATE_FLAG = os.path.join(tsc.VOLATILE_PATH,
                                               ".config_controller_activate")
CONFIG_CONTROLLER_FINI_FLAG = os.path.join(tsc.VOLATILE_PATH,
                                           ".config_controller_fini")
CONFIG_FAIL_FLAG = os.path.join(tsc.VOLATILE_PATH, ".config_fail")

# configuration UUID reboot required flag (bit)
# Highest bit of the 128-bit config UUID signals "reboot required".
CONFIG_REBOOT_REQUIRED = (1 << 127)

# Prefix used to build per-config-update lock names.
LOCK_NAME_UPDATE_CONFIG = 'update_config_'

# Record describing an application tarball (name, version, manifest info).
AppTarBall = namedtuple(
    'AppTarBall',
    "tarball_name app_name app_version manifest_name manifest_file")
class ConductorManager(service.PeriodicService):
    """Sysinv Conductor service main class."""

    # RPC API version exposed to clients of this manager.
    RPC_API_VERSION = '1.1'
    # Class-level default; may be populated at runtime elsewhere in the class.
    my_host_id = None
    def __init__(self, host, topic):
        """Set up the RPC service; heavy initialization is deferred to _start().

        :param host: host this conductor runs on
        :param topic: RPC topic to listen on
        """
        serializer = objects_base.SysinvObjectSerializer()
        super(ConductorManager, self).__init__(host, topic,
                                               serializer=serializer)
        # Operator/helper handles; None until _start() creates them.
        self.dbapi = None
        self.fm_api = None
        self.fm_log = None
        self._app = None
        self._ceph = None
        # REST client towards the local Ceph API endpoint.
        self._ceph_api = ceph.CephWrapper(
            endpoint='http://localhost:5001')
        self._kube = None
        self._fernet = None
        self._openstack = None
        self._api_token = None
        # Maintenance (mtce) service address and port.
        self._mtc_address = constants.LOCALHOST_HOSTNAME
        self._mtc_port = 2112

        # Timeouts for adding & removing operations
        self._pv_op_timeouts = {}
        self._stor_bck_op_timeouts = {}
    def start(self):
        """Initialize the conductor, then begin serving RPC and periodic tasks."""
        self._start()
        # accept API calls and run periodic tasks after
        # initializing conductor manager service
        super(ConductorManager, self).start()
    def _start(self):
        """Create DB handles and operators, and ensure the default system exists."""
        self.dbapi = dbapi.get_instance()
        self.fm_api = fm_api.FaultAPIs()
        self.fm_log = fm.FmCustomerLog()

        self._openstack = openstack.OpenStackOperator(self.dbapi)
        self._puppet = puppet.PuppetOperator(self.dbapi)

        # create /var/run/sysinv if required. On DOR, the manifests
        # may not run to create this volatile directory.
        cutils.check_lock_path()

        system = self._create_default_system()

        # Besides OpenStack and Puppet operators, all other operators
        # should be initialized after the default system is in place.
        # For instance, CephOperator expects a system to exist to initialize
        # correctly. With Ansible bootstrap deployment, sysinv conductor is
        # brought up during bootstrap manifest apply and is not restarted
        # until host unlock and we need ceph-mon up in order to configure
        # ceph for the initial unlock.
        self._app = kube_app.AppOperator(self.dbapi)
        self._ceph = iceph.CephOperator(self.dbapi)
        self._helm = helm.HelmOperator(self.dbapi)
        self._kube = kubernetes.KubeOperator(self.dbapi)
        self._kube_app_helper = kube_api.KubeAppHelper(self.dbapi)
        self._fernet = fernet.FernetOperator()

        # Upgrade start tasks
        self._upgrade_init_actions()
        self._handle_restore_in_progress()

        helm_utils.refresh_helm_repo_information()

        LOG.info("sysinv-conductor start committed system=%s" %
                 system.as_dict())
    def periodic_tasks(self, context, raise_on_error=False):
        """ Periodic tasks are run at pre-specified intervals.

        :param context: request context forwarded to each task
        :param raise_on_error: forwarded to run_periodic_tasks
        """
        return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
    @contextmanager
    def session(self):
        """Context manager yielding a DB session; released on exit even on error."""
        session = dbapi.get_instance().get_session(autocommit=True)
        try:
            yield session
        finally:
            # remove() releases the (scoped) session whether or not the body raised.
            session.remove()
    def _create_default_system(self):
        """Populate the default system tables"""
        system = None
        try:
            system = self.dbapi.isystem_get_one()

            # fill in empty remotelogging system_id fields
            self.dbapi.remotelogging_fill_empty_system_id(system.id)
            # fill in empty ptp system_id fields
            self.dbapi.ptp_fill_empty_system_id(system.id)

            return system  # system already configured
        except exception.NotFound:
            pass  # create default system

        # Create the default system entry
        mode = None
        if tsc.system_mode is not None:
            mode = tsc.system_mode

        security_profile = None
        if tsc.security_profile is not None:
            security_profile = tsc.security_profile

        # Default to the platform-wide spectre/meltdown mitigation options
        # unless tsconfig provides an explicit setting.
        security_feature = constants.SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_DEFAULT_OPTS
        if tsc.security_feature is not None:
            security_feature = tsc.security_feature

        system = self.dbapi.isystem_create({
            'name': uuidutils.generate_uuid(),
            'system_mode': mode,
            'software_version': cutils.get_sw_version(),
            'capabilities': {},
            'security_profile': security_profile,
            'security_feature': security_feature
        })

        # Populate the default system tables, referencing the newly created
        # table (additional attributes will be populated during
        # config_controller configuration population)
        values = {'forisystemid': system.id}

        self.dbapi.iuser_create(values)
        self.dbapi.idns_create(values)
        self.dbapi.intp_create(values)

        self.dbapi.drbdconfig_create({
            'forisystemid': system.id,
            'uuid': uuidutils.generate_uuid(),
            'link_util': constants.DRBD_LINK_UTIL_DEFAULT,
            'num_parallel': constants.DRBD_NUM_PARALLEL_DEFAULT,
            'rtt_ms': constants.DRBD_RTT_MS_DEFAULT
        })

        # remotelogging and ptp tables have attribute 'system_id' not 'forisystemid'
        system_id_attribute_value = {'system_id': system.id}
        self.dbapi.remotelogging_create(system_id_attribute_value)
        self.dbapi.ptp_create(system_id_attribute_value)

        # populate service table
        for optional_service in constants.ALL_OPTIONAL_SERVICES:
            self.dbapi.service_create({'name': optional_service,
                                       'enabled': False})

        self._create_default_service_parameter()
        return system
def _upgrade_init_actions(self):
""" Perform any upgrade related startup actions"""
try:
upgrade = self.dbapi.software_upgrade_get_one()
except exception.NotFound:
# Not upgrading. No need to update status
return
hostname = socket.gethostname()
if hostname == constants.CONTROLLER_0_HOSTNAME:
if os.path.isfile(tsc.UPGRADE_ROLLBACK_FLAG):
self._set_state_for_rollback(upgrade)
elif os.path.isfile(tsc.UPGRADE_ABORT_FLAG):
self._set_state_for_abort(upgrade)
elif hostname == constants.CONTROLLER_1_HOSTNAME:
self._init_controller_for_upgrade(upgrade)
system_mode = self.dbapi.isystem_get_one().system_mode
if system_mode == constants.SYSTEM_MODE_SIMPLEX:
self._init_controller_for_upgrade(upgrade)
self._upgrade_default_service()
self._upgrade_default_service_parameter()
def _handle_restore_in_progress(self):
if os.path.isfile(tsc.RESTORE_IN_PROGRESS_FLAG):
if StorageBackendConfig.has_backend(
self.dbapi,
constants.CINDER_BACKEND_CEPH):
StorageBackendConfig.update_backend_states(
self.dbapi,
constants.CINDER_BACKEND_CEPH,
task=constants.SB_TASK_RESTORE)
def _set_state_for_abort(self, upgrade):
""" Update the database to reflect the abort"""
LOG.info("Upgrade Abort detected. Correcting database state.")
# Update the upgrade state
self.dbapi.software_upgrade_update(
upgrade.uuid, {'state': constants.UPGRADE_ABORTING})
try:
os.remove(tsc.UPGRADE_ABORT_FLAG)
except OSError:
LOG.exception("Failed to remove upgrade rollback flag")
def _set_state_for_rollback(self, upgrade):
""" Update the database to reflect the rollback"""
LOG.info("Upgrade Rollback detected. Correcting database state.")
# Update the upgrade state
self.dbapi.software_upgrade_update(
upgrade.uuid, {'state': constants.UPGRADE_ABORTING_ROLLBACK})
# At this point we are swacting to controller-0 which has just been
# downgraded.
# Before downgrading controller-0 all storage/worker nodes were locked
# The database of the from_load is not aware of this, so we set the
# state in the database to match the state of the system. This does not
# actually lock the nodes.
hosts = self.dbapi.ihost_get_list()
for host in hosts:
if host.personality not in [constants.WORKER, constants.STORAGE]:
continue
self.dbapi.ihost_update(host.uuid, {
'administrative': constants.ADMIN_LOCKED})
# Remove the rollback flag, we only want to modify the database once
try:
os.remove(tsc.UPGRADE_ROLLBACK_FLAG)
except OSError:
LOG.exception("Failed to remove upgrade rollback flag")
def _init_controller_for_upgrade(self, upgrade):
# Raise alarm to show an upgrade is in progress
# After upgrading controller-1 and swacting to it, we must
# re-raise the upgrades alarm, because alarms are not preserved
# from the previous release.
entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST,
constants.CONTROLLER_HOSTNAME)
if not self.fm_api.get_fault(
fm_constants.FM_ALARM_ID_UPGRADE_IN_PROGRESS,
entity_instance_id):
fault = fm_api.Fault(
alarm_id=fm_constants.FM_ALARM_ID_UPGRADE_IN_PROGRESS,
alarm_state=fm_constants.FM_ALARM_STATE_SET,
entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST,
entity_instance_id=entity_instance_id,
severity=fm_constants.FM_ALARM_SEVERITY_MINOR,
reason_text="System Upgrade in progress.",
# operational
alarm_type=fm_constants.FM_ALARM_TYPE_7,
# congestion
probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_8,
proposed_repair_action="No action required.",
service_affecting=False)
self.fm_api.set_fault(fault)
# Regenerate dnsmasq.hosts and dnsmasq.addn_hosts.
# This is necessary to handle the case where a lease expires during
# an upgrade, in order to allow hostnames to be resolved from
# the dnsmasq.addn_hosts file before unlocking controller-0 forces
# dnsmasq.addn_hosts to be regenerated.
self._generate_dnsmasq_hosts_file()
DEFAULT_PARAMETERS = [
{'service': constants.SERVICE_TYPE_IDENTITY,
'section': constants.SERVICE_PARAM_SECTION_IDENTITY_CONFIG,
'name': constants.SERVICE_PARAM_IDENTITY_CONFIG_TOKEN_EXPIRATION,
'value': constants.SERVICE_PARAM_IDENTITY_CONFIG_TOKEN_EXPIRATION_DEFAULT
},
{'service': constants.SERVICE_TYPE_HORIZON,
'section': | |
# <gh_stars>100-1000  (scraping artifact; commented out -- not valid Python)
"""The GraphTensor composite tensor and its pieces."""
import abc
from typing import Any, Callable, cast, Dict, Mapping, Optional, Sequence, Union
import tensorflow as tf
from tensorflow_gnn.graph import graph_constants as const
from tensorflow_gnn.graph import graph_piece as gp
from tensorflow_gnn.graph import tensor_utils as utils
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import type_spec
# pylint: enable=g-direct-tensorflow-import
# Convenience re-exports of the core typing aliases from graph_constants,
# so users of this module need not import graph_constants directly.
FieldName = const.FieldName
NodeSetName = const.NodeSetName
EdgeSetName = const.EdgeSetName
ShapeLike = const.ShapeLike
Field = const.Field
Fields = const.Fields
FieldSpec = const.FieldSpec
FieldsSpec = const.FieldsSpec
# TODO(b/189057503): use adjacency interface class instead.
Adjacency = Any
AdjacencySpec = Any
class _GraphPieceWithFeatures(gp.GraphPieceBase, metaclass=abc.ABCMeta):
  """Base class for graph pieces that hold user-defined features.

  A graph piece (context, node set or edge set) stores a mapping of feature
  name to feature tensor plus a `sizes` tensor with the number of items in
  each graph component. All payload lives in the `_data` mapping provided by
  `gp.GraphPieceBase`, under the two data keys declared below.
  """

  # Keys into the internal `_data` mapping of gp.GraphPieceBase.
  _DATAKEY_FEATURES = 'features'  # A Mapping[FieldName, Field].
  _DATAKEY_SIZES = 'sizes'  # A Field with `sizes`.

  def __getitem__(self, feature_name: FieldName) -> Field:
    """Indexing operator `[]` to access feature values by their name."""
    return self._get_features_ref[feature_name]

  @property
  def features(self) -> Mapping[FieldName, Field]:
    """A read-only mapping of feature name to feature values."""
    return _as_immutable_mapping(self._get_features_ref)

  @property
  def sizes(self) -> Field:
    """The number of items in each graph component.

    Returns:
      A potentially ragged int tensor of shape [*graph_shape, num_components]
      where the graph_shape is the graph piece shape and its containing
      GraphTensor, num_components is the number of graph components contained
      in each graph (could be ragged).
    """
    return self._data[_GraphPieceWithFeatures._DATAKEY_SIZES]  # pylint: disable=protected-access

  @property
  def total_size(self) -> tf.Tensor:
    """The total number of items.

    Returns:
      A scalar integer tensor equal to `tf.math.reduce_sum(sizes)`. If result is
      statically known (spec.total_size is not None), the output is a constant
      tensor (suitable for environments in which constant shapes are required,
      like TPU).
    """
    dtype = self.spec.sizes_spec.dtype
    # Prefer the statically-known constant from the spec; only build the
    # dynamic reduce_sum when the total is unknown at construction time.
    return _fast_alternative(
        self.spec.total_size is not None,
        lambda: tf.constant(self.spec.total_size, dtype, shape=[]),
        lambda: tf.math.reduce_sum(self.sizes),
        'total_size != spec.total_size')

  @property
  def num_components(self) -> tf.Tensor:
    """The number of graph components for each graph.

    Returns:
      A dense integer tensor with the same shape as the graph piece.
    """
    # Count components by summing ones over the innermost (components) axis
    # of `sizes`; `self.rank` is that axis for this piece.
    result = tf.reduce_sum(tf.ones_like(self.sizes), axis=self.rank)
    if utils.is_ragged_tensor(result):
      # TODO(b/232914703): reconsider if ragged batch dimensions are supported.
      # The per-graph count is a scalar, so the ragged result can be
      # densified; optionally assert no elements are dropped in the process.
      result_dense = result.to_tensor()
      check_ops = []
      if const.validate_internal_results:
        check_ops.append(
            tf.debugging.assert_equal(
                tf.size(result),
                tf.size(result_dense),
                message='`sizes` shape is not compatible with the piece shape'))
      with tf.control_dependencies(check_ops):
        result = tf.identity(result_dense)
    return result

  @property
  def total_num_components(self) -> tf.Tensor:
    """The total number of graph components.

    Returns:
      A scalar integer tensor equal to `tf.math.reduce_sum(num_components)`. If
      result is statically known (spec.total_num_components is not None), the
      output is a constant Tensor (suitable for environments in which constant
      shapes are required, like TPU).
    """
    dtype = self.spec.sizes_spec.dtype
    # `sizes` has exactly one entry per component, so its element count is
    # the total number of components.
    return _fast_alternative(
        self.spec.total_num_components is not None,
        lambda: tf.constant(self.spec.total_num_components, dtype, []),
        lambda: tf.size(self.sizes, out_type=dtype),
        'total_num_components != spec.total_num_components')

  @property
  def _get_features_ref(self) -> Fields:
    # Internal accessor for the mutable features mapping; public callers go
    # through `features`, which wraps it in an immutable view.
    return self._data[_GraphPieceWithFeatures._DATAKEY_FEATURES]

  @classmethod
  def _from_features_and_sizes(cls, features: Fields, sizes: Field,
                               **extra_data) -> '_GraphPieceWithFeatures':
    """Constructs graph piece from features and component sizes."""
    assert isinstance(features, Mapping)
    sizes = gp.convert_to_tensor_or_ragged(sizes)
    prepared_features = {
        key: gp.convert_to_tensor_or_ragged(value)
        for key, value in features.items()
    }
    data = {
        _GraphPieceWithFeatures._DATAKEY_FEATURES: prepared_features,
        _GraphPieceWithFeatures._DATAKEY_SIZES: sizes
    }
    # Subclasses may pass extra payload (e.g. adjacency) via extra_data.
    data.update({
        key: gp.convert_to_tensor_or_ragged(value)
        for key, value in extra_data.items()
    })
    # Note that this graph piece does not use any metadata fields.
    # The piece's shape is `sizes` without its trailing components dimension.
    return cls._from_data(
        data=data, shape=sizes.shape[:-1], indices_dtype=sizes.dtype)

  def get_features_dict(self) -> Dict[FieldName, Field]:
    """Returns features copy as a dictionary."""
    return dict(self._get_features_ref)
class _GraphPieceWithFeaturesSpec(gp.GraphPieceSpecBase):
  """A type spec for `_GraphPieceWithFeatures`."""

  def __getitem__(self, feature_name: FieldName) -> FieldSpec:
    """Returns the type spec of the feature with the given name."""
    return self._get_features_spec_ref[feature_name]

  @property
  def features_spec(self) -> Mapping[FieldName, FieldSpec]:
    """A read-only mapping of feature name to feature spec."""
    return _as_immutable_mapping(self._get_features_spec_ref)

  @property
  def sizes_spec(self) -> FieldSpec:
    """The type spec for the sizes that provides num. elements per component."""
    return self._data_spec[_GraphPieceWithFeatures._DATAKEY_SIZES]  # pylint: disable=protected-access

  @property
  def total_num_components(self) -> Optional[int]:
    """The total number of graph components if known."""
    # `sizes` has one entry per component, so its static element count,
    # when fully defined, is the number of components (else None).
    return self.sizes_spec.shape.num_elements()

  @property
  def total_size(self) -> Optional[int]:
    """The total number of graph items if known."""
    # Features have shape [*graph_shape, num_items, ...], so the leading
    # (rank + 1) dimensions of any indicative feature determine the count.
    indicative_feature_spec = _get_indicative_feature(
        self._get_features_spec_ref)
    if indicative_feature_spec is None:
      return None
    else:
      return indicative_feature_spec.shape[:(self.rank + 1)].num_elements()

  @property
  def _get_features_spec_ref(self) -> FieldsSpec:
    return self._data_spec[_GraphPieceWithFeatures._DATAKEY_FEATURES]  # pylint: disable=protected-access

  @classmethod
  def _from_feature_and_size_specs(
      cls, features_spec: FieldsSpec, sizes_spec: FieldSpec,
      **extra_data) -> '_GraphPieceWithFeaturesSpec':
    """Constructs GraphPieceSpec from specs of features and component sizes."""
    # pylint: disable=protected-access
    assert isinstance(features_spec, Mapping)
    # Consistency fix: use the data keys declared on _GraphPieceWithFeatures
    # (the value-class counterpart of this spec) instead of reaching into the
    # later-defined _NodeOrEdgeSet subclass. The key values are identical,
    # so behavior is unchanged, but the forward dependency is removed.
    data_spec = {
        _GraphPieceWithFeatures._DATAKEY_FEATURES: features_spec.copy(),
        _GraphPieceWithFeatures._DATAKEY_SIZES: sizes_spec
    }
    data_spec.update(extra_data)
    # Note that this graph piece does not use any metadata fields.
    return cls._from_data_spec(
        data_spec, shape=sizes_spec.shape[:-1], indices_dtype=sizes_spec.dtype)
class Context(_GraphPieceWithFeatures):
  """A composite tensor for graph context features.

  The items of the context are the graph components (just like the items of a
  node set are the nodes and the items of an edge set are the edges). The
  `Context` is a composite tensor. It stores features that belong to a graph
  component as a whole, not any particular node or edge. Each context feature
  has a shape [*graph_shape, num_components, ...], where num_components is the
  number of graph components in a graph (could be ragged).
  """

  @classmethod
  def from_fields(cls,
                  *,
                  features: Optional[Fields] = None,
                  sizes: Optional[Field] = None,
                  shape: Optional[ShapeLike] = None,
                  indices_dtype: Optional[tf.dtypes.DType] = None) -> 'Context':
    """Constructs a new instance from context fields.

    Example:

      tfgnn.Context.from_fields(features={'country_code': ['CH']})

    Args:
      features: A mapping from feature name to feature Tensor or RaggedTensor.
        All feature tensors must have shape [*graph_shape, num_components,
        *feature_shape], where num_components is the number of graph components
        (could be ragged); feature_shape are feature-specific inner dimensions.
      sizes: A Tensor of 1's with shape [*graph_shape, num_components], where
        num_components is the number of graph components (could be ragged).
        For symmetry with `sizes` in NodeSet and EdgeSet, this counts the items
        per graph component, but since the items of Context are the components
        themselves, each value is 1. Must be compatible with `shape`, if that
        is specified.
      shape: The shape of this tensor and a GraphTensor containing it, also
        known as the graph_shape. If not specified, the shape is inferred from
        `sizes` or set to `[]` if the `sizes` is not specified.
      indices_dtype: An `indices_dtype` of a GraphTensor containing this
        object, used as `row_splits_dtype` when batching potentially ragged
        fields. If `sizes` are specified they are casted to that type.

    Returns:
      A `Context` composite tensor.
    """
    # Validate the dtype/shape arguments before any tensor conversion.
    if indices_dtype is not None:
      if indices_dtype not in (tf.int64, tf.int32):
        raise ValueError(f'Expected indices_dtype={indices_dtype}'
                         ' to be tf.int64 or tf.int32')
    if shape is not None:
      shape = shape if isinstance(shape,
                                  tf.TensorShape) else tf.TensorShape(shape)
    if sizes is not None:
      sizes = gp.convert_to_tensor_or_ragged(sizes)
      if indices_dtype is not None and indices_dtype != sizes.dtype:
        sizes = tf.cast(sizes, dtype=indices_dtype)
    if shape is not None and sizes is not None:
      # `sizes` must add exactly one (num_components) dimension to the graph
      # shape and agree with it on all leading dimensions.
      if sizes.shape.rank != shape.rank + 1:
        raise ValueError('The `sizes` rank != shape.rank + 1: '
                         f' shape={shape}'
                         f' sizes.shape={sizes.shape}')
      if not shape.is_compatible_with(sizes.shape[:shape.rank]):
        raise ValueError('The `sizes` is not compatible with the `shape`: '
                         f' shape={shape}'
                         f' sizes.shape={sizes.shape}')
    if features is None:
      features = {}
    else:
      features = {k: gp.convert_to_tensor_or_ragged(v)
                  for k, v in features.items()}
    if sizes is None:
      # Caller did not provide `sizes`: infer a tensor of ones (one per
      # component) from any feature, or assume zero components if there are
      # no features either.
      shape = _ifnone(shape, tf.TensorShape([]))
      indices_dtype = _ifnone(indices_dtype, const.default_indices_dtype)
      indicative_feature = _get_indicative_feature(features)
      if indicative_feature is None:
        # There are no features to use for sizes inference. Assume that the
        # Context has no components and set sizes accordingly.
        size_dims = [_ifnone(dim, 0) for dim in shape.concatenate([0])]
        sizes = tf.ones(shape=size_dims, dtype=indices_dtype)
      else:
        sizes = utils.ones_like_leading_dims(
            indicative_feature, shape.rank + 1, dtype=indices_dtype)
    return cls._from_features_and_sizes(features=features, sizes=sizes)

  def replace_features(self, features: Fields) -> 'Context':
    """Returns a new instance with a new set of features."""
    assert isinstance(features, Mapping)
    return self.__class__.from_fields(
        features=features,
        sizes=self.sizes,
        shape=self.shape,
        indices_dtype=self.indices_dtype)

  @staticmethod
  def _type_spec_cls():
    # Links this value class to its TypeSpec counterpart.
    return ContextSpec

  def __repr__(self):
    return (f'Context('
            f'features={utils.short_features_repr(self.features)}, '
            f'sizes={self.sizes}, '
            f'shape={self.shape}, '
            f'indices_dtype={self.indices_dtype!r})')
@type_spec.register('tensorflow_gnn.ContextSpec.v2')
class ContextSpec(_GraphPieceWithFeaturesSpec):
  """A type spec for `tfgnn.Context`."""

  @classmethod
  def from_field_specs(
      cls, *,
      features_spec: Optional[FieldsSpec] = None,
      sizes_spec: Optional[FieldSpec] = None,
      shape: ShapeLike = tf.TensorShape([]),
      indices_dtype: tf.dtypes.DType = const.default_indices_dtype
  ) -> 'ContextSpec':
    """The counterpart of `Context.from_fields()` for field type specs."""
    shape = shape if isinstance(shape,
                                tf.TensorShape) else tf.TensorShape(shape)
    if features_spec is None:
      features_spec = {}
    if sizes_spec is None:
      # Derive the sizes spec from an indicative feature spec, mirroring the
      # dynamic sizes inference in `Context.from_fields()`.
      indicative_feature_spec = _get_indicative_feature(features_spec)
      is_ragged = False
      if indicative_feature_spec is None:
        # No features to infer from: assume zero components.
        sizes_shape = shape.concatenate([0])
      else:
        # The components dimension immediately follows the graph shape.
        components_dim = indicative_feature_spec.shape[shape.rank]
        sizes_shape = shape.concatenate(tf.TensorShape([components_dim]))
        if isinstance(indicative_feature_spec, tf.RaggedTensorSpec):
          # Sizes are ragged only for batched pieces (rank > 0) whose number
          # of components varies between graphs (unknown components_dim).
          is_ragged = (shape.rank > 0) and (components_dim is None)
      if is_ragged:
        sizes_spec = tf.RaggedTensorSpec(
            shape=sizes_shape,
            ragged_rank=shape.rank + 1,
            dtype=indices_dtype,
            row_splits_dtype=indices_dtype)
      else:
        sizes_spec = tf.TensorSpec(
            shape=sizes_shape,
            dtype=indices_dtype)
    return cls._from_feature_and_size_specs(features_spec, sizes_spec)

  @property
  def value_type(self):
    # The composite-tensor value class described by this spec.
    return Context
class _NodeOrEdgeSet(_GraphPieceWithFeatures):
"""Base class for node set or edge set."""
def replace_features(self, features: Mapping[FieldName,
Field]) -> '_NodeOrEdgeSet':
"""Returns a new instance with a new set of features."""
assert isinstance(features, Mapping)
new_data = self._data.copy()
| |
# zepster/genschema.py  (scraping artifact commented out -- not valid Python)
'''
Copyright 2020 Cisco Systems, Inc. and its affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License
Program to generate a database schema (table definitions, etc.)
from an Entity-Relationship Markup Language (ERML) file.
Usage: genschema.py [OPTIONS]
Read an Entity-Relationship Markup Language file and write a database
schema SQL file
Options:
--input TEXT Input Entity-Relationship Markup Language
file (default is standard input, also
represented by a dash "-")
--output TEXT Output schema definition file (default is
standard output, also represented by a dash
"-")
--overwrite If specified, overwrite the output file if
it already exists
--logging TEXT Set logging to the specified level: NOTSET,
DEBUG, INFO, WARNING, ERROR, CRITICAL
--dialect [CRDB|RS] Set the database dialect: "CRDB" for
CockroachDB [Not implemented: and "RS" for
Redshift].
--generate-keys [Not implemented] Indicates whether to
generate synthetic keys. Default is True.
--generated-key-type [INTEGER|UUID]
Set the data type for generated synthetic
keys. The default depends on the database
dialect: UUID for CockroachDB [Not
implemented: and INTEGER for Redshift].
--help Show this message and exit.
'''
import sys
import os.path
from loguru import logger
import click
import cardinality
import yaml
import jsonschema
import datetime
from json_schema_erml import json_schema_erml
import json
from util import i, topological_sort_entities, build_entity_parents_and_children
@logger.catch
def generate_enums(er_yaml, output_object):
    '''
    Generate the schema definitions and data for enum tables.

    Each enum becomes a two-column lookup table (pk, name) plus one insert
    statement per enum value; descriptions and notes from the ERML source
    are emitted as SQL comments.

    Parameters:
        er_yaml: parsed ERML document (must contain an 'enums' list)
        output_object: file-like object the SQL is written to
    '''
    logger.debug('Entering generate_enums()')
    for enum_table in er_yaml['enums']:
        logger.debug(f'enum_table={enum_table}')
        enum_table_name = enum_table['enum']['name']
        logger.debug(f'enum_table_name={enum_table_name}')
        if 'description' in enum_table['enum']:
            print('-- Description:', file=output_object)
            enum_table_description = enum_table['enum']['description']
            for line in enum_table_description.splitlines():
                print(f'-- {line}', file=output_object)
        if 'note' in enum_table['enum']:
            if 'description' in enum_table['enum']:
                print(file=output_object)
            print('-- Note:', file=output_object)
            enum_table_note = enum_table['enum']['note']
            for line in enum_table_note.splitlines():
                print(f'-- {line}', file=output_object)
        print(f'create table {enum_table_name} (pk integer primary key, name varchar(500));', file=output_object)
        for ordinal, enum_value_or_more in enumerate(enum_table['enum']['values']):
            logger.debug(f'{i(1)}enum_value_or_more={enum_value_or_more} type={type(enum_value_or_more)}')
            # A value is either a bare string or a dict with 'value' plus
            # optional 'description'/'note'. (isinstance replaces the
            # previous type(x) == type('') comparisons.)
            if isinstance(enum_value_or_more, str):
                logger.debug(f'{i(1)}Type is string')
                enum_value = enum_value_or_more
            elif isinstance(enum_value_or_more, dict):
                logger.debug(f'{i(1)}Type is dictionary')
                enum_value = enum_value_or_more['value']
                if 'description' in enum_value_or_more:
                    print('-- Description:', file=output_object)
                    enum_value_description = enum_value_or_more['description']
                    for line in enum_value_description.splitlines():
                        print(f'-- {line}', file=output_object)
                if 'note' in enum_value_or_more:
                    print('-- Note:', file=output_object)
                    enum_value_note = enum_value_or_more['note']
                    for line in enum_value_note.splitlines():
                        print(f'-- {line}', file=output_object)
            else:
                raise ValueError(f'Enum value did not match expected type of string or '
                                 f'dictionary for enum table "{enum_table_name}". '
                                 f'Value is {enum_value_or_more}')
            # Bug fix: the old code claimed to escape but did not. Double
            # any embedded single quotes (standard SQL escaping) so a value
            # containing an apostrophe cannot break out of the literal.
            escaped_enum_value = enum_value.replace("'", "''")
            print(f"insert into {enum_table_name} (pk, name) values ({ordinal+1}, '{escaped_enum_value}');", file=output_object)
        print(file=output_object)
    logger.debug('Leaving generate_enums()')
@logger.catch
def generate_mm_synthesized(entity_name, graph, output_object):
    '''
    Generate DDL for a synthesized many-to-many mapping table.

    Assumes synthesized many-to-many mapping tables have no attributes
    (this may change with a future enhancement): the table gets a UUID
    primary key plus one cascading foreign key per dependee.
    '''
    logger.debug('Entering generate_mm_synthesized()')
    graph_dependees = graph[entity_name]
    logger.debug(f'{i(1)}graph_dependees={graph_dependees}')
    print(f'create table {entity_name} (', file=output_object)
    print(f'{i(1)}pk uuid not null default gen_random_uuid() primary key,', file=output_object)
    # Track how many foreign keys remain so every line but the last
    # gets a trailing comma.
    remaining = cardinality.count(graph_dependees)
    for dependee in graph_dependees:
        remaining -= 1
        suffix = ',' if remaining > 0 else ''
        print(f'{i(1)}fk_{dependee} uuid not null references {dependee}(pk) on delete cascade{suffix}',
              file=output_object)
    print(');\n', file=output_object)
    logger.debug('Leaving generate_mm_synthesized()')
@logger.catch
def generate_entity_comments(entity_name, entities, entity_indices, entities_pc, output_object):
    '''
    Handle entity description and note.

    Emits the entity's description/note as SQL comments and gathers the
    entity's parents and attributes for the caller.

    Returns:
        (entity, parents, num_parents, attributes, num_attributes)
    '''
    logger.debug('Entering generate_entity_comments()')
    entity = entities[entity_indices[entity_name]]['entity']
    logger.debug(f'entity=\n{yaml.dump(entity)}')
    entity_pc = entities_pc[entity_name]
    logger.debug(f'{i(1)}entity_pc={entity_pc}')
    num_parents = 0
    parents = None
    if 'parents' in entity_pc:
        parents = entity_pc['parents']
        num_parents = cardinality.count(parents)
    num_attributes = 0
    attributes = None
    if 'attributes' in entity:
        attributes = entity['attributes']
        num_attributes = cardinality.count(attributes)
    logger.debug(f'num_parents={num_parents} num_attributes={num_attributes}')
    has_description = 'description' in entity
    if has_description:
        print('-- Description:', file=output_object)
        for line in entity['description'].splitlines():
            print(f'-- {line}', file=output_object)
    if 'note' in entity:
        # Blank separator only when a description was printed above.
        if has_description:
            print(file=output_object)
        print('-- Note:', file=output_object)
        for line in entity['note'].splitlines():
            print(f'-- {line}', file=output_object)
    print(file=output_object)
    logger.debug('Leaving generate_entity_comments()')
    return entity, parents, num_parents, attributes, num_attributes
@logger.catch
def generate_foreign_keys(parents, num_parents, num_attributes, output_object):
    '''
    Generate DDL for foreign key columns.

    Parameters:
        parents: list of single-entry dicts {parent_name: parent_vals}
        num_parents: number of entries in `parents`
        num_attributes: number of attribute columns that will follow
            (decides whether the final foreign key line needs a comma)
        output_object: file-like object the DDL is written to
    '''
    logger.debug('Entering generate_foreign_keys()')
    if num_parents >= 1:
        logger.debug('Generating DDL for foreign keys...')
        logger.debug(f'parents=\n{json.dumps(parents, indent=4)}')
        for parent_num, parent in enumerate(parents):
            logger.debug(f'{i(1)}parent_num={parent_num} parent={parent}')
            # Each parent is a single-entry mapping; unpack it directly
            # instead of the previous loop-and-pass idiom.
            assert len(parent) == 1
            (parent_name, parent_vals), = parent.items()
            parent_kind = parent_vals['kind']
            # Only an explicit boolean True marks a defining relationship
            # (preserves the original `== True` semantics).
            is_defining = parent_vals.get('defining') == True  # noqa: E712
            logger.debug(f'{i(1)}is_defining={is_defining}')
            column_line = f'{i(1)}fk_{parent_name} uuid '
            if parent_kind in ['one', 'base_class']:
                column_line += 'not null '
            column_line += f'references {parent_name}(pk)'
            if is_defining:
                column_line += ' on delete cascade'
            elif parent_kind == 'zero_or_one':
                column_line += ' on delete set null'
            logger.debug(f'{i(1)}column_line={column_line}')
            # Comma unless this is the very last column of the table.
            if parent_num < num_parents-1 or num_attributes > 0:
                column_line += ','
            logger.debug(f'column_line={column_line}')
            print(f'{column_line}', file=output_object)
    logger.debug('Leaving generate_foreign_keys()')
@logger.catch
def generate_attribute_columns(attributes, num_attributes, output_object):
    '''
    Generate DDL for attribute columns, with description/note comments.
    '''
    logger.debug('Entering generate_attribute_columns()')
    if num_attributes <= 0:
        # Guard clause: nothing to emit.
        logger.debug('Skipping attributes because no attributes')
        logger.debug('Leaving generate_attribute_columns()')
        return
    logger.debug(f"type(attributes)={type(attributes)}")
    logger.debug(f"attributes={attributes}")
    for current_attribute_num, (attribute_key, attribute_values) in enumerate(attributes.items()):
        logger.debug(f'current_attribute_num={current_attribute_num} attribute_key_values={(attribute_key, attribute_values)}')
        logger.debug(f'attribute_key={attribute_key} attribute_values={attribute_values}')
        if 'description' in attribute_values:
            print(f'{i(1)}-- Description:', file=output_object)
            for line in attribute_values['description'].splitlines():
                print(f'{i(1)}-- {line}', file=output_object)
        if 'note' in attribute_values:
            print(f'{i(1)}-- Note:', file=output_object)
            for line in attribute_values['note'].splitlines():
                print(f'{i(1)}-- {line}', file=output_object)
        logger.debug(f'{i(1)}attribute_key={attribute_key} attribute_values={attribute_values}')
        assert 'type' in attribute_values
        attribute_type = attribute_values['type']
        # Enum attributes become foreign keys into their enum lookup table.
        if attribute_type == 'enum':
            column_type = f'integer references enum_{attribute_key}(pk)'
        else:
            column_type = attribute_type
        column_line = f'{i(1)}{attribute_key} {column_type}'
        logger.debug(f'column_line={column_line}')
        if attribute_values.get('required') == True:  # noqa: E712
            column_line += ' not null'
        if attribute_values.get('unique') == True:  # noqa: E712
            column_line += ' unique'  # handle unique-within-parent
        logger.debug(f'num_attributes={num_attributes} current_attribute_num={current_attribute_num}')
        if current_attribute_num < num_attributes - 1:
            column_line += ','
        print(column_line, file=output_object)
    logger.debug('Leaving generate_attribute_columns()')
@logger.catch
def generate_entities(er_yaml, output_object):
    '''
    Generate the schema definitions for entity tables and many-to-many mapping tables.

    Tables are emitted in topological (dependency) order so that foreign key
    references always point at already-created tables; matching commented-out
    drop statements are appended in reverse order at the end.
    '''
    logger.debug('Entering generate_entities()')
    # Topologically sort the entities (so we can do foreign key constraints correctly)
    graph, dependency_ordering, mm_synthesized = topological_sort_entities(er_yaml)
    logger.debug(f'graph={graph}')
    logger.debug(f'dependency_ordering={dependency_ordering}')
    logger.debug(f'mm_synthesized={mm_synthesized}')
    entities_pc = build_entity_parents_and_children(er_yaml)
    logger.debug(f'after build_entity_parents_and_children(): entities_pc={json.dumps(entities_pc, indent=4)}')
    entities = er_yaml['entities']
    logger.debug(f'entities={yaml.dump(entities)}')
    # Index the entities by name for direct lookup during generation
    entity_indices = { }
    for entity_index, entity_obj in enumerate(entities):
        logger.debug(f'entity_index={entity_index} for entity:\n{yaml.dump(entity_obj)}')
        entity_indices.update( { entity_obj['entity']['name']: entity_index } )
    logger.debug(f'entity_indices={entity_indices}')
    # Generate table definitions for entities
    for entity_name in dependency_ordering:
        logger.debug(f'Generating table for {entity_name}')
        if entity_name in mm_synthesized:
            # Synthesized many-to-many mapping tables have a fixed shape
            # (pk + foreign keys) and no attributes.
            generate_mm_synthesized(entity_name, graph, output_object)
        else:
            entity, parents, num_parents, attributes, num_attributes = \
                generate_entity_comments(entity_name, entities, entity_indices, entities_pc, output_object)
            # Start the DDL to create the table
            print(f'create table {entity_name} (', file=output_object)
            column_line = f'{i(1)}pk uuid not null default gen_random_uuid() primary key'
            if num_parents > 0 or num_attributes > 0:
                # More column definitions follow the primary key
                column_line += ','
            print(column_line, file=output_object)
            generate_foreign_keys(parents, num_parents, num_attributes, output_object)
            generate_attribute_columns(attributes, num_attributes, output_object)
            print(f');\n', file=output_object)
    # Generate drop table statements in proper order (reverse dependency
    # order, commented out for manual use)
    print('\n\n', file=output_object)
    for table_name in reversed(dependency_ordering):
        print(f'-- drop table if exists {table_name};', file=output_object)
    for enum in er_yaml['enums']:
        enum_table_name = enum['enum']['name']
        print(f'-- drop table if exists {enum_table_name};', file=output_object)
    logger.debug('Leaving generate_entities()')
@logger.catch
def genschema(er_yaml, input, output_object):
    '''
    Generally-callable entry point to read an Entity-Relationship Markup
    Language document and write a database schema SQL file.

    Validates `er_yaml` against the ERML JSON schema (exiting with status 1
    on failure), writes a provenance header, then emits enum and entity DDL.
    '''
    logger.debug('Entering genschema()')
    logger.debug('Before validating YAML via jsonschema.validate()')
    try:
        jsonschema.validate(instance=er_yaml, schema=json_schema_erml)
    except jsonschema.exceptions.ValidationError as ex:
        print(f'\nERROR: Invalid YAML (schema) for Entity-Relationship Markup Language input file.\n'
              f'ERROR DETAILS:\n{ex}\n', file=sys.stderr)
        sys.exit(1)
    logger.debug('After jsonschema.validate()')
    # Provenance header: tool, source file, and UTC generation timestamp.
    source_name = 'stdin' if input == '-' else input
    header_lines = (
        '-- Database schema generated by Zepster',
        f'-- Source: {source_name}',
        f'-- Generated: {datetime.datetime.utcnow().isoformat()}',
        '',
    )
    for header_line in header_lines:
        print(header_line, file=output_object)
    generate_enums(er_yaml, output_object)
    generate_entities(er_yaml, output_object)
    logger.debug('Leaving genschema()')
@click.command()
@click.option(
'--input',
default='-',
help='Input Entity-Relationship Markup Language file (default is standard input, also represented by a dash "-")',
)
@click.option(
'--output',
default='-',
help='Output schema definition file (default is standard output, also represented by a dash "-")',
)
@click.option(
'--overwrite',
is_flag=True,
default=False,
help='If specified, overwrite the output file if it already exists',
)
@click.option(
'--logging',
type=str,
default='WARNING',
help='Set logging to the specified level: NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL',
)
@click.option(
'--dialect',
type=click.Choice(['CRDB', 'RS'], case_sensitive=False),
default='CRDB',
help='Set the database dialect: "CRDB" for CockroachDB [Not implemented: and "RS" for Redshift].',
)
@click.option(
'--generate-keys',
is_flag=True,
default=True,
help='[Not implemented] Indicates whether to generate synthetic keys. Default is True.'
)
@click.option(
'--generated-key-type',
type=click.Choice(['INTEGER', 'UUID'], case_sensitive=False),
help='Set the data type for generated synthetic keys. The default depends on '
'the database dialect: UUID for CockroachDB [Not implemented: and INTEGER for Redshift].',
)
@logger.catch
def main(input, output, overwrite, logging, dialect, generate_keys, generated_key_type):
'''
Read an Entity-Relationship Markup Language file and write a database schema | |
+ toGuess[2:]
        # Tail of the "U" guess handling: reveal each of positions 2-4 of the
        # hidden word that holds a "u".  Position 1 is handled just above this
        # span; position 0 is never checked anywhere in this file -- presumably
        # the first letter is revealed from the start (TODO confirm).
        if word[2] == "U" or word[2] == "u" :
            toGuess = toGuess[:2] + "u" + toGuess[3:]
        if word[3] == "U" or word[3] == "u" :
            toGuess = toGuess[:3] + "u" + toGuess[4:]
        if word[4] == "U" or word[4] == "u" :
            toGuess = toGuess[:4] + "u" + toGuess[5:]
        # No position 1-4 matched: count the guess as a miss and record it.
        if word[1] != "U" and word[1] != "u" and word[2] != "U" and word[2] != "u" and word[3] != "U" and word[3] != "u" and word[4] != "U" and word[4] != "u" :
            print("\nWrong!\n")
            numberOfErrors = numberOfErrors + 1
            wrongChars = wrongChars + "u" + ", "
if guessChar == "V" or guessChar == "v" :
if word[1] == "V" or word[1] == "v" :
toGuess = toGuess[:1] + "v" + toGuess[2:]
if word[2] == "V" or word[2] == "v" :
toGuess = toGuess[:2] + "v" + toGuess[3:]
if word[3] == "V" or word[3] == "v" :
toGuess = toGuess[:3] + "v" + toGuess[4:]
if word[4] == "V" or word[4] == "v" :
toGuess = toGuess[:4] + "v" + toGuess[5:]
if word[1] != "V" and word[1] != "v" and word[2] != "V" and word[2] != "v" and word[3] != "V" and word[3] != "v" and word[4] != "V" and word[4] != "v" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "v" + ", "
if guessChar == "W" or guessChar == "w" :
if word[1] == "W" or word[1] == "w" :
toGuess = toGuess[:1] + "w" + toGuess[2:]
if word[2] == "W" or word[2] == "w" :
toGuess = toGuess[:2] + "w" + toGuess[3:]
if word[3] == "W" or word[3] == "w" :
toGuess = toGuess[:3] + "w" + toGuess[4:]
if word[4] == "W" or word[4] == "w" :
toGuess = toGuess[:4] + "w" + toGuess[5:]
if word[1] != "W" and word[1] != "w" and word[2] != "W" and word[2] != "w" and word[3] != "W" and word[3] != "w" and word[4] != "W" and word[4] != "w" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "w" + ", "
if guessChar == "X" or guessChar == "x" :
if word[1] == "X" or word[1] == "x" :
toGuess = toGuess[:1] + "x" + toGuess[2:]
if word[2] == "X" or word[2] == "x" :
toGuess = toGuess[:2] + "x" + toGuess[3:]
if word[3] == "X" or word[3] == "x" :
toGuess = toGuess[:3] + "x" + toGuess[4:]
if word[4] == "X" or word[4] == "x" :
toGuess = toGuess[:4] + "x" + toGuess[5:]
if word[1] != "X" and word[1] != "x" and word[2] != "X" and word[2] != "x" and word[3] != "X" and word[3] != "x" and word[4] != "X" and word[4] != "x" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "x" + ", "
if guessChar == "Y" or guessChar == "y" :
if word[1] == "Y" or word[1] == "y" :
toGuess = toGuess[:1] + "y" + toGuess[2:]
if word[2] == "Y" or word[2] == "y" :
toGuess = toGuess[:2] + "y" + toGuess[3:]
if word[3] == "Y" or word[3] == "y" :
toGuess = toGuess[:3] + "y" + toGuess[4:]
if word[4] == "Y" or word[4] == "y" :
toGuess = toGuess[:4] + "y" + toGuess[5:]
if word[1] != "Y" and word[1] != "y" and word[2] != "Y" and word[2] != "y" and word[3] != "Y" and word[3] != "y" and word[4] != "Y" and word[4] != "y" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "y" + ", "
if guessChar == "Z" or guessChar == "z" :
if word[1] == "Z" or word[1] == "z" :
toGuess = toGuess[:1] + "z" + toGuess[2:]
if word[2] == "Z" or word[2] == "z" :
toGuess = toGuess[:2] + "z" + toGuess[3:]
if word[3] == "Z" or word[3] == "z" :
toGuess = toGuess[:3] + "z" + toGuess[4:]
if word[4] == "Z" or word[4] == "z" :
toGuess = toGuess[:4] + "z" + toGuess[5:]
if word[1] != "Z" and word[1] != "z" and word[2] != "Z" and word[2] != "z" and word[3] != "Z" and word[3] != "z" and word[4] != "Z" and word[4] != "z" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "z" + ", "
if numberOfErrors == 0 :
print("\t___________")
print("\t| |")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 1 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 2 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| |")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 3 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 4 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|\\")
print("\t|")
print("\t|")
print("\t|")
print("\n\tWord: " + toGuess)
print("\tMisses: " + wrongChars)
    # The round continues while unrevealed letters ("_") remain and the player
    # has not already lost; re-prompt until a purely alphabetic guess is
    # entered, then clear the terminal (Windows "cls", POSIX "clear").
    if "_" in toGuess and not loser :
        guessChar = ""
        while not guessChar.isalpha() :
            guessChar = input("\n---------------------------------\nEnter your letter: ")
        # Return value of os.system is discarded deliberately.
        _ = os.system('cls' if os.name=='nt' else 'clear')
if guessChar == "A" or guessChar == "a" :
if word[1] == "A" or word[1] == "a" :
toGuess = toGuess[:1] + "a" + toGuess[2:]
if word[2] == "A" or word[2] == "a" :
toGuess = toGuess[:2] + "a" + toGuess[3:]
if word[3] == "A" or word[3] == "a" :
toGuess = toGuess[:3] + "a" + toGuess[4:]
if word[4] == "A" or word[4] == "a" :
toGuess = toGuess[:4] + "a" + toGuess[5:]
if word[1] != "A" and word[1] != "a" and word[2] != "A" and word[2] != "a" and word[3] != "A" and word[3] != "a" and word[4] != "A" and word[4] != "a" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "a" + ", "
if guessChar == "B" or guessChar == "b" :
if word[1] == "B" or word[1] == "b" :
toGuess = toGuess[:1] + "b" + toGuess[2:]
if word[2] == "B" or word[2] == "b" :
toGuess = toGuess[:2] + "b" + toGuess[3:]
if word[3] == "B" or word[3] == "b" :
toGuess = toGuess[:3] + "b" + toGuess[4:]
if word[4] == "B" or word[4] == "b" :
toGuess = toGuess[:4] + "b" + toGuess[5:]
if word[1] != "B" and word[1] != "b" and word[2] != "B" and word[2] != "b" and word[3] != "B" and word[3] != "b" and word[4] != "B" and word[4] != "b" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "b" + ", "
if guessChar == "C" or guessChar == "c" :
if word[1] == "C" or word[1] == "c" :
toGuess = toGuess[:1] + "c" + toGuess[2:]
if word[2] == "C" or word[2] == "c" :
toGuess = toGuess[:2] + "c" + toGuess[3:]
if word[3] == "C" or word[3] == "c" :
toGuess = toGuess[:3] + "c" + toGuess[4:]
if word[4] == "C" or word[4] == "c" :
toGuess = toGuess[:4] + "c" + toGuess[5:]
if word[1] != "C" and word[1] != "c" and word[2] != "C" and word[2] != "c" and word[3] != "C" and word[3] != "c" and word[4] != "C" and word[4] != "c" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "c" + ", "
if guessChar == "D" or guessChar == "d" :
if word[1] == "D" or word[1] == "d" :
toGuess = toGuess[:1] + "d" + toGuess[2:]
if word[2] == "D" or word[2] == "d" :
toGuess = toGuess[:2] + "d" + toGuess[3:]
if word[3] == "D" or word[3] == "d" :
toGuess = toGuess[:3] + "d" + toGuess[4:]
if word[4] == "D" or word[4] == "d" :
toGuess = toGuess[:4] + "d" + toGuess[5:]
if word[1] != "D" and word[1] != "d" and word[2] != "D" and word[2] != "d" and word[3] != "D" and word[3] != "d" and word[4] != "D" and word[4] != "d" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "d" + ", "
if guessChar == "E" or guessChar == "e" :
if word[1] == "E" or word[1] == "e" :
toGuess = toGuess[:1] + "e" + toGuess[2:]
if word[2] == "E" or word[2] == "e" :
toGuess = toGuess[:2] + "e" + toGuess[3:]
if word[3] == "E" or word[3] == "e" :
toGuess = toGuess[:3] + "e" + toGuess[4:]
if word[4] == "E" or word[4] == "e" :
toGuess = toGuess[:4] + "e" + toGuess[5:]
if word[1] != "E" and word[1] != "e" and word[2] != "E" and word[2] != "e" and word[3] != "E" and word[3] != "e" and word[4] != "E" and word[4] != "e" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "e" + ", "
if guessChar == "F" or guessChar == "f" :
if word[1] == "F" or word[1] == "f" :
toGuess = toGuess[:1] + "f" + toGuess[2:]
if word[2] == "F" or word[2] == "f" :
toGuess = toGuess[:2] + "f" + toGuess[3:]
if word[3] == "F" or word[3] == "f" :
toGuess = toGuess[:3] + "f" + toGuess[4:]
if word[4] == "F" or word[4] == "f" :
toGuess = toGuess[:4] + "f" |
# NOTE(review): the three lines below are stray page text (dataset-viewer
# residue), not Python -- commented out so they cannot be parsed as code.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.