| repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
|---|---|---|---|---|---|---|
fairness-indicators
|
fairness-indicators-master/tensorboard_plugin/tensorboard_plugin_fairness_indicators/version.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the version string of Fairness Indicators Tensorboard Plugin."""
# Note that setup.py uses this version.
__version__ = '0.45.0.dev'
| 721
| 37
| 76
|
py
|
fairness-indicators
|
fairness-indicators-master/tensorboard_plugin/tensorboard_plugin_fairness_indicators/plugin_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the Tensorboard Fairness Indicators plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import abc
import os
import shutil
from unittest import mock
# Standard imports
from tensorboard_plugin_fairness_indicators import plugin
from tensorboard_plugin_fairness_indicators import summary_v2
import six
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis.eval_saved_model.example_trainers import linear_classifier
from werkzeug import test as werkzeug_test
from werkzeug import wrappers
from tensorboard.backend import application
from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer
from tensorboard.plugins import base_plugin
tf.enable_eager_execution()
tf = tf2
class PluginTest(tf.test.TestCase):
"""Tests for Fairness Indicators plugin server."""
def setUp(self):
super(PluginTest, self).setUp()
# Log dir to save temp events into.
self._log_dir = self.get_temp_dir()
self._eval_result_output_dir = os.path.join(self.get_temp_dir(),
"eval_result")
if not os.path.isdir(self._eval_result_output_dir):
os.mkdir(self._eval_result_output_dir)
writer = tf.summary.create_file_writer(self._log_dir)
with writer.as_default():
summary_v2.FairnessIndicators(self._eval_result_output_dir, step=1)
writer.close()
# Start a server that will receive requests.
self._multiplexer = event_multiplexer.EventMultiplexer({
".": self._log_dir,
})
self._context = base_plugin.TBContext(
logdir=self._log_dir, multiplexer=self._multiplexer)
self._plugin = plugin.FairnessIndicatorsPlugin(self._context)
self._multiplexer.Reload()
wsgi_app = application.TensorBoardWSGI([self._plugin])
self._server = werkzeug_test.Client(wsgi_app, wrappers.Response)
self._routes = self._plugin.get_plugin_apps()
def tearDown(self):
super(PluginTest, self).tearDown()
shutil.rmtree(self._log_dir, ignore_errors=True)
def _exportEvalSavedModel(self, classifier):
temp_eval_export_dir = os.path.join(self.get_temp_dir(), "eval_export_dir")
_, eval_export_dir = classifier(None, temp_eval_export_dir)
return eval_export_dir
def _writeTFExamplesToTFRecords(self, examples):
data_location = os.path.join(self.get_temp_dir(), "input_data.rio")
with tf.io.TFRecordWriter(data_location) as writer:
for example in examples:
writer.write(example.SerializeToString())
return data_location
def _makeExample(self, age, language, label):
example = tf.train.Example()
example.features.feature["age"].float_list.value[:] = [age]
example.features.feature["language"].bytes_list.value[:] = [
six.ensure_binary(language, "utf8")
]
example.features.feature["label"].float_list.value[:] = [label]
return example
def testRoutes(self):
self.assertIsInstance(self._routes["/get_evaluation_result"],
abc.Callable)
self.assertIsInstance(
self._routes["/get_evaluation_result_from_remote_path"],
abc.Callable)
self.assertIsInstance(self._routes["/index.js"], abc.Callable)
self.assertIsInstance(self._routes["/vulcanized_tfma.js"],
abc.Callable)
@mock.patch.object(
event_multiplexer.EventMultiplexer,
"PluginRunToTagToContent",
return_value={"bar": {
"foo": "".encode("utf-8")
}},
)
def testIsActive(self, get_random_stub):
self.assertTrue(self._plugin.is_active())
@mock.patch.object(
event_multiplexer.EventMultiplexer,
"PluginRunToTagToContent",
return_value={})
def testIsInactive(self, get_random_stub):
self.assertFalse(self._plugin.is_active())
def testIndexJsRoute(self):
"""Tests that the /tags route offers the correct run to tag mapping."""
response = self._server.get("/data/plugin/fairness_indicators/index.js")
self.assertEqual(200, response.status_code)
def testVulcanizedTemplateRoute(self):
"""Tests that the /tags route offers the correct run to tag mapping."""
response = self._server.get(
"/data/plugin/fairness_indicators/vulcanized_tfma.js")
self.assertEqual(200, response.status_code)
def testGetEvalResultsRoute(self):
model_location = self._exportEvalSavedModel(
linear_classifier.simple_linear_classifier)
examples = [
self._makeExample(age=3.0, language="english", label=1.0),
self._makeExample(age=3.0, language="chinese", label=0.0),
self._makeExample(age=4.0, language="english", label=1.0),
self._makeExample(age=5.0, language="chinese", label=1.0),
self._makeExample(age=5.0, language="hindi", label=1.0)
]
data_location = self._writeTFExamplesToTFRecords(examples)
_ = tfma.run_model_analysis(
eval_shared_model=tfma.default_eval_shared_model(
eval_saved_model_path=model_location, example_weight_key="age"),
data_location=data_location,
output_path=self._eval_result_output_dir)
response = self._server.get(
"/data/plugin/fairness_indicators/get_evaluation_result?run=.")
self.assertEqual(200, response.status_code)
def testGetEvalResultsFromURLRoute(self):
model_location = self._exportEvalSavedModel(
linear_classifier.simple_linear_classifier)
examples = [
self._makeExample(age=3.0, language="english", label=1.0),
self._makeExample(age=3.0, language="chinese", label=0.0),
self._makeExample(age=4.0, language="english", label=1.0),
self._makeExample(age=5.0, language="chinese", label=1.0),
self._makeExample(age=5.0, language="hindi", label=1.0)
]
data_location = self._writeTFExamplesToTFRecords(examples)
_ = tfma.run_model_analysis(
eval_shared_model=tfma.default_eval_shared_model(
eval_saved_model_path=model_location, example_weight_key="age"),
data_location=data_location,
output_path=self._eval_result_output_dir)
response = self._server.get(
"/data/plugin/fairness_indicators/" +
"get_evaluation_result_from_remote_path?evaluation_output_path=" +
os.path.join(self._eval_result_output_dir, tfma.METRICS_KEY))
self.assertEqual(200, response.status_code)
def testGetOutputFileFormat(self):
self.assertEqual("", self._plugin._get_output_file_format("abc_path"))
self.assertEqual("tfrecord",
self._plugin._get_output_file_format("abc_path.tfrecord"))
if __name__ == "__main__":
tf.test.main()
| 7,434
| 38.131579
| 94
|
py
|
fairness-indicators
|
fairness-indicators-master/tensorboard_plugin/tensorboard_plugin_fairness_indicators/demo.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fairness Indicators Plugin Demo."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from tensorboard_plugin_fairness_indicators import summary_v2
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
tf.enable_eager_execution()
tf = tf2
FLAGS = flags.FLAGS
flags.DEFINE_string('eval_result_output_dir', '',
'Log dir containing evaluation results.')
flags.DEFINE_string('logdir', '', 'Log dir where demo logs will be written.')
def main(unused_argv):
writer = tf.summary.create_file_writer(FLAGS.logdir)
with writer.as_default():
summary_v2.FairnessIndicators(FLAGS.eval_result_output_dir, step=1)
writer.close()
if __name__ == '__main__':
app.run(main)
| 1,508
| 30.4375
| 80
|
py
|
fairness-indicators
|
fairness-indicators-master/tensorboard_plugin/tensorboard_plugin_fairness_indicators/__init__.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| 689
| 45
| 80
|
py
|
fairness-indicators
|
fairness-indicators-master/tensorboard_plugin/tensorboard_plugin_fairness_indicators/summary_v2.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Summaries for Fairness Indicators plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard imports
from tensorboard_plugin_fairness_indicators import metadata
from tensorboard.compat import tf2 as tf
def FairnessIndicators(eval_result_output_dir, step=None, description=None):
"""Write a Fairness Indicators summary.
Args:
eval_result_output_dir: Directory created by the
tfma.model_eval_lib.ExtractEvaluateAndWriteResults API, which contains a
'metrics' file holding MetricsForSlice results.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
description: Optional long-form description for this summary, as a constant
`str`. Markdown is supported. Defaults to empty.
Returns:
True on success, or false if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
with tf.summary.experimental.summary_scope(metadata.PLUGIN_NAME):
return tf.summary.write(
tag=metadata.PLUGIN_NAME,
tensor=tf.constant(eval_result_output_dir),
step=step,
metadata=metadata.CreateSummaryMetadata(description),
)
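# A minimal usage sketch (hedged: the directories below are illustrative
# placeholders; see demo.py in this package for the full example):
#
#   writer = tf.summary.create_file_writer('/tmp/logdir')
#   with writer.as_default():
#     FairnessIndicators('/tmp/eval_result', step=1)
#   writer.close()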
| 2,135
| 38.555556
| 80
|
py
|
fairness-indicators
|
fairness-indicators-master/fairness_indicators/example_model.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Demo script to train and evaluate a model.
This script contains boilerplate code to train a DNNClassifier
and evaluate it using TensorFlow Model Analysis. Evaluation
results can be visualized using tools like TensorBoard.
Usage:
1. Train model:
example_model.train_model(...)
2. Evaluate:
example_model.evaluate_model(...)
"""
import os
import tempfile
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
import tensorflow_hub as hub
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis.addons.fairness.post_export_metrics import fairness_indicators # pylint: disable=unused-import
def train_model(model_dir,
train_tf_file,
label,
text_feature,
feature_map,
module_spec='https://tfhub.dev/google/nnlm-en-dim128/1'):
"""Train model using DNN Classifier.
Args:
model_dir: Directory path to save trained model.
train_tf_file: File containing training TFRecordDataset.
label: Groundtruth label.
text_feature: Text feature to be evaluated.
feature_map: Dict of feature names to their data type.
module_spec: A module spec defining the module to instantiate or a path
where to load a module spec.
Returns:
Trained DNNClassifier.
"""
def train_input_fn():
"""Train Input function."""
def parse_function(serialized):
parsed_example = tf.io.parse_single_example(
serialized=serialized, features=feature_map)
# Adds a weight column to deal with unbalanced classes.
parsed_example['weight'] = tf.add(parsed_example[label], 0.1)
return (parsed_example, parsed_example[label])
train_dataset = tf.data.TFRecordDataset(
filenames=[train_tf_file]).map(parse_function).batch(512)
return train_dataset
text_embedding_column = hub.text_embedding_column(
key=text_feature, module_spec=module_spec)
classifier = tf_estimator.DNNClassifier(
hidden_units=[500, 100],
weight_column='weight',
feature_columns=[text_embedding_column],
n_classes=2,
optimizer=tf.train.AdagradOptimizer(learning_rate=0.003),
model_dir=model_dir)
classifier.train(input_fn=train_input_fn, steps=1000)
return classifier
def evaluate_model(classifier, validate_tf_file, tfma_eval_result_path,
selected_slice, label, feature_map):
"""Evaluate Model using Tensorflow Model Analysis.
Args:
classifier: Trained classifier model to be evaluated.
validate_tf_file: File containing validation TFRecordDataset.
tfma_eval_result_path: Directory path where eval results will be written.
selected_slice: Feature for slicing the data.
label: Groundtruth label.
feature_map: Dict of feature names to their data type.
"""
def eval_input_receiver_fn():
"""Eval Input Receiver function."""
serialized_tf_example = tf.compat.v1.placeholder(
dtype=tf.string, shape=[None], name='input_example_placeholder')
receiver_tensors = {'examples': serialized_tf_example}
features = tf.io.parse_example(serialized_tf_example, feature_map)
features['weight'] = tf.ones_like(features[label])
return tfma.export.EvalInputReceiver(
features=features,
receiver_tensors=receiver_tensors,
labels=features[label])
tfma_export_dir = tfma.export.export_eval_savedmodel(
estimator=classifier,
export_dir_base=os.path.join(tempfile.gettempdir(), 'tfma_eval_model'),
eval_input_receiver_fn=eval_input_receiver_fn)
# Define slices that you want the evaluation to run on.
slice_spec = [
tfma.slicer.SingleSliceSpec(), # Overall slice
tfma.slicer.SingleSliceSpec(columns=[selected_slice]),
]
# Add the fairness metrics.
# pytype: disable=module-attr
add_metrics_callbacks = [
tfma.post_export_metrics.fairness_indicators(
thresholds=[0.1, 0.3, 0.5, 0.7, 0.9], labels_key=label)
]
# pytype: enable=module-attr
eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=tfma_export_dir,
add_metrics_callbacks=add_metrics_callbacks)
# Run the fairness evaluation.
tfma.run_model_analysis(
eval_shared_model=eval_shared_model,
data_location=validate_tf_file,
output_path=tfma_eval_result_path,
slice_spec=slice_spec)
| 5,062
| 33.678082
| 126
|
py
|
fairness-indicators
|
fairness-indicators-master/fairness_indicators/version.py
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the version string of Fairness Indicators."""
# Note that setup.py uses this version.
__version__ = '0.45.0.dev'
| 722
| 39.166667
| 74
|
py
|
fairness-indicators
|
fairness-indicators-master/fairness_indicators/__init__.py
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init module for Fairness Indicators."""
# Import version string.
from fairness_indicators.version import __version__
| 717
| 38.888889
| 74
|
py
|
fairness-indicators
|
fairness-indicators-master/fairness_indicators/example_model_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for example_model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
import tempfile
from fairness_indicators import example_model
import six
import tensorflow.compat.v1 as tf
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis.slicer import slicer_lib as slicer
tf.compat.v1.enable_eager_execution()
TEXT_FEATURE = 'comment_text'
LABEL = 'toxicity'
SLICE = 'slice'
FEATURE_MAP = {
LABEL: tf.io.FixedLenFeature([], tf.float32),
TEXT_FEATURE: tf.io.FixedLenFeature([], tf.string),
SLICE: tf.io.VarLenFeature(tf.string),
}
class ExampleModelTest(tf.test.TestCase):
def setUp(self):
super(ExampleModelTest, self).setUp()
self._base_dir = tempfile.gettempdir()
self._model_dir = os.path.join(
self._base_dir, 'train',
datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
def _create_example(self, comment_text, label, slice_value):
example = tf.train.Example()
example.features.feature[TEXT_FEATURE].bytes_list.value[:] = [
six.ensure_binary(comment_text, 'utf8')
]
example.features.feature[SLICE].bytes_list.value[:] = [
six.ensure_binary(slice_value, 'utf8')
]
example.features.feature[LABEL].float_list.value[:] = [label]
return example
def _create_data(self):
examples = []
examples.append(self._create_example('test comment', 0.0, 'slice1'))
examples.append(self._create_example('toxic comment', 1.0, 'slice1'))
examples.append(self._create_example('non-toxic comment', 0.0, 'slice1'))
examples.append(self._create_example('test comment', 1.0, 'slice2'))
examples.append(self._create_example('non-toxic comment', 0.0, 'slice2'))
examples.append(self._create_example('test comment', 0.0, 'slice3'))
examples.append(self._create_example('toxic comment', 1.0, 'slice3'))
examples.append(self._create_example('toxic comment', 1.0, 'slice3'))
examples.append(
self._create_example('non toxic comment', 0.0, 'slice3'))
examples.append(self._create_example('abc', 0.0, 'slice1'))
examples.append(self._create_example('abcdef', 0.0, 'slice3'))
examples.append(self._create_example('random', 0.0, 'slice1'))
return examples
def _write_tf_records(self, examples):
data_location = os.path.join(self._base_dir, 'input_data.rio')
with tf.io.TFRecordWriter(data_location) as writer:
for example in examples:
writer.write(example.SerializeToString())
return data_location
def test_example_model(self):
train_tf_file = self._write_tf_records(self._create_data())
classifier = example_model.train_model(self._model_dir, train_tf_file,
LABEL, TEXT_FEATURE, FEATURE_MAP)
validate_tf_file = self._write_tf_records(self._create_data())
tfma_eval_result_path = os.path.join(self._model_dir, 'tfma_eval_result')
example_model.evaluate_model(classifier, validate_tf_file,
tfma_eval_result_path, SLICE, LABEL,
FEATURE_MAP)
expected_slice_keys = [
'Overall', 'slice:slice3', 'slice:slice1', 'slice:slice2'
]
evaluation_results = tfma.load_eval_result(tfma_eval_result_path)
self.assertLen(evaluation_results.slicing_metrics, 4)
# Verify that false_positive_rate metrics are computed for all slice values.
for (slice_key, metric_value) in evaluation_results.slicing_metrics:
slice_key = slicer.stringify_slice_key(slice_key)
self.assertIn(slice_key, expected_slice_keys)
self.assertGreaterEqual(
1.0, metric_value['']['']
['post_export_metrics/false_positive_rate@0.50']['doubleValue'])
self.assertLessEqual(
0.0, metric_value['']['']
['post_export_metrics/false_positive_rate@0.50']['doubleValue'])
if __name__ == '__main__':
tf.test.main()
| 4,656
| 37.808333
| 80
|
py
|
fairness-indicators
|
fairness-indicators-master/fairness_indicators/tutorial_utils/util.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Util methods for the example colabs."""
import os
import os.path
import tempfile
import pandas as pd
import tensorflow as tf
import tensorflow_model_analysis as tfma
from google.protobuf import text_format
TEXT_FEATURE = 'comment_text'
LABEL = 'toxicity'
SEXUAL_ORIENTATION_COLUMNS = [
'heterosexual', 'homosexual_gay_or_lesbian', 'bisexual',
'other_sexual_orientation'
]
GENDER_COLUMNS = ['male', 'female', 'transgender', 'other_gender']
RELIGION_COLUMNS = [
'christian', 'jewish', 'muslim', 'hindu', 'buddhist', 'atheist',
'other_religion'
]
RACE_COLUMNS = ['black', 'white', 'asian', 'latino', 'other_race_or_ethnicity']
DISABILITY_COLUMNS = [
'physical_disability', 'intellectual_or_learning_disability',
'psychiatric_or_mental_illness', 'other_disability'
]
IDENTITY_COLUMNS = {
'gender': GENDER_COLUMNS,
'sexual_orientation': SEXUAL_ORIENTATION_COLUMNS,
'religion': RELIGION_COLUMNS,
'race': RACE_COLUMNS,
'disability': DISABILITY_COLUMNS
}
_THRESHOLD = 0.5
def convert_comments_data(input_filename, output_filename=None):
"""Convert the public civil comments data.
In the original dataset
https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/data
each identity annotation column holds the fraction of raters who thought the
comment referenced that identity. When processing the raw data, a threshold of
0.5 is applied and the identity terms are grouped by category. For example, if
a comment has { male: 0.3, female: 1.0, transgender: 0.0, heterosexual: 0.8,
homosexual_gay_or_lesbian: 1.0 }, the processed data will be {
gender: [female], sexual_orientation: [heterosexual,
homosexual_gay_or_lesbian] }.
Args:
input_filename: The path to the raw civil comments data, with extension
'tfrecord' or 'csv'.
output_filename: The path to write the processed civil comments data.
Returns:
The file path to the converted dataset.
Raises:
ValueError: If the input_filename does not have a supported extension.
"""
extension = os.path.splitext(input_filename)[1][1:]
if not output_filename:
output_filename = os.path.join(tempfile.mkdtemp(), 'output.' + extension)
if extension == 'tfrecord':
return _convert_comments_data_tfrecord(input_filename, output_filename)
elif extension == 'csv':
return _convert_comments_data_csv(input_filename, output_filename)
raise ValueError(
'input_filename must have supported file extension csv or tfrecord, '
'given: {}'.format(input_filename))
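# A minimal usage sketch (hedged: the input path is an illustrative
# placeholder):
#
#   processed_file = convert_comments_data('civil_comments.tfrecord')
#   # processed_file now points at a copy of the data with the label binarized
#   # and the identity annotations thresholded at 0.5 and grouped into the
#   # IDENTITY_COLUMNS categories.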
def _convert_comments_data_tfrecord(input_filename, output_filename=None):
"""Convert the public civil comments data, for tfrecord data."""
with tf.io.TFRecordWriter(output_filename) as writer:
for serialized in tf.data.TFRecordDataset(filenames=[input_filename]):
example = tf.train.Example()
example.ParseFromString(serialized.numpy())
if not example.features.feature[TEXT_FEATURE].bytes_list.value:
continue
new_example = tf.train.Example()
new_example.features.feature[TEXT_FEATURE].bytes_list.value.extend(
example.features.feature[TEXT_FEATURE].bytes_list.value)
new_example.features.feature[LABEL].float_list.value.append(
1 if example.features.feature[LABEL].float_list.value[0] >= _THRESHOLD
else 0)
for identity_category, identity_list in IDENTITY_COLUMNS.items():
grouped_identity = []
for identity in identity_list:
if (example.features.feature[identity].float_list.value and
example.features.feature[identity].float_list.value[0] >=
_THRESHOLD):
grouped_identity.append(identity.encode())
new_example.features.feature[identity_category].bytes_list.value.extend(
grouped_identity)
writer.write(new_example.SerializeToString())
return output_filename
def _convert_comments_data_csv(input_filename, output_filename=None):
"""Convert the public civil comments data, for csv data."""
df = pd.read_csv(input_filename)
# Filter out rows with empty comment text values.
df = df[df[TEXT_FEATURE].ne('')]
df = df[df[TEXT_FEATURE].notnull()]
new_df = pd.DataFrame()
new_df[TEXT_FEATURE] = df[TEXT_FEATURE]
# Reduce the label to value 0 or 1.
new_df[LABEL] = df[LABEL].ge(_THRESHOLD).astype(int)
# Extract the list of all identity terms that exceed the threshold.
def identity_conditions(df, identity_list):
group = []
for identity in identity_list:
if df[identity] >= _THRESHOLD:
group.append(identity)
return group
for identity_category, identity_list in IDENTITY_COLUMNS.items():
new_df[identity_category] = df.apply(
identity_conditions, args=((identity_list),), axis=1)
new_df.to_csv(
output_filename,
header=[TEXT_FEATURE, LABEL, *IDENTITY_COLUMNS.keys()],
index=False)
return output_filename
def get_eval_results(model_location,
eval_result_path,
validate_tfrecord_file,
slice_selection='religion',
thresholds=None,
compute_confidence_intervals=True):
"""Get Fairness Indicators eval results."""
if thresholds is None:
thresholds = [0.4, 0.4125, 0.425, 0.4375, 0.45, 0.4675, 0.475, 0.4875, 0.5]
# Define slices that you want the evaluation to run on.
eval_config = text_format.Parse(
"""
model_specs {
label_key: '%s'
}
metrics_specs {
metrics {class_name: "AUC"}
metrics {class_name: "ExampleCount"}
metrics {class_name: "Accuracy"}
metrics {
class_name: "FairnessIndicators"
config: '{"thresholds": %s}'
}
}
slicing_specs {
feature_keys: '%s'
}
slicing_specs {}
options {
compute_confidence_intervals { value: %s }
disabled_outputs{values: "analysis"}
}
""" % (LABEL, thresholds,
slice_selection, 'true' if compute_confidence_intervals else 'false'),
tfma.EvalConfig())
eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=model_location, tags=[tf.saved_model.SERVING])
# Run the fairness evaluation.
return tfma.run_model_analysis(
eval_shared_model=eval_shared_model,
data_location=validate_tfrecord_file,
file_format='tfrecords',
eval_config=eval_config,
output_path=eval_result_path,
extractors=None)
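# A minimal usage sketch (hedged: the paths below are illustrative
# placeholders):
#
#   eval_result = get_eval_results(
#       model_location='saved_model',
#       eval_result_path='eval_results',
#       validate_tfrecord_file='validate.tfrecord',
#       slice_selection='religion')
#   # The written results can also be reloaded later with
#   # tfma.load_eval_result('eval_results').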
| 7,181
| 33.695652
| 81
|
py
|
fairness-indicators
|
fairness-indicators-master/fairness_indicators/tutorial_utils/__init__.py
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init file for fairness_indicators.tutorial_utils."""
from fairness_indicators.tutorial_utils.util import convert_comments_data
from fairness_indicators.tutorial_utils.util import get_eval_results
| 795
| 45.823529
| 74
|
py
|
fairness-indicators
|
fairness-indicators-master/fairness_indicators/tutorial_utils/util_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fairness_indicators.tutorial_utils.util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import tempfile
from unittest import mock
from fairness_indicators.tutorial_utils import util
import pandas as pd
import tensorflow as tf
import tensorflow_model_analysis as tfma
from google.protobuf import text_format
class UtilTest(tf.test.TestCase):
def _create_example_tfrecord(self):
example = text_format.Parse(
"""
features {
feature { key: "comment_text"
value { bytes_list { value: [ "comment 1" ] }}
}
feature { key: "toxicity" value { float_list { value: [ 0.1 ] }}}
feature { key: "heterosexual" value { float_list { value: [ 0.1 ] }}}
feature { key: "homosexual_gay_or_lesbian"
value { float_list { value: [ 0.1 ] }}
}
feature { key: "bisexual" value { float_list { value: [ 0.5 ] }}}
feature { key: "other_sexual_orientation"
value { float_list { value: [ 0.1 ] }}
}
feature { key: "male" value { float_list { value: [ 0.1 ] }}}
feature { key: "female" value { float_list { value: [ 0.2 ] }}}
feature { key: "transgender" value { float_list { value: [ 0.3 ] }}}
feature { key: "other_gender" value { float_list { value: [ 0.4 ] }}}
feature { key: "christian" value { float_list { value: [ 0.0 ] }}}
feature { key: "jewish" value { float_list { value: [ 0.1 ] }}}
feature { key: "muslim" value { float_list { value: [ 0.2 ] }}}
feature { key: "hindu" value { float_list { value: [ 0.3 ] }}}
feature { key: "buddhist" value { float_list { value: [ 0.4 ] }}}
feature { key: "atheist" value { float_list { value: [ 0.5 ] }}}
feature { key: "other_religion"
value { float_list { value: [ 0.6 ] }}
}
feature { key: "black" value { float_list { value: [ 0.1 ] }}}
feature { key: "white" value { float_list { value: [ 0.2 ] }}}
feature { key: "asian" value { float_list { value: [ 0.3 ] }}}
feature { key: "latino" value { float_list { value: [ 0.4 ] }}}
feature { key: "other_race_or_ethnicity"
value { float_list { value: [ 0.5 ] }}
}
feature { key: "physical_disability"
value { float_list { value: [ 0.6 ] }}
}
feature { key: "intellectual_or_learning_disability"
value { float_list { value: [ 0.7 ] }}
}
feature { key: "psychiatric_or_mental_illness"
value { float_list { value: [ 0.8 ] }}
}
feature { key: "other_disability"
value { float_list { value: [ 1.0 ] }}
}
}
""", tf.train.Example())
empty_comment_example = text_format.Parse(
"""
features {
feature { key: "comment_text"
value { bytes_list {} }
}
feature { key: "toxicity" value { float_list { value: [ 0.1 ] }}}
}
""", tf.train.Example())
return [example, empty_comment_example]
def _write_tf_records(self, examples):
filename = os.path.join(tempfile.mkdtemp(), 'input.tfrecord')
with tf.io.TFRecordWriter(filename) as writer:
for e in examples:
writer.write(e.SerializeToString())
return filename
def test_convert_data_tfrecord(self):
input_file = self._write_tf_records(self._create_example_tfrecord())
output_file = util.convert_comments_data(input_file)
output_example_list = []
for serialized in tf.data.TFRecordDataset(filenames=[output_file]):
output_example = tf.train.Example()
output_example.ParseFromString(serialized.numpy())
output_example_list.append(output_example)
self.assertEqual(len(output_example_list), 1)
self.assertEqual(
output_example_list[0],
text_format.Parse(
"""
features {
feature { key: "comment_text"
value { bytes_list {value: [ "comment 1" ] }}
}
feature { key: "toxicity" value { float_list { value: [ 0.0 ] }}}
feature { key: "sexual_orientation"
value { bytes_list { value: ["bisexual"] }}
}
feature { key: "gender" value { bytes_list { }}}
feature { key: "race"
value { bytes_list { value: [ "other_race_or_ethnicity" ] }}
}
feature { key: "religion"
value { bytes_list {
value: [ "atheist", "other_religion" ] }
}
}
feature { key: "disability" value { bytes_list {
value: [
"physical_disability",
"intellectual_or_learning_disability",
"psychiatric_or_mental_illness",
"other_disability"] }}
}
}
""", tf.train.Example()))
def _create_example_csv(self, use_fake_embedding=False):
header = [
'comment_text',
'toxicity',
'heterosexual',
'homosexual_gay_or_lesbian',
'bisexual',
'other_sexual_orientation',
'male',
'female',
'transgender',
'other_gender',
'christian',
'jewish',
'muslim',
'hindu',
'buddhist',
'atheist',
'other_religion',
'black',
'white',
'asian',
'latino',
'other_race_or_ethnicity',
'physical_disability',
'intellectual_or_learning_disability',
'psychiatric_or_mental_illness',
'other_disability',
]
example = [
'comment 1' if not use_fake_embedding else 0.35,
0.1,
# sexual orientation
0.1,
0.1,
0.5,
0.1,
# gender
0.1,
0.2,
0.3,
0.4,
# religion
0.0,
0.1,
0.2,
0.3,
0.4,
0.5,
0.6,
# race or ethnicity
0.1,
0.2,
0.3,
0.4,
0.5,
# disability
0.6,
0.7,
0.8,
1.0,
]
empty_comment_example = [
'' if not use_fake_embedding else 0.35,
0.1,
0.1,
0.1,
0.5,
0.1,
0.1,
0.2,
0.3,
0.4,
0.0,
0.1,
0.2,
0.3,
0.4,
0.5,
0.6,
0.1,
0.2,
0.3,
0.4,
0.5,
0.6,
0.7,
0.8,
1.0,
]
return [header, example, empty_comment_example]
def _write_csv(self, examples):
filename = os.path.join(tempfile.mkdtemp(), 'input.csv')
with open(filename, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for example in examples:
csvwriter.writerow(example)
return filename
def test_convert_data_csv(self):
input_file = self._write_csv(self._create_example_csv())
output_file = util.convert_comments_data(input_file)
# Remove the quotes around identity terms list that read_csv injects.
df = pd.read_csv(output_file).replace("'", '', regex=True)
expected_df = pd.DataFrame()
expected_df = expected_df.append(
{
'comment_text':
'comment 1',
'toxicity':
0.0,
'gender': [],
'sexual_orientation': ['bisexual'],
'race': ['other_race_or_ethnicity'],
'religion': ['atheist', 'other_religion'],
'disability': [
'physical_disability', 'intellectual_or_learning_disability',
'psychiatric_or_mental_illness', 'other_disability'
]
},
ignore_index=True)
self.assertEqual(
df.reset_index(drop=True, inplace=True),
expected_df.reset_index(drop=True, inplace=True))
# TODO(b/172260507): we should also look into testing the e2e call with tfma.
@mock.patch(
'tensorflow_model_analysis.default_eval_shared_model', autospec=True)
@mock.patch('tensorflow_model_analysis.run_model_analysis', autospec=True)
def test_get_eval_results_called_correctly(self, mock_run_model_analysis,
mock_shared_model):
mock_model = 'model'
mock_shared_model.return_value = mock_model
model_location = 'saved_model'
eval_results_path = 'eval_results'
data_file = 'data'
util.get_eval_results(model_location, eval_results_path, data_file)
mock_shared_model.assert_called_once_with(
eval_saved_model_path=model_location, tags=[tf.saved_model.SERVING])
expected_eval_config = text_format.Parse(
"""
model_specs {
label_key: 'toxicity'
}
metrics_specs {
metrics {class_name: "AUC"}
metrics {class_name: "ExampleCount"}
metrics {class_name: "Accuracy"}
metrics {
class_name: "FairnessIndicators"
config: '{"thresholds": [0.4, 0.4125, 0.425, 0.4375, 0.45, 0.4675, 0.475, 0.4875, 0.5]}'
}
}
slicing_specs {
feature_keys: 'religion'
}
slicing_specs {}
options {
compute_confidence_intervals { value: true }
disabled_outputs{values: "analysis"}
}
""", tfma.EvalConfig())
mock_run_model_analysis.assert_called_once_with(
eval_shared_model=mock_model,
data_location=data_file,
file_format='tfrecords',
eval_config=expected_eval_config,
output_path=eval_results_path,
extractors=None)
if __name__ == '__main__':
tf.test.main()
| 10,697
| 31.516717
| 98
|
py
|
fairness-indicators
|
fairness-indicators-master/fairness_indicators/remediation/weight_utils_test.py
|
"""Tests for fairness_indicators.remediation.weight_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Standard imports
from fairness_indicators.remediation import weight_utils
import tensorflow.compat.v1 as tf
EvalResult = collections.namedtuple('EvalResult', ['slicing_metrics'])
class WeightUtilsTest(tf.test.TestCase):
def create_eval_result(self):
return EvalResult(slicing_metrics=[
((), {
'': {
'': {
'post_export_metrics/negative_rate@0.10': {
'doubleValue': 0.08
},
'accuracy': {
'doubleValue': 0.444
}
}
}
}),
((('gender', 'female'),), {
'': {
'': {
'post_export_metrics/negative_rate@0.10': {
'doubleValue': 0.09
},
'accuracy': {
'doubleValue': 0.333
}
}
}
}),
(((u'gender', u'female'),
(u'sexual_orientation', u'homosexual_gay_or_lesbian')), {
'': {
'': {
'post_export_metrics/negative_rate@0.10': {
'doubleValue': 0.1
},
'accuracy': {
'doubleValue': 0.222
}
}
}
}),
])
def create_bounded_result(self):
return EvalResult(slicing_metrics=[
((), {
'': {
'': {
'post_export_metrics/negative_rate@0.10': {
'boundedValue': {
'lowerBound': 0.07,
'upperBound': 0.09,
'value': 0.08,
'methodology': 'POISSON_BOOTSTRAP'
}
},
'accuracy': {
'boundedValue': {
'lowerBound': 0.07,
'upperBound': 0.09,
'value': 0.444,
'methodology': 'POISSON_BOOTSTRAP'
}
}
}
}
}),
((('gender', 'female'),), {
'': {
'': {
'post_export_metrics/negative_rate@0.10': {
'boundedValue': {
'lowerBound': 0.07,
'upperBound': 0.09,
'value': 0.09,
'methodology': 'POISSON_BOOTSTRAP'
}
},
'accuracy': {
'boundedValue': {
'lowerBound': 0.07,
'upperBound': 0.09,
'value': 0.333,
'methodology': 'POISSON_BOOTSTRAP'
}
}
}
}
}),
(((u'gender', u'female'),
(u'sexual_orientation', u'homosexual_gay_or_lesbian')), {
'': {
'': {
'post_export_metrics/negative_rate@0.10': {
'boundedValue': {
'lowerBound': 0.07,
'upperBound': 0.09,
'value': 0.1,
'methodology': 'POISSON_BOOTSTRAP'
}
},
'accuracy': {
'boundedValue': {
'lowerBound': 0.07,
'upperBound': 0.09,
'value': 0.222,
'methodology': 'POISSON_BOOTSTRAP'
}
}
}
}
}),
])
def test_baseline(self):
test_eval_result = self.create_eval_result()
self.assertEqual(
0.08,
weight_utils.get_baseline_value(
test_eval_result, 'Overall',
'post_export_metrics/negative_rate@0.10'))
self.assertEqual(
0.09,
weight_utils.get_baseline_value(
test_eval_result, (('gender', 'female'),),
'post_export_metrics/negative_rate@0.10'))
# Test 'accuracy'.
self.assertEqual(
0.444,
weight_utils.get_baseline_value(test_eval_result, 'Overall',
'accuracy'))
# Test intersectional metrics.
self.assertEqual(
0.222,
weight_utils.get_baseline_value(
test_eval_result,
((u'gender', u'female'),
(u'sexual_orientation', u'homosexual_gay_or_lesbian')),
'accuracy'))
with self.assertRaises(ValueError):
# Test slice not found.
weight_utils.get_baseline_value(test_eval_result,
(('nonexistent', 'slice'),), 'accuracy')
with self.assertRaises(KeyError):
# Test metric not found.
weight_utils.get_baseline_value(test_eval_result, (('gender', 'female'),),
'nonexistent_metric')
def test_get_metric_value_raise_key_error(self):
input_dict = {'': {'': {'accuracy': 0.1}}}
metric_name = 'nonexistent_metric'
with self.assertRaises(KeyError):
weight_utils._get_metric_value(input_dict, metric_name)
def test_get_metric_value_raise_unsupported_value(self):
input_dict = {
'': {
'': {
'accuracy': {
'boundedValue': {1}
}
}
}
}
metric_name = 'accuracy'
with self.assertRaises(TypeError):
weight_utils._get_metric_value(input_dict, metric_name)
def test_get_metric_value_raise_empty_dict(self):
with self.assertRaises(KeyError):
weight_utils._get_metric_value({}, 'metric_name')
def test_create_difference_dictionary(self):
test_eval_result = self.create_eval_result()
res = weight_utils.create_percentage_difference_dictionary(
test_eval_result, 'Overall', 'post_export_metrics/negative_rate@0.10')
self.assertEqual(3, len(res))
self.assertIn('gender-sexual_orientation', res)
self.assertIn('gender', res)
self.assertAlmostEqual(res['gender']['female'], 0.125)
self.assertAlmostEqual(res[''][''], 0)
def test_create_difference_dictionary_baseline(self):
test_eval_result = self.create_eval_result()
res = weight_utils.create_percentage_difference_dictionary(
test_eval_result, (('gender', 'female'),),
'post_export_metrics/negative_rate@0.10')
self.assertEqual(3, len(res))
self.assertIn('gender-sexual_orientation', res)
self.assertIn('gender', res)
self.assertAlmostEqual(res['gender']['female'], 0)
self.assertAlmostEqual(res[''][''], -0.11111111)
def test_create_difference_dictionary_bounded_metrics(self):
test_eval_result = self.create_bounded_result()
res = weight_utils.create_percentage_difference_dictionary(
test_eval_result, 'Overall', 'post_export_metrics/negative_rate@0.10')
self.assertEqual(3, len(res))
self.assertIn('gender-sexual_orientation', res)
self.assertIn('gender', res)
self.assertAlmostEqual(res['gender']['female'], 0.125)
self.assertAlmostEqual(res[''][''], 0)
if __name__ == '__main__':
tf.test.main()
| 7,739
| 33.70852
| 80
|
py
|
fairness-indicators
|
fairness-indicators-master/fairness_indicators/remediation/__init__.py
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 596
| 41.642857
| 74
|
py
|
fairness-indicators
|
fairness-indicators-master/fairness_indicators/remediation/weight_utils.py
|
"""Utilities to suggest weights based on model analysis results."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Mapping, Text
import tensorflow_model_analysis as tfma
def create_percentage_difference_dictionary(
eval_result: tfma.EvalResult,
baseline_name: Text, metric_name: Text) -> Dict[Text, Any]:
"""Creates dictionary of a % difference between a baseline and other slices.
Args:
eval_result: Loaded eval result from running TensorFlow Model Analysis.
baseline_name: Name of the baseline slice, 'Overall' or a specified tuple.
metric_name: Name of the metric on which to perform comparisons.
Returns:
Dictionary mapping slices to percentage difference from the baseline slice.
"""
baseline_value = get_baseline_value(eval_result, baseline_name, metric_name)
difference = {}
for metrics_tuple in eval_result.slicing_metrics:
slice_key = metrics_tuple[0]
metrics = metrics_tuple[1]
# Concatenate feature name/values for intersectional features.
column = '-'.join([elem[0] for elem in slice_key])
feature_val = '-'.join([elem[1] for elem in slice_key])
if column not in difference:
difference[column] = {}
difference[column][feature_val] = (_get_metric_value(metrics, metric_name)
- baseline_value) / baseline_value
return difference
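# A hedged illustration of the output shape, based on weight_utils_test.py:
# with an 'Overall' baseline whose
# 'post_export_metrics/negative_rate@0.10' value is 0.08 and a
# ('gender', 'female') slice whose value is 0.09, the returned dictionary is
#   {'': {'': 0.0}, 'gender': {'female': 0.125}, ...}
# i.e. each slice value maps to (slice_metric - baseline) / baseline.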
def _get_metric_value(
nested_dict: Mapping[Text, Mapping[Text, Any]], metric_name: Text) -> float:
"""Returns the value of the named metric from a slice's metrics.
Args:
nested_dict: Dictionary of metrics from a slice.
metric_name: Name of the metric whose value should be returned.
Returns:
The value of the requested metric from the slice's metrics.
Raises:
KeyError: If the metric name isn't found in the metrics dictionary or if the
input metrics dictionary is empty.
TypeError: If an unsupported value type is found in the metrics dictionary
passed.
"""
for value in nested_dict.values():
if metric_name in value['']:
typed_value = value[''][metric_name]
if 'doubleValue' in typed_value:
return typed_value['doubleValue']
if 'boundedValue' in typed_value:
return typed_value['boundedValue']['value']
raise TypeError('Unsupported value type: %s' % typed_value)
else:
raise KeyError('Key %s not found in %s' %
(metric_name, list(value[''].keys())))
raise KeyError(
'Unable to return a metric value because the dictionary passed is empty.')
def get_baseline_value(
eval_result: tfma.EvalResult,
baseline_name: Text, metric_name: Text) -> float:
"""Looks through the evaluation result for the value of the baseline slice.
Args:
eval_result: Loaded eval result from running TensorFlow Model Analysis.
baseline_name: Name of the baseline slice, 'Overall' or a specified tuple.
metric_name: Name of the metric on which to perform comparisons.
Returns:
The metric value for the requested baseline slice.
Raises:
ValueError: If the baseline slice is not found in eval_result.
"""
for metrics_tuple in eval_result.slicing_metrics:
slice_tuple = metrics_tuple[0]
if baseline_name == 'Overall' and not slice_tuple:
return _get_metric_value(metrics_tuple[1], metric_name)
if baseline_name == slice_tuple:
return _get_metric_value(metrics_tuple[1], metric_name)
raise ValueError('Could not find baseline %s in eval_result: %s' %
(baseline_name, eval_result))
| 3,613
| 36.645833
| 80
|
py
|
XGBOD
|
XGBOD-master/xgbod_demo.py
|
'''
Demo code for XGBOD.
Author: Yue Zhao
Notes: this demo simulates the use of XGBOD with some changes to speed up
execution. Use the full code for production.
'''
import os
import random
import scipy.io as scio
import numpy as np
from sklearn.preprocessing import StandardScaler, normalize
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from xgboost.sklearn import XGBClassifier
from imblearn.ensemble import BalancedBaggingClassifier
from models.utility import get_precn, print_baseline
from models.generate_TOS import get_TOS_knn
from models.generate_TOS import get_TOS_loop
from models.generate_TOS import get_TOS_lof
from models.generate_TOS import get_TOS_svm
from models.generate_TOS import get_TOS_iforest
from models.generate_TOS import get_TOS_hbos
from models.select_TOS import random_select, accurate_select, balance_select
# load data file
# mat = scio.loadmat(os.path.join('datasets', 'speech.mat'))
mat = scio.loadmat(os.path.join('datasets', 'arrhythmia.mat'))
# mat = scio.loadmat(os.path.join('datasets', 'cardio.mat'))
# mat = scio.loadmat(os.path.join('datasets', 'letter.mat'))
# mat = scio.loadmat(os.path.join('datasets', 'mammography.mat'))
X = mat['X']
y = mat['y']
# using unit-norm vectors for X improves kNN, LoOP, and LOF results
scaler = StandardScaler().fit(X)
# X_norm = scaler.transform(X)
X_norm = normalize(X)
feature_list = []
# Run KNN-based algorithms to generate additional features
# predefined range of k
k_range = [1, 2, 3, 4, 5, 10, 15, 20, 30, 40, 50, 60, 70, 80, 90, 100, 150,
200, 250]
# predefined range of k to be used with LoOP due to high complexity
k_range_short = [1, 3, 5, 10]
# validate the value of k
k_range = [k for k in k_range if k < X.shape[0]]
# predefined range of nu for one-class svm
nu_range = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99]
# predefined range for number of estimators in isolation forests
n_range = [10, 20, 50, 70, 100, 150, 200, 250]
##############################################################################
# Generate TOS using KNN based algorithms
feature_list, roc_knn, prc_n_knn, result_knn = get_TOS_knn(X_norm, y, k_range,
feature_list)
# Generate TOS using LoOP
feature_list, roc_loop, prc_n_loop, result_loop = get_TOS_loop(X, y,
k_range_short,
feature_list)
# Generate TOS using LOF
feature_list, roc_lof, prc_n_lof, result_lof = get_TOS_lof(X_norm, y, k_range,
feature_list)
# Generate TOS using one class svm
feature_list, roc_ocsvm, prc_n_ocsvm, result_ocsvm = get_TOS_svm(X, y,
nu_range,
feature_list)
# Generate TOS using isolation forests
feature_list, roc_if, prc_n_if, result_if = get_TOS_iforest(X, y, n_range,
feature_list)
# Generate TOS using HBOS
feature_list, roc_hbos, prc_n_hbos, result_hbos = get_TOS_hbos(X, y, k_range,
feature_list)
##############################################################################
# combine the feature space by concatenating the various TOS
X_train_new_orig = np.concatenate(
(result_knn, result_loop, result_lof, result_ocsvm, result_if), axis=1)
X_train_all_orig = np.concatenate((X, X_train_new_orig), axis=1)
# combine ROC and Precision@n list
roc_list = roc_knn + roc_loop + roc_lof + roc_ocsvm + roc_if
prc_n_list = prc_n_knn + prc_n_loop + prc_n_lof + prc_n_ocsvm + prc_n_if
# get the results of baselines
print_baseline(X_train_new_orig, y, roc_list, prc_n_list)
##############################################################################
# select TOS using different methods
p = 10 # number of selected TOS
# random selection
# note that the actual random selection happens inside the train-test split
# loop below, resampling p TOS in each iteration.
X_train_new_rand, X_train_all_rand = random_select(X, X_train_new_orig,
roc_list, p)
# accurate selection
X_train_new_accu, X_train_all_accu = accurate_select(X, X_train_new_orig,
roc_list, p)
# balance selection
X_train_new_bal, X_train_all_bal = balance_select(X, X_train_new_orig,
roc_list, p)
###############################################################################
# build various classifiers
# note that the data split should happen first, so that test data is never
# exposed while generating TOS. However, with a relatively large number of
# repetitions, the demo code generates a similar result.
# the full code uses containers to save the intermediate TOS models; that
# code will be shared after cleanup.
ite = 30 # number of iterations
test_size = 0.4 # training = 60%, testing = 40%
result_dict = {}
clf_list = [XGBClassifier(), LogisticRegression(penalty="l1"),
LogisticRegression(penalty="l2")]
clf_name_list = ['xgb', 'lr1', 'lr2']
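# Result keys follow the pattern '<classifier><metric><setting>', where the
# setting suffix is 'o' (original features only), 'n' (TOS only), or
# 's' (original features + TOS), matching the summary printed at the end.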
# initialize the result dictionary
for clf_name in clf_name_list:
result_dict[clf_name + 'ROC' + 'o'] = []
result_dict[clf_name + 'ROC' + 's'] = []
result_dict[clf_name + 'ROC' + 'n'] = []
result_dict[clf_name + 'PRC@n' + 'o'] = []
result_dict[clf_name + 'PRC@n' + 's'] = []
result_dict[clf_name + 'PRC@n' + 'n'] = []
for i in range(ite):
s_feature_rand = random.sample(range(0, len(roc_list)), p)
X_train_new_rand = X_train_new_orig[:, s_feature_rand]
X_train_all_rand = np.concatenate((X, X_train_new_rand), axis=1)
original_len = X.shape[1]
# use all TOS
X_train, X_test, y_train, y_test = train_test_split(X_train_all_orig, y,
test_size=test_size)
# # use Random Selection
# X_train, X_test, y_train, y_test = train_test_split(X_train_all_rand, y,
# test_size=test_size)
# # use Accurate Selection
# X_train, X_test, y_train, y_test = train_test_split(X_train_all_accu, y,
# test_size=test_size)
# # use Balance Selection
# X_train, X_test, y_train, y_test = train_test_split(X_train_all_bal, y,
# test_size=test_size)
# split the combined matrix into the original feature block and the TOS block
X_train_o = X_train[:, 0:original_len]
X_test_o = X_test[:, 0:original_len]
X_train_n = X_train[:, original_len:]
X_test_n = X_test[:, original_len:]
for clf, clf_name in zip(clf_list, clf_name_list):
print('processing', clf_name, 'round', i + 1)
if clf_name != 'xgb':
clf = BalancedBaggingClassifier(base_estimator=clf,
ratio='auto',
replacement=False)
# fully supervised
clf.fit(X_train_o, y_train.ravel())
y_pred = clf.predict_proba(X_test_o)
roc_score = roc_auc_score(y_test, y_pred[:, 1])
prec_n = get_precn(y_test, y_pred[:, 1])
result_dict[clf_name + 'ROC' + 'o'].append(roc_score)
result_dict[clf_name + 'PRC@n' + 'o'].append(prec_n)
# unsupervised
clf.fit(X_train_n, y_train.ravel())
y_pred = clf.predict_proba(X_test_n)
roc_score = roc_auc_score(y_test, y_pred[:, 1])
prec_n = get_precn(y_test, y_pred[:, 1])
result_dict[clf_name + 'ROC' + 'n'].append(roc_score)
result_dict[clf_name + 'PRC@n' + 'n'].append(prec_n)
# semi-supervised
clf.fit(X_train, y_train.ravel())
y_pred = clf.predict_proba(X_test)
roc_score = roc_auc_score(y_test, y_pred[:, 1])
prec_n = get_precn(y_test, y_pred[:, 1])
result_dict[clf_name + 'ROC' + 's'].append(roc_score)
result_dict[clf_name + 'PRC@n' + 's'].append(prec_n)
for eva in ['ROC', 'PRC@n']:
print()
for clf_name in clf_name_list:
print(np.round(np.mean(result_dict[clf_name + eva + 'o']), decimals=4),
eva, clf_name, 'original features')
print(np.round(np.mean(result_dict[clf_name + eva + 'n']), decimals=4),
eva, clf_name, 'TOS only')
print(np.round(np.mean(result_dict[clf_name + eva + 's']), decimals=4),
eva, clf_name, 'original feature + TOS')
| 8,726
| 39.21659
| 79
|
py
|
XGBOD
|
XGBOD-master/plots.py
|
import os
import matplotlib.pyplot as plt
import numpy as np
# initialize the results of the experiments
# arrhythmia
prc_gr_arr = [0.5606, 0.5976, 0.5986, 0.6053, 0.6109, 0.6219, 0.6076, 0.6115]
prc_ac_arr = [0.5606, 0.5976, 0.5719, 0.5961, 0.6041, 0.5792, 0.6019, 0.6115]
prc_rd_arr = [0.5606, 0.5993, 0.5788, 0.6356, 0.5908, 0.6094, 0.6202, 0.6115]
# letter
prc_gr_lt = [0.6003, 0.7300, 0.7234, 0.7199, 0.7169, 0.7285, 0.7323, 0.7376]
prc_ac_lt = [0.6003, 0.7300, 0.7210, 0.7272, 0.7477, 0.7302, 0.7308, 0.7376]
prc_rd_lt = [0.6003, 0.6653, 0.7140, 0.7248, 0.7397, 0.7302, 0.7232, 0.7376]
# cardio
prc_gr_car = [0.9304, 0.9290, 0.9374, 0.9385, 0.9296, 0.9351, 0.9327, 0.9332]
prc_ac_car = [0.9304, 0.9290, 0.9314, 0.9315, 0.9337, 0.9354, 0.9331, 0.9332]
prc_rd_car = [0.9304, 0.9297, 0.9342, 0.9364, 0.9315, 0.9267, 0.9248, 0.9332]
# speech
prc_gr_sp = [0.1455, 0.2658, 0.2733, 0.3203, 0.3290, 0.3107, 0.3355, 0.2492]
prc_ac_sp = [0.1455, 0.2658, 0.2367, 0.2630, 0.3103, 0.2983, 0.3255, 0.2492]
prc_rd_sp = [0.1455, 0.1356, 0.1814, 0.2101, 0.3194, 0.3053, 0.2940, 0.2492]
# mammography
prc_gr_ma = [0.6974, 0.6853, 0.6719, 0.6720, 0.6620, 0.6717, 0.6687, 0.6673]
prc_ac_ma = [0.6974, 0.6853, 0.6915, 0.6841, 0.6965, 0.6631, 0.6655, 0.6673]
prc_rd_ma = [0.6974, 0.6812, 0.6823, 0.6649, 0.6693, 0.6619, 0.6654, 0.6673]
# x-axis
x = [0, 1, 5, 10, 30, 50, 70, 100]
# main plots
fig = plt.figure(figsize=(8, 10))
lw = 2
ax = fig.add_subplot(511)
plt.plot(x, prc_rd_arr, color='black', linestyle='-.', marker='s',
lw=lw, label='Random Selection')
plt.plot(x, prc_gr_arr, color='blue', linestyle='--', marker='^',
lw=lw, label='Balance Selection')
plt.plot(x, prc_ac_arr, color='red', linestyle='-', marker='o',
lw=lw, label='Accurate Selection')
plt.xlim([-0.5, 100.5])
plt.xticks(np.arange(0, 100, 5))
plt.ylabel('Precision@n', fontsize=12)
plt.title('Arrhythmia', fontsize=12)
plt.legend(loc="lower right")
#########################################################################
ax = fig.add_subplot(512)
plt.plot(x, prc_rd_lt, color='black', linestyle='-.', marker='s',
lw=lw, label='Random Selection')
plt.plot(x, prc_gr_lt, color='blue', linestyle='--', marker='^',
lw=lw, label='Balance Selection')
plt.plot(x, prc_ac_lt, color='red', linestyle='--', marker='o',
lw=lw, label='Accurate Selection')
plt.xlim([-0.5, 100.5])
plt.xticks(np.arange(0, 100, 5))
plt.ylabel('Precision@n', fontsize=12)
plt.title('Letter', fontsize=12)
plt.legend(loc="lower right")
#########################################################################
ax = fig.add_subplot(513)
plt.plot(x, prc_rd_car, color='black', linestyle='-.', marker='s',
lw=lw, label='Random Selection')
plt.plot(x, prc_gr_car, color='blue', linestyle='--', marker='^',
lw=lw, label='Balance Selection')
plt.plot(x, prc_ac_car, color='red', linestyle='--', marker='o',
lw=lw, label='Accurate Selection')
plt.xlim([-0.5, 100.5])
plt.xticks(np.arange(0, 100, 5))
plt.ylabel('Precision@n', fontsize=12)
plt.title('Cardio', fontsize=12)
plt.legend(loc="lower right")
#########################################################################
ax = fig.add_subplot(514)
plt.plot(x, prc_rd_sp, color='black', linestyle='-.', marker='s',
lw=lw, label='Random Selection')
plt.plot(x, prc_gr_sp, color='blue', linestyle='--', marker='^',
lw=lw, label='Balance Selection')
plt.plot(x, prc_ac_sp, color='red', linestyle='--', marker='o',
lw=lw, label='Accurate Selection')
plt.xlim([-0.5, 100.5])
plt.xticks(np.arange(0, 100, 5))
plt.ylabel('Precision@n', fontsize=12)
plt.title('Speech', fontsize=12)
plt.legend(loc="lower right")
#########################################################################
ax = fig.add_subplot(515)
plt.plot(x, prc_rd_ma, color='black', linestyle='-.', marker='s',
lw=lw, label='Random Selection')
plt.plot(x, prc_gr_ma, color='blue', linestyle='--', marker='^',
lw=lw, label='Balance Selection')
plt.plot(x, prc_ac_ma, color='red', linestyle='--', marker='o',
lw=lw, label='Accurate Selection')
plt.xlim([-0.5, 100.5])
plt.xticks(np.arange(0, 100, 5))
plt.xlabel('Number of Selected ODS')
plt.ylabel('Precision@n', fontsize=12)
plt.title('Mammography', fontsize=12)
plt.legend(loc="upper right")
#########################################################################
plt.tight_layout()
plt.savefig(os.path.join('figs', 'results.png'), dpi=300)
plt.show()
| 4,491 | 37.393162 | 77 | py |
XGBOD | XGBOD-master/xgbod_full.py |
import os
import pandas as pd
import numpy as np
import scipy.io as scio
from sklearn.preprocessing import StandardScaler, Normalizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import LocalOutlierFactor
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import IsolationForest
from sklearn.svm import OneClassSVM
from xgboost.sklearn import XGBClassifier
from imblearn.ensemble import BalancedBaggingClassifier
from PyNomaly import loop
from models.knn import Knn
from models.utility import get_precn, print_baseline
# use one dataset at a time; more datasets could be added to /datasets folder
# the original experiment code requires additional setup, without which an
# exact reproduction is infeasible; cleaned-up code will be added later
# load data file
mat = scio.loadmat(os.path.join('datasets', 'letter.mat'))
ite = 30 # number of iterations
test_size = 0.4 # training = 60%, testing = 40%
X_orig = mat['X']
y_orig = mat['y']
# outlier percentage
out_perc = np.count_nonzero(y_orig) / len(y_orig)
# define classifiers to use
clf_list = [XGBClassifier(), LogisticRegression(penalty="l1"),
LogisticRegression(penalty="l2")]
clf_name_list = ['xgb', 'lr1', 'lr2']
# initialize the container to store the results
result_dict = {}
# initialize the result dictionary
for clf_name in clf_name_list:
result_dict[clf_name + 'roc' + 'o'] = []
result_dict[clf_name + 'roc' + 's'] = []
result_dict[clf_name + 'roc' + 'n'] = []
result_dict[clf_name + 'precn' + 'o'] = []
result_dict[clf_name + 'precn' + 's'] = []
result_dict[clf_name + 'precn' + 'n'] = []
for t in range(ite):
print('\nProcessing trial', t + 1, 'out of', ite)
# split X and y for training and validation
X, X_test, y, y_test = train_test_split(X_orig, y_orig,
test_size=test_size)
# reserve the normalized data
scaler = Normalizer().fit(X)
X_norm = scaler.transform(X)
X_test_norm = scaler.transform(X_test)
feature_list = []
# predefined range of K
k_list_pre = [1, 2, 3, 4, 5, 10, 15, 20, 30, 40, 50, 60, 70, 80, 90,
100, 150, 200, 250]
# trim the list in case of small sample size
k_list = [k for k in k_list_pre if k < X.shape[0]]
###########################################################################
train_knn = np.zeros([X.shape[0], len(k_list)])
test_knn = np.zeros([X_test.shape[0], len(k_list)])
roc_knn = []
prec_n_knn = []
for i in range(len(k_list)):
k = k_list[i]
clf = Knn(n_neighbors=k, contamination=out_perc, method='largest')
clf.fit(X_norm)
train_score = clf.decision_scores
pred_score = clf.decision_function(X_test_norm)
roc = np.round(roc_auc_score(y_test, pred_score), decimals=4)
prec_n = np.round(get_precn(y_test, pred_score), decimals=4)
print('knn roc pren @ {k} is {roc} {pren}'.format(k=k, roc=roc,
pren=prec_n))
feature_list.append('knn_' + str(k))
roc_knn.append(roc)
prec_n_knn.append(prec_n)
train_knn[:, i] = train_score
test_knn[:, i] = pred_score.ravel()
###########################################################################
train_knn_mean = np.zeros([X.shape[0], len(k_list)])
test_knn_mean = np.zeros([X_test.shape[0], len(k_list)])
roc_knn_mean = []
prec_n_knn_mean = []
for i in range(len(k_list)):
k = k_list[i]
clf = Knn(n_neighbors=k, contamination=out_perc, method='mean')
clf.fit(X_norm)
train_score = clf.decision_scores
pred_score = clf.decision_function(X_test_norm)
roc = np.round(roc_auc_score(y_test, pred_score), decimals=4)
prec_n = np.round(get_precn(y_test, pred_score), decimals=4)
print('knn_mean roc pren @ {k} is {roc} {pren}'.format(k=k, roc=roc,
pren=prec_n))
feature_list.append('knn_mean_' + str(k))
roc_knn_mean.append(roc)
prec_n_knn_mean.append(prec_n)
train_knn_mean[:, i] = train_score
test_knn_mean[:, i] = pred_score.ravel()
###########################################################################
train_knn_median = np.zeros([X.shape[0], len(k_list)])
test_knn_median = np.zeros([X_test.shape[0], len(k_list)])
roc_knn_median = []
prec_n_knn_median = []
for i in range(len(k_list)):
k = k_list[i]
clf = Knn(n_neighbors=k, contamination=out_perc, method='median')
clf.fit(X_norm)
train_score = clf.decision_scores
pred_score = clf.decision_function(X_test_norm)
roc = np.round(roc_auc_score(y_test, pred_score), decimals=4)
prec_n = np.round(get_precn(y_test, pred_score), decimals=4)
print('knn_median roc pren @ {k} is {roc} {pren}'.format(k=k, roc=roc,
pren=prec_n))
feature_list.append('knn_median_' + str(k))
roc_knn_median.append(roc)
prec_n_knn_median.append(prec_n)
train_knn_median[:, i] = train_score
test_knn_median[:, i] = pred_score.ravel()
###########################################################################
train_lof = np.zeros([X.shape[0], len(k_list)])
test_lof = np.zeros([X_test.shape[0], len(k_list)])
roc_lof = []
prec_n_lof = []
for i in range(len(k_list)):
k = k_list[i]
clf = LocalOutlierFactor(n_neighbors=k)
clf.fit(X_norm)
# save the train sets
train_score = clf.negative_outlier_factor_ * -1
# flip the score
pred_score = clf._decision_function(X_test_norm) * -1
roc = np.round(roc_auc_score(y_test, pred_score), decimals=4)
prec_n = np.round(get_precn(y_test, pred_score), decimals=4)
print('lof roc pren @ {k} is {roc} {pren}'.format(k=k, roc=roc,
pren=prec_n))
feature_list.append('lof_' + str(k))
roc_lof.append(roc)
prec_n_lof.append(prec_n)
train_lof[:, i] = train_score
test_lof[:, i] = pred_score
###########################################################################
# Note that LoOP is not really practical for prediction because of its high
# computational complexity; it is included here only to demonstrate the
# effectiveness of XGBOD
df_X = pd.DataFrame(np.concatenate([X_norm, X_test_norm], axis=0))
# predefined range of K
k_list = [1, 5, 10, 20]
train_loop = np.zeros([X.shape[0], len(k_list)])
test_loop = np.zeros([X_test.shape[0], len(k_list)])
roc_loop = []
prec_n_loop = []
for i in range(len(k_list)):
k = k_list[i]
clf = loop.LocalOutlierProbability(df_X, n_neighbors=k).fit()
score = clf.local_outlier_probabilities.astype(float)
# save the train sets
train_score = score[0:X.shape[0]]
# flip the score
pred_score = score[X.shape[0]:]
roc = np.round(roc_auc_score(y_test, pred_score), decimals=4)
prec_n = np.round(get_precn(y_test, pred_score), decimals=4)
print('loop roc pren @ {k} is {roc} {pren}'.format(k=k, roc=roc,
pren=prec_n))
feature_list.append('loop_' + str(k))
roc_loop.append(roc)
prec_n_loop.append(prec_n)
train_loop[:, i] = train_score
test_loop[:, i] = pred_score
##########################################################################
nu_list = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99]
train_svm = np.zeros([X.shape[0], len(nu_list)])
test_svm = np.zeros([X_test.shape[0], len(nu_list)])
roc_svm = []
prec_n_svm = []
for i in range(len(nu_list)):
nu = nu_list[i]
clf = OneClassSVM(nu=nu)
clf.fit(X)
train_score = clf.decision_function(X) * -1
pred_score = clf.decision_function(X_test) * -1
roc = np.round(roc_auc_score(y_test, pred_score), decimals=4)
prec_n = np.round(get_precn(y_test, pred_score), decimals=4)
print('svm roc / pren @ {nu} is {roc} {pren}'.format(nu=nu, roc=roc,
pren=prec_n))
feature_list.append('svm_' + str(nu))
roc_svm.append(roc)
prec_n_svm.append(prec_n)
train_svm[:, i] = train_score.ravel()
test_svm[:, i] = pred_score.ravel()
###########################################################################
n_list = [10, 20, 50, 70, 100, 150, 200, 250]
train_if = np.zeros([X.shape[0], len(n_list)])
test_if = np.zeros([X_test.shape[0], len(n_list)])
roc_if = []
prec_n_if = []
for i in range(len(n_list)):
n = n_list[i]
clf = IsolationForest(n_estimators=n)
clf.fit(X)
train_score = clf.decision_function(X) * -1
pred_score = clf.decision_function(X_test) * -1
roc = np.round(roc_auc_score(y_test, pred_score), decimals=4)
prec_n = np.round(get_precn(y_test, pred_score), decimals=4)
print('if roc / pren @ {n} is {roc} {pren}'.format(n=n, roc=roc,
pren=prec_n))
feature_list.append('if_' + str(n))
roc_if.append(roc)
prec_n_if.append(prec_n)
train_if[:, i] = train_score
test_if[:, i] = pred_score
#########################################################################
X_train_new = np.concatenate((train_knn, train_knn_mean, train_knn_median,
train_lof, train_loop, train_svm, train_if),
axis=1)
X_test_new = np.concatenate((test_knn, test_knn_mean, test_knn_median,
test_lof, test_loop, test_svm, test_if),
axis=1)
X_train_all = np.concatenate((X, X_train_new), axis=1)
X_test_all = np.concatenate((X_test, X_test_new), axis=1)
roc_list = roc_knn + roc_knn_mean + roc_knn_median + roc_lof + roc_loop + roc_svm + roc_if
prec_n_list = prec_n_knn + prec_n_knn_mean + prec_n_knn_median + prec_n_lof + prec_n_loop + prec_n_svm + prec_n_if
# get the results of baselines
print_baseline(X_test_new, y_test, roc_list, prec_n_list)
###########################################################################
# select TOS using different methods
p = 10 # number of selected TOS
# TODO: supplement the cleaned up version for selection methods
##############################################################################
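# The TODO above concerns the TOS selection step. As a minimal sketch (not the
# original experiment code), the helpers in models/select_TOS.py could be wired
# in roughly as follows; the calls and the left-hand variable names are
# illustrative and kept commented out:
# from models.select_TOS import random_select, accurate_select, balance_select
# # randomly keep p TOS columns and append them to the original features
# X_train_new_rand, X_train_all_rand = random_select(X, X_train_new, roc_list, p)
# # keep the p TOS with the highest training ROC
# X_train_new_accu, X_train_all_accu = accurate_select(X, X_train_new, roc_list, p)
# # greedy trade-off between ROC and correlation with already-selected TOS;
# # balance_select modifies roc_list in place, so pass a copy
# X_train_new_bal, X_train_all_bal = balance_select(X, X_train_new, list(roc_list), p)
# # the matching test-set columns would be taken from X_test_new in the same way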
for clf, clf_name in zip(clf_list, clf_name_list):
print('processing', clf_name)
if clf_name != 'xgb':
clf = BalancedBaggingClassifier(base_estimator=clf,
ratio='auto',
replacement=False)
# fully supervised
clf.fit(X, y.ravel())
y_pred = clf.predict_proba(X_test)
roc_score = roc_auc_score(y_test, y_pred[:, 1])
prec_n = get_precn(y_test, y_pred[:, 1])
result_dict[clf_name + 'roc' + 'o'].append(roc_score)
result_dict[clf_name + 'precn' + 'o'].append(prec_n)
# unsupervised
clf.fit(X_train_new, y.ravel())
y_pred = clf.predict_proba(X_test_new)
roc_score = roc_auc_score(y_test, y_pred[:, 1])
prec_n = get_precn(y_test, y_pred[:, 1])
result_dict[clf_name + 'roc' + 'n'].append(roc_score)
result_dict[clf_name + 'precn' + 'n'].append(prec_n)
# semi-supervised
clf.fit(X_train_all, y.ravel())
y_pred = clf.predict_proba(X_test_all)
roc_score = roc_auc_score(y_test, y_pred[:, 1])
prec_n = get_precn(y_test, y_pred[:, 1])
result_dict[clf_name + 'roc' + 's'].append(roc_score)
result_dict[clf_name + 'precn' + 's'].append(prec_n)
for eva in ['roc', 'precn']:
print()
for clf_name in clf_name_list:
print(np.round(np.mean(result_dict[clf_name + eva + 'o']), decimals=4),
eva, clf_name, 'old')
print(np.round(np.mean(result_dict[clf_name + eva + 'n']), decimals=4),
eva, clf_name, 'new')
print(np.round(np.mean(result_dict[clf_name + eva + 's']), decimals=4),
eva, clf_name, 'all')
| 12,621 | 35.479769 | 118 | py |
XGBOD | XGBOD-master/models/knn.py |
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import KDTree
from sklearn.exceptions import NotFittedError
from scipy.stats import scoreatpercentile
class Knn(object):
'''
Knn class for outlier detection
supports the original kNN, average kNN, and median kNN variants
'''
def __init__(self, n_neighbors=1, contamination=0.05, method='largest'):
self.n_neighbors = n_neighbors
self.contamination = contamination
self.method = method
def fit(self, X_train):
self.X_train = X_train
self._isfitted = True
self.tree = KDTree(X_train)
neigh = NearestNeighbors()
neigh.fit(self.X_train)
result = neigh.kneighbors(n_neighbors=self.n_neighbors,
return_distance=True)
dist_arr = result[0]
if self.method == 'largest':
dist = dist_arr[:, -1]
elif self.method == 'mean':
dist = np.mean(dist_arr, axis=1)
elif self.method == 'median':
dist = np.median(dist_arr, axis=1)
threshold = scoreatpercentile(dist, 100 * (1 - self.contamination))
self.threshold = threshold
self.decision_scores = dist.ravel()
self.y_pred = (self.decision_scores > self.threshold).astype('int')
def decision_function(self, X_test):
if not self._isfitted:
raise NotFittedError('Knn is not fitted yet')
# initialize the output score
pred_score = np.zeros([X_test.shape[0], 1])
for i in range(X_test.shape[0]):
x_i = X_test[i, :]
x_i = np.asarray(x_i).reshape(1, x_i.shape[0])
# get the distance of the current point
dist_arr, ind_arr = self.tree.query(x_i, k=self.n_neighbors)
if self.method == 'largest':
dist = dist_arr[:, -1]
elif self.method == 'mean':
dist = np.mean(dist_arr, axis=1)
elif self.method == 'median':
dist = np.median(dist_arr, axis=1)
pred_score_i = dist[-1]
# record the current item
pred_score[i, :] = pred_score_i
return pred_score
def predict(self, X_test):
pred_score = self.decision_function(X_test)
return (pred_score > self.threshold).astype('int')
##############################################################################
# samples = [[-1, 0], [0., 0.], [1., 1], [2., 5.], [3, 1]]
#
# clf = Knn()
# clf.fit(samples)
#
# scores = clf.decision_function(np.asarray([[2, 3], [6, 8]])).ravel()
# assert (scores[0] == [2])
# assert (scores[1] == [5])
# #
# labels = clf.predict(np.asarray([[2, 3], [6, 8]])).ravel()
# assert (labels[0] == [0])
# assert (labels[1] == [1])
| 2,777 | 29.195652 | 78 | py |
XGBOD | XGBOD-master/models/hbos.py |
import numpy as np
import math
from sklearn.preprocessing import MinMaxScaler
from scipy.stats import scoreatpercentile
class Hbos(object):
def __init__(self, bins=10, alpha=0.3, beta=0.5, contamination=0.05):
self.bins = bins
self.alpha = alpha
self.beta = beta
self.contamination = contamination
def fit(self, X):
self.n, self.d = X.shape[0], X.shape[1]
out_scores = np.zeros([self.n, self.d])
hist = np.zeros([self.bins, self.d])
bin_edges = np.zeros([self.bins + 1, self.d])
# this is actually the fitting
for i in range(self.d):
hist[:, i], bin_edges[:, i] = np.histogram(X[:, i], bins=self.bins,
density=True)
# check the integrity
assert (
math.isclose(np.sum(hist[:, i] * np.diff(bin_edges[:, i])), 1))
# calculate the threshold
for i in range(self.d):
# find histogram assignments of data points
bin_ind = np.digitize(X[:, i], bin_edges[:, i], right=False)
# scaling is important here; MinMaxScaler is just one option, not a requirement
density_norm = MinMaxScaler().fit_transform(
hist[:, i].reshape(-1, 1))
out_score = np.log(1 / (density_norm + self.alpha))
for j in range(self.n):
# out sample left
if bin_ind[j] == 0:
dist = np.abs(X[j, i] - bin_edges[0, i])
bin_width = bin_edges[1, i] - bin_edges[0, i]
# assign it to bin 0
if dist < bin_width * self.beta:
out_scores[j, i] = out_score[bin_ind[j]]
else:
out_scores[j, i] = np.max(out_score)
# out sample right
elif bin_ind[j] == bin_edges.shape[0]:
dist = np.abs(X[j, i] - bin_edges[-1, i])
bin_width = bin_edges[-1, i] - bin_edges[-2, i]
# assign it to bin k
if dist < bin_width * self.beta:
out_scores[j, i] = out_score[bin_ind[j] - 2]
else:
out_scores[j, i] = np.max(out_score)
else:
out_scores[j, i] = out_score[bin_ind[j] - 1]
out_scores_sum = np.sum(out_scores, axis=1)
self.threshold = scoreatpercentile(out_scores_sum,
100 * (1 - self.contamination))
self.hist = hist
self.bin_edges = bin_edges
self.decision_scores = out_scores_sum
self.y_pred = (self.decision_scores > self.threshold).astype('int')
def decision_function(self, X_test):
n_test = X_test.shape[0]
out_scores = np.zeros([n_test, self.d])
for i in range(self.d):
# find histogram assignments of data points
bin_ind = np.digitize(X_test[:, i], self.bin_edges[:, i],
right=False)
# scaling is important here; MinMaxScaler is just one option, not a requirement
density_norm = MinMaxScaler().fit_transform(
self.hist[:, i].reshape(-1, 1))
out_score = np.log(1 / (density_norm + self.alpha))
for j in range(n_test):
# out sample left
if bin_ind[j] == 0:
dist = np.abs(X_test[j, i] - self.bin_edges[0, i])
bin_width = self.bin_edges[1, i] - self.bin_edges[0, i]
# assign it to bin 0
if dist < bin_width * self.beta:
out_scores[j, i] = out_score[bin_ind[j]]
else:
out_scores[j, i] = np.max(out_score)
# out sample right
elif bin_ind[j] == self.bin_edges.shape[0]:
dist = np.abs(X_test[j, i] - self.bin_edges[-1, i])
bin_width = self.bin_edges[-1, i] - self.bin_edges[-2, i]
# assign it to bin k
if dist < bin_width * self.beta:
out_scores[j, i] = out_score[bin_ind[j] - 2]
else:
out_scores[j, i] = np.max(out_score)
else:
out_scores[j, i] = out_score[bin_ind[j] - 1]
out_scores_sum = np.sum(out_scores, axis=1)
return out_scores_sum
def predict(self, X_test):
pred_score = self.decision_function(X_test)
return (pred_score > self.threshold).astype('int')
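##############################################################################
# A hypothetical usage sketch (mirroring the commented example in models/knn.py,
# not part of the original file); the data values are made up:
# X = np.random.rand(200, 3)
# clf = Hbos(bins=10, alpha=0.3, beta=0.5, contamination=0.05)
# clf.fit(X)
# train_scores = clf.decision_scores             # per-sample outlier scores on X
# labels = clf.predict(np.random.rand(10, 3))    # 0 = inlier, 1 = outlier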
| 4,636 | 38.632479 | 79 | py |
XGBOD | XGBOD-master/models/utility.py |
import numpy as np
from scipy.stats import scoreatpercentile
from sklearn.metrics import precision_score
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score
def get_precn(y, y_pred):
'''
Utility function to calculate precision@n
:param y: ground truth labels
:param y_pred: predicted outlier scores
:return: precision@n score
'''
# calculate the percentage of outliers
out_perc = np.count_nonzero(y) / len(y)
threshold = scoreatpercentile(y_pred, 100 * (1 - out_perc))
y_pred = (y_pred > threshold).astype('int')
return precision_score(y, y_pred)
def precision_n(y_pred, y, n):
'''
Utility function to calculate precision@n
:param y_pred: predicted values
:param y: ground truth
:param n: number of outliers
:return: scalar score
'''
y_pred = np.asarray(y_pred)
y = np.asarray(y)
length = y.shape[0]
assert (y_pred.shape == y.shape)
y_sorted = np.partition(y_pred, int(length - n))
threshold = y_sorted[int(length - n)]
y_n = np.greater_equal(y_pred, threshold).astype(int)
# print(threshold, y_n, precision_score(y, y_n))
return precision_score(y, y_n)
def get_top_n(roc_list, n, top=True):
'''
for use by Accurate Selection only
:param roc_list: a list of ROC scores, one per TOS
:param n: number of TOS to select
:param top: if True, return indices of the top n scores; otherwise the rest
:return: indices of the selected TOS
'''
roc_list = np.asarray(roc_list)
length = roc_list.shape[0]
roc_sorted = np.partition(roc_list, length - n)
threshold = roc_sorted[int(length - n)]
if top:
return np.where(np.greater_equal(roc_list, threshold))
else:
return np.where(np.less(roc_list, threshold))
def print_baseline(X_train_new_orig, y, roc_list, prec_list):
max_value_idx = roc_list.index(max(roc_list))
print()
print('Highest TOS ROC:', roc_list[max_value_idx])
print('Highest TOS Precision@n', max(prec_list))
# normalized score
X_train_all_norm = StandardScaler().fit_transform(X_train_new_orig)
X_train_all_norm_mean = np.mean(X_train_all_norm, axis=1)
roc = np.round(roc_auc_score(y, X_train_all_norm_mean), decimals=4)
prec_n = np.round(get_precn(y, X_train_all_norm_mean), decimals=4)
print('Average TOS ROC:', roc)
print('Average TOS Precision@n', prec_n)
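##############################################################################
# A small worked example of precision@n (hypothetical values, not from the repo):
# with y = [0, 0, 0, 0, 1, 1] (2 outliers among 6 samples) and scores
# [0.1, 0.2, 0.3, 0.4, 0.8, 0.9], the threshold is placed at the (1 - 2/6)
# quantile of the scores, so the two top-ranked samples are flagged; both are
# true outliers and get_precn(y, scores) returns 1.0.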
| 2,271 | 26.373494 | 71 | py |
XGBOD | XGBOD-master/models/generate_TOS.py |
import numpy as np
import pandas as pd
from models.utility import get_precn
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import LocalOutlierFactor
from sklearn.svm import OneClassSVM
from sklearn.ensemble import IsolationForest
from PyNomaly import loop
from models.hbos import Hbos
def knn(X, n_neighbors):
'''
Utility function to return k-average, k-median, knn
Since these three measures are similar, they are computed in the same function
:param X: train data
:param n_neighbors: number of neighbors
:return: k-average distances, k-median distances, and kth-neighbor distances
'''
neigh = NearestNeighbors()
neigh.fit(X)
res = neigh.kneighbors(n_neighbors=n_neighbors, return_distance=True)
# k-average, k-median, knn
return np.mean(res[0], axis=1), np.median(res[0], axis=1), res[0][:, -1]
def get_TOS_knn(X, y, k_list, feature_list):
knn_clf = ["knn_mean", "knn_median", "knn_kth"]
result_knn = np.zeros([X.shape[0], len(k_list) * len(knn_clf)])
roc_knn = []
prec_knn = []
for i in range(len(k_list)):
k = k_list[i]
k_mean, k_median, k_k = knn(X, n_neighbors=k)
knn_result = [k_mean, k_median, k_k]
for j in range(len(knn_result)):
score_pred = knn_result[j]
clf = knn_clf[j]
roc = np.round(roc_auc_score(y, score_pred), decimals=4)
# apc = np.round(average_precision_score(y, score_pred), decimals=4)
prec_n = np.round(get_precn(y, score_pred), decimals=4)
print('{clf} @ {k} - ROC: {roc} Precision@n: {pren}'.
format(clf=clf, k=k, roc=roc, pren=prec_n))
feature_list.append(clf + str(k))
roc_knn.append(roc)
prec_knn.append(prec_n)
result_knn[:, i * len(knn_result) + j] = score_pred
print()
return feature_list, roc_knn, prec_knn, result_knn
def get_TOS_loop(X, y, k_list, feature_list):
# only compatible with pandas
df_X = pd.DataFrame(X)
result_loop = np.zeros([X.shape[0], len(k_list)])
roc_loop = []
prec_loop = []
for i in range(len(k_list)):
k = k_list[i]
clf = loop.LocalOutlierProbability(df_X, n_neighbors=k).fit()
score_pred = clf.local_outlier_probabilities.astype(float)
roc = np.round(roc_auc_score(y, score_pred), decimals=4)
# apc = np.round(average_precision_score(y, score_pred), decimals=4)
prec_n = np.round(get_precn(y, score_pred), decimals=4)
print('LoOP @ {k} - ROC: {roc} Precision@n: {pren}'.format(k=k,
roc=roc,
pren=prec_n))
feature_list.append('loop_' + str(k))
roc_loop.append(roc)
prec_loop.append(prec_n)
result_loop[:, i] = score_pred
print()
return feature_list, roc_loop, prec_loop, result_loop
def get_TOS_lof(X, y, k_list, feature_list):
result_lof = np.zeros([X.shape[0], len(k_list)])
roc_lof = []
prec_lof = []
for i in range(len(k_list)):
k = k_list[i]
clf = LocalOutlierFactor(n_neighbors=k)
y_pred = clf.fit_predict(X)
score_pred = clf.negative_outlier_factor_
roc = np.round(roc_auc_score(y, score_pred * -1), decimals=4)
# apc = np.round(average_precision_score(y, score_pred * -1), decimals=4)
prec_n = np.round(get_precn(y, score_pred * -1), decimals=4)
print('LOF @ {k} - ROC: {roc} Precision@n: {pren}'.format(k=k,
roc=roc,
pren=prec_n))
feature_list.append('lof_' + str(k))
roc_lof.append(roc)
prec_lof.append(prec_n)
result_lof[:, i] = score_pred * -1
print()
return feature_list, roc_lof, prec_lof, result_lof
def get_TOS_hbos(X, y, k_list, feature_list):
result_hbos = np.zeros([X.shape[0], len(k_list)])
roc_hbos = []
prec_hbos = []
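# note that k_list is overridden below with HBOS-specific bin counts, so the
# values passed in by the caller are not used here (result_hbos above was sized
# from the caller's k_list, so the two lengths must match)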
k_list = [3, 5, 7, 9, 12, 15, 20, 25, 30, 50]
for i in range(len(k_list)):
k = k_list[i]
clf = Hbos(bins=k, alpha=0.3)
clf.fit(X)
score_pred = clf.decision_scores
roc = np.round(roc_auc_score(y, score_pred), decimals=4)
# apc = np.round(average_precision_score(y, score_pred * -1), decimals=4)
prec_n = np.round(get_precn(y, score_pred), decimals=4)
print('HBOS @ {k} - ROC: {roc} Precision@n: {pren}'.format(k=k,
roc=roc,
pren=prec_n))
feature_list.append('hbos_' + str(k))
roc_hbos.append(roc)
prec_hbos.append(prec_n)
result_hbos[:, i] = score_pred
print()
return feature_list, roc_hbos, prec_hbos, result_hbos
def get_TOS_svm(X, y, nu_list, feature_list):
result_ocsvm = np.zeros([X.shape[0], len(nu_list)])
roc_ocsvm = []
prec_ocsvm = []
for i in range(len(nu_list)):
nu = nu_list[i]
clf = OneClassSVM(nu=nu)
clf.fit(X)
score_pred = clf.decision_function(X)
roc = np.round(roc_auc_score(y, score_pred * -1), decimals=4)
# apc = np.round(average_precision_score(y, score_pred * -1), decimals=4)
prec_n = np.round(
get_precn(y, score_pred * -1), decimals=4)
print('svm @ {nu} - ROC: {roc} Precision@n: {pren}'.format(nu=nu,
roc=roc,
pren=prec_n))
feature_list.append('ocsvm_' + str(nu))
roc_ocsvm.append(roc)
prec_ocsvm.append(prec_n)
result_ocsvm[:, i] = score_pred.reshape(score_pred.shape[0]) * -1
print()
return feature_list, roc_ocsvm, prec_ocsvm, result_ocsvm
def get_TOS_iforest(X, y, n_list, feature_list):
result_if = np.zeros([X.shape[0], len(n_list)])
roc_if = []
prec_if = []
for i in range(len(n_list)):
n = n_list[i]
clf = IsolationForest(n_estimators=n)
clf.fit(X)
score_pred = clf.decision_function(X)
roc = np.round(roc_auc_score(y, score_pred * -1), decimals=4)
prec_n = np.round(get_precn(y, y_pred=(score_pred * -1)), decimals=4)
print('Isolation Forest @ {n} - ROC: {roc} Precision@n: {pren}'.format(
n=n,
roc=roc,
pren=prec_n))
feature_list.append('if_' + str(n))
roc_if.append(roc)
prec_if.append(prec_n)
result_if[:, i] = score_pred.reshape(score_pred.shape[0]) * -1
print()
return feature_list, roc_if, prec_if, result_if
| 6,799 | 34.416667 | 81 | py |
XGBOD | XGBOD-master/models/glosh.py |
import hdbscan
import numpy as np
from sklearn.preprocessing import StandardScaler
from models.utility import get_precn
class Glosh(object):
def __init__(self, min_cluster_size=5):
self.min_cluster_size = min_cluster_size
def fit(self, X_train):
self.X_train = X_train
def sample_scores(self, X_test):
# initialize the outputs
pred_score = np.zeros([X_test.shape[0], 1])
for i in range(X_test.shape[0]):
x_i = X_test[i, :]
x_i = np.asarray(x_i).reshape(1, x_i.shape[0])
x_comb = np.concatenate((self.X_train, x_i), axis=0)
x_comb_norm = StandardScaler().fit_transform(x_comb)
clusterer = hdbscan.HDBSCAN()
clusterer.fit(x_comb_norm)
# print(clusterer.outlier_scores_[-1])
# record the current item
pred_score[i, :] = clusterer.outlier_scores_[-1]
return pred_score
def evaluate(self, X_test, y_test):
pred_score = self.sample_scores(X_test)
prec_n = (get_precn(y_test, pred_score))
print("precision@n", prec_n)
| 1,118 | 27.692308 | 64 | py |
XGBOD | XGBOD-master/models/__init__.py | | 0 | 0 | 0 | py |
XGBOD | XGBOD-master/models/select_TOS.py |
import random
import numpy as np
from scipy.stats import pearsonr
from models.utility import get_top_n
def random_select(X, X_train_new_orig, roc_list, p):
s_feature_rand = random.sample(range(0, len(roc_list)), p)
X_train_new_rand = X_train_new_orig[:, s_feature_rand]
X_train_all_rand = np.concatenate((X, X_train_new_rand), axis=1)
# print(s_feature_rand)
return X_train_new_rand, X_train_all_rand
def accurate_select(X, X_train_new_orig, roc_list, p):
s_feature_accu = get_top_n(roc_list=roc_list, n=p, top=True)
X_train_new_accu = X_train_new_orig[:, s_feature_accu[0][0:p]]
X_train_all_accu = np.concatenate((X, X_train_new_accu), axis=1)
# print(s_feature_accu)
return X_train_new_accu, X_train_all_accu
def balance_select(X, X_train_new_orig, roc_list, p):
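# greedy selection: start from the TOS with the highest ROC, then repeatedly
# discount each remaining TOS's ROC by its accumulated absolute Pearson
# correlation with the already-selected TOS and pick the best discounted score;
# note that roc_list is modified in place (selected entries are set to -1)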
s_feature_balance = []
pearson_list = np.zeros([len(roc_list), 1])
# handle the first value
max_value_idx = roc_list.index(max(roc_list))
s_feature_balance.append(max_value_idx)
roc_list[max_value_idx] = -1
for i in range(p - 1):
for j in range(len(roc_list)):
pear = pearsonr(X_train_new_orig[:, max_value_idx],
X_train_new_orig[:, j])
# update the pearson
pearson_list[j] = np.abs(pearson_list[j]) + np.abs(pear[0])
discounted_roc = np.true_divide(roc_list, pearson_list.transpose())
max_value_idx = np.argmax(discounted_roc)
s_feature_balance.append(max_value_idx)
roc_list[max_value_idx] = -1
X_train_new_balance = X_train_new_orig[:, s_feature_balance]
X_train_all_balance = np.concatenate((X, X_train_new_balance), axis=1)
# print(s_feature_balance)
return X_train_new_balance, X_train_all_balance
| 1,777 | 29.655172 | 75 | py |
La-MAML | La-MAML-main/main.py |
import importlib
import datetime
import argparse
import time
import os
import ipdb
from tqdm import tqdm
import torch
from torch.autograd import Variable
import parser as file_parser
from metrics.metrics import confusion_matrix
from utils import misc_utils
from main_multi_task import life_experience_iid, eval_iid_tasks
def eval_class_tasks(model, tasks, args):
model.eval()
result = []
for t, task_loader in enumerate(tasks):
rt = 0
for (i, (x, y)) in enumerate(task_loader):
if args.cuda:
x = x.cuda()
_, p = torch.max(model(x, t).data.cpu(), 1, keepdim=False)
rt += (p == y).float().sum()
result.append(rt / len(task_loader.dataset))
return result
def eval_tasks(model, tasks, args):
model.eval()
result = []
for i, task in enumerate(tasks):
t = i
x = task[1]
y = task[2]
rt = 0
eval_bs = x.size(0)
for b_from in range(0, x.size(0), eval_bs):
b_to = min(b_from + eval_bs, x.size(0) - 1)
if b_from == b_to:
xb = x[b_from].view(1, -1)
yb = torch.LongTensor([y[b_to]]).view(1, -1)
else:
xb = x[b_from:b_to]
yb = y[b_from:b_to]
if args.cuda:
xb = xb.cuda()
_, pb = torch.max(model(xb, t).data.cpu(), 1, keepdim=False)
rt += (pb == yb).float().sum()
result.append(rt / x.size(0))
return result
def life_experience(model, inc_loader, args):
result_val_a = []
result_test_a = []
result_val_t = []
result_test_t = []
time_start = time.time()
test_tasks = inc_loader.get_tasks("test")
val_tasks = inc_loader.get_tasks("val")
evaluator = eval_tasks
if args.loader == "class_incremental_loader":
evaluator = eval_class_tasks
for task_i in range(inc_loader.n_tasks):
task_info, train_loader, _, _ = inc_loader.new_task()
for ep in range(args.n_epochs):
model.real_epoch = ep
prog_bar = tqdm(train_loader)
for (i, (x, y)) in enumerate(prog_bar):
if((i % args.log_every) == 0):
result_val_a.append(evaluator(model, val_tasks, args))
result_val_t.append(task_info["task"])
v_x = x
v_y = y
if args.arch == 'linear':
v_x = x.view(x.size(0), -1)
if args.cuda:
v_x = v_x.cuda()
v_y = v_y.cuda()
model.train()
loss = model.observe(Variable(v_x), Variable(v_y), task_info["task"])
prog_bar.set_description(
"Task: {} | Epoch: {}/{} | Iter: {} | Loss: {} | Acc: Total: {} Current Task: {} ".format(
task_info["task"], ep+1, args.n_epochs, i%(1000*args.n_epochs), round(loss, 3),
round(sum(result_val_a[-1]).item()/len(result_val_a[-1]), 5), round(result_val_a[-1][task_info["task"]].item(), 5)
)
)
result_val_a.append(evaluator(model, val_tasks, args))
result_val_t.append(task_info["task"])
if args.calc_test_accuracy:
result_test_a.append(evaluator(model, test_tasks, args))
result_test_t.append(task_info["task"])
print("####Final Validation Accuracy####")
print("Final Results:- \n Total Accuracy: {} \n Individual Accuracy: {}".format(sum(result_val_a[-1])/len(result_val_a[-1]), result_val_a[-1]))
if args.calc_test_accuracy:
print("####Final Test Accuracy####")
print("Final Results:- \n Total Accuracy: {} \n Individual Accuracy: {}".format(sum(result_test_a[-1])/len(result_test_a[-1]), result_test_a[-1]))
time_end = time.time()
time_spent = time_end - time_start
return torch.Tensor(result_val_t), torch.Tensor(result_val_a), torch.Tensor(result_test_t), torch.Tensor(result_test_a), time_spent
def save_results(args, result_val_t, result_val_a, result_test_t, result_test_a, model, spent_time):
fname = os.path.join(args.log_dir, 'results')
# save confusion matrix and print one line of stats
val_stats = confusion_matrix(result_val_t, result_val_a, args.log_dir, 'results.txt')
one_liner = str(vars(args)) + ' # val: '
one_liner += ' '.join(["%.3f" % stat for stat in val_stats])
test_stats = 0
if args.calc_test_accuracy:
test_stats = confusion_matrix(result_test_t, result_test_a, args.log_dir, 'results.txt')
one_liner += ' # test: ' + ' '.join(["%.3f" % stat for stat in test_stats])
print(fname + ': ' + one_liner + ' # ' + str(spent_time))
# save all results in binary file
torch.save((result_val_t, result_val_a, model.state_dict(),
val_stats, one_liner, args), fname + '.pt')
return val_stats, test_stats
def main():
parser = file_parser.get_parser()
args = parser.parse_args()
# initialize seeds
misc_utils.init_seed(args.seed)
# set up loader
# 2 options: class_incremental and task_incremental
# experiments in the paper only use task_incremental
Loader = importlib.import_module('dataloaders.' + args.loader)
loader = Loader.IncrementalLoader(args, seed=args.seed)
n_inputs, n_outputs, n_tasks = loader.get_dataset_info()
# setup logging
timestamp = misc_utils.get_date_time()
args.log_dir, args.tf_dir = misc_utils.log_dir(args, timestamp)
# load model
Model = importlib.import_module('model.' + args.model)
model = Model.Net(n_inputs, n_outputs, n_tasks, args)
if args.cuda:
try:
model.net.cuda()
except:
pass
# run model on loader
if args.model == "iid2":
# oracle baseline with all task data shown at same time
result_val_t, result_val_a, result_test_t, result_test_a, spent_time = life_experience_iid(
model, loader, args)
else:
# for all the CL baselines
result_val_t, result_val_a, result_test_t, result_test_a, spent_time = life_experience(
model, loader, args)
# save results in files or print on terminal
save_results(args, result_val_t, result_val_a, result_test_t, result_test_a, model, spent_time)
if __name__ == "__main__":
main()
| 6,437 | 32.185567 | 154 | py |
La-MAML | La-MAML-main/parser.py |
# coding=utf-8
import os
import argparse
def get_parser():
parser = argparse.ArgumentParser(description='Continual learning')
parser.add_argument('--expt_name', type=str, default='test_lamaml',
help='name of the experiment')
# model details
parser.add_argument('--model', type=str, default='single',
help='algo to train')
parser.add_argument('--arch', type=str, default='linear',
help='arch to use for training', choices = ['linear', 'pc_cnn'])
parser.add_argument('--n_hiddens', type=int, default=100,
help='number of hidden neurons at each layer')
parser.add_argument('--n_layers', type=int, default=2,
help='number of hidden layers')
parser.add_argument('--xav_init', default=False , action='store_true',
help='Use xavier initialization')
# optimizer parameters influencing all models
parser.add_argument("--glances", default=1, type=int,
help="Number of times the model is allowed to train over a set of samples in the single pass setting")
parser.add_argument('--n_epochs', type=int, default=1,
help='Number of epochs per task')
parser.add_argument('--batch_size', type=int, default=1,
help='the amount of items received by the algorithm at one time (set to 1 across all ' +
'experiments). Variable name is from GEM project.')
parser.add_argument('--replay_batch_size', type=float, default=20,
help='The batch size for experience replay.')
parser.add_argument('--memories', type=int, default=5120,
help='number of total memories stored in a reservoir sampling based buffer')
parser.add_argument('--lr', type=float, default=1e-3,
help='learning rate (For baselines)')
# experiment parameters
parser.add_argument('--cuda', default=False , action='store_true',
help='Use GPU')
parser.add_argument('--seed', type=int, default=0,
help='random seed of model')
parser.add_argument('--log_every', type=int, default=1000,
help='frequency of checking the validation accuracy, in minibatches')
parser.add_argument('--log_dir', type=str, default='logs/',
help='the directory where the logs will be saved')
parser.add_argument('--tf_dir', type=str, default='',
help='(not set by user)')
parser.add_argument('--calc_test_accuracy', default=False , action='store_true',
help='Calculate test accuracy along with val accuracy')
# data parameters
parser.add_argument('--data_path', default='data/',
help='path where data is located')
parser.add_argument('--loader', type=str, default='task_incremental_loader',
help='data loader to use')
parser.add_argument('--samples_per_task', type=int, default=-1,
help='training samples per task (all if negative)')
parser.add_argument('--shuffle_tasks', default=False, action='store_true',
help='shuffle the order in which tasks are presented')
parser.add_argument('--classes_per_it', type=int, default=4,
help='number of classes in every batch')
parser.add_argument('--iterations', type=int, default=5000,
help='number of iterations (used by the class incremental loader)')
parser.add_argument("--dataset", default="mnist_rotations", type=str,
help="Dataset to train and test on.")
parser.add_argument("--workers", default=3, type=int,
help="Number of workers preprocessing the data.")
parser.add_argument("--validation", default=0., type=float,
help="Validation split (0. <= x <= 1.).")
parser.add_argument("-order", "--class_order", default="old", type=str,
help="define classes order of increment ",
choices = ["random", "chrono", "old", "super"])
parser.add_argument("-inc", "--increment", default=5, type=int,
help="number of classes to increment by in class incremental loader")
parser.add_argument('--test_batch_size', type=int, default=100000 ,
help='batch size to use during testing.')
# La-MAML parameters
parser.add_argument('--opt_lr', type=float, default=1e-1,
help='learning rate for LRs')
parser.add_argument('--opt_wt', type=float, default=1e-1,
help='learning rate for weights')
parser.add_argument('--alpha_init', type=float, default=1e-3,
help='initialization for the LRs')
parser.add_argument('--learn_lr', default=False, action='store_true',
help='model should update the LRs during learning')
parser.add_argument('--sync_update', default=False , action='store_true',
help='the LRs and weights should be updated synchronously')
parser.add_argument('--grad_clip_norm', type=float, default=2.0,
help='Clip the gradients by this value')
parser.add_argument("--cifar_batches", default=3, type=int,
help="Number of batches in inner trajectory")
parser.add_argument('--use_old_task_memory', default=False, action='store_true',
help='Use only old task samples for replay buffer data')
parser.add_argument('--second_order', default=False , action='store_true',
help='use second order MAML updates')
# memory parameters for GEM | AGEM | ICARL
parser.add_argument('--n_memories', type=int, default=0,
help='number of memories per task')
parser.add_argument('--memory_strength', default=0, type=float,
help='memory strength (meaning depends on memory)')
parser.add_argument('--steps_per_sample', default=1, type=int,
help='training steps per batch')
# parameters specific to MER
parser.add_argument('--gamma', type=float, default=1.0,
help='gamma learning rate parameter')
parser.add_argument('--beta', type=float, default=1.0,
help='beta learning rate parameter')
parser.add_argument('--s', type=float, default=1,
help='current example learning rate multiplier (s)')
parser.add_argument('--batches_per_example', type=float, default=1,
help='the number of batches per incoming example')
# parameters specific to Meta-BGD
parser.add_argument('--bgd_optimizer', type=str, default="bgd", choices=["adam", "adagrad", "bgd", "sgd"],
help='Optimizer.')
parser.add_argument('--optimizer_params', default="{}", type=str, nargs='*',
help='Optimizer parameters')
parser.add_argument('--train_mc_iters', default=5, type=int,
help='Number of Monte Carlo samples during training (default 5)')
parser.add_argument('--std_init', default=5e-2, type=float,
help='STD init value (default 5e-2)')
parser.add_argument('--mean_eta', default=1, type=float,
help='Eta for mean step (default 1)')
parser.add_argument('--fisher_gamma', default=0.95, type=float,
help='')
return parser
| 7,611 | 53.76259 | 127 | py |
La-MAML | La-MAML-main/main_multi_task.py |
import time
import os
from tqdm import tqdm
import torch
from torch.autograd import Variable
def eval_iid_tasks(model, tasks, args):
model.eval()
result = []
for t, task_loader in enumerate(tasks):
rt = 0
for (i, (x, y, super_y)) in enumerate(task_loader):
if args.cuda:
x = x.cuda()
_, p = torch.max(model(x, super_y).data.cpu(), 1, keepdim=False)
rt += (p == y).float().sum()
result.append(rt / len(task_loader.dataset))
return result
def life_experience_iid(model, inc_loader, args):
result_val_a = []
result_test_a = []
result_val_t = []
result_test_t = []
time_start = time.time()
test_tasks = inc_loader.get_tasks("test")
val_tasks = inc_loader.get_tasks("val")
task_info, train_loader, _, _ = inc_loader.new_task()
evaluator = eval_iid_tasks
for ep in range(args.n_epochs):
model.real_epoch = ep
prog_bar = tqdm(train_loader)
for (i, (x, y, super_y)) in enumerate(prog_bar):
if((i % args.log_every) == 0):
result_val_a.append(evaluator(model, val_tasks, args))
result_val_t.append(task_info["task"])
v_x = x
v_y = y
if args.arch == 'linear':
v_x = x.view(x.size(0), -1)
super_v_y = super_y
if args.cuda:
v_x = v_x.cuda()
v_y = v_y.cuda()
super_v_y = super_v_y.cuda()
model.train()
loss = model.observe(Variable(v_x), Variable(v_y), Variable(super_v_y))
prog_bar.set_description(
"Epoch: {}/{} | Iter: {} | Loss: {} | Acc: Total: {}".format(
ep+1, args.n_epochs, i%(1000*args.n_epochs), round(loss, 3),
round(sum(result_val_a[-1]).item()/len(result_val_a[-1]), 5)
)
)
result_val_a.append(evaluator(model, val_tasks, args))
result_val_t.append(task_info["task"])
if args.calc_test_accuracy:
result_test_a.append(evaluator(model, test_tasks, args))
result_test_t.append(task_info["task"])
print("####Final Validation Accuracy####")
print("Final Results:- \n Total Accuracy: {} \n Individual Accuracy: {}".format(sum(result_val_a[-1])/len(result_val_a[-1]), result_val_a[-1]))
if args.calc_test_accuracy:
print("####Final Test Accuracy####")
print("Final Results:- \n Total Accuracy: {} \n Individual Accuracy: {}".format(sum(result_test_a[-1])/len(result_test_a[-1]), result_test_a[-1]))
time_end = time.time()
time_spent = time_end - time_start
return torch.Tensor(result_val_t), torch.Tensor(result_val_a), torch.Tensor(result_test_t), torch.Tensor(result_test_a), time_spent
| 2,818 | 30.674157 | 154 | py |
La-MAML | La-MAML-main/download.py |
########################################################################
#
# Functions for downloading and extracting data-files from the internet.
#
# Implemented in Python 3.5
#
########################################################################
#
# This file is part of the TensorFlow Tutorials available at:
#
# https://github.com/Hvass-Labs/TensorFlow-Tutorials
#
# Published under the MIT License. See the file LICENSE for details.
#
# Copyright 2016 by Magnus Erik Hvass Pedersen
#
########################################################################
import sys
import os
import urllib.request
import tarfile
import zipfile
########################################################################
def _print_download_progress(count, block_size, total_size):
"""
Function used for printing the download progress.
Used as a call-back function in maybe_download_and_extract().
"""
# Percentage completion.
pct_complete = float(count * block_size) / total_size
# Limit it because rounding errors may cause it to exceed 100%.
pct_complete = min(1.0, pct_complete)
# Status-message. Note the \r which means the line should overwrite itself.
msg = "\r- Download progress: {0:.1%}".format(pct_complete)
# Print it.
sys.stdout.write(msg)
sys.stdout.flush()
########################################################################
def download(base_url, filename, download_dir):
"""
Download the given file if it does not already exist in the download_dir.
:param base_url: The internet URL without the filename.
:param filename: The filename that will be added to the base_url.
:param download_dir: Local directory for storing the file.
:return: Nothing.
"""
# Path for local file.
save_path = os.path.join(download_dir, filename)
# Check if the file already exists, otherwise we need to download it now.
if not os.path.exists(save_path):
# Check if the download directory exists, otherwise create it.
if not os.path.exists(download_dir):
os.makedirs(download_dir)
print("Downloading", filename, "...")
# Download the file from the internet.
url = base_url + filename
file_path, _ = urllib.request.urlretrieve(url=url,
filename=save_path,
reporthook=_print_download_progress)
print(" Done!")
def maybe_download_and_extract(url, download_dir):
"""
Download and extract the data if it doesn't already exist.
Assumes the url is a tar-ball file.
:param url:
Internet URL for the tar-file to download.
Example: "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
:param download_dir:
Directory where the downloaded file is saved.
Example: "data/CIFAR-10/"
:return:
Nothing.
"""
# Filename for saving the file downloaded from the internet.
# Use the filename from the URL and add it to the download_dir.
filename = url.split('/')[-1]
file_path = os.path.join(download_dir, filename)
# Check if the file already exists.
# If it exists then we assume it has also been extracted,
# otherwise we need to download and extract it now.
if not os.path.exists(file_path):
# Check if the download directory exists, otherwise create it.
if not os.path.exists(download_dir):
os.makedirs(download_dir)
# Download the file from the internet.
file_path, _ = urllib.request.urlretrieve(url=url,
filename=file_path,
reporthook=_print_download_progress)
print()
print("Download finished. Extracting files.")
if file_path.endswith(".zip"):
# Unpack the zip-file.
zipfile.ZipFile(file=file_path, mode="r").extractall(download_dir)
elif file_path.endswith((".tar.gz", ".tgz")):
# Unpack the tar-ball.
tarfile.open(name=file_path, mode="r:gz").extractall(download_dir)
print("Done.")
else:
print("Data has apparently already been downloaded and unpacked.")
########################################################################
| 4,353 | 32.236641 | 86 | py |
La-MAML | La-MAML-main/val_data_format.py |
import io
import glob
import os
from shutil import move
from os.path import join
from os import listdir, rmdir
target_folder = './tiny-imagenet-200/val/'
val_dict = {}
with open('./tiny-imagenet-200/val/val_annotations.txt', 'r') as f:
for line in f.readlines():
split_line = line.split('\t')
val_dict[split_line[0]] = split_line[1]
paths = glob.glob('./tiny-imagenet-200/val/images/*')
for path in paths:
file = path.split('/')[-1]
folder = val_dict[file]
if not os.path.exists(target_folder + str(folder)):
os.mkdir(target_folder + str(folder))
os.mkdir(target_folder + str(folder) + '/images')
for path in paths:
file = path.split('/')[-1]
folder = val_dict[file]
dest = target_folder + str(folder) + '/images/' + str(file)
move(path, dest)
rmdir('./tiny-imagenet-200/val/images')
| 882 | 26.59375 | 67 | py |
La-MAML | La-MAML-main/get_data.py |
# Copyright 2019-present, IBM Research
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import download
import argparse
def get_mnist_data(url, data_dir):
print("Downloading {} into {}".format(url, data_dir))
download.maybe_download_and_extract(url, data_dir)
def get_datasets():
parser = argparse.ArgumentParser()
parser.add_argument("dataset", help="Either the name of the dataset (rotations, permutations, manypermutations), or `all` to download all datasets")
args = parser.parse_args()
# Change dir to the location of this file (repo's root)
get_data_path = os.path.realpath(__file__)
os.chdir(os.path.dirname(get_data_path))
data_dir = os.path.join(os.getcwd(), 'data')
# get files
mnist_rotations = "https://nlp.stanford.edu/data/mer/mnist_rotations.tar.gz"
mnist_permutations = "https://nlp.stanford.edu/data/mer/mnist_permutations.tar.gz"
mnist_many = "https://nlp.stanford.edu/data/mer/mnist_manypermutations.tar.gz"
all = {"rotations": mnist_rotations, "permutations": mnist_permutations, "manypermutations": mnist_many}
if args.dataset == "all":
for dataset in all.values():
get_mnist_data(dataset, data_dir)
else:
get_mnist_data(all[args.dataset], data_dir)
if __name__ == "__main__":
get_datasets()
| 1,423 | 34.6 | 152 | py |
La-MAML | La-MAML-main/metrics/metrics.py |
### We directly copied the metrics.py model file from the GEM project https://github.com/facebookresearch/GradientEpisodicMemory
# Copyright 2019-present, IBM Research
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import ipdb
import os
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import torch
def task_changes(result_t):
n_tasks = int(result_t.max() + 1)
changes = []
current = result_t[0]
for i, t in enumerate(result_t):
if t != current:
changes.append(i)
current = t
return n_tasks, changes
def confusion_matrix(result_t, result_a, log_dir, fname=None):
nt, changes = task_changes(result_t)
fname = os.path.join(log_dir, fname)
baseline = result_a[0]
changes = torch.LongTensor(changes + [result_a.size(0)]) - 1
result = result_a[(torch.LongTensor(changes))]
# acc[t] equals result[t,t]
acc = result.diag()
fin = result[nt - 1]
# bwt[t] equals result[T,t] - acc[t]
bwt = result[nt - 1] - acc
# fwt[t] equals result[t-1,t] - baseline[t]
fwt = torch.zeros(nt)
for t in range(1, nt):
fwt[t] = result[t - 1, t] - baseline[t]
if fname is not None:
f = open(fname, 'w')
print(' '.join(['%.4f' % r for r in baseline]), file=f)
print('|', file=f)
for row in range(result.size(0)):
print(' '.join(['%.4f' % r for r in result[row]]), file=f)
print('', file=f)
print('Diagonal Accuracy: %.4f' % acc.mean(), file=f)
print('Final Accuracy: %.4f' % fin.mean(), file=f)
print('Backward: %.4f' % bwt.mean(), file=f)
print('Forward: %.4f' % fwt.mean(), file=f)
f.close()
colors = cm.nipy_spectral(np.linspace(0, 1, len(result)))
figure = plt.figure(figsize=(8, 8))
ax = plt.gca()
data = np.array(result_a)
for i in range(len(data[0])):
plt.plot(range(data.shape[0]), data[:,i], label=str(i), color=colors[i], linewidth=2)
plt.savefig(log_dir + '/' + 'task_wise_accuracy.png')
stats = []
stats.append(acc.mean())
stats.append(fin.mean())
stats.append(bwt.mean())
stats.append(fwt.mean())
return stats
| 2,348 | 28 | 128 | py |
La-MAML | La-MAML-main/dataloaders/idataset.py |
import numpy as np
from PIL import Image
import torch
from torchvision import datasets, transforms
import os
from dataloaders import cifar_info
class DummyDataset(torch.utils.data.Dataset):
def __init__(self, x, y, trsf, pretrsf = None, imgnet_like = False, super_y = None):
self.x, self.y = x, y
self.super_y = super_y
# transforms to be applied before and after conversion to imgarray
self.trsf = trsf
self.pretrsf = pretrsf
# if not from imgnet, needs to be converted to imgarray first
self.imgnet_like = imgnet_like
def __len__(self):
return self.x.shape[0]
def __getitem__(self, idx):
x, y = self.x[idx], self.y[idx]
if self.super_y is not None:
super_y = self.super_y[idx]
if(self.pretrsf is not None):
x = self.pretrsf(x)
if(not self.imgnet_like):
x = Image.fromarray(x)
x = self.trsf(x)
if self.super_y is not None:
return x, y, super_y
else:
return x, y
class DummyArrayDataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.x, self.y = x, y
def __len__(self):
return self.x.shape[0]
def __getitem__(self, idx):
x, y = self.x[idx], self.y[idx]
return x, y
def _get_datasets(dataset_names):
return [_get_dataset(dataset_name) for dataset_name in dataset_names.split("-")]
def _get_dataset(dataset_name):
dataset_name = dataset_name.lower().strip()
if dataset_name == "cifar10":
return iCIFAR10
elif dataset_name == "cifar100":
return iCIFAR100
elif dataset_name == "tinyimagenet":
return iImgnet
else:
raise NotImplementedError("Unknown dataset {}.".format(dataset_name))
class DataHandler:
base_dataset = None
train_transforms = []
common_transforms = [transforms.ToTensor()]
class_order = None
class iImgnet(DataHandler):
base_dataset = datasets.ImageFolder
top_transforms = [
lambda x: Image.open(x[0]).convert('RGB'),
]
train_transforms = [
transforms.RandomCrop(64, padding=4),
transforms.RandomHorizontalFlip() #,
#transforms.ColorJitter(brightness=63 / 255)
]
common_transforms = [
transforms.Resize((64, 64)),
transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
]
class_order = [
i for i in range(200)
]
class iCIFAR10(DataHandler):
base_dataset = datasets.cifar.CIFAR10
base_dataset_hierarchy = cifar_info.CIFAR10
top_transforms = [
]
train_transforms = [
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=63 / 255)
]
common_transforms = [
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
]
class iCIFAR100(iCIFAR10):
base_dataset = datasets.cifar.CIFAR100
base_dataset_hierarchy = cifar_info.CIFAR100
common_transforms = [
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
]
# update: class order can now be chosen randomly since it just depends on seed
class_order = [
87, 0, 52, 58, 44, 91, 68, 97, 51, 15, 94, 92, 10, 72, 49, 78, 61, 14, 8, 86, 84, 96, 18,
24, 32, 45, 88, 11, 4, 67, 69, 66, 77, 47, 79, 93, 29, 50, 57, 83, 17, 81, 41, 12, 37, 59,
25, 20, 80, 73, 1, 28, 6, 46, 62, 82, 53, 9, 31, 75, 38, 63, 33, 74, 27, 22, 36, 3, 16, 21,
60, 19, 70, 90, 89, 43, 5, 42, 65, 76, 40, 30, 23, 85, 2, 95, 56, 48, 71, 64, 98, 13, 99, 7,
34, 55, 54, 26, 35, 39
] ## some random class order
class_order_super = [4, 95, 55, 30, 72, 73, 1, 67, 32, 91, 62, 92, 70, 54, 82, 10, 61, 28, 9, 16, 53,
83, 51, 0, 57, 87, 86, 40, 39, 22, 25, 5, 94, 84, 20, 18, 6, 7, 14, 24, 88, 97,
3, 43, 42, 17, 37, 12, 68, 76, 71, 60, 33, 23, 49, 38, 21, 15, 31, 19, 75, 66, 34,
63, 64, 45, 99, 26, 77, 79, 46, 98, 11, 2, 35, 93, 78, 44, 29, 27, 80, 65, 74, 50,
36, 52, 96, 56, 47, 59, 90, 58, 48, 13, 8, 69, 81, 41, 89, 85
] ## parent-wise split
| 4,465 | 29.8 | 105 | py |
La-MAML | La-MAML-main/dataloaders/cifar_info.py |
from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
from torchvision.datasets.vision import VisionDataset
from torchvision.datasets.utils import check_integrity, download_and_extract_archive
import ipdb
# defining the mapping from parent classes to fine-grained classes in cifar
# in case one needs to split tasks by parent class
super_class_to_class = {
'aquatic_mammals' : ['beaver', 'dolphin', 'otter', 'seal', 'whale'],
'fish' : ['aquarium_fish', 'flatfish', 'ray', 'shark', 'trout'],
'flowers' : ['orchid', 'poppy', 'rose', 'sunflower', 'tulip'],
'food_containers' : ['bottle', 'bowl', 'can', 'cup', 'plate'],
'fruit_and_vegetables' : ['apple', 'mushroom', 'orange', 'pear', 'sweet_pepper'],
'household_electrical_devices' : ['clock', 'keyboard', 'lamp', 'telephone', 'television'],
'household_furniture' : ['bed', 'chair', 'couch', 'table', 'wardrobe'],
'insects' : ['bee', 'beetle', 'butterfly', 'caterpillar', 'cockroach'],
'large_carnivores' : ['bear', 'leopard', 'lion', 'tiger', 'wolf'],
'large_man-made_outdoor_things' : ['bridge', 'castle', 'house', 'road', 'skyscraper'],
'large_natural_outdoor_scenes' : ['cloud', 'forest', 'mountain', 'plain', 'sea'],
'large_omnivores_and_herbivores' : ['camel', 'cattle', 'chimpanzee', 'elephant', 'kangaroo'],
'medium_mammals' : ['fox', 'porcupine', 'possum', 'raccoon', 'skunk'],
'non-insect_invertebrates' : ['crab', 'lobster', 'snail', 'spider', 'worm'],
'people' : ['baby', 'boy', 'girl', 'man', 'woman'],
'reptiles' : ['crocodile', 'dinosaur', 'lizard', 'snake', 'turtle'],
'small_mammals' : ['hamster', 'mouse', 'rabbit', 'shrew', 'squirrel'],
'trees': ['maple_tree', 'oak_tree', 'palm_tree', 'pine_tree', 'willow_tree'],
'vehicles_1' : ['bicycle', 'bus', 'motorcycle', 'pickup_truck', 'train'],
'vehicles_2' : ['lawn_mower', 'rocket', 'streetcar', 'tank', 'tractor']
}
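# A minimal illustrative sketch (not used elsewhere in this file): the mapping above is
# keyed by superclass, but when splitting tasks by parent class it is often handier to
# look up a fine label's superclass directly. `class_to_super_class` is a name introduced
# here purely for illustration.
class_to_super_class = {
    fine: coarse
    for coarse, fine_list in super_class_to_class.items()
    for fine in fine_list
}
# e.g. class_to_super_class['dolphin'] == 'aquatic_mammals'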
class CIFAR10(VisionDataset):
"""`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
Args:
root (string): Root directory of dataset where directory
``cifar-10-batches-py`` exists or will be saved to if download is set to True.
train (bool, optional): If True, creates dataset from training set, otherwise
creates from test set.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
base_folder = 'cifar-10-batches-py'
url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
['data_batch_4', '634d18415352ddfa80567beed471001a'],
['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
]
test_list = [
['test_batch', '40351d587109b95175f43aff81a1287e'],
]
meta = {
'filename': 'batches.meta',
'key': 'label_names',
'md5': '5ff9c542aee3614f3951f8cda6e48888',
}
def __init__(self, root, train=True, transform=None, target_transform=None,
download=False):
super(CIFAR10, self).__init__(root, transform=transform,
target_transform=target_transform)
self.train = train # training set or test set
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
if self.train:
downloaded_list = self.train_list
else:
downloaded_list = self.test_list
self.high_level_supervise = True
self.data = []
self.targets = []
self.super_targets = []
# now load the picked numpy arrays
for file_name, checksum in downloaded_list:
file_path = os.path.join(self.root, self.base_folder, file_name)
with open(file_path, 'rb') as f:
if sys.version_info[0] == 2:
entry = pickle.load(f)
else:
entry = pickle.load(f, encoding='latin1')
self.data.append(entry['data'])
if 'labels' in entry:
self.targets.extend(entry['labels'])
else:
self.targets.extend(entry['fine_labels'])
self.super_targets.extend(entry['coarse_labels'])
self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
self._load_meta()
def _load_meta(self):
path = os.path.join(self.root, self.base_folder, self.meta['filename'])
if not check_integrity(path, self.meta['md5']):
raise RuntimeError('Dataset metadata file not found or corrupted.' +
' You can use download=True to download it')
with open(path, 'rb') as infile:
if sys.version_info[0] == 2:
data = pickle.load(infile)
else:
data = pickle.load(infile, encoding='latin1')
self.classes = data[self.meta['key']]
self.super_classes = data[self.meta['coarse_key']]
self.get_class_ids()
def get_class_ids(self):
self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}
self.super_class_to_idx = {_class: i for i, _class in enumerate(self.super_classes)}
high_ids = []
low_ids = []
low_idxs = np.arange(len(self.classes))
for key in super_class_to_class:
for classes in super_class_to_class[key]:
high_ids.append(self.super_class_to_idx[key])
low_ids.append(self.class_to_idx[classes])
high_ids_np = np.array(high_ids)
low_ids_np = np.array(low_ids)
self.low_high_map = np.stack([low_ids_np, high_ids_np], axis = 1)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, super_target = self.data[index], self.targets[index], self.super_targets[index]
if(self.high_level_supervise):
target = super_target
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, super_target
def __len__(self):
return len(self.data)
def _check_integrity(self):
root = self.root
for fentry in (self.train_list + self.test_list):
filename, md5 = fentry[0], fentry[1]
fpath = os.path.join(root, self.base_folder, filename)
if not check_integrity(fpath, md5):
return False
return True
def download(self):
if self._check_integrity():
print('Files already downloaded and verified')
return
download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)
def extra_repr(self):
return "Split: {}".format("Train" if self.train is True else "Test")
class CIFAR100(CIFAR10):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This is a subclass of the `CIFAR10` Dataset.
"""
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'coarse_key': 'coarse_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
| 8,912
| 36.607595
| 100
|
py
|
La-MAML
|
La-MAML-main/dataloaders/task_sampler.py
|
# coding=utf-8
import numpy as np
import torch
import warnings
import ipdb
class MultiTaskSampler(object):
'''
MultiTaskSampler: yield a batch of indexes at each iteration.
Indexes are computed by taking 'classes_per_it' and 'num_samples' into account:
at every iteration the batch indexes refer to 'num_support' + 'num_query' samples
for 'classes_per_it' random classes.
__len__ returns the number of episodes per epoch (same as 'self.iterations').
'''
def __init__(self, labels, classes_per_it, num_samples, iterations):
'''
Initialize the MultiTaskSampler object
Args:
- labels: an iterable containing all the labels for the current dataset
sample indexes will be inferred from this iterable.
- classes_per_it: number of random classes for each iteration
- num_samples: number of samples for each iteration for each class (support + query)
- iterations: number of iterations (episodes) per epoch
'''
super(MultiTaskSampler, self).__init__()
self.labels = labels
self.classes_per_it = classes_per_it
self.sample_per_class = num_samples
self.iterations = iterations
self.classes, self.counts = np.unique(self.labels, return_counts=True)
if self.classes_per_it > len(self.classes):
warnings.warn('Number of classes per iteration is higher than the number of unique labels')
self.classes_per_it = len(self.classes)
self.classes = torch.LongTensor(self.classes)
# create a matrix, indexes, of dim: classes X max(elements per class)
# fill it with nans
# for every class c, fill the relative row with the indices samples belonging to c
# in numel_per_class we store the number of samples for each class/row
self.idxs = range(len(self.labels))
self.indexes = np.empty((len(self.classes), max(self.counts)), dtype=int) * np.nan
self.indexes = torch.Tensor(self.indexes)
self.numel_per_class = torch.zeros_like(self.classes)
for idx, label in enumerate(self.labels):
label_idx = np.argwhere(self.classes == label).item()
self.indexes[label_idx, np.where(np.isnan(self.indexes[label_idx]))[0][0]] = idx
self.numel_per_class[label_idx] += 1
def __iter__(self):
'''
yield a batch of indexes
'''
spc = self.sample_per_class
cpi = self.classes_per_it
for it in range(self.iterations):
batch_size = spc * cpi
batch = torch.LongTensor(batch_size)
c_idxs = torch.randperm(len(self.classes))[:cpi]
for i, c in enumerate(self.classes[c_idxs]):
s = slice(i * spc, (i + 1) * spc)
# FIXME when torch.argwhere will exist
label_idx = torch.arange(len(self.classes)).long()[self.classes == c].item()
sample_idxs = torch.randperm(self.numel_per_class[label_idx])[:spc]
batch[s] = self.indexes[label_idx][sample_idxs]
batch = batch[torch.randperm(len(batch))]
yield batch
def __len__(self):
'''
returns the number of iterations (episodes) per epoch
'''
return self.iterations
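# Minimal usage sketch (illustrative only; `_demo_multi_task_sampler` and the toy labels
# are assumptions introduced here, not part of the training code). Each yielded batch
# contains `num_samples` indices for each of `classes_per_it` randomly chosen classes,
# so the sampler plugs into a DataLoader via its `batch_sampler` argument.
def _demo_multi_task_sampler():
    toy_labels = [0, 0, 0, 1, 1, 1, 2, 2, 2]
    sampler = MultiTaskSampler(labels=toy_labels, classes_per_it=2,
                               num_samples=2, iterations=3)
    for batch in sampler:
        # 3 iterations, each batch holding 2 classes x 2 samples = 4 shuffled indices
        print(batch.tolist())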
| 3,363
| 39.047619
| 103
|
py
|
La-MAML
|
La-MAML-main/dataloaders/class_incremental_loader.py
|
import random
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import datasets, transforms
from dataloaders.idataset import _get_datasets, DummyDataset
import random
import ipdb
# --------
# Datasets CIFAR and TINYIMAGENET
# --------
class IncrementalLoader:
def __init__(
self,
opt,
shuffle=True,
seed=1,
):
self._opt = opt
dataset_name=opt.dataset
validation_split=opt.validation
self.increment=opt.increment
datasets = _get_datasets(dataset_name)
self._setup_data(
datasets,
class_order_type=opt.class_order,
seed=seed,
increment=self.increment,
validation_split=validation_split
)
self.validation_split = validation_split
self.train_transforms = datasets[0].train_transforms
self.common_transforms = datasets[0].common_transforms
self.top_transforms = datasets[0].top_transforms
self._current_task = 0
self._batch_size = opt.batch_size
self._test_batch_size = opt.test_batch_size
self._workers = opt.workers
self._shuffle = shuffle
self._setup_test_tasks(validation_split)
@property
def n_tasks(self):
return len(self.increments)
def new_task(self, memory=None):
if self._current_task >= len(self.increments):
raise Exception("No more tasks.")
min_class = sum(self.increments[:self._current_task])
max_class = sum(self.increments[:self._current_task + 1])
x_train, y_train = self._select(
self.data_train, self.targets_train, low_range=min_class, high_range=max_class
)
x_val, y_val = self._select(
self.data_val, self.targets_val, low_range=min_class, high_range=max_class
)
x_test, y_test = self._select(self.data_test, self.targets_test, high_range=max_class)
if memory is not None:
data_memory, targets_memory = memory
print("Set memory of size: {}.".format(data_memory.shape[0]))
x_train = np.concatenate((x_train, data_memory))
y_train = np.concatenate((y_train, targets_memory))
train_loader = self._get_loader(x_train, y_train, mode="train")
val_loader = self._get_loader(x_val, y_val, mode="train") if len(x_val) > 0 else None
test_loader = self._get_loader(x_test, y_test, mode="test")
task_info = {
"min_class": min_class,
"max_class": max_class,
"increment": self.increments[self._current_task],
"task": self._current_task,
"max_task": len(self.increments),
"n_train_data": x_train.shape[0],
"n_test_data": x_test.shape[0]
}
self._current_task += 1
return task_info, train_loader, val_loader, test_loader
def _setup_test_tasks(self, validation_split):
self.test_tasks = []
self.val_tasks = []
for i in range(len(self.increments)):
min_class = sum(self.increments[:i])
max_class = sum(self.increments[:i + 1])
x_test, y_test = self._select(self.data_test, self.targets_test, low_range=min_class, high_range=max_class)
self.test_tasks.append(self._get_loader(x_test, y_test, mode="test"))
if validation_split > 0.0:
x_val, y_val = self._select(self.data_val, self.targets_val, low_range=min_class, high_range=max_class)
self.val_tasks.append(self._get_loader(x_val, y_val, mode="test"))
def get_tasks(self, dataset_type='test'):
if dataset_type == 'val':
if self.validation_split > 0.0:
return self.val_tasks
else:
return self.test_tasks
elif dataset_type == 'test':
return self.test_tasks
else:
raise NotImplementedError("Unknown mode {}.".format(dataset_type))
def get_dataset_info(self):
if(self._opt.dataset == 'tinyimagenet'):
n_inputs = 3*64*64
else:
n_inputs = self.data_train.shape[3]*self.data_train.shape[1]*self.data_train.shape[2]
n_outputs = self._opt.increment * len(self.increments)
n_task = len(self.increments)
return n_inputs, n_outputs, n_task
def _select(self, x, y, low_range=0, high_range=0):
idxes = np.where(np.logical_and(y >= low_range, y < high_range))[0]
return x[idxes], y[idxes]
def _get_loader(self, x, y, shuffle=True, mode="train"):
if mode == "train":
pretrsf = transforms.Compose([*self.top_transforms])
trsf = transforms.Compose([*self.train_transforms, *self.common_transforms])
batch_size = self._batch_size
elif mode == "test":
pretrsf = transforms.Compose([*self.top_transforms])
trsf = transforms.Compose(self.common_transforms)
batch_size = self._test_batch_size
elif mode == "flip":
trsf = transforms.Compose(
[transforms.RandomHorizontalFlip(p=1.), *self.common_transforms]
)
batch_size = self._test_batch_size
else:
raise NotImplementedError("Unknown mode {}.".format(mode))
return DataLoader(
DummyDataset(x, y, trsf, pretrsf, self._opt.dataset=='tinyimagenet'),
batch_size=batch_size,
shuffle=shuffle,
num_workers=self._workers
)
def _setup_data(self, datasets, class_order_type=False, seed=1, increment=10, validation_split=0.):
# FIXME: handles online loading of images
self.data_train, self.targets_train = [], []
self.data_test, self.targets_test = [], []
self.data_val, self.targets_val = [], []
self.increments = []
self.class_order = []
current_class_idx = 0 # When using multiple datasets
for dataset in datasets:
if(self._opt.dataset == 'tinyimagenet'):
root_path = self._opt.data_path
train_dataset = dataset.base_dataset(root_path + 'train/')
test_dataset = dataset.base_dataset(root_path + 'val/')
train_dataset.data = train_dataset.samples
test_dataset.data = test_dataset.samples
x_train, y_train = train_dataset.data, np.array(train_dataset.targets)
x_val, y_val, x_train, y_train = self._list_split_per_class(
x_train, y_train, validation_split
)
x_test, y_test = test_dataset.data, np.array(test_dataset.targets)
order = [i for i in range(len(np.unique(y_train)))]
if class_order_type == 'random':
random.seed(seed) # Ensure that following order is determined by seed:
random.shuffle(order)
print("Class order:", order)
elif class_order_type == 'old' and dataset.class_order is not None:
order = dataset.class_order
else:
print("Classes are presented in a chronological order")
else:
root_path = self._opt.data_path
train_dataset = dataset.base_dataset(root_path, train=True, download=True)
test_dataset = dataset.base_dataset(root_path, train=False, download=True)
x_train, y_train = train_dataset.data, np.array(train_dataset.targets)
x_val, y_val, x_train, y_train = self._split_per_class(
x_train, y_train, validation_split
)
x_test, y_test = test_dataset.data, np.array(test_dataset.targets)
order = [i for i in range(len(np.unique(y_train)))]
if class_order_type == 'random':
random.seed(seed) # Ensure that following order is determined by seed:
random.shuffle(order)
print("Class order:", order)
elif class_order_type == 'old' and dataset.class_order is not None:
order = dataset.class_order
elif class_order_type == 'super' and dataset.class_order_super is not None:
order = dataset.class_order_super
else:
print("Classes are presented in a chronological order")
self.class_order.append(order)
y_train = self._map_new_class_index(y_train, order)
y_val = self._map_new_class_index(y_val, order)
y_test = self._map_new_class_index(y_test, order)
y_train += current_class_idx
y_val += current_class_idx
y_test += current_class_idx
current_class_idx += len(order)
if len(datasets) > 1:
self.increments.append(len(order))
else:
self.increments = [increment for _ in range(len(order) // increment)]
self.data_train.append(x_train)
self.targets_train.append(y_train)
self.data_val.append(x_val)
self.targets_val.append(y_val)
self.data_test.append(x_test)
self.targets_test.append(y_test)
self.data_train = np.concatenate(self.data_train)
self.targets_train = np.concatenate(self.targets_train)
self.data_val = np.concatenate(self.data_val)
self.targets_val = np.concatenate(self.targets_val)
self.data_test = np.concatenate(self.data_test)
self.targets_test = np.concatenate(self.targets_test)
@staticmethod
def _map_new_class_index(y, order):
"""Transforms targets for new class order."""
return np.array(list(map(lambda x: order.index(x), y)))
@staticmethod
def _split_per_class(x, y, validation_split=0.):
"""Splits train data for a subset of validation data.
The split is done per class, so each class contributes the same proportion of data.
"""
shuffled_indexes = np.random.permutation(x.shape[0])
x = x[shuffled_indexes]
y = y[shuffled_indexes]
x_val, y_val = [], []
x_train, y_train = [], []
for class_id in np.unique(y):
class_indexes = np.where(y == class_id)[0]
nb_val_elts = int(class_indexes.shape[0] * validation_split)
val_indexes = class_indexes[:nb_val_elts]
train_indexes = class_indexes[nb_val_elts:]
x_val.append(x[val_indexes])
y_val.append(y[val_indexes])
x_train.append(x[train_indexes])
y_train.append(y[train_indexes])
x_val, y_val = np.concatenate(x_val), np.concatenate(y_val)
x_train, y_train = np.concatenate(x_train), np.concatenate(y_train)
return x_val, y_val, x_train, y_train
@staticmethod
def _list_split_per_class(x, y, validation_split=0.):
"""Splits train data for a subset of validation data.
The split is done per class, so each class contributes the same proportion of data.
"""
c = list(zip(x, y))
random.shuffle(c)
x, y = zip(*c)
x_val, y_val = [], []
x_train, y_train = [], []
for class_id in np.unique(y):
class_indexes = np.where(y == class_id)[0]
nb_val_elts = int(class_indexes.shape[0] * validation_split)
val_indexes = class_indexes[:nb_val_elts]
train_indexes = class_indexes[nb_val_elts:]
x_val_i = [x[i] for i in val_indexes]
y_val_i = [y[i] for i in val_indexes]
x_train_i = [x[i] for i in train_indexes]
y_train_i = [y[i] for i in train_indexes]
x_val.append(x_val_i)
y_val.append(y_val_i)
x_train.append(x_train_i)
y_train.append(y_train_i)
x_val, y_val = np.concatenate(x_val), np.concatenate(y_val)
x_train, y_train = np.concatenate(x_train), np.concatenate(y_train)
return x_val, y_val, x_train, y_train
def get_idx_data(self, idx, batch_size, mode="test", data_source="train"):
"""Returns a custom loader with specific idxs only.
:param idx: A list of data indexes that we want.
:param mode: Mode selecting which transformations are applied to the data.
:param data_source: Whether to fetch from the train, val, or test set.
:return: The raw data and a loader.
"""
if data_source == "train":
x, y = self.data_train, self.targets_train
elif data_source == "val":
x, y = self.data_val, self.targets_val
elif data_source == "test":
x, y = self.data_test, self.targets_test
else:
raise ValueError("Unknown data source <{}>.".format(data_source))
# targets are numpy arrays here, so sort via argsort rather than the in-place ndarray.sort()
sorted_idx = np.argsort(y)
y = y[sorted_idx]
sampler = torch.utils.data.sampler.SubsetRandomSampler(idx)
trsf = transforms.Compose(self.common_transforms)
loader = DataLoader(
DummyDataset(x[sorted_idx], y, trsf),
sampler=sampler,
batch_size=batch_size,
shuffle=False,
num_workers=self._workers)
return x[sorted_idx], loader
def get_custom_loader(self, class_indexes, mode="test", data_source="train"):
"""Returns a custom loader.
:param class_indexes: A list of class indexes that we want.
:param mode: Mode selecting which transformations are applied to the data.
:param data_source: Whether to fetch from the train, val, or test set.
:return: The raw data and a loader.
"""
if not isinstance(class_indexes, list): # TODO: deprecated, should always give a list
class_indexes = [class_indexes]
if data_source == "train":
x, y = self.data_train, self.targets_train
elif data_source == "val":
x, y = self.data_val, self.targets_val
elif data_source == "test":
x, y = self.data_test, self.targets_test
else:
raise ValueError("Unknown data source <{}>.".format(data_source))
data, targets = [], []
for class_index in class_indexes:
class_data, class_targets = self._select(
x, y, low_range=class_index, high_range=class_index + 1
)
data.append(class_data)
targets.append(class_targets)
data = np.concatenate(data)
targets = np.concatenate(targets)
return data, self._get_loader(data, targets, shuffle=False, mode=mode)
| 14,676
| 38.138667
| 119
|
py
|
La-MAML
|
La-MAML-main/dataloaders/multi_task_loader.py
|
import random
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import datasets, transforms
from dataloaders.idataset import _get_datasets, DummyDataset
from dataloaders.task_sampler import MultiTaskSampler
import random
import ipdb
class IncrementalLoader:
def __init__(
self,
opt,
shuffle=True,
seed=1,
):
self._opt = opt
dataset_name=opt.dataset
validation_split=opt.validation
self.increment=opt.increment
datasets = _get_datasets(dataset_name)
self._setup_data(
datasets,
class_order_type=opt.class_order,
seed=seed,
increment=self.increment,
validation_split=validation_split
)
self.validation_split = validation_split
self.train_transforms = datasets[0].train_transforms
self.common_transforms = datasets[0].common_transforms
self.top_transforms = datasets[0].top_transforms
self._current_task = 0
self._batch_size = opt.batch_size
self._test_batch_size = opt.test_batch_size
self._workers = opt.workers
self._shuffle = shuffle
self._setup_test_tasks(validation_split)
@property
def n_tasks(self):
return len(self.increments)
def new_task(self):
min_class = 0
max_class = max(self.targets_train) + 1
x_train, y_train, super_y_train = self._select(
self.data_train, self.targets_train, self.super_targets_train, low_range=min_class, high_range=max_class
)
x_val, y_val, super_y_val = self._select(
self.data_val, self.targets_val, self.super_targets_val, low_range=min_class, high_range=max_class
)
x_test, y_test, super_y_test = self._select(self.data_test, self.targets_test, self.super_targets_test, high_range=max_class)
train_loader = self._get_loader(x_train, y_train, super_y_train, mode="train")
val_loader = self._get_loader(x_val, y_val, super_y_val, mode="train") if len(x_val) > 0 else None
test_loader = self._get_loader(x_test, y_test, super_y_test, mode="test")
task_info = {
"min_class": min_class,
"max_class": max_class,
"increment": self.increments[self._current_task],
"task": self._current_task,
"max_task": len(self.increments),
"n_train_data": x_train.shape[0],
"n_test_data": x_test.shape[0]
}
self._current_task += 1
return task_info, train_loader, val_loader, test_loader
def _setup_test_tasks(self, validation_split):
self.test_tasks = []
self.val_tasks = []
for i in range(len(self.increments)):
min_class = i
max_class = i+1
x_test, y_test, super_y_test = self._select_super(self.data_test, self.targets_test, self.super_targets_test, low_range=min_class, high_range=max_class)
self.test_tasks.append(self._get_loader(x_test, y_test, super_y_test, mode="test"))
if validation_split > 0.0:
x_val, y_val, super_y_val = self._select_super(self.data_val, self.targets_val, self.super_targets_val, low_range=min_class, high_range=max_class)
self.val_tasks.append(self._get_loader(x_val, y_val, super_y_val, mode="test"))
def get_tasks(self, dataset_type='test'):
if dataset_type == 'val':
if self.validation_split > 0.0:
return self.val_tasks
else:
return self.test_tasks
elif dataset_type == 'test':
return self.test_tasks
else:
raise NotImplementedError("Unknown mode {}.".format(dataset_type))
def get_dataset_info(self):
if(self._opt.dataset == 'tinyimagenet'):
n_inputs = 3*64*64
else:
n_inputs = self.data_train.shape[3]*self.data_train.shape[1]*self.data_train.shape[2]
n_outputs = self._opt.increment * len(self.increments)
n_task = len(self.increments)
return n_inputs, n_outputs, n_task
def _select(self, x, y, super_y, low_range=0, high_range=0):
idxes = np.where(np.logical_and(y >= low_range, y < high_range))[0]
return x[idxes], y[idxes], super_y[idxes]
def _select_super(self, x, y, super_y, low_range=0, high_range=0):
idxes = np.where(np.logical_and(super_y >= low_range, super_y < high_range))[0]
return x[idxes], y[idxes], super_y[idxes]
def _get_loader(self, x, y, super_y, shuffle=True, mode="train"):
if mode == "train":
pretrsf = transforms.Compose([*self.top_transforms])
trsf = transforms.Compose([*self.train_transforms, *self.common_transforms])
batch_size = self._batch_size
sampler = self._get_sampler(super_y, mode)
return DataLoader(
DummyDataset(x, y, trsf, pretrsf, self._opt.dataset=='tinyimagenet', super_y),
batch_sampler=sampler,
shuffle=False,
num_workers=self._workers
)
elif mode == "test" or mode == "flip":
if mode == "test":
pretrsf = transforms.Compose([*self.top_transforms])
trsf = transforms.Compose(self.common_transforms)
batch_size = self._test_batch_size
elif mode == "flip":
trsf = transforms.Compose(
[transforms.RandomHorizontalFlip(p=1.), *self.common_transforms]
)
batch_size = self._test_batch_size
return DataLoader(
DummyDataset(x, y, trsf, pretrsf, self._opt.dataset=='tinyimagenet', super_y),
batch_size=batch_size,
shuffle=shuffle,
num_workers=self._workers
)
else:
raise NotImplementedError("Unknown mode {}.".format(mode))
def _get_sampler(self, labels, mode):
assert self._batch_size%self._opt.classes_per_it == 0, \
"Batch size should be a multiple of number of desired classes in a iter"
if 'train' in mode:
classes_per_it = self._opt.classes_per_it
num_samples = int(self._batch_size/self._opt.classes_per_it)
elif 'val' in mode:
classes_per_it = self._opt.classes_per_it
num_samples = int(self._batch_size/self._opt.classes_per_it)
else:
raise NotImplementedError("Unknown mode {}.".format(mode))
return MultiTaskSampler(labels=labels,
classes_per_it=classes_per_it,
num_samples=num_samples,
iterations=self._opt.iterations)
def _setup_data(self, datasets, class_order_type=False, seed=1, increment=10, validation_split=0.):
# FIXME: handles online loading of images
self.data_train, self.targets_train, self.super_targets_train = [], [], []
self.data_test, self.targets_test, self.super_targets_test = [], [], []
self.data_val, self.targets_val, self.super_targets_val = [], [], []
self.increments = []
self.class_order = []
current_class_idx = 0 # When using multiple datasets
for dataset in datasets:
if(self._opt.dataset == 'tinyimagenet'):
root_path = self._opt.data_path
train_dataset = dataset.base_dataset(root_path + 'train/')
test_dataset = dataset.base_dataset(root_path + 'val/')
train_dataset.data = train_dataset.samples
test_dataset.data = test_dataset.samples
x_train, y_train = train_dataset.data, np.array(train_dataset.targets)
x_val, y_val, x_train, y_train = self._list_split_per_class(
x_train, y_train
)
x_test, y_test = test_dataset.data, np.array(test_dataset.targets)
order = [i for i in range(len(np.unique(y_train)))]
if class_order_type == 'random':
random.seed(seed) # Ensure that following order is determined by seed:
random.shuffle(order)
print("Class order:", order)
elif class_order_type == 'old' and dataset.class_order is not None:
order = dataset.class_order
else:
print("Classes are presented in a chronological order")
self.class_order.append(order)
y_train = self._map_new_class_index(y_train, order)
y_val = self._map_new_class_index(y_val, order)
y_test = self._map_new_class_index(y_test, order)
super_y_train = self._make_super_classes(y_train, self.increment)
super_y_test = self._make_super_classes(y_test, self.increment)
super_y_val = self._make_super_classes(y_val, self.increment)
y_train += current_class_idx
y_val += current_class_idx
y_test += current_class_idx
current_class_idx += len(order)
if len(datasets) > 1:
self.increments.append(len(order))
else:
self.increments = [increment for _ in range(len(order) // increment)]
elif ((self._opt.dataset == 'cifar100') and (self._opt.model=="iid2")):
root_path = self._opt.data_path
train_dataset = dataset.base_dataset(root_path, train=True, download=True)
test_dataset = dataset.base_dataset(root_path, train=False, download=True)
x_train, y_train = train_dataset.data, np.array(train_dataset.targets)
x_val, y_val, x_train, y_train = self._list_split_per_class(
x_train, y_train, validation_split
)
x_test, y_test = test_dataset.data, np.array(test_dataset.targets)
order = [i for i in range(len(np.unique(y_train)))]
if class_order_type == 'random':
random.seed(seed) # Ensure that following order is determined by seed:
random.shuffle(order)
print("Class order:", order)
elif class_order_type == 'old' and dataset.class_order is not None:
order = dataset.class_order
elif class_order_type == 'super' and dataset.class_order_super is not None:
order = dataset.class_order_super
else:
print("Classes are presented in a chronological order")
self.class_order.append(order)
y_train = self._map_new_class_index(y_train, order)
y_val = self._map_new_class_index(y_val, order)
y_test = self._map_new_class_index(y_test, order)
super_y_train = self._make_super_classes(y_train, self.increment)
super_y_test = self._make_super_classes(y_test, self.increment)
super_y_val = self._make_super_classes(y_val, self.increment)
y_train += current_class_idx
y_val += current_class_idx
y_test += current_class_idx
current_class_idx += len(order)
if len(datasets) > 1:
self.increments.append(len(order))
else:
self.increments = [increment for _ in range(len(order) // increment)]
else:
root_path = self._opt.data_path
train_dataset = dataset.base_dataset_hierarchy(root_path, train=True, download=True)
test_dataset = dataset.base_dataset_hierarchy(root_path, train=False, download=True)
x_train, y_train, super_y_train = train_dataset.data, np.array(train_dataset.targets), np.array(train_dataset.super_targets)
x_val, y_val, super_y_val, x_train, y_train, super_y_train = self._split_per_class(
x_train, y_train, super_y_train, validation_split
)
x_test, y_test, super_y_test = test_dataset.data, np.array(test_dataset.targets), np.array(test_dataset.super_targets)
idxs = np.argsort(super_y_test)
x_test = x_test[idxs]
y_test = y_test[idxs]
super_y_test = super_y_test[idxs]
idxs = np.argsort(super_y_train)
x_train = x_train[idxs]
y_train = y_train[idxs]
super_y_train = super_y_train[idxs]
idxs = np.argsort(super_y_val)
x_val = x_val[idxs]
y_val = y_val[idxs]
super_y_val = super_y_val[idxs]
idxs = np.unique(y_test, return_index=True)[1]
unique_y_order = [y_test[id] for id in sorted(idxs)]
unique_supery_order = [super_y_test[id] for id in sorted(idxs)]
print(unique_supery_order)
print(unique_y_order)
y_train = self._map_new_class_index(y_train, unique_y_order)
y_val = self._map_new_class_index(y_val, unique_y_order)
y_test = self._map_new_class_index(y_test, unique_y_order)
y_train += current_class_idx
y_val += current_class_idx
y_test += current_class_idx
# current_class_idx += len(order)
# if len(datasets) > 1:
# raise(Exception("current_class_idx doesnt work for more than one dataset right now, correct it"))
# self.increments.append(len(order))
# else:
self.increments = [increment for _ in range(20)]
self.data_train.append(x_train)
self.targets_train.append(y_train)
self.super_targets_train.append(super_y_train)
self.data_val.append(x_val)
self.targets_val.append(y_val)
self.super_targets_val.append(super_y_val)
self.data_test.append(x_test)
self.targets_test.append(y_test)
self.super_targets_test.append(super_y_test)
# print(self.increments)
self.data_train = np.concatenate(self.data_train)
self.targets_train = np.concatenate(self.targets_train)
self.super_targets_train = np.concatenate(self.super_targets_train)
self.data_val = np.concatenate(self.data_val)
self.targets_val = np.concatenate(self.targets_val)
self.super_targets_val = np.concatenate(self.super_targets_val)
self.data_test = np.concatenate(self.data_test)
self.targets_test = np.concatenate(self.targets_test)
self.super_targets_test = np.concatenate(self.super_targets_test)
def _make_super_classes(self, y, increment):
unique_y = np.unique(y)
super_y = [int(i/increment) for i in range(len(unique_y))]
super_order = [super_y[y[i]] for i in range(len(y))]
return super_order
@staticmethod
def _map_new_class_index(y, order):
"""Transforms targets for new class order."""
return np.array(list(map(lambda x: order.index(x), y)))
@staticmethod
def _split_per_class(x, y, super_y, validation_split=0.):
"""Splits train data for a subset of validation data.
The split is done per class, so each class contributes the same proportion of data.
"""
shuffled_indexes = np.random.permutation(x.shape[0])
# idxs = np.argsort(y)
x = x[shuffled_indexes]
y = y[shuffled_indexes]
super_y = super_y[shuffled_indexes]
x_val, y_val, super_y_val = [], [], []
x_train, y_train, super_y_train = [], [], []
for class_id in np.unique(y):
class_indexes = np.where(y == class_id)[0]
nb_val_elts = int(class_indexes.shape[0] * validation_split)
val_indexes = class_indexes[:nb_val_elts]
train_indexes = class_indexes[nb_val_elts:]
x_val.append(x[val_indexes])
y_val.append(y[val_indexes])
super_y_val.append(super_y[val_indexes])
x_train.append(x[train_indexes])
y_train.append(y[train_indexes])
super_y_train.append(super_y[train_indexes])
x_val, y_val, super_y_val = np.concatenate(x_val), np.concatenate(y_val), np.concatenate(super_y_val)
x_train, y_train, super_y_train = np.concatenate(x_train), np.concatenate(y_train), np.concatenate(super_y_train)
return x_val, y_val, super_y_val, x_train, y_train, super_y_train
@staticmethod
def _list_split_per_class(x, y, validation_split=0.):
"""Splits train data for a subset of validation data.
The split is done per class, so each class contributes the same proportion of data.
"""
c = list(zip(x, y))
random.shuffle(c)
x, y = zip(*c)
x_val, y_val = [], []
x_train, y_train = [], []
for class_id in np.unique(y):
class_indexes = np.where(y == class_id)[0]
nb_val_elts = int(class_indexes.shape[0] * validation_split)
val_indexes = class_indexes[:nb_val_elts]
train_indexes = class_indexes[nb_val_elts:]
x_val_i = [x[i] for i in val_indexes]
y_val_i = [y[i] for i in val_indexes]
x_train_i = [x[i] for i in train_indexes]
y_train_i = [y[i] for i in train_indexes]
x_val.append(x_val_i)
y_val.append(y_val_i)
x_train.append(x_train_i)
y_train.append(y_train_i)
x_val, y_val = np.concatenate(x_val), np.concatenate(y_val)
x_train, y_train = np.concatenate(x_train), np.concatenate(y_train)
return x_val, y_val, x_train, y_train
###### Unused functions
def get_idx_data(self, idx, batch_size, mode="test", data_source="train"):
"""Returns a custom loader with specific idxs only.
:param idx: A list of data indexes that we want.
:param mode: Mode selecting which transformations are applied to the data.
:param data_source: Whether to fetch from the train, val, or test set.
:return: The raw data and a loader.
"""
if data_source == "train":
x, y, super_y = self.data_train, self.targets_train, self.super_targets_train
elif data_source == "val":
x, y, super_y = self.data_val, self.targets_val, self.super_targets_val
elif data_source == "test":
x, y, super_y = self.data_test, self.targets_test, self.super_targets_test
else:
raise ValueError("Unknown data source <{}>.".format(data_source))
# targets are numpy arrays here, so sort via argsort rather than the in-place ndarray.sort()
sorted_idx = np.argsort(y)
y = y[sorted_idx]
sampler = torch.utils.data.sampler.SubsetRandomSampler(idx)
trsf = transforms.Compose(self.common_transforms)
loader = DataLoader(
DummyDataset(x[sorted_idx], y, trsf, super_y=super_y[sorted_idx]),
sampler=sampler,
batch_size=batch_size,
shuffle=False,
num_workers=self._workers)
return x[sorted_idx], loader
def get_custom_loader(self, class_indexes, mode="test", data_source="train"):
"""Returns a custom loader.
:param class_indexes: A list of class indexes that we want.
:param mode: Mode selecting which transformations are applied to the data.
:param data_source: Whether to fetch from the train, val, or test set.
:return: The raw data and a loader.
"""
if not isinstance(class_indexes, list): # TODO: deprecated, should always give a list
class_indexes = [class_indexes]
if data_source == "train":
x, y, super_y = self.data_train, self.targets_train, self.super_targets_train
elif data_source == "val":
x, y, super_y = self.data_val, self.targets_val, self.super_targets_val
elif data_source == "test":
x, y, super_y = self.data_test, self.targets_test, self.super_targets_test
else:
raise ValueError("Unknown data source <{}>.".format(data_source))
data, targets, super_targets = [], [], []
for class_index in class_indexes:
class_data, class_targets, super_class_targets = self._select(
x, y, super_y, low_range=class_index, high_range=class_index + 1
)
data.append(class_data)
targets.append(class_targets)
super_targets.append(super_class_targets)
data = np.concatenate(data)
targets = np.concatenate(targets)
super_targets = np.concatenate(super_targets)
return data, self._get_loader(data, targets, super_targets, shuffle=False, mode=mode)
| 21,088
| 41.863821
| 164
|
py
|
La-MAML
|
La-MAML-main/dataloaders/task_incremental_loader.py
|
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
from torchvision import datasets
from dataloaders.idataset import DummyArrayDataset
import os
class IncrementalLoader:
def __init__(
self,
opt,
shuffle=True,
seed=1,
):
self._opt = opt
validation_split=opt.validation
increment=opt.increment
self._setup_data(
class_order_type=opt.class_order,
seed=seed,
increment=increment,
validation_split=validation_split
)
self._current_task = 0
self._batch_size = opt.batch_size
self._test_batch_size = opt.test_batch_size
self._workers = opt.workers
self._shuffle = shuffle
self._setup_test_tasks()
@property
def n_tasks(self):
return len(self.test_dataset)
def new_task(self):
if self._current_task >= len(self.test_dataset):
raise Exception("No more tasks.")
p = self.sample_permutations[self._current_task]
x_train, y_train = self.train_dataset[self._current_task][1][p], self.train_dataset[self._current_task][2][p]
x_test, y_test = self.test_dataset[self._current_task][1], self.test_dataset[self._current_task][2]
train_loader = self._get_loader(x_train, y_train, mode="train")
test_loader = self._get_loader(x_test, y_test, mode="test")
task_info = {
"min_class": 0,
"max_class": self.n_outputs,
"increment": -1,
"task": self._current_task,
"max_task": len(self.test_dataset),
"n_train_data": len(x_train),
"n_test_data": len(x_test)
}
self._current_task += 1
return task_info, train_loader, None, test_loader
def _setup_test_tasks(self):
self.test_tasks = []
for i in range(len(self.test_dataset)):
self.test_tasks.append(self._get_loader(self.test_dataset[i][1], self.test_dataset[i][2], mode="test"))
def get_tasks(self, dataset_type='test'):
if dataset_type == 'test':
return self.test_dataset
elif dataset_type == 'val':
return self.test_dataset
else:
raise NotImplementedError("Unknown mode {}.".format(dataset_type))
def get_dataset_info(self):
n_inputs = self.train_dataset[0][1].size(1)
n_outputs = 0
for i in range(len(self.train_dataset)):
n_outputs = max(n_outputs, self.train_dataset[i][2].max())
n_outputs = max(n_outputs, self.test_dataset[i][2].max())
self.n_outputs = n_outputs
return n_inputs, n_outputs.item()+1, self.n_tasks
def _get_loader(self, x, y, shuffle=True, mode="train"):
if mode == "train":
batch_size = self._batch_size
elif mode == "test":
batch_size = self._test_batch_size
else:
raise NotImplementedError("Unknown mode {}.".format(mode))
return DataLoader(
DummyArrayDataset(x, y),
batch_size=batch_size,
shuffle=shuffle,
num_workers=self._workers
)
def _setup_data(self, class_order_type=False, seed=1, increment=10, validation_split=0.):
# FIXME: handles online loading of images
torch.manual_seed(seed)
self.train_dataset, self.test_dataset = torch.load(os.path.join(self._opt.data_path, self._opt.dataset + ".pt"))
self.sample_permutations = []
# for every task, accumulate a shuffled set of samples_per_task
for t in range(len(self.train_dataset)):
N = self.train_dataset[t][1].size(0)
if self._opt.samples_per_task <= 0:
n = N
else:
n = min(self._opt.samples_per_task, N)
p = torch.randperm(N)[0:n]
self.sample_permutations.append(p)
| 3,964
| 30.468254
| 120
|
py
|
La-MAML
|
La-MAML-main/utils/misc_utils.py
|
import datetime
import glob
import json
import os
import random
import ipdb
import numpy as np
import torch
from tqdm import tqdm
def to_onehot(targets, n_classes):
onehot = torch.zeros(targets.shape[0], n_classes).to(targets.device)
onehot.scatter_(dim=1, index=targets.long().view(-1, 1), value=1.)
return onehot
def _check_loss(loss):
return not bool(torch.isnan(loss).item()) and bool((loss >= 0.).item())
def compute_accuracy(ypred, ytrue, task_size=10):
all_acc = {}
all_acc["total"] = round((ypred == ytrue).sum() / len(ytrue), 3)
for class_id in range(0, np.max(ytrue), task_size):
idxes = np.where(
np.logical_and(ytrue >= class_id, ytrue < class_id + task_size)
)[0]
label = "{}-{}".format(
str(class_id).rjust(2, "0"),
str(class_id + task_size - 1).rjust(2, "0")
)
all_acc[label] = round((ypred[idxes] == ytrue[idxes]).sum() / len(idxes), 3)
return all_acc
def get_date():
return datetime.datetime.now().strftime("%Y%m%d")
def get_date_time():
return datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f')[:-2]
def log_dir(opt, timestamp=None):
if timestamp is None:
timestamp = get_date_time()
rand_num = str(random.randint(1,1001))
logdir = opt.log_dir + '/%s/%s-%s/%s' % (opt.model, opt.expt_name, timestamp, opt.seed)
tfdir = opt.log_dir + '/%s/%s-%s/%s/%s' % (opt.model, opt.expt_name, timestamp, opt.seed, "tfdir")
mkdir(logdir)
mkdir(tfdir)
with open(logdir + '/training_parameters.json', 'w') as f:
json.dump(vars(opt), f, indent=4)
return logdir, tfdir
def save_list_to_file(path, thelist):
with open(path, 'w') as f:
for item in thelist:
f.write("%s\n" % item)
def find_latest_checkpoint(folder_path):
print('searching for checkpoint in : '+folder_path)
files = sorted(glob.iglob(folder_path+'/*.pth'), key=os.path.getmtime, reverse=True)
print('latest checkpoint is:')
print(files[0])
return files[0]
def init_seed(seed):
'''
Disable cudnn to maximize reproducibility
'''
print("Set seed", seed)
random.seed(seed)
torch.cuda.cudnn_enabled = False
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.enabled = False
def find_latest_checkpoint_name(folder_path):
print('searching for checkpoint in : '+folder_path)
files = glob.glob(folder_path+'/*.pth')
min_num = 0
filename = ''
for i, filei in enumerate(files):
ckpt_name = os.path.splitext(filei)[0]  # drop the '.pth' extension before parsing the checkpoint number
ckpt_num = int(ckpt_name.split('_')[-1])
if(ckpt_num>min_num):
min_num = ckpt_num
filename = filei
print('latest checkpoint is:')
print(filename)
return filename
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def to_numpy(input):
if isinstance(input, torch.Tensor):
return input.cpu().numpy()
elif isinstance(input, np.ndarray):
return input
else:
raise TypeError('Unknown type of input, expected torch.Tensor or '\
'np.ndarray, but got {}'.format(type(input)))
def log_sum_exp(input, dim=None, keepdim=False):
"""Numerically stable LogSumExp.
Args:
input (Tensor)
dim (int): Dimension along which the sum is performed
keepdim (bool): Whether to retain the last dimension on summing
Returns:
Equivalent of log(sum(exp(inputs), dim=dim, keepdim=keepdim)).
"""
# For a 1-D array x (any array along a single dimension),
# log sum exp(x) = s + log sum exp(x - s)
# with s = max(x) being a common choice.
if dim is None:
input = input.view(-1)
dim = 0
max_val = input.max(dim=dim, keepdim=True)[0]
output = max_val + (input - max_val).exp().sum(dim=dim, keepdim=True).log()
if not keepdim:
output = output.squeeze(dim)
return output
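# Quick sanity sketch (illustrative only, never called by the training code): the
# stabilized computation above should agree with torch.logsumexp even when a naive
# exp-sum-log would overflow.
def _log_sum_exp_sanity_check():
    x = torch.tensor([[1000.0, 1000.5], [-2.0, 3.0]])
    assert torch.allclose(log_sum_exp(x, dim=1), torch.logsumexp(x, dim=1))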
| 4,173
| 26.642384
| 103
|
py
|
La-MAML
|
La-MAML-main/model/lamaml.py
|
import random
import numpy as np
import ipdb
import math
import torch
import torch.nn as nn
from model.lamaml_base import *
class Net(BaseNet):
def __init__(self,
n_inputs,
n_outputs,
n_tasks,
args):
super(Net, self).__init__(n_inputs,
n_outputs,
n_tasks,
args)
self.nc_per_task = n_outputs
def forward(self, x, t):
output = self.net.forward(x)
return output
def meta_loss(self, x, fast_weights, y, t):
"""
differentiate the loss through the network updates wrt alpha
"""
logits = self.net.forward(x, fast_weights)
loss_q = self.loss(logits.squeeze(1), y)
return loss_q, logits
def inner_update(self, x, fast_weights, y, t):
"""
Update the fast weights using the current samples and return the updated fast weights
"""
logits = self.net.forward(x, fast_weights)
loss = self.loss(logits, y)
if fast_weights is None:
fast_weights = self.net.parameters()
# NOTE if we want higher order grads to be allowed, change create_graph=False to True
graph_required = self.args.second_order
grads = list(torch.autograd.grad(loss, fast_weights, create_graph=graph_required, retain_graph=graph_required))
# torch.clamp is not in-place, so assign the clipped gradients back
for i in range(len(grads)):
grads[i] = torch.clamp(grads[i], min=-self.args.grad_clip_norm, max=self.args.grad_clip_norm)
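# fast-weight step: theta' = theta - ReLU(alpha) * grad, where alpha are the learnable
# learning rates stored in self.net.alpha_lr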
fast_weights = list(
map(lambda p: p[1][0] - p[0] * nn.functional.relu(p[1][1]), zip(grads, zip(fast_weights, self.net.alpha_lr))))
return fast_weights
def observe(self, x, y, t):
self.net.train()
for pass_itr in range(self.glances):
self.pass_itr = pass_itr
perm = torch.randperm(x.size(0))
x = x[perm]
y = y[perm]
self.epoch += 1
self.zero_grads()
if t != self.current_task:
self.M = self.M_new
self.current_task = t
batch_sz = x.shape[0]
meta_losses = [0 for _ in range(batch_sz)]
bx, by, bt = self.getBatch(x.cpu().numpy(), y.cpu().numpy(), t)
fast_weights = None
for i in range(0, batch_sz):
batch_x = x[i].unsqueeze(0)
batch_y = y[i].unsqueeze(0)
fast_weights = self.inner_update(batch_x, fast_weights, batch_y, t)
if(self.real_epoch == 0):
self.push_to_mem(batch_x, batch_y, torch.tensor(t))
meta_loss, logits = self.meta_loss(bx, fast_weights, by, t)
meta_losses[i] += meta_loss
# Taking the meta gradient step (will update the learning rates)
self.zero_grads()
meta_loss = sum(meta_losses)/len(meta_losses)
meta_loss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
torch.nn.utils.clip_grad_norm_(self.net.alpha_lr.parameters(), self.args.grad_clip_norm)
if self.args.learn_lr:
self.opt_lr.step()
if(self.args.sync_update):
self.opt_wt.step()
else:
for i,p in enumerate(self.net.parameters()):
p.data = p.data - p.grad * nn.functional.relu(self.net.alpha_lr[i])
self.net.zero_grad()
self.net.alpha_lr.zero_grad()
return meta_loss.item()
| 3,695
| 31.421053
| 126
|
py
|
La-MAML
|
La-MAML-main/model/meta-bgd.py
|
import random
from random import shuffle
import numpy as np
import ipdb
import math
import torch
from torch.autograd import Variable
import torch.nn as nn
import model.meta.learner as Learner
import model.meta.modelfactory as mf
from model.optimizers_lib import optimizers_lib
from ast import literal_eval
"""
This baseline/ablation is constructed by merging C-MAML and BGD
By assigning a variance parameter to each NN parameter in the model
and using BGD's bayesian update to update these means (the NN parameters) and variances
(the learning rates in BGD are derived from the variances)
The 'n' bayesian samples in this case are the 'n' cumulative meta-losses sampled when
C-MAML is run with 'n' different initial theta vectors as the NN means sampled from the
(means, variances) stored for the model parameters.
The weight update is then carried out using the BGD formula that implicitly
uses the variances to derive the learning rates for the parameters
"""
class Net(torch.nn.Module):
def __init__(self,
n_inputs,
n_outputs,
n_tasks,
args):
super(Net, self).__init__()
self.args = args
nl, nh = args.n_layers, args.n_hiddens
config = mf.ModelFactory.get_model(model_type = args.arch, sizes = [n_inputs] + [nh] * nl + [n_outputs],
dataset = args.dataset, args=args)
self.net = Learner.Learner(config, args)
# define the lr params
self.net.define_task_lr_params(alpha_init = args.alpha_init)
self.cuda = args.cuda
if self.cuda:
self.net = self.net.cuda()
# optimizer model
optimizer_model = optimizers_lib.__dict__[args.bgd_optimizer]
# params used to instantiate the BGD optimiser
optimizer_params = dict({ #"logger": logger,
"mean_eta": args.mean_eta,
"std_init": args.std_init,
"mc_iters": args.train_mc_iters}, **literal_eval(" ".join(args.optimizer_params)))
self.optimizer = optimizer_model(self.net, **optimizer_params)
self.epoch = 0
# allocate buffer
self.M = []
self.M_new = []
self.age = 0
# setup losses
self.loss = torch.nn.CrossEntropyLoss()
self.is_cifar = ((args.dataset == 'cifar100') or (args.dataset == 'tinyimagenet'))
self.glances = args.glances
self.pass_itr = 0
self.real_epoch = 0
# setup memories
self.current_task = 0
self.memories = args.memories
self.batchSize = int(args.replay_batch_size)
if self.is_cifar:
self.nc_per_task = n_outputs / n_tasks
else:
self.nc_per_task = n_outputs
self.n_outputs = n_outputs
self.obseve_itr = 0
def take_multitask_loss(self, bt, t, logits, y):
loss = 0.0
for i, ti in enumerate(bt):
offset1, offset2 = self.compute_offsets(ti)
loss += self.loss(logits[i, offset1:offset2].unsqueeze(0), y[i].unsqueeze(0)-offset1)
return loss/len(bt)
def forward(self, x, t, fast_weights=None):
self.optimizer.randomize_weights(force_std=0)
output = self.net.forward(x, vars=fast_weights)
if self.is_cifar:
# make sure we predict classes within the current task
offset1, offset2 = self.compute_offsets(t)
if offset1 > 0:
output[:, :offset1].data.fill_(-10e10)
if offset2 < self.n_outputs:
output[:, int(offset2):self.n_outputs].data.fill_(-10e10)
return output
def meta_loss(self, x, fast_weights, y, bt, t):
"""
differentiate the loss through the network updates wrt alpha
"""
if self.is_cifar:
offset1, offset2 = self.compute_offsets(t)
logits = self.net.forward(x, fast_weights)[:, :offset2]
loss_q = self.take_multitask_loss(bt, t, logits, y)
else:
logits = self.net.forward(x, fast_weights)
# Cross Entropy Loss over data
loss_q = self.loss(logits, y)
return loss_q, logits
def compute_offsets(self, task):
if self.is_cifar:
offset1 = task * self.nc_per_task
offset2 = (task + 1) * self.nc_per_task
else:
offset1 = 0
offset2 = self.n_outputs
return int(offset1), int(offset2)
def push_to_mem(self, batch_x, batch_y, t):
"""
Reservoir sampling memory update
"""
if(self.real_epoch > 0 or self.pass_itr>0):
return
batch_x = batch_x.cpu()
batch_y = batch_y.cpu()
t = t.cpu()
for i in range(batch_x.shape[0]):
self.age += 1
if len(self.M_new) < self.memories:
self.M_new.append([batch_x[i], batch_y[i], t])
else:
p = random.randint(0,self.age)
if p < self.memories:
self.M_new[p] = [batch_x[i], batch_y[i], t]
def getBatch(self, x, y, t):
"""
Given the new data points, create a batch of old + new data,
where old data is part of the memory buffer
"""
if(x is not None):
mxi = np.array(x)
myi = np.array(y)
mti = np.ones(x.shape[0], dtype=int)*t
else:
mxi = np.empty( shape=(0, 0) )
myi = np.empty( shape=(0, 0) )
mti = np.empty( shape=(0, 0) )
bxs = []
bys = []
bts = []
if self.args.use_old_task_memory: # and t>0:
MEM = self.M
else:
MEM = self.M_new
if len(MEM) > 0:
order = [i for i in range(0,len(MEM))]
osize = min(self.batchSize,len(MEM))
for j in range(0,osize):
shuffle(order)
k = order[j]
x,y,t = MEM[k]
xi = np.array(x)
yi = np.array(y)
ti = np.array(t)
bxs.append(xi)
bys.append(yi)
bts.append(ti)
for j in range(len(myi)):
bxs.append(mxi[j])
bys.append(myi[j])
bts.append(mti[j])
bxs = Variable(torch.from_numpy(np.array(bxs))).float()
bys = Variable(torch.from_numpy(np.array(bys))).long().view(-1)
bts = Variable(torch.from_numpy(np.array(bts))).long().view(-1)
# handle gpus if specified
if self.cuda:
bxs = bxs.cuda()
bys = bys.cuda()
bts = bts.cuda()
return bxs,bys,bts
def take_loss(self, t, logits, y):
offset1, offset2 = self.compute_offsets(t)
loss = self.loss(logits[:, offset1:offset2], y-offset1)
return loss
def inner_update(self, x, fast_weights, y, t):
"""
Update the fast weights using the current samples and return the updated fast weights
"""
if self.is_cifar:
offset1, offset2 = self.compute_offsets(t)
logits = self.net.forward(x, fast_weights)[:, :offset2]
loss = self.take_loss(t, logits, y)
# loss = self.loss(logits, y)
else:
logits = self.net.forward(x, fast_weights)
loss = self.loss(logits, y)
if fast_weights is None:
fast_weights = self.net.parameters()
# NOTE if we want higher order grads to be allowed, change create_graph=False to True
graph_required = True
grads = list(torch.autograd.grad(loss, fast_weights, create_graph=graph_required, retain_graph=graph_required))
for i in range(len(grads)):
grads[i] = torch.clamp(grads[i], min = -self.args.grad_clip_norm, max = self.args.grad_clip_norm)
# get fast weights vector by taking SGD step on grads
fast_weights = list(
map(lambda p: p[1][0] - p[0] * p[1][1], zip(grads, zip(fast_weights, self.net.alpha_lr))))
return fast_weights
def observe(self, x, y, t):
self.net.train()
self.obseve_itr += 1
num_of_mc_iters = self.optimizer.get_mc_iters()
for glance_itr in range(self.glances):
mc_meta_losses = [0 for _ in range(num_of_mc_iters)]
# running C-MAML num_of_mc_iters times to get montecarlo samples of meta-loss
for pass_itr in range(num_of_mc_iters):
self.optimizer.randomize_weights()
self.pass_itr = pass_itr
self.epoch += 1
self.net.zero_grad()
perm = torch.randperm(x.size(0))
x = x[perm]
y = y[perm]
if pass_itr==0 and glance_itr ==0 and t != self.current_task:
self.M = self.M_new
self.current_task = t
batch_sz = x.shape[0]
n_batches = self.args.cifar_batches
rough_sz = math.ceil(batch_sz/n_batches)
# the samples of new task to iterate over in inner update trajectory
iterate_till = 1 #batch_sz
meta_losses = [0 for _ in range(n_batches)]
accuracy_meta_set = [0 for _ in range(n_batches)]
# put some asserts to make sure replay batch size can accommodate old and new samples
bx, by = None, None
bx, by, bt = self.getBatch(x.cpu().numpy(), y.cpu().numpy(), t)
fast_weights = None
# inner loop/fast updates where learn on 1-2 samples in each inner step
for i in range(n_batches):
batch_x = x[i*rough_sz : (i+1)*rough_sz]
batch_y = y[i*rough_sz : (i+1)*rough_sz]
fast_weights = self.inner_update(batch_x, fast_weights, batch_y, t)
if(pass_itr==0 and glance_itr==0):
self.push_to_mem(batch_x, batch_y, torch.tensor(t))
# the meta loss is computed at each inner step
# as this is shown to work better in Reptile []
meta_loss, logits = self.meta_loss(bx, fast_weights, by, bt, t)
meta_losses[i] += meta_loss
self.optimizer.zero_grad()
meta_loss = sum(meta_losses)/len(meta_losses)
if torch.isnan(meta_loss):
ipdb.set_trace()
meta_loss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
mc_meta_losses[pass_itr] = meta_loss
self.optimizer.aggregate_grads(batch_size=batch_sz)
print_std = False
if(self.obseve_itr%220==0):
print_std = True
self.optimizer.step(print_std = print_std)
meta_loss_return = sum(mc_meta_losses)/len(mc_meta_losses)
return meta_loss_return.item()
| 11,558
| 34.897516
| 121
|
py
|
La-MAML
|
La-MAML-main/model/gem.py
|
### This is a copy of GEM from https://github.com/facebookresearch/GradientEpisodicMemory.
### In order to ensure complete reproducibility, we do not change the file and treat it as a baseline.
# Copyright 2019-present, IBM Research
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import model.meta.learner as Learner
import model.meta.modelfactory as mf
import numpy as np
import quadprog
# Auxiliary functions useful for GEM's inner optimization.
def compute_offsets(task, nc_per_task, is_cifar):
"""
Compute offsets for cifar to determine which
outputs to select for a given task.
"""
if is_cifar:
offset1 = task * nc_per_task
offset2 = (task + 1) * nc_per_task
else:
offset1 = 0
offset2 = nc_per_task
return offset1, offset2
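# e.g. compute_offsets(task=2, nc_per_task=5, is_cifar=True) -> (10, 15): task 2 owns output columns 10..14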
def store_grad(pp, grads, grad_dims, tid):
"""
This stores parameter gradients of past tasks.
pp: parameters
grads: gradients
grad_dims: list with number of parameters per layers
tid: task id
"""
# store the gradients
grads[:, tid].fill_(0.0)
cnt = 0
for param in pp():
if param.grad is not None:
beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
en = sum(grad_dims[:cnt + 1])
grads[beg: en, tid].copy_(param.grad.data.view(-1))
cnt += 1
def overwrite_grad(pp, newgrad, grad_dims):
"""
This is used to overwrite the gradients with a new gradient
vector, whenever violations occur.
pp: parameters
newgrad: corrected gradient
grad_dims: list storing number of parameters at each layer
"""
cnt = 0
for param in pp():
if param.grad is not None:
beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
en = sum(grad_dims[:cnt + 1])
this_grad = newgrad[beg: en].contiguous().view(
param.grad.data.size())
param.grad.data.copy_(this_grad)
cnt += 1
def project2cone2(gradient, memories, margin=0.5, eps = 1e-3):
"""
Solves the GEM dual QP described in the paper given a proposed
gradient "gradient", and a memory of task gradients "memories".
Overwrites "gradient" with the final projected update.
input: gradient, p-vector
input: memories, (t * p)-vector
output: x, p-vector
"""
memories_np = memories.cpu().t().double().numpy()
gradient_np = gradient.cpu().contiguous().view(-1).double().numpy()
t = memories_np.shape[0]
P = np.dot(memories_np, memories_np.transpose())
P = 0.5 * (P + P.transpose()) + np.eye(t) * eps
q = np.dot(memories_np, gradient_np) * -1
G = np.eye(t)
h = np.zeros(t) + margin
v = quadprog.solve_qp(P, q, G, h)[0]
x = np.dot(v, memories_np) + gradient_np
gradient.copy_(torch.Tensor(x).view(-1, 1))
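# --- Hedged usage sketch (illustrative addition, not part of the original GEM code) ---
# Shows how the helpers above are typically combined: past-task gradients are kept
# column-wise in a (p x n_tasks) matrix, the current task's gradient is checked for
# negative dot products against them, and projected via the dual QP above when a
# violation occurs. Shapes and task indices below are toy values for illustration only.
def _gem_projection_demo():
    p, n_tasks, current_task = 6, 3, 2
    grads = torch.randn(p, n_tasks)        # column t holds task t's flattened gradient
    past = torch.arange(current_task)      # indices of previously observed tasks
    dotp = torch.mm(grads[:, current_task].unsqueeze(0), grads.index_select(1, past))
    if (dotp < 0).sum() != 0:              # some past-task constraint is violated
        project2cone2(grads[:, current_task].unsqueeze(1), grads.index_select(1, past))
    return grads[:, current_task]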
class Net(nn.Module):
def __init__(self,
n_inputs,
n_outputs,
n_tasks,
args):
super(Net, self).__init__()
self.args = args
self.margin = args.memory_strength
self.is_cifar = ((args.dataset == 'cifar100') or (args.dataset == 'tinyimagenet'))
nl, nh = args.n_layers, args.n_hiddens
config = mf.ModelFactory.get_model(model_type = args.arch, sizes = [n_inputs] + [nh] * nl + [n_outputs],
dataset = args.dataset, args=args)
self.net = Learner.Learner(config, args=args)
self.netforward = self.net.forward
self.ce = nn.CrossEntropyLoss()
self.n_outputs = n_outputs
self.glances = args.glances
self.opt = optim.SGD(self.parameters(), args.lr)
self.n_memories = args.n_memories
self.gpu = args.cuda
# allocate episodic memory
self.memory_data = torch.FloatTensor(
n_tasks, self.n_memories, n_inputs)
self.memory_labs = torch.LongTensor(n_tasks, self.n_memories)
if args.cuda:
self.memory_data = self.memory_data.cuda()
self.memory_labs = self.memory_labs.cuda()
# allocate temporary synaptic memory
self.grad_dims = []
for param in self.parameters():
self.grad_dims.append(param.data.numel())
self.grads = torch.Tensor(sum(self.grad_dims), n_tasks)
if args.cuda:
self.grads = self.grads.cuda()
# allocate counters
self.observed_tasks = []
self.old_task = -1
self.mem_cnt = 0
if self.is_cifar:
self.nc_per_task = int(n_outputs / n_tasks)
else:
self.nc_per_task = n_outputs
if args.cuda:
self.cuda()
def forward(self, x, t):
if self.args.dataset == 'tinyimagenet':
x = x.view(-1, 3, 64, 64)
elif self.args.dataset == 'cifar100':
x = x.view(-1, 3, 32, 32)
output = self.netforward(x)
if self.is_cifar:
# make sure we predict classes within the current task
offset1 = int(t * self.nc_per_task)
offset2 = int((t + 1) * self.nc_per_task)
if offset1 > 0:
output[:, :offset1].data.fill_(-10e10)
if offset2 < self.n_outputs:
output[:, offset2:self.n_outputs].data.fill_(-10e10)
return output
def observe(self, x, y, t):
"""
Function equivalent to a single fwd+bkwd loop on one batch
of x,y,t: inputs, labels, task IDs
for each batch of (x,y,t) input to this function
        the update is done 'glances' number of times
since in the single-pass setting, this batch is seen only once
and so enough updates need to be made on each data point
"""
x = x.view(x.size(0), -1)
# update memory
if t != self.old_task:
self.observed_tasks.append(t)
self.old_task = t
# in the single-pass setting, take multiple glances over every batch
for pass_itr in range(self.glances):
# only make changes like pushing to buffer once per batch and not for every glance
if(pass_itr==0):
# Update ring buffer storing examples from current task
bsz = y.data.size(0)
endcnt = min(self.mem_cnt + bsz, self.n_memories)
effbsz = endcnt - self.mem_cnt
self.memory_data[t, self.mem_cnt: endcnt].copy_(
x.data[: effbsz])
if bsz == 1:
self.memory_labs[t, self.mem_cnt] = y.data[0]
else:
self.memory_labs[t, self.mem_cnt: endcnt].copy_(
y.data[: effbsz])
self.mem_cnt += effbsz
if self.mem_cnt == self.n_memories:
self.mem_cnt = 0
# compute gradient on previous tasks
if len(self.observed_tasks) > 1:
for tt in range(len(self.observed_tasks) - 1):
self.zero_grad()
# fwd/bwd on the examples in the memory
past_task = self.observed_tasks[tt]
offset1, offset2 = compute_offsets(past_task, self.nc_per_task,
self.is_cifar)
ptloss = self.ce(
self.forward(
Variable(self.memory_data[past_task]),
past_task)[:, offset1: offset2],
Variable(self.memory_labs[past_task] - offset1))
ptloss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
store_grad(self.parameters, self.grads, self.grad_dims,
past_task)
# now compute the grad on the current minibatch
self.zero_grad()
offset1, offset2 = compute_offsets(t, self.nc_per_task, self.is_cifar)
loss = self.ce(self.forward(x, t)[:, offset1: offset2], y - offset1)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
# check if gradient violates constraints
if len(self.observed_tasks) > 1:
# copy gradient
store_grad(self.parameters, self.grads, self.grad_dims, t)
indx = torch.cuda.LongTensor(self.observed_tasks[:-1]) if self.gpu \
else torch.LongTensor(self.observed_tasks[:-1])
dotp = torch.mm(self.grads[:, t].unsqueeze(0),
self.grads.index_select(1, indx))
if (dotp < 0).sum() != 0:
project2cone2(self.grads[:, t].unsqueeze(1),
self.grads.index_select(1, indx), self.margin)
# copy gradients back
overwrite_grad(self.parameters, self.grads[:, t],
self.grad_dims)
self.opt.step()
return loss.item()
| 9,366
| 36.468
| 112
|
py
|
La-MAML
|
La-MAML-main/model/lamaml_base.py
|
import random
from random import shuffle
import numpy as np
import ipdb
import math
import torch
from torch.autograd import Variable
import torch.nn as nn
import model.meta.learner as Learner
import model.meta.modelfactory as mf
from scipy.stats import pearsonr
import datetime
class BaseNet(torch.nn.Module):
def __init__(self,
n_inputs,
n_outputs,
n_tasks,
args):
super(BaseNet, self).__init__()
self.args = args
nl, nh = args.n_layers, args.n_hiddens
config = mf.ModelFactory.get_model(model_type = args.arch, sizes = [n_inputs] + [nh] * nl + [n_outputs],
dataset = args.dataset, args=args)
self.net = Learner.Learner(config, args)
# define the lr params
self.net.define_task_lr_params(alpha_init = args.alpha_init)
self.opt_wt = torch.optim.SGD(list(self.net.parameters()), lr=args.opt_wt)
self.opt_lr = torch.optim.SGD(list(self.net.alpha_lr.parameters()), lr=args.opt_lr)
self.epoch = 0
# allocate buffer
self.M = []
self.M_new = []
self.age = 0
# setup losses
self.loss = torch.nn.CrossEntropyLoss()
self.is_cifar = ((args.dataset == 'cifar100') or (args.dataset == 'tinyimagenet'))
self.glances = args.glances
self.pass_itr = 0
self.real_epoch = 0
self.current_task = 0
self.memories = args.memories
self.batchSize = int(args.replay_batch_size)
self.cuda = args.cuda
if self.cuda:
self.net = self.net.cuda()
self.n_outputs = n_outputs
def push_to_mem(self, batch_x, batch_y, t):
"""
Reservoir sampling to push subsampled stream
of data points to replay/memory buffer
"""
if(self.real_epoch > 0 or self.pass_itr>0):
return
batch_x = batch_x.cpu()
batch_y = batch_y.cpu()
t = t.cpu()
for i in range(batch_x.shape[0]):
self.age += 1
if len(self.M_new) < self.memories:
self.M_new.append([batch_x[i], batch_y[i], t])
else:
p = random.randint(0,self.age)
if p < self.memories:
self.M_new[p] = [batch_x[i], batch_y[i], t]
def getBatch(self, x, y, t, batch_size=None):
"""
Given the new data points, create a batch of old + new data,
where old data is sampled from the memory buffer
"""
if(x is not None):
mxi = np.array(x)
myi = np.array(y)
mti = np.ones(x.shape[0], dtype=int)*t
else:
mxi = np.empty( shape=(0, 0) )
myi = np.empty( shape=(0, 0) )
mti = np.empty( shape=(0, 0) )
bxs = []
bys = []
bts = []
if self.args.use_old_task_memory and t>0:
MEM = self.M
else:
MEM = self.M_new
batch_size = self.batchSize if batch_size is None else batch_size
if len(MEM) > 0:
order = [i for i in range(0,len(MEM))]
osize = min(batch_size,len(MEM))
for j in range(0,osize):
shuffle(order)
k = order[j]
x,y,t = MEM[k]
xi = np.array(x)
yi = np.array(y)
ti = np.array(t)
bxs.append(xi)
bys.append(yi)
bts.append(ti)
for j in range(len(myi)):
bxs.append(mxi[j])
bys.append(myi[j])
bts.append(mti[j])
bxs = Variable(torch.from_numpy(np.array(bxs))).float()
bys = Variable(torch.from_numpy(np.array(bys))).long().view(-1)
bts = Variable(torch.from_numpy(np.array(bts))).long().view(-1)
# handle gpus if specified
if self.cuda:
bxs = bxs.cuda()
bys = bys.cuda()
bts = bts.cuda()
return bxs,bys,bts
def compute_offsets(self, task):
# mapping from classes [1-100] to their idx within a task
offset1 = task * self.nc_per_task
offset2 = (task + 1) * self.nc_per_task
return int(offset1), int(offset2)
def zero_grads(self):
if self.args.learn_lr:
self.opt_lr.zero_grad()
self.opt_wt.zero_grad()
self.net.zero_grad()
self.net.alpha_lr.zero_grad()
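# --- Hedged illustration (illustrative addition, not part of the original file) ---
# A minimal, self-contained sketch of the reservoir-sampling rule used by push_to_mem
# above: once the buffer is full, an incoming item replaces a random slot with
# probability capacity / (age + 1), keeping the buffer an approximately uniform
# sample of the stream. Names below are illustrative only.
def _reservoir_sampling_demo(stream, capacity):
    buffer, age = [], 0
    for item in stream:
        age += 1
        if len(buffer) < capacity:
            buffer.append(item)
        else:
            j = random.randint(0, age)   # mirrors the inclusive randint(0, self.age) above
            if j < capacity:
                buffer[j] = item
    return buffer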
| 4,545
| 29.10596
| 112
|
py
|
La-MAML
|
La-MAML-main/model/lamaml_cifar.py
|
import random
import numpy as np
import ipdb
import math
import torch
import torch.nn as nn
from model.lamaml_base import *
class Net(BaseNet):
def __init__(self,
n_inputs,
n_outputs,
n_tasks,
args):
super(Net, self).__init__(n_inputs,
n_outputs,
n_tasks,
args)
self.nc_per_task = n_outputs / n_tasks
def take_loss(self, t, logits, y):
# compute loss on data from a single task
offset1, offset2 = self.compute_offsets(t)
loss = self.loss(logits[:, offset1:offset2], y-offset1)
return loss
def take_multitask_loss(self, bt, t, logits, y):
        # compute loss on data from multiple tasks
# separate from take_loss() since the output positions for each task's
        # logit vector are different and we only want to compute the loss on the relevant positions
# since this is a task incremental setting
loss = 0.0
for i, ti in enumerate(bt):
offset1, offset2 = self.compute_offsets(ti)
loss += self.loss(logits[i, offset1:offset2].unsqueeze(0), y[i].unsqueeze(0)-offset1)
return loss/len(bt)
def forward(self, x, t):
output = self.net.forward(x)
# make sure we predict classes within the current task
offset1, offset2 = self.compute_offsets(t)
if offset1 > 0:
output[:, :offset1].data.fill_(-10e10)
if offset2 < self.n_outputs:
output[:, int(offset2):self.n_outputs].data.fill_(-10e10)
return output
def meta_loss(self, x, fast_weights, y, bt, t):
"""
differentiate the loss through the network updates wrt alpha
"""
offset1, offset2 = self.compute_offsets(t)
logits = self.net.forward(x, fast_weights)[:, :offset2]
loss_q = self.take_multitask_loss(bt, t, logits, y)
return loss_q, logits
def inner_update(self, x, fast_weights, y, t):
"""
        Update the fast weights using the current samples and return the updated fast weights
"""
offset1, offset2 = self.compute_offsets(t)
logits = self.net.forward(x, fast_weights)[:, :offset2]
loss = self.take_loss(t, logits, y)
if fast_weights is None:
fast_weights = self.net.parameters()
        # NOTE higher-order grads are enabled when args.second_order is True (sets create_graph/retain_graph below)
graph_required = self.args.second_order
grads = list(torch.autograd.grad(loss, fast_weights, create_graph=graph_required, retain_graph=graph_required))
for i in range(len(grads)):
grads[i] = torch.clamp(grads[i], min = -self.args.grad_clip_norm, max = self.args.grad_clip_norm)
fast_weights = list(
map(lambda p: p[1][0] - p[0] * p[1][1], zip(grads, zip(fast_weights, self.net.alpha_lr))))
return fast_weights
def observe(self, x, y, t):
self.net.train()
for pass_itr in range(self.glances):
self.pass_itr = pass_itr
perm = torch.randperm(x.size(0))
x = x[perm]
y = y[perm]
self.epoch += 1
self.zero_grads()
if t != self.current_task:
self.M = self.M_new.copy()
self.current_task = t
batch_sz = x.shape[0]
n_batches = self.args.cifar_batches
rough_sz = math.ceil(batch_sz/n_batches)
fast_weights = None
meta_losses = [0 for _ in range(n_batches)]
            # get a batch by augmenting incoming data with old task data, used for
# computing meta-loss
bx, by, bt = self.getBatch(x.cpu().numpy(), y.cpu().numpy(), t)
for i in range(n_batches):
batch_x = x[i*rough_sz : (i+1)*rough_sz]
batch_y = y[i*rough_sz : (i+1)*rough_sz]
                # assuming labels for the inner update are from the same task
fast_weights = self.inner_update(batch_x, fast_weights, batch_y, t)
# only sample and push to replay buffer once for each task's stream
# instead of pushing every epoch
if(self.real_epoch == 0):
self.push_to_mem(batch_x, batch_y, torch.tensor(t))
meta_loss, logits = self.meta_loss(bx, fast_weights, by, bt, t)
meta_losses[i] += meta_loss
# Taking the meta gradient step (will update the learning rates)
self.zero_grads()
meta_loss = sum(meta_losses)/len(meta_losses)
meta_loss.backward()
torch.nn.utils.clip_grad_norm_(self.net.alpha_lr.parameters(), self.args.grad_clip_norm)
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
if self.args.learn_lr:
self.opt_lr.step()
# if sync-update is being carried out (as in sync-maml) then update the weights using the optimiser
# otherwise update the weights with sgd using updated LRs as step sizes
if(self.args.sync_update):
self.opt_wt.step()
else:
for i,p in enumerate(self.net.parameters()):
# using relu on updated LRs to avoid negative values
p.data = p.data - p.grad * nn.functional.relu(self.net.alpha_lr[i])
self.net.zero_grad()
self.net.alpha_lr.zero_grad()
return meta_loss.item()
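# --- Hedged sketch (illustrative addition, not part of the original file) ---
# Minimal illustration of the inner update rule used in inner_update() above: every
# parameter is stepped with its own learnable per-parameter learning rate alpha,
#   theta_fast = theta - alpha * clip(grad).
# Plain tensors stand in for the Learner's parameters; names and values are illustrative.
def _per_param_lr_step_demo(params, grads, alphas, clip=2.0):
    clipped = [torch.clamp(g, min=-clip, max=clip) for g in grads]
    return [p - a * g for p, a, g in zip(params, alphas, clipped)]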
| 5,732
| 35.987097
| 119
|
py
|
La-MAML
|
La-MAML-main/model/agem.py
|
### This is a pytorch implementation of AGEM based on https://github.com/facebookresearch/agem.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import ipdb
import model.meta.learner as Learner
import model.meta.modelfactory as mf
import numpy as np
import random
# Auxiliary functions useful for AGEM's inner optimization.
def compute_offsets(task, nc_per_task, is_cifar):
"""
Compute offsets for cifar to determine which
outputs to select for a given task.
"""
if is_cifar:
offset1 = task * nc_per_task
offset2 = (task + 1) * nc_per_task
else:
offset1 = 0
offset2 = nc_per_task
return offset1, offset2
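# --- Hedged worked example (illustrative addition, not part of the original file) ---
# With, say, 5 classes per task on CIFAR, task 2 maps to offsets (10, 15): losses are
# then computed only on logits[:, 10:15], with the labels shifted down by offset1 so
# they fall in [0, 5). The numbers here are illustrative only.
def _compute_offsets_demo():
    return compute_offsets(task=2, nc_per_task=5, is_cifar=True)   # -> (10, 15)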
def store_grad(pp, grads, grad_dims, tid):
"""
This stores parameter gradients of past tasks.
pp: parameters
grads: gradients
grad_dims: list with number of parameters per layers
tid: task id
"""
# store the gradients
grads[:, tid].fill_(0.0)
cnt = 0
for param in pp():
if param.grad is not None:
beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
en = sum(grad_dims[:cnt + 1])
grads[beg: en, tid].copy_(param.grad.data.view(-1))
cnt += 1
def overwrite_grad(pp, newgrad, grad_dims):
"""
This is used to overwrite the gradients with a new gradient
vector, whenever violations occur.
pp: parameters
newgrad: corrected gradient
grad_dims: list storing number of parameters at each layer
"""
cnt = 0
for param in pp():
if param.grad is not None:
beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
en = sum(grad_dims[:cnt + 1])
this_grad = newgrad[beg: en].contiguous().view(
param.grad.data.size())
param.grad.data.copy_(this_grad)
cnt += 1
def projectgrad(gradient, memories, margin=0.5, eps = 1e-3, oiter = 0):
"""
Solves the GEM dual QP described in the paper given a proposed
gradient "gradient", and a memory of task gradients "memories".
Overwrites "gradient" with the final projected update.
input: gradient, p-vector
input: memories, (t * p)-vector
output: x, p-vector
"""
similarity = torch.nn.functional.cosine_similarity(gradient.t(), memories.t().mean(dim=0).unsqueeze(0))
memories_np = memories.cpu().t().double().numpy()
gradient_np = gradient.cpu().contiguous().view(-1).double().numpy()
# merge memories
t = memories_np.shape[0]
memories_np2 = memories_np.mean(axis=0).reshape(1, memories_np.shape[1])
ref_mag = np.dot(memories_np2, memories_np2.transpose())
dotp = np.dot(gradient_np.reshape(1, -1), memories_np2.transpose())
if(oiter%100==0):
print('similarity : ', similarity.item())
print('dotp:', dotp)
if(dotp[0,0]<0):
proj = gradient_np.reshape(1, -1) - ((dotp/ ref_mag) * memories_np2)
gradient.copy_(torch.Tensor(proj).view(-1, 1))
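# --- Hedged illustration (illustrative addition, not part of the original file) ---
# In the violating case the projection above is the standard A-GEM rule
#   g_proj = g - (g . g_ref / g_ref . g_ref) * g_ref,
# where g_ref is the mean gradient over the memory examples. A toy numpy version,
# with made-up dimensions, purely for illustration:
def _agem_projection_demo():
    g = np.random.randn(8)          # current-task gradient
    g_ref = np.random.randn(8)      # mean gradient over the replay memory
    dotp = float(np.dot(g, g_ref))
    if dotp < 0:                    # only project when the constraint is violated
        g = g - (dotp / float(np.dot(g_ref, g_ref))) * g_ref
    return g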
class Net(nn.Module):
def __init__(self,
n_inputs,
n_outputs,
n_tasks,
args):
super(Net, self).__init__()
self.args = args
nl, nh = args.n_layers, args.n_hiddens
self.margin = args.memory_strength
self.is_cifar = ((args.dataset == 'cifar100') or (args.dataset == 'tinyimagenet'))
nl, nh = args.n_layers, args.n_hiddens
config = mf.ModelFactory.get_model(model_type = args.arch, sizes = [n_inputs] + [nh] * nl + [n_outputs],
dataset = args.dataset, args=args)
self.net = Learner.Learner(config, args)
self.ce = nn.CrossEntropyLoss()
self.bce = torch.nn.CrossEntropyLoss()
self.n_outputs = n_outputs
self.glances = args.glances
self.opt = optim.SGD(self.parameters(), args.lr)
self.n_memories = args.n_memories
self.gpu = args.cuda
self.age = 0
self.M = []
self.memories = args.memories
self.grad_align = []
self.grad_task_align = {}
self.current_task = 0
# allocate episodic memory
self.memory_data = torch.FloatTensor(
n_tasks, self.n_memories, n_inputs)
self.memory_labs = torch.LongTensor(n_tasks, self.n_memories)
if args.cuda:
self.memory_data = self.memory_data.cuda()
self.memory_labs = self.memory_labs.cuda()
# allocate temporary synaptic memory
self.grad_dims = []
for param in self.parameters():
self.grad_dims.append(param.data.numel())
self.grads = torch.Tensor(sum(self.grad_dims), n_tasks)
if args.cuda:
self.grads = self.grads.cuda()
# allocate counters
self.observed_tasks = []
self.mem_cnt = 0
if self.is_cifar:
self.nc_per_task = int(n_outputs / n_tasks)
else:
self.nc_per_task = n_outputs
if args.cuda:
self.cuda()
self.iter = 0
def forward(self, x, t):
if self.args.dataset == 'tinyimagenet':
x = x.view(-1, 3, 64, 64)
elif self.args.dataset == 'cifar100':
x = x.view(-1, 3, 32, 32)
output = self.net.forward(x)
if self.is_cifar:
# make sure we predict classes within the current task
offset1 = int(t * self.nc_per_task)
offset2 = int((t + 1) * self.nc_per_task)
if offset1 > 0:
output[:, :offset1].data.fill_(-10e10)
if offset2 < self.n_outputs:
output[:, offset2:self.n_outputs].data.fill_(-10e10)
return output
def observe(self, x, y, t):
self.iter +=1
x = x.view(x.size(0), -1)
# update memory
if t != self.current_task:
self.observed_tasks.append(t)
self.current_task = t
self.grad_align.append([])
for pass_itr in range(self.glances):
if(pass_itr==0):
# Update ring buffer storing examples from current task
bsz = y.data.size(0)
endcnt = min(self.mem_cnt + bsz, self.n_memories)
effbsz = endcnt - self.mem_cnt
self.memory_data[t, self.mem_cnt: endcnt].copy_(
x.data[: effbsz])
if bsz == 1:
self.memory_labs[t, self.mem_cnt] = y.data[0]
else:
self.memory_labs[t, self.mem_cnt: endcnt].copy_(
y.data[: effbsz])
self.mem_cnt += effbsz
if self.mem_cnt == self.n_memories:
self.mem_cnt = 0
# compute gradient on previous tasks
if len(self.observed_tasks) > 1:
for tt in range(len(self.observed_tasks) - 1):
self.zero_grad()
# fwd/bwd on the examples in the memory
past_task = self.observed_tasks[tt]
offset1, offset2 = compute_offsets(past_task, self.nc_per_task,
self.is_cifar)
ptloss = self.ce(
self.forward(
Variable(self.memory_data[past_task]),
past_task)[:, offset1: offset2],
Variable(self.memory_labs[past_task] - offset1))
ptloss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
store_grad(self.parameters, self.grads, self.grad_dims,
past_task)
# now compute the grad on the current minibatch
self.zero_grad()
offset1, offset2 = compute_offsets(t, self.nc_per_task, self.is_cifar)
loss = self.ce(self.forward(x, t)[:, offset1: offset2], y - offset1)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
# check if gradient violates constraints
if len(self.observed_tasks) > 1:
# copy gradient
store_grad(self.parameters, self.grads, self.grad_dims, t)
indx = torch.cuda.LongTensor(self.observed_tasks[:-1]) if self.gpu \
else torch.LongTensor(self.observed_tasks[:-1])
projectgrad(self.grads[:, t].unsqueeze(1),
self.grads.index_select(1, indx), self.margin, oiter = self.iter)
# copy gradients back
overwrite_grad(self.parameters, self.grads[:, t],
self.grad_dims)
self.opt.step()
xi = x.data.cpu().numpy()
yi = y.data.cpu().numpy()
for i in range(0,x.size()[0]):
self.age += 1
# Reservoir sampling memory update:
if len(self.M) < self.memories:
self.M.append([xi[i],yi[i],t])
else:
p = random.randint(0,self.age)
if p < self.memories:
self.M[p] = [xi[i],yi[i],t]
return loss.item()
| 9,569
| 34.576208
| 112
|
py
|
La-MAML
|
La-MAML-main/model/meralg1.py
|
# An implementation of MER Algorithm 1 from https://openreview.net/pdf?id=B1gTShAct7
# Copyright 2019-present, IBM Research
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import random
from torch.nn.modules.loss import CrossEntropyLoss
from random import shuffle
import sys
import ipdb
from copy import deepcopy
import warnings
import model.meta.learner as Learner
import model.meta.modelfactory as mf
warnings.filterwarnings("ignore")
class Net(nn.Module):
def __init__(self,
n_inputs,
n_outputs,
n_tasks,
args):
super(Net, self).__init__()
self.args = args
nl, nh = args.n_layers, args.n_hiddens
self.is_cifar = (args.dataset == 'cifar100' or args.dataset == 'tinyimagenet')
config = mf.ModelFactory.get_model(args.arch, sizes=[n_inputs] + [nh] * nl + [n_outputs], dataset=args.dataset, args=args)
self.net = Learner.Learner(config, args=args)
self.netforward = self.net.forward
self.bce = torch.nn.CrossEntropyLoss()
self.n_outputs = n_outputs
if self.is_cifar:
self.nc_per_task = n_outputs / n_tasks
else:
self.nc_per_task = n_outputs
self.opt = optim.SGD(self.parameters(), args.lr)
self.batchSize = int(args.replay_batch_size)
self.memories = args.memories
self.steps = int(args.batches_per_example)
self.beta = args.beta
self.gamma = args.gamma
# allocate buffer
self.M = []
self.age = 0
# handle gpus if specified
self.cuda = args.cuda
if self.cuda:
self.net = self.net.cuda()
def forward(self, x, t):
output = self.netforward(x)
if self.is_cifar:
offset1, offset2 = self.compute_offsets(t)
if offset1 > 0:
output[:, :offset1].data.fill_(-10e10)
if offset2 < self.n_outputs:
output[:, int(offset2):self.n_outputs].data.fill_(-10e10)
return output
def compute_offsets(self, task):
if self.is_cifar:
offset1 = task * self.nc_per_task
offset2 = (task + 1) * self.nc_per_task
else:
offset1 = 0
offset2 = self.n_outputs
return int(offset1), int(offset2)
def getBatch(self,x,y,t):
if(x is not None):
xi = Variable(torch.from_numpy(np.array(x))).float().unsqueeze(0) #.view(1,-1)
yi = Variable(torch.from_numpy(np.array(y))).long()
ti = Variable(torch.from_numpy(np.array(t))).long()
if self.cuda:
xi = xi.cuda()
yi = yi.cuda()
ti = ti.cuda()
bxs = [xi]
bys = [yi]
bts = [ti]
else:
bxs = []
bys = []
bts = []
if len(self.M) > 0:
order = [i for i in range(0,len(self.M))]
osize = min(self.batchSize,len(self.M))
for j in range(0,osize):
shuffle(order)
k = order[j]
x,y,t = self.M[k]
xi = Variable(torch.from_numpy(np.array(x))).float().unsqueeze(0) #.view(1,-1)
yi = Variable(torch.from_numpy(np.array(y))).long()
ti = Variable(torch.from_numpy(np.array(t))).long()
# handle gpus if specified
if self.cuda:
xi = xi.cuda()
yi = yi.cuda()
ti = ti.cuda()
bxs.append(xi)
bys.append(yi)
bts.append(ti)
return bxs,bys,bts
def observe(self, x, y, t):
# step through elements of x
for i in range(0,x.size()[0]):
self.age += 1
xi = x[i].data.cpu().numpy()
yi = y[i].data.cpu().numpy()
self.net.zero_grad()
before = deepcopy(self.net.state_dict())
for step in range(0,self.steps):
weights_before = deepcopy(self.net.state_dict())
                # Check for NaNs in the parameters before the inner steps
                if any(torch.isnan(v).any() for v in weights_before.values()):
                    ipdb.set_trace()
# Draw batch from buffer:
bxs, bys, bts = self.getBatch(xi,yi,t)
loss = 0.0
total_loss = 0.0
for idx in range(len(bxs)):
self.net.zero_grad()
bx = bxs[idx]
by = bys[idx]
bt = bts[idx]
if self.is_cifar:
offset1, offset2 = self.compute_offsets(bt)
prediction = (self.netforward(bx)[:, offset1:offset2])
loss = self.bce(prediction,
by.unsqueeze(0)-offset1)
else:
prediction = self.forward(bx,0)
loss = self.bce(prediction, by.unsqueeze(0))
if torch.isnan(loss):
ipdb.set_trace()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
self.opt.step()
total_loss += loss.item()
weights_after = self.net.state_dict()
                if any(torch.isnan(v).any() for v in weights_after.values()):
                    ipdb.set_trace()
# Within batch Reptile meta-update:
self.net.load_state_dict({name : weights_before[name] + ((weights_after[name] - weights_before[name]) * self.beta) for name in weights_before})
after = self.net.state_dict()
# Across batch Reptile meta-update:
self.net.load_state_dict({name : before[name] + ((after[name] - before[name]) * self.gamma) for name in before})
# Reservoir sampling memory update:
if len(self.M) < self.memories:
self.M.append([xi,yi,t])
else:
p = random.randint(0,self.age)
if p < self.memories:
self.M[p] = [xi,yi,t]
return total_loss/self.steps
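# --- Hedged illustration (illustrative addition, not part of the original file) ---
# The two load_state_dict calls in observe() implement Reptile-style interpolation,
#   theta <- theta_before + rate * (theta_after - theta_before),
# applied once per buffer batch (rate = beta) and once per incoming example
# (rate = gamma). A minimal state-dict version with illustrative names:
def _reptile_interpolation_demo(weights_before, weights_after, rate):
    return {name: weights_before[name] + (weights_after[name] - weights_before[name]) * rate
            for name in weights_before}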
| 6,478
| 31.888325
| 159
|
py
|
La-MAML
|
La-MAML-main/model/iid2.py
|
import torch
import numpy as np
import random
import model.meta.learner as Learner
import model.meta.modelfactory as mf
import ipdb
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("once")
"""
Multi task
big batch size, set increment 100 so that it is treated as 1 task with all classes in the dataset
inference time for acc eval, use offsets
"""
class Net(torch.nn.Module):
def __init__(self,
n_inputs,
n_outputs,
n_tasks,
args):
super(Net, self).__init__()
self.args = args
self.nt = n_tasks
self.n_feat = n_outputs
self.n_classes = n_outputs
arch = args.arch
nl, nh = args.n_layers, args.n_hiddens
config = mf.ModelFactory.get_model(model_type = arch, sizes = [n_inputs] + [nh] * nl + [n_outputs],
dataset = args.dataset, args=args)
self.net = Learner.Learner(config, args)
# setup optimizer
self.opt = torch.optim.SGD(self.parameters(), lr=args.lr)
# setup losses
self.loss = torch.nn.CrossEntropyLoss()
self.gpu = args.cuda
self.nc_per_task = int(n_outputs / n_tasks)
self.n_outputs = n_outputs
def compute_offsets(self, task):
offset1 = task * self.nc_per_task
offset2 = (task + 1) * self.nc_per_task
return int(offset1), int(offset2)
def take_multitask_loss(self, bt, logits, y):
loss = 0.0
for i, ti in enumerate(bt):
offset1, offset2 = self.compute_offsets(ti)
loss += self.loss(logits[i, offset1:offset2].unsqueeze(0), y[i].unsqueeze(0)-offset1)
return loss/len(bt)
def forward(self, x, t):
output = self.net.forward(x)
# make sure we predict classes within the current task
if torch.unique(t).shape[0] == 1:
offset1, offset2 = self.compute_offsets(t[0].item())
if offset1 > 0:
output[:, :offset1].data.fill_(-10e10)
if offset2 < self.n_outputs:
output[:, int(offset2):self.n_outputs].data.fill_(-10e10)
else:
for i in range(len(t)):
offset1, offset2 = self.compute_offsets(t[i])
if offset1 > 0:
output[i, :offset1].data.fill_(-10e10)
if offset2 < self.n_outputs:
output[i, int(offset2):self.n_outputs].data.fill_(-10e10)
return output
def observe(self, x, y, t):
self.net.train()
self.net.zero_grad()
logits = self.net.forward(x)
loss = self.take_multitask_loss(t, logits, y)
loss.backward()
self.opt.step()
return loss.item()
| 2,878
| 30.637363
| 107
|
py
|
La-MAML
|
La-MAML-main/model/eralg4.py
|
# An implementation of Experience Replay (ER) with reservoir sampling and without using tasks from Algorithm 4 of https://openreview.net/pdf?id=B1gTShAct7
# Copyright 2019-present, IBM Research
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import random
from torch.nn.modules.loss import CrossEntropyLoss
from random import shuffle
import sys
import warnings
import math
import model.meta.modelfactory as mf
import model.meta.learner as Learner
warnings.filterwarnings("ignore")
class Net(nn.Module):
def __init__(self,
n_inputs,
n_outputs,
n_tasks,
args):
super(Net, self).__init__()
self.args = args
nl, nh = args.n_layers, args.n_hiddens
config = mf.ModelFactory.get_model(model_type = args.arch, sizes = [n_inputs] + [nh] * nl + [n_outputs],
dataset = args.dataset, args=args)
self.net = Learner.Learner(config, args)
self.opt_wt = optim.SGD(self.parameters(), lr=args.lr)
if self.args.learn_lr:
self.net.define_task_lr_params(alpha_init = args.alpha_init)
self.opt_lr = torch.optim.SGD(list(self.net.alpha_lr.parameters()), lr=args.opt_lr)
self.loss = CrossEntropyLoss()
self.is_cifar = ((args.dataset == 'cifar100') or (args.dataset == 'tinyimagenet'))
self.glances = args.glances
self.current_task = 0
self.memories = args.memories
self.batchSize = int(args.replay_batch_size)
# allocate buffer
self.M = []
self.age = 0
# handle gpus if specified
self.cuda = args.cuda
if self.cuda:
self.net = self.net.cuda()
self.n_outputs = n_outputs
if self.is_cifar:
self.nc_per_task = int(n_outputs / n_tasks)
else:
self.nc_per_task = n_outputs
def compute_offsets(self, task):
offset1 = task * self.nc_per_task
offset2 = (task + 1) * self.nc_per_task
return int(offset1), int(offset2)
def take_multitask_loss(self, bt, logits, y):
loss = 0.0
for i, ti in enumerate(bt):
offset1, offset2 = self.compute_offsets(ti)
loss += self.loss(logits[i, offset1:offset2].unsqueeze(0), y[i].unsqueeze(0)-offset1)
return loss/len(bt)
def forward(self, x, t):
output = self.net.forward(x)
if self.is_cifar:
# make sure we predict classes within the current task
offset1, offset2 = self.compute_offsets(t)
if offset1 > 0:
output[:, :offset1].data.fill_(-10e10)
if offset2 < self.n_outputs:
output[:, offset2:self.n_outputs].data.fill_(-10e10)
return output
def getBatch(self, x, y, t):
if(x is not None):
mxi = np.array(x)
myi = np.array(y)
mti = np.ones(x.shape[0], dtype=int)*t
else:
mxi = np.empty( shape=(0, 0) )
myi = np.empty( shape=(0, 0) )
mti = np.empty( shape=(0, 0) )
bxs = []
bys = []
bts = []
if len(self.M) > 0:
order = [i for i in range(0,len(self.M))]
osize = min(self.batchSize,len(self.M))
for j in range(0,osize):
shuffle(order)
k = order[j]
x,y,t = self.M[k]
xi = np.array(x)
yi = np.array(y)
ti = np.array(t)
bxs.append(xi)
bys.append(yi)
bts.append(ti)
for i in range(len(myi)):
bxs.append(mxi[i])
bys.append(myi[i])
bts.append(mti[i])
bxs = Variable(torch.from_numpy(np.array(bxs))).float()
bys = Variable(torch.from_numpy(np.array(bys))).long().view(-1)
bts = Variable(torch.from_numpy(np.array(bts))).long().view(-1)
# handle gpus if specified
if self.cuda:
bxs = bxs.cuda()
bys = bys.cuda()
bts = bts.cuda()
return bxs,bys,bts
def observe(self, x, y, t):
        ### convert the incoming batch to numpy; its elements are pushed to memory below
xi = x.data.cpu().numpy()
yi = y.data.cpu().numpy()
if t != self.current_task:
self.current_task = t
if self.args.learn_lr:
loss = self.la_ER(x, y, t)
else:
loss = self.ER(xi, yi, t)
for i in range(0, x.size()[0]):
self.age += 1
# Reservoir sampling memory update:
if len(self.M) < self.memories:
self.M.append([xi[i], yi[i], t])
else:
p = random.randint(0,self.age)
if p < self.memories:
self.M[p] = [xi[i], yi[i], t]
return loss.item()
def ER(self, x, y, t):
for pass_itr in range(self.glances):
self.net.zero_grad()
# Draw batch from buffer:
bx,by,bt = self.getBatch(x,y,t)
bx = bx.squeeze()
prediction = self.net.forward(bx)
loss = self.take_multitask_loss(bt, prediction, by)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
self.opt_wt.step()
return loss
def inner_update(self, x, fast_weights, y, t):
"""
        Update the fast weights using the current samples and return the updated fast weights
"""
if self.is_cifar:
offset1, offset2 = self.compute_offsets(t)
logits = self.net.forward(x, fast_weights)[:, :offset2]
loss = self.loss(logits[:, offset1:offset2], y-offset1)
else:
logits = self.net.forward(x, fast_weights)
loss = self.loss(logits, y)
if fast_weights is None:
fast_weights = self.net.parameters()
graph_required = self.args.second_order
grads = list(torch.autograd.grad(loss, fast_weights, create_graph=graph_required, retain_graph=graph_required))
for i in range(len(grads)):
grads[i] = torch.clamp(grads[i], min = -self.args.grad_clip_norm, max = self.args.grad_clip_norm)
fast_weights = list(
map(lambda p: p[1][0] - p[0] * p[1][1], zip(grads, zip(fast_weights, self.net.alpha_lr))))
return fast_weights, loss.item()
def la_ER(self, x, y, t):
"""
this ablation tests whether it suffices to just do the learning rate modulation
        guided by gradient alignment + clipping (which La-MAML does implicitly through autodiff)
and use it with ER (therefore no meta-learning for the weights)
"""
for pass_itr in range(self.glances):
perm = torch.randperm(x.size(0))
x = x[perm]
y = y[perm]
batch_sz = x.shape[0]
n_batches = self.args.cifar_batches
rough_sz = math.ceil(batch_sz/n_batches)
fast_weights = None
meta_losses = [0 for _ in range(n_batches)]
bx, by, bt = self.getBatch(x.cpu().numpy(), y.cpu().numpy(), t)
bx = bx.squeeze()
for i in range(n_batches):
batch_x = x[i*rough_sz : (i+1)*rough_sz]
batch_y = y[i*rough_sz : (i+1)*rough_sz]
                # assuming labels for the inner update are from the same task
fast_weights, inner_loss = self.inner_update(batch_x, fast_weights, batch_y, t)
prediction = self.net.forward(bx, fast_weights)
meta_loss = self.take_multitask_loss(bt, prediction, by)
meta_losses[i] += meta_loss
# update alphas
self.net.zero_grad()
self.opt_lr.zero_grad()
meta_loss = meta_losses[-1] #sum(meta_losses)/len(meta_losses)
meta_loss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
torch.nn.utils.clip_grad_norm_(self.net.alpha_lr.parameters(), self.args.grad_clip_norm)
# update the LRs (guided by meta-loss, but not the weights)
self.opt_lr.step()
# update weights
self.net.zero_grad()
# compute ER loss for network weights
prediction = self.net.forward(bx)
loss = self.take_multitask_loss(bt, prediction, by)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
# update weights with grad from simple ER loss
# and LRs obtained from meta-loss guided by old and new tasks
for i,p in enumerate(self.net.parameters()):
p.data = p.data - (p.grad * nn.functional.relu(self.net.alpha_lr[i]))
self.net.zero_grad()
self.net.alpha_lr.zero_grad()
return loss
| 9,335
| 32.342857
| 154
|
py
|
La-MAML
|
La-MAML-main/model/icarl.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
import random
import model.meta.learner as Learner
import model.meta.modelfactory as mf
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("once")
class Net(torch.nn.Module):
# Re-implementation of
# S.-A. Rebuffi, A. Kolesnikov, G. Sperl, and C. H. Lampert.
# iCaRL: Incremental classifier and representation learning.
# CVPR, 2017.
def __init__(self,
n_inputs,
n_outputs,
n_tasks,
args):
super(Net, self).__init__()
self.args = args
self.nt = n_tasks
self.reg = args.memory_strength
self.n_memories = args.n_memories
self.num_exemplars = 0
self.n_feat = n_outputs
self.n_classes = n_outputs
self.samples_per_task = args.samples_per_task * (1.0 - args.validation)
if self.samples_per_task <= 0:
            raise ValueError('set args.samples_per_task explicitly')
self.examples_seen = 0
self.glances = args.glances
# setup network
nl, nh = args.n_layers, args.n_hiddens
config = mf.ModelFactory.get_model(model_type = args.arch, sizes = [n_inputs] + [nh] * nl + [n_outputs],
dataset = args.dataset, args=args)
self.net = Learner.Learner(config, args)
# setup optimizer
self.opt = torch.optim.SGD(self.parameters(), lr=args.lr)
# setup losses
self.bce = torch.nn.CrossEntropyLoss()
self.kl = torch.nn.KLDivLoss() # for distillation
self.lsm = torch.nn.LogSoftmax(dim=1)
self.sm = torch.nn.Softmax(dim=1)
# memory
self.memx = None # stores raw inputs, PxD
self.memy = None
self.mem_class_x = {} # stores exemplars class by class
self.mem_class_y = {}
self.gpu = args.cuda
self.nc_per_task = int(n_outputs / n_tasks)
self.n_outputs = n_outputs
def netforward(self, x):
if self.args.dataset == 'tinyimagenet':
x = x.view(-1, 3, 64, 64)
elif self.args.dataset == 'cifar100':
x = x.view(-1, 3, 32, 32)
return self.net.forward(x)
def compute_offsets(self, task):
offset1 = task * self.nc_per_task
offset2 = (task + 1) * self.nc_per_task
return int(offset1), int(offset2)
def forward(self, x, t):
# nearest neighbor
nd = self.n_feat
ns = x.size(0)
if t * self.nc_per_task not in self.mem_class_x.keys():
            # no exemplars in memory yet: output a uniform distribution over the
            # classes of task t. (Above we only check for the presence of the first
            # class of this task; strictly, we should check all of them.)
out = torch.Tensor(ns, self.n_classes).fill_(-10e10)
out[:, int(t * self.nc_per_task): int((t + 1) * self.nc_per_task)].fill_(
1.0 / self.nc_per_task)
if self.gpu:
out = out.cuda()
return out
means = torch.ones(self.nc_per_task, nd) * float('inf')
if self.gpu:
means = means.cuda()
offset1, offset2 = self.compute_offsets(t)
for cc in range(offset1, offset2):
means[cc -
offset1] =self.netforward(self.mem_class_x[cc]).data.mean(0)
classpred = torch.LongTensor(ns)
preds = self.netforward(x).data.clone()
for ss in range(ns):
dist = (means - preds[ss].expand(self.nc_per_task, nd)).norm(2, 1)
_, ii = dist.min(0)
ii = ii.squeeze()
classpred[ss] = ii.item() + offset1
out = torch.zeros(ns, self.n_classes)
if self.gpu:
out = out.cuda()
for ss in range(ns):
out[ss, classpred[ss]] = 1
return out # return 1-of-C code, ns x nc
def forward_training(self, x, t):
output = self.netforward(x)
# make sure we predict classes within the current task
offset1, offset2 = self.compute_offsets(t)
# zero out all the logits outside the task's range
# since the output vector from the model is of dimension (num_tasks * num_classes_per_task)
if offset1 > 0:
output[:, :offset1].data.fill_(-10e10)
if offset2 < self.n_outputs:
output[:, offset2:self.n_outputs].data.fill_(-10e10)
return output
def observe(self, x, y, t):
x = x.view(x.size(0), -1)
self.net.train()
for pass_itr in range(self.glances):
# only make changes like pushing to buffer once per batch and not for every glance
if(pass_itr==0):
self.examples_seen += x.size(0)
if self.examples_seen < self.samples_per_task:
if self.memx is None:
self.memx = x.data.clone()
self.memy = y.data.clone()
else:
self.memx = torch.cat((self.memx, x.data.clone()))
self.memy = torch.cat((self.memy, y.data.clone()))
self.net.zero_grad()
offset1, offset2 = self.compute_offsets(t)
loss = self.bce((self.netforward(x)[:, offset1: offset2]),
y - offset1)
if self.num_exemplars > 0:
# distillation
for tt in range(t):
# first generate a minibatch with one example per class from
# previous tasks
inp_dist = torch.zeros(self.nc_per_task, x.size(1))
target_dist = torch.zeros(self.nc_per_task, self.n_feat)
offset1, offset2 = self.compute_offsets(tt)
if self.gpu:
inp_dist = inp_dist.cuda()
target_dist = target_dist.cuda()
for cc in range(self.nc_per_task):
indx = random.randint(0, len(self.mem_class_x[cc + offset1]) - 1)
inp_dist[cc] = self.mem_class_x[cc + offset1][indx].clone()
target_dist[cc] = self.mem_class_y[cc +
offset1][indx].clone()
# Add distillation loss
loss += self.reg * self.kl(
self.lsm(self.netforward(inp_dist)
[:, offset1: offset2]),
self.sm(target_dist[:, offset1: offset2])) * self.nc_per_task
# bprop and update
loss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
self.opt.step()
# check whether this is the last minibatch of the current task
# We assume only 1 epoch!
if self.examples_seen == self.args.n_epochs * self.samples_per_task:
self.examples_seen = 0
# get labels from previous task; we assume labels are consecutive
if self.gpu:
all_labs = torch.LongTensor(np.unique(self.memy.cpu().numpy()))
else:
all_labs = torch.LongTensor(np.unique(self.memy.numpy()))
num_classes = all_labs.size(0)
assert(num_classes == self.nc_per_task)
# Reduce exemplar set by updating value of num. exemplars per class
self.num_exemplars = int(self.n_memories /
(num_classes + len(self.mem_class_x.keys())))
offset1, offset2 = self.compute_offsets(t)
for ll in range(num_classes):
lab = all_labs[ll].cuda()
indxs = (self.memy == lab).nonzero().squeeze()
cdata = self.memx.index_select(0, indxs)
# Construct exemplar set for last task
mean_feature = self.netforward(cdata)[
:, offset1: offset2].data.clone().mean(0)
nd = self.nc_per_task
exemplars = torch.zeros(self.num_exemplars, x.size(1))
if self.gpu:
exemplars = exemplars.cuda()
ntr = cdata.size(0)
# used to keep track of which examples we have already used
taken = torch.zeros(ntr)
model_output = self.netforward(cdata)[
:, offset1: offset2].data.clone()
for ee in range(self.num_exemplars):
prev = torch.zeros(1, nd)
if self.gpu:
prev = prev.cuda()
if ee > 0:
prev = self.netforward(exemplars[:ee])[
:, offset1: offset2].data.clone().sum(0)
cost = (mean_feature.expand(ntr, nd) - (model_output
+ prev.expand(ntr, nd)) / (ee + 1)).norm(2, 1).squeeze()
_, indx = cost.sort(0)
winner = 0
while winner < indx.size(0) and taken[indx[winner]] == 1:
winner += 1
if winner < indx.size(0):
taken[indx[winner]] = 1
exemplars[ee] = cdata[indx[winner]].clone()
else:
exemplars = exemplars[:indx.size(0), :].clone()
self.num_exemplars = indx.size(0)
break
# update memory with exemplars
self.mem_class_x[lab.item()] = exemplars.clone()
# recompute outputs for distillation purposes
for cc in self.mem_class_x.keys():
self.mem_class_x[cc] = self.mem_class_x[cc][:self.num_exemplars]
self.mem_class_y[cc] = self.netforward(
self.mem_class_x[cc]).data.clone()
self.memx = None
self.memy = None
print(len(self.mem_class_x[0]))
return loss.item()
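# --- Hedged sketch (illustrative addition, not part of the original file) ---
# The inference rule in forward() above is nearest-mean-of-exemplars classification:
# each class is represented by the mean feature of its stored exemplars and a sample
# is assigned to the class whose mean is closest in L2 distance. Toy tensors below,
# shapes illustrative only (n samples, c classes, d features).
def _nearest_mean_demo(features, class_means):
    dists = torch.cdist(features, class_means)   # (n, c) pairwise L2 distances
    return dists.argmin(dim=1)                    # predicted class index per sample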
| 10,217
| 40.536585
| 116
|
py
|
La-MAML
|
La-MAML-main/model/meta/modelfactory.py
|
import ipdb
class ModelFactory():
def __init__(self):
pass
@staticmethod
def get_model(model_type, sizes, dataset='mnist', args=None):
net_list = []
if "mnist" in dataset:
if model_type=="linear":
for i in range(0, len(sizes) - 1):
net_list.append(('linear', [sizes[i+1], sizes[i]], ''))
if i < (len(sizes) - 2):
net_list.append(('relu', [True], ''))
if i == (len(sizes) - 2):
net_list.append(('rep', [], ''))
return net_list
elif dataset == "tinyimagenet":
if model_type == 'pc_cnn':
channels = 160
return [
('conv2d', [channels, 3, 3, 3, 2, 1], ''),
('relu', [True], ''),
('conv2d', [channels, channels, 3, 3, 2, 1], ''),
('relu', [True], ''),
('conv2d', [channels, channels, 3, 3, 2, 1], ''),
('relu', [True], ''),
('conv2d', [channels, channels, 3, 3, 2, 1], ''),
('relu', [True], ''),
('flatten', [], ''),
('rep', [], ''),
('linear', [640, 16 * channels], ''),
('relu', [True], ''),
('linear', [640, 640], ''),
('relu', [True], ''),
('linear', [sizes[-1], 640], '')
]
elif dataset == "cifar100":
if model_type == 'pc_cnn':
channels = 160
return [
('conv2d', [channels, 3, 3, 3, 2, 1], ''),
('relu', [True], ''),
('conv2d', [channels, channels, 3, 3, 2, 1], ''),
('relu', [True], ''),
('conv2d', [channels, channels, 3, 3, 2, 1], ''),
('relu', [True], ''),
('flatten', [], ''),
('rep', [], ''),
('linear', [320, 16 * channels], ''),
('relu', [True], ''),
('linear', [320, 320], ''),
('relu', [True], ''),
('linear', [sizes[-1], 320], '')
]
else:
print("Unsupported model; either implement the model in model/ModelFactory or choose a different model")
assert (False)
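# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# Example of the (name, params, extra_name) tuples produced for an MNIST MLP with
# sizes [784, 100, 100, 10]; the Learner in model/meta/learner.py consumes this list.
# The sizes below are illustrative only.
def _model_factory_demo():
    config = ModelFactory.get_model(model_type='linear', sizes=[784, 100, 100, 10], dataset='mnist')
    # config == [('linear', [100, 784], ''), ('relu', [True], ''),
    #            ('linear', [100, 100], ''), ('relu', [True], ''),
    #            ('linear', [10, 100], ''), ('rep', [], '')]
    return config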
| 2,530
| 30.246914
| 116
|
py
|
La-MAML
|
La-MAML-main/model/meta/learner.py
|
import math
import os
import sys
import traceback
import numpy as np
import ipdb
import torch
from torch import nn
from torch.nn import functional as F
class Learner(nn.Module):
def __init__(self, config, args = None):
"""
:param config: network config file, type:list of (string, list)
:param imgc: 1 or 3
:param imgsz: 28 or 84
"""
super(Learner, self).__init__()
self.config = config
self.tf_counter = 0
self.args = args
# this dict contains all tensors needed to be optimized
self.vars = nn.ParameterList()
# running_mean and running_var
self.vars_bn = nn.ParameterList()
self.names = []
for i, (name, param, extra_name) in enumerate(self.config):
            if name == 'conv2d':
# [ch_out, ch_in, kernelsz, kernelsz]
if(self.args.xav_init):
w = nn.Parameter(torch.ones(*param[:4]))
b = nn.Parameter(torch.zeros(param[0]))
torch.nn.init.xavier_normal_(w.data)
b.data.normal_(0, math.sqrt(2)/math.sqrt(1+9*b.data.shape[0]))
self.vars.append(w)
self.vars.append(b)
else:
w = nn.Parameter(torch.ones(*param[:4]))
                    # gain=1 according to cbfinn's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
            elif name == 'convt2d':
# [ch_in, ch_out, kernelsz, kernelsz, stride, padding]
w = nn.Parameter(torch.ones(*param[:4]))
                # gain=1 according to cbfinn's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_in, ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[1])))
            elif name == 'linear':
# layer += 1
if(self.args.xav_init):
w = nn.Parameter(torch.ones(*param))
# b = nn.Parameter(torch.zeros(param[0]))
torch.nn.init.xavier_normal_(w.data)
# b.data.normal_(0, math.sqrt(2)/math.sqrt(1+9*b.data.shape[0]))
self.vars.append(w)
# self.vars.append(b)
else:
# [ch_out, ch_in]
w = nn.Parameter(torch.ones(*param))
# gain=1 according to cbfinn's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
            elif name == 'cat':
pass
            elif name == 'cat_start':
pass
            elif name == "rep":
pass
elif name in ["residual3", "residual5", "in"]:
pass
            elif name == 'bn':
# [ch_out]
w = nn.Parameter(torch.ones(param[0]))
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
# must set requires_grad=False
running_mean = nn.Parameter(torch.zeros(param[0]), requires_grad=False)
running_var = nn.Parameter(torch.ones(param[0]), requires_grad=False)
self.vars_bn.extend([running_mean, running_var])
elif name in ['tanh', 'relu', 'upsample', 'avg_pool2d', 'max_pool2d',
'flatten', 'reshape', 'leakyrelu', 'sigmoid']:
continue
else:
raise NotImplementedError
def extra_repr(self):
info = ''
for name, param, extra_name in self.config:
            if name == 'conv2d':
tmp = 'conv2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)' \
% (param[1], param[0], param[2], param[3], param[4], param[5],)
info += tmp + '\n'
            elif name == 'convt2d':
tmp = 'convTranspose2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)' \
% (param[0], param[1], param[2], param[3], param[4], param[5],)
info += tmp + '\n'
            elif name == 'linear':
tmp = 'linear:(in:%d, out:%d)' % (param[1], param[0])
info += tmp + '\n'
            elif name == 'leakyrelu':
tmp = 'leakyrelu:(slope:%f)' % (param[0])
info += tmp + '\n'
            elif name == 'cat':
tmp = 'cat'
info += tmp + "\n"
            elif name == 'cat_start':
tmp = 'cat_start'
info += tmp + "\n"
            elif name == 'rep':
tmp = 'rep'
info += tmp + "\n"
            elif name == 'avg_pool2d':
tmp = 'avg_pool2d:(k:%d, stride:%d, padding:%d)' % (param[0], param[1], param[2])
info += tmp + '\n'
            elif name == 'max_pool2d':
tmp = 'max_pool2d:(k:%d, stride:%d, padding:%d)' % (param[0], param[1], param[2])
info += tmp + '\n'
elif name in ['flatten', 'tanh', 'relu', 'upsample', 'reshape', 'sigmoid', 'use_logits', 'bn']:
tmp = name + ':' + str(tuple(param))
info += tmp + '\n'
else:
raise NotImplementedError
return info
def forward(self, x, vars=None, bn_training=False, feature=False):
"""
        This function can be called for finetuning; however, during finetuning we do not wish to update
        running_mean/running_var. Although the bn weights/bias are updated, they are kept separate as fast_weights.
        To avoid updating running_mean/running_var, set bn_training=False; the bn weight/bias will still be
        updated via fast_weights without dirtying the initial theta parameters.
:param x: [b, 1, 28, 28]
:param vars:
:param bn_training: set False to not update
        :return: x (or the intermediate representation if feature=True)
"""
cat_var = False
cat_list = []
if vars is None:
vars = self.vars
idx = 0
bn_idx = 0
try:
for (name, param, extra_name) in self.config:
# assert(name == "conv2d")
if name == 'conv2d':
w, b = vars[idx], vars[idx + 1]
x = F.conv2d(x, w, b, stride=param[4], padding=param[5])
idx += 2
# print(name, param, '\tout:', x.shape)
elif name == 'convt2d':
w, b = vars[idx], vars[idx + 1]
x = F.conv_transpose2d(x, w, b, stride=param[4], padding=param[5])
idx += 2
elif name == 'linear':
# ipdb.set_trace()
if extra_name == 'cosine':
w = F.normalize(vars[idx])
x = F.normalize(x)
x = F.linear(x, w)
idx += 1
else:
w, b = vars[idx], vars[idx + 1]
x = F.linear(x, w, b)
idx += 2
if cat_var:
cat_list.append(x)
elif name == 'rep':
# print('rep')
# print(x.shape)
if feature:
return x
elif name == "cat_start":
cat_var = True
cat_list = []
elif name == "cat":
cat_var = False
x = torch.cat(cat_list, dim=1)
elif name == 'bn':
w, b = vars[idx], vars[idx + 1]
running_mean, running_var = self.vars_bn[bn_idx], self.vars_bn[bn_idx + 1]
x = F.batch_norm(x, running_mean, running_var, weight=w, bias=b, training=bn_training)
idx += 2
bn_idx += 2
elif name == 'flatten':
# print('flatten')
# print(x.shape)
x = x.view(x.size(0), -1)
elif name == 'reshape':
# [b, 8] => [b, 2, 2, 2]
x = x.view(x.size(0), *param)
elif name == 'relu':
x = F.relu(x, inplace=param[0])
elif name == 'leakyrelu':
x = F.leaky_relu(x, negative_slope=param[0], inplace=param[1])
elif name == 'tanh':
x = F.tanh(x)
elif name == 'sigmoid':
x = torch.sigmoid(x)
elif name == 'upsample':
x = F.upsample_nearest(x, scale_factor=param[0])
elif name == 'max_pool2d':
x = F.max_pool2d(x, param[0], param[1], param[2])
elif name == 'avg_pool2d':
x = F.avg_pool2d(x, param[0], param[1], param[2])
else:
print(name)
raise NotImplementedError
except:
traceback.print_exc(file=sys.stdout)
ipdb.set_trace()
# make sure variable is used properly
assert idx == len(vars)
assert bn_idx == len(self.vars_bn)
return x
def zero_grad(self, vars=None):
"""
:param vars:
:return:
"""
with torch.no_grad():
if vars is None:
for p in self.vars:
if p.grad is not None:
p.grad.zero_()
else:
for p in vars:
if p.grad is not None:
p.grad.zero_()
def define_task_lr_params(self, alpha_init=1e-3):
# Setup learning parameters
self.alpha_lr = nn.ParameterList([])
self.lr_name = []
for n, p in self.named_parameters():
self.lr_name.append(n)
for p in self.parameters():
self.alpha_lr.append(nn.Parameter(alpha_init * torch.ones(p.shape, requires_grad=True)))
def parameters(self):
"""
override this function since initial parameters will return with a generator.
:return:
"""
return self.vars
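# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# Shows the intended calling pattern of Learner: build it from a config list, run a
# forward pass with its own parameters, then with an explicit list of "fast weights"
# as done in the MAML-style inner loop. The config, sizes and args namespace below
# are illustrative stand-ins for the real ModelFactory output and argparse args.
def _learner_demo():
    from types import SimpleNamespace
    args = SimpleNamespace(xav_init=False)
    config = [('linear', [100, 784], ''), ('relu', [True], ''), ('linear', [10, 100], '')]
    net = Learner(config, args)
    x = torch.randn(4, 784)
    out = net(x)                                               # forward with net.vars
    fast = [p - 0.01 * torch.ones_like(p) for p in net.parameters()]
    out_fast = net(x, vars=fast)                               # forward with fast weights
    return out.shape, out_fast.shape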
| 10,679
| 34.364238
| 143
|
py
|
La-MAML
|
La-MAML-main/model/optimizers_lib/bgd_optimizer.py
|
import torch
from torch.optim.optimizer import Optimizer
class BGD(Optimizer):
"""Implements BGD.
A simple usage of BGD would be:
for samples, labels in batches:
for mc_iter in range(mc_iters):
optimizer.randomize_weights()
output = model.forward(samples)
                loss = criterion(output, labels)
optimizer.zero_grad()
loss.backward()
optimizer.aggregate_grads()
optimizer.step()
"""
def __init__(self, params, std_init, mean_eta=1, mc_iters=10):
"""
Initialization of BGD optimizer
group["mean_param"] is the learned mean.
group["std_param"] is the learned STD.
:param params: List of model parameters
:param std_init: Initialization value for STD parameter
:param mean_eta: Eta value
        :param mc_iters: Number of Monte Carlo iterations. Used for correctness check.
Use None to disable the check.
"""
super(BGD, self).__init__(params, defaults={})
assert mc_iters is None or (type(mc_iters) == int and mc_iters > 0), "mc_iters should be positive int or None."
self.std_init = std_init
self.mean_eta = mean_eta
self.mc_iters = mc_iters
# Initialize mu (mean_param) and sigma (std_param)
for group in self.param_groups:
assert len(group["params"]) == 1, "BGD optimizer does not support multiple params in a group"
# group['params'][0] is the weights
assert isinstance(group["params"][0], torch.Tensor), "BGD expect param to be a tensor"
# We use the initialization of weights to initialize the mean.
group["mean_param"] = group["params"][0].data.clone()
group["std_param"] = torch.zeros_like(group["params"][0].data).add_(self.std_init)
self._init_accumulators()
def get_mc_iters(self):
return self.mc_iters
def _init_accumulators(self):
self.mc_iters_taken = 0
for group in self.param_groups:
group["eps"] = None
group["grad_mul_eps_sum"] = torch.zeros_like(group["params"][0].data).cuda()
group["grad_sum"] = torch.zeros_like(group["params"][0].data).cuda()
def randomize_weights(self, force_std=-1):
"""
Randomize the weights according to N(mean, std).
:param force_std: If force_std>=0 then force_std is used for STD instead of the learned STD.
:return: None
"""
for group in self.param_groups:
mean = group["mean_param"]
std = group["std_param"]
if force_std >= 0:
std = std.mul(0).add(force_std)
group["eps"] = torch.normal(torch.zeros_like(mean), 1).cuda()
# Reparameterization trick (here we set the weights to their randomized value):
group["params"][0].data.copy_(mean.add(std.mul(group["eps"])))
def aggregate_grads(self, batch_size):
"""
Aggregates a single Monte Carlo iteration gradients. Used in step() for the expectations calculations.
optimizer.zero_grad() should be used before calling .backward() once again.
:param batch_size: BGD is using non-normalized gradients, but PyTorch gives normalized gradients.
Therefore, we multiply the gradients by the batch size.
:return: None
"""
self.mc_iters_taken += 1
groups_cnt = 0
for group in self.param_groups:
if group["params"][0].grad is None:
continue
assert group["eps"] is not None, "Must randomize weights before using aggregate_grads"
groups_cnt += 1
grad = group["params"][0].grad.data.mul(batch_size)
group["grad_sum"].add_(grad)
group["grad_mul_eps_sum"].add_(grad.mul(group["eps"]))
group["eps"] = None
assert groups_cnt > 0, "Called aggregate_grads, but all gradients were None. Make sure you called .backward()"
def step(self, closure=None, print_std = False):
"""
Updates the learned mean and STD.
:return:
"""
# Makes sure that self.mc_iters had been taken.
assert self.mc_iters is None or self.mc_iters == self.mc_iters_taken, "MC iters is set to " \
+ str(self.mc_iters) \
+ ", but took " + \
str(self.mc_iters_taken) + " MC iters"
for group in self.param_groups:
mean = group["mean_param"]
std = group["std_param"]
# Divide gradients by MC iters to get expectation
e_grad = group["grad_sum"].div(self.mc_iters_taken)
e_grad_eps = group["grad_mul_eps_sum"].div(self.mc_iters_taken)
# Update mean and STD params
mean.add_(-std.pow(2).mul(e_grad).mul(self.mean_eta))
sqrt_term = torch.sqrt(e_grad_eps.mul(std).div(2).pow(2).add(1)).mul(std)
std.copy_(sqrt_term.add(-e_grad_eps.mul(std.pow(2)).div(2)))
self.randomize_weights(force_std=0)
self._init_accumulators()
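# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# A concrete version of the Monte Carlo loop sketched in the class docstring. Note
# that the optimizer above hard-codes .cuda() calls, so this sketch assumes a CUDA
# device is available. The model, data and hyperparameters are illustrative only.
def _bgd_demo():
    model = torch.nn.Linear(10, 2).cuda()
    param_groups = [{'params': [p]} for p in model.parameters()]
    optimizer = BGD(param_groups, std_init=0.02, mean_eta=1, mc_iters=2)
    criterion = torch.nn.CrossEntropyLoss()
    samples = torch.randn(8, 10).cuda()
    labels = torch.randint(0, 2, (8,)).cuda()
    for _ in range(optimizer.get_mc_iters()):
        optimizer.randomize_weights()
        loss = criterion(model(samples), labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.aggregate_grads(batch_size=samples.size(0))
    optimizer.step()
    return loss.item()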
| 5,328
| 46.580357
| 119
|
py
|
La-MAML
|
La-MAML-main/model/optimizers_lib/optimizers_lib.py
|
import torch.optim as optim
from .bgd_optimizer import BGD
def bgd(model, **kwargs):
# logger = kwargs.get("logger", None)
# assert(logger is not None)
bgd_params = {
"mean_eta": kwargs.get("mean_eta", 1),
"std_init": kwargs.get("std_init", 0.02),
"mc_iters": kwargs.get("mc_iters", 10)
}
# logger.info("BGD params: " + str(bgd_params))
all_params = [{'params': params} for l, (name, params) in enumerate(model.named_parameters())]
return BGD(all_params, **bgd_params)
def sgd(model, **kwargs):
# logger = kwargs.get("logger", None)
# assert(logger is not None)
sgd_params = {
"momentum": kwargs.get("momentum", 0.9),
"lr": kwargs.get("lr", 0.1),
"weight_decay": kwargs.get("weight_decay", 5e-4)
}
# logger.info("SGD params: " + str(sgd_params))
all_params = [{'params': params, 'name': name, 'initial_lr': kwargs.get("lr", 0.1)} for l, (name, params) in enumerate(model.named_parameters())]
return optim.SGD(all_params, **sgd_params)
def adam(model, **kwargs):
# logger = kwargs.get("logger", None)
# assert(logger is not None)
adam_params = {
"eps": kwargs.get("eps", 1e-08),
"lr": kwargs.get("lr", 0.001),
"betas": kwargs.get("betas", (0.9, 0.999)),
"weight_decay": kwargs.get("weight_decay", 0)
}
# logger.info("ADAM params: " + str(adam_params))
all_params = [{'params': params, 'name': name, 'initial_lr': kwargs.get("lr", 0.001)} for l, (name, params) in enumerate(model.named_parameters())]
return optim.Adam(all_params, **adam_params)
def adagrad(model, **kwargs):
# logger = kwargs.get("logger", None)
# assert(logger is not None)
adam_params = {
"lr": kwargs.get("lr", 0.01),
"weight_decay": kwargs.get("weight_decay", 0)
}
# logger.info("Adagrad params: " + str(adam_params))
all_params = [{'params': params, 'name': name, 'initial_lr': kwargs.get("lr", 0.01)} for l, (name, params) in enumerate(model.named_parameters())]
return optim.Adagrad(all_params, **adam_params)
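# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# The factories above put every named parameter in its own param group and fall back
# to the default hyperparameters listed in each function. A toy model is used here
# purely for illustration (the bgd factory additionally assumes a CUDA device, since
# the underlying optimizer hard-codes .cuda() calls).
def _optimizers_lib_demo():
    import torch.nn as nn
    model = nn.Linear(4, 2)
    opt_sgd = sgd(model, lr=0.05, momentum=0.9)
    opt_adam = adam(model, lr=1e-3)
    return opt_sgd, opt_adam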
| 2,099
| 37.181818
| 151
|
py
|
La-MAML
|
La-MAML-main/model/optimizers_lib/__init__.py
|
from .optimizers_lib import *
| 29
| 29
| 29
|
py
|
fiery
|
fiery-master/evaluate.py
|
from argparse import ArgumentParser
import torch
from tqdm import tqdm
from fiery.data import prepare_dataloaders
from fiery.trainer import TrainingModule
from fiery.metrics import IntersectionOverUnion, PanopticMetric
from fiery.utils.network import preprocess_batch
from fiery.utils.instance import predict_instance_segmentation_and_trajectories
# 30mx30m, 100mx100m
EVALUATION_RANGES = {'30x30': (70, 130),
'100x100': (0, 200)
}
def eval(checkpoint_path, dataroot, version):
trainer = TrainingModule.load_from_checkpoint(checkpoint_path, strict=True)
print(f'Loaded weights from \n {checkpoint_path}')
trainer.eval()
device = torch.device('cuda:0')
trainer.to(device)
model = trainer.model
cfg = model.cfg
cfg.GPUS = "[0]"
cfg.BATCHSIZE = 1
cfg.DATASET.DATAROOT = dataroot
cfg.DATASET.VERSION = version
_, valloader = prepare_dataloaders(cfg)
panoptic_metrics = {}
iou_metrics = {}
n_classes = len(cfg.SEMANTIC_SEG.WEIGHTS)
for key in EVALUATION_RANGES.keys():
panoptic_metrics[key] = PanopticMetric(n_classes=n_classes, temporally_consistent=True).to(
device)
iou_metrics[key] = IntersectionOverUnion(n_classes).to(device)
for i, batch in enumerate(tqdm(valloader)):
preprocess_batch(batch, device)
image = batch['image']
intrinsics = batch['intrinsics']
extrinsics = batch['extrinsics']
future_egomotion = batch['future_egomotion']
batch_size = image.shape[0]
labels, future_distribution_inputs = trainer.prepare_future_labels(batch)
with torch.no_grad():
# Evaluate with mean prediction
noise = torch.zeros((batch_size, 1, model.latent_dim), device=device)
output = model(image, intrinsics, extrinsics, future_egomotion,
future_distribution_inputs, noise=noise)
# Consistent instance seg
pred_consistent_instance_seg = predict_instance_segmentation_and_trajectories(
output, compute_matched_centers=False, make_consistent=True
)
segmentation_pred = output['segmentation'].detach()
segmentation_pred = torch.argmax(segmentation_pred, dim=2, keepdims=True)
for key, grid in EVALUATION_RANGES.items():
limits = slice(grid[0], grid[1])
panoptic_metrics[key](pred_consistent_instance_seg[..., limits, limits].contiguous().detach(),
labels['instance'][..., limits, limits].contiguous()
)
iou_metrics[key](segmentation_pred[..., limits, limits].contiguous(),
labels['segmentation'][..., limits, limits].contiguous()
)
results = {}
for key, grid in EVALUATION_RANGES.items():
panoptic_scores = panoptic_metrics[key].compute()
for panoptic_key, value in panoptic_scores.items():
results[f'{panoptic_key}'] = results.get(f'{panoptic_key}', []) + [100 * value[1].item()]
iou_scores = iou_metrics[key].compute()
results['iou'] = results.get('iou', []) + [100 * iou_scores[1].item()]
for panoptic_key in ['iou', 'pq', 'sq', 'rq']:
print(panoptic_key)
print(' & '.join([f'{x:.1f}' for x in results[panoptic_key]]))
if __name__ == '__main__':
parser = ArgumentParser(description='Fiery evaluation')
parser.add_argument('--checkpoint', default='./fiery.ckpt', type=str, help='path to checkpoint')
parser.add_argument('--dataroot', default='./nuscenes', type=str, help='path to the dataset')
parser.add_argument('--version', default='trainval', type=str, choices=['mini', 'trainval'],
help='dataset version')
args = parser.parse_args()
eval(args.checkpoint, args.dataroot, args.version)
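# Example invocation (hypothetical paths; 'mini' keeps the evaluation small):
#   python evaluate.py --checkpoint ./fiery.ckpt --dataroot ./nuscenes --version mini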
| 3,908
| 36.951456
| 106
|
py
|
fiery
|
fiery-master/visualise.py
|
import os
from argparse import ArgumentParser
from glob import glob
import cv2
import numpy as np
import torch
import torchvision
import matplotlib as mpl
import matplotlib.pyplot as plt
from PIL import Image
from fiery.trainer import TrainingModule
from fiery.utils.network import NormalizeInverse
from fiery.utils.instance import predict_instance_segmentation_and_trajectories
from fiery.utils.visualisation import plot_instance_map, generate_instance_colours, make_contour, convert_figure_numpy
EXAMPLE_DATA_PATH = 'example_data'
def plot_prediction(image, output, cfg):
# Process predictions
consistent_instance_seg, matched_centers = predict_instance_segmentation_and_trajectories(
output, compute_matched_centers=True
)
# Plot future trajectories
unique_ids = torch.unique(consistent_instance_seg[0, 0]).cpu().long().numpy()[1:]
instance_map = dict(zip(unique_ids, unique_ids))
instance_colours = generate_instance_colours(instance_map)
vis_image = plot_instance_map(consistent_instance_seg[0, 0].cpu().numpy(), instance_map)
trajectory_img = np.zeros(vis_image.shape, dtype=np.uint8)
for instance_id in unique_ids:
path = matched_centers[instance_id]
for t in range(len(path) - 1):
color = instance_colours[instance_id].tolist()
cv2.line(trajectory_img, tuple(path[t]), tuple(path[t + 1]),
color, 4)
# Overlay arrows
temp_img = cv2.addWeighted(vis_image, 0.7, trajectory_img, 0.3, 1.0)
mask = ~ np.all(trajectory_img == 0, axis=2)
vis_image[mask] = temp_img[mask]
# Plot present RGB frames and predictions
val_w = 2.99
cameras = cfg.IMAGE.NAMES
image_ratio = cfg.IMAGE.FINAL_DIM[0] / cfg.IMAGE.FINAL_DIM[1]
val_h = val_w * image_ratio
fig = plt.figure(figsize=(4 * val_w, 2 * val_h))
width_ratios = (val_w, val_w, val_w, val_w)
gs = mpl.gridspec.GridSpec(2, 4, width_ratios=width_ratios)
gs.update(wspace=0.0, hspace=0.0, left=0.0, right=1.0, top=1.0, bottom=0.0)
denormalise_img = torchvision.transforms.Compose(
(NormalizeInverse(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
torchvision.transforms.ToPILImage(),)
)
for imgi, img in enumerate(image[0, -1]):
ax = plt.subplot(gs[imgi // 3, imgi % 3])
showimg = denormalise_img(img.cpu())
if imgi > 2:
showimg = showimg.transpose(Image.FLIP_LEFT_RIGHT)
plt.annotate(cameras[imgi].replace('_', ' ').replace('CAM ', ''), (0.01, 0.87), c='white',
xycoords='axes fraction', fontsize=14)
plt.imshow(showimg)
plt.axis('off')
ax = plt.subplot(gs[:, 3])
plt.imshow(make_contour(vis_image[::-1, ::-1]))
plt.axis('off')
plt.draw()
figure_numpy = convert_figure_numpy(fig)
plt.close()
return figure_numpy
def download_example_data():
from requests import get
def download(url, file_name):
# open in binary mode
with open(file_name, "wb") as file:
# get request
response = get(url)
# write to file
file.write(response.content)
os.makedirs(EXAMPLE_DATA_PATH, exist_ok=True)
url_list = ['https://github.com/wayveai/fiery/releases/download/v1.0/example_1.npz',
'https://github.com/wayveai/fiery/releases/download/v1.0/example_2.npz',
'https://github.com/wayveai/fiery/releases/download/v1.0/example_3.npz',
'https://github.com/wayveai/fiery/releases/download/v1.0/example_4.npz'
]
for url in url_list:
download(url, os.path.join(EXAMPLE_DATA_PATH, os.path.basename(url)))
def visualise(checkpoint_path):
trainer = TrainingModule.load_from_checkpoint(checkpoint_path, strict=True)
device = torch.device('cuda:0')
trainer = trainer.to(device)
trainer.eval()
# Download example data
download_example_data()
# Load data
for data_path in sorted(glob(os.path.join(EXAMPLE_DATA_PATH, '*.npz'))):
data = np.load(data_path)
image = torch.from_numpy(data['image']).to(device)
intrinsics = torch.from_numpy(data['intrinsics']).to(device)
extrinsics = torch.from_numpy(data['extrinsics']).to(device)
future_egomotions = torch.from_numpy(data['future_egomotion']).to(device)
# Forward pass
with torch.no_grad():
output = trainer.model(image, intrinsics, extrinsics, future_egomotions)
figure_numpy = plot_prediction(image, output, trainer.cfg)
os.makedirs('./output_vis', exist_ok=True)
output_filename = os.path.join('./output_vis', os.path.basename(data_path).split('.')[0]) + '.png'
Image.fromarray(figure_numpy).save(output_filename)
print(f'Saved output in {output_filename}')
if __name__ == '__main__':
parser = ArgumentParser(description='Fiery visualisation')
parser.add_argument('--checkpoint', default='./fiery.ckpt', type=str, help='path to checkpoint')
args = parser.parse_args()
visualise(args.checkpoint)
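# Example invocation (hypothetical checkpoint path); predictions are written to ./output_vis/:
#   python visualise.py --checkpoint ./fiery.ckpt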
| 5,095
| 36.470588
| 118
|
py
|
fiery
|
fiery-master/train.py
|
import os
import time
import socket
import torch
import pytorch_lightning as pl
from pytorch_lightning.plugins import DDPPlugin
from fiery.config import get_parser, get_cfg
from fiery.data import prepare_dataloaders
from fiery.trainer import TrainingModule
def main():
args = get_parser().parse_args()
cfg = get_cfg(args)
trainloader, valloader = prepare_dataloaders(cfg)
model = TrainingModule(cfg.convert_to_dict())
if cfg.PRETRAINED.LOAD_WEIGHTS:
# Load single-image instance segmentation model.
pretrained_model_weights = torch.load(
os.path.join(cfg.DATASET.DATAROOT, cfg.PRETRAINED.PATH), map_location='cpu'
)['state_dict']
model.load_state_dict(pretrained_model_weights, strict=False)
print(f'Loaded single-image model weights from {cfg.PRETRAINED.PATH}')
save_dir = os.path.join(
cfg.LOG_DIR, time.strftime('%d%B%Yat%H:%M:%S%Z') + '_' + socket.gethostname() + '_' + cfg.TAG
)
tb_logger = pl.loggers.TensorBoardLogger(save_dir=save_dir)
trainer = pl.Trainer(
gpus=cfg.GPUS,
accelerator='ddp',
precision=cfg.PRECISION,
sync_batchnorm=True,
gradient_clip_val=cfg.GRAD_NORM_CLIP,
max_epochs=cfg.EPOCHS,
weights_summary='full',
logger=tb_logger,
log_every_n_steps=cfg.LOGGING_INTERVAL,
plugins=DDPPlugin(find_unused_parameters=True),
profiler='simple',
)
trainer.fit(model, trainloader, valloader)
if __name__ == "__main__":
main()
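# Example invocation (hypothetical values): config options can be overridden either through a
# YAML file or through the trailing 'opts' arguments merged by fiery.config.get_cfg, e.g.
#   python train.py --config-file my_config.yml BATCHSIZE 2 EPOCHS 10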
| 1,540
| 29.215686
| 101
|
py
|
fiery
|
fiery-master/fiery/losses.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SpatialRegressionLoss(nn.Module):
def __init__(self, norm, ignore_index=255, future_discount=1.0):
super(SpatialRegressionLoss, self).__init__()
self.norm = norm
self.ignore_index = ignore_index
self.future_discount = future_discount
if norm == 1:
self.loss_fn = F.l1_loss
elif norm == 2:
self.loss_fn = F.mse_loss
else:
raise ValueError(f'Expected norm 1 or 2, but got norm={norm}')
def forward(self, prediction, target):
assert len(prediction.shape) == 5, 'Must be a 5D tensor'
# ignore_index is the same across all channels
mask = target[:, :, :1] != self.ignore_index
if mask.sum() == 0:
return prediction.new_zeros(1)[0].float()
loss = self.loss_fn(prediction, target, reduction='none')
# Sum channel dimension
loss = torch.sum(loss, dim=-3, keepdims=True)
seq_len = loss.shape[1]
future_discounts = self.future_discount ** torch.arange(seq_len, device=loss.device, dtype=loss.dtype)
future_discounts = future_discounts.view(1, seq_len, 1, 1, 1)
loss = loss * future_discounts
return loss[mask].mean()
class SegmentationLoss(nn.Module):
def __init__(self, class_weights, ignore_index=255, use_top_k=False, top_k_ratio=1.0, future_discount=1.0):
super().__init__()
self.class_weights = class_weights
self.ignore_index = ignore_index
self.use_top_k = use_top_k
self.top_k_ratio = top_k_ratio
self.future_discount = future_discount
def forward(self, prediction, target):
if target.shape[-3] != 1:
raise ValueError('segmentation label must be an index-label with channel dimension = 1.')
b, s, c, h, w = prediction.shape
prediction = prediction.view(b * s, c, h, w)
target = target.view(b * s, h, w)
loss = F.cross_entropy(
prediction,
target,
ignore_index=self.ignore_index,
reduction='none',
weight=self.class_weights.to(target.device),
)
loss = loss.view(b, s, h, w)
future_discounts = self.future_discount ** torch.arange(s, device=loss.device, dtype=loss.dtype)
future_discounts = future_discounts.view(1, s, 1, 1)
loss = loss * future_discounts
loss = loss.view(b, s, -1)
if self.use_top_k:
# Penalises the top-k hardest pixels
k = int(self.top_k_ratio * loss.shape[2])
loss, _ = torch.sort(loss, dim=2, descending=True)
loss = loss[:, :, :k]
return torch.mean(loss)
class ProbabilisticLoss(nn.Module):
def forward(self, output):
present_mu = output['present_mu']
present_log_sigma = output['present_log_sigma']
future_mu = output['future_mu']
future_log_sigma = output['future_log_sigma']
var_future = torch.exp(2 * future_log_sigma)
var_present = torch.exp(2 * present_log_sigma)
kl_div = (
present_log_sigma - future_log_sigma - 0.5 + (var_future + (future_mu - present_mu) ** 2) / (
2 * var_present)
)
kl_loss = torch.mean(torch.sum(kl_div, dim=-1))
return kl_loss
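# --- Shape sanity-check sketch (not part of the original file; sizes are arbitrary) ---
if __name__ == '__main__':
    b, s, c, h, w = 2, 3, 1, 8, 8
    # Regression losses expect 5D (batch, time, channel, height, width) tensors.
    l1 = SpatialRegressionLoss(norm=1, future_discount=0.95)
    print(l1(torch.randn(b, s, c, h, w), torch.randn(b, s, c, h, w)))
    # Segmentation loss expects logits (b, s, n_classes, h, w) and index labels (b, s, 1, h, w).
    seg_loss = SegmentationLoss(class_weights=torch.tensor([1.0, 2.0]))
    print(seg_loss(torch.randn(b, s, 2, h, w), torch.randint(0, 2, (b, s, 1, h, w))))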
| 3,378
| 33.835052
| 111
|
py
|
fiery
|
fiery-master/fiery/data.py
|
import os
from PIL import Image
import numpy as np
import cv2
import torch
import torchvision
from pyquaternion import Quaternion
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.splits import create_splits_scenes
from nuscenes.utils.data_classes import Box
from lyft_dataset_sdk.lyftdataset import LyftDataset
from fiery.utils.geometry import (
resize_and_crop_image,
update_intrinsics,
calculate_birds_eye_view_parameters,
convert_egopose_to_matrix_numpy,
pose_vec2mat,
mat2pose_vec,
invert_matrix_egopose_numpy,
)
from fiery.utils.instance import convert_instance_mask_to_center_and_offset_label
from fiery.utils.lyft_splits import TRAIN_LYFT_INDICES, VAL_LYFT_INDICES
class FuturePredictionDataset(torch.utils.data.Dataset):
def __init__(self, nusc, is_train, cfg):
self.nusc = nusc
self.is_train = is_train
self.cfg = cfg
self.is_lyft = isinstance(nusc, LyftDataset)
if self.is_lyft:
self.dataroot = self.nusc.data_path
else:
self.dataroot = self.nusc.dataroot
self.mode = 'train' if self.is_train else 'val'
self.sequence_length = cfg.TIME_RECEPTIVE_FIELD + cfg.N_FUTURE_FRAMES
self.scenes = self.get_scenes()
self.ixes = self.prepro()
self.indices = self.get_indices()
# Image resizing and cropping
self.augmentation_parameters = self.get_resizing_and_cropping_parameters()
# Normalising input images
self.normalise_image = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
# Bird's-eye view parameters
bev_resolution, bev_start_position, bev_dimension = calculate_birds_eye_view_parameters(
cfg.LIFT.X_BOUND, cfg.LIFT.Y_BOUND, cfg.LIFT.Z_BOUND
)
self.bev_resolution, self.bev_start_position, self.bev_dimension = (
bev_resolution.numpy(), bev_start_position.numpy(), bev_dimension.numpy()
)
# Spatial extent in bird's-eye view, in meters
self.spatial_extent = (self.cfg.LIFT.X_BOUND[1], self.cfg.LIFT.Y_BOUND[1])
def get_scenes(self):
if self.is_lyft:
scenes = [row['name'] for row in self.nusc.scene]
# Split in train/val
indices = TRAIN_LYFT_INDICES if self.is_train else VAL_LYFT_INDICES
scenes = [scenes[i] for i in indices]
else:
# filter by scene split
split = {'v1.0-trainval': {True: 'train', False: 'val'},
'v1.0-mini': {True: 'mini_train', False: 'mini_val'},}[
self.nusc.version
][self.is_train]
scenes = create_splits_scenes()[split]
return scenes
def prepro(self):
samples = [samp for samp in self.nusc.sample]
# remove samples that aren't in this split
samples = [samp for samp in samples if self.nusc.get('scene', samp['scene_token'])['name'] in self.scenes]
# sort by scene, timestamp (only to make chronological viz easier)
samples.sort(key=lambda x: (x['scene_token'], x['timestamp']))
return samples
def get_indices(self):
indices = []
for index in range(len(self.ixes)):
is_valid_data = True
previous_rec = None
current_indices = []
for t in range(self.sequence_length):
index_t = index + t
# Going over the dataset size limit.
if index_t >= len(self.ixes):
is_valid_data = False
break
rec = self.ixes[index_t]
# Check if scene is the same
if (previous_rec is not None) and (rec['scene_token'] != previous_rec['scene_token']):
is_valid_data = False
break
current_indices.append(index_t)
previous_rec = rec
if is_valid_data:
indices.append(current_indices)
return np.asarray(indices)
def get_resizing_and_cropping_parameters(self):
original_height, original_width = self.cfg.IMAGE.ORIGINAL_HEIGHT, self.cfg.IMAGE.ORIGINAL_WIDTH
final_height, final_width = self.cfg.IMAGE.FINAL_DIM
resize_scale = self.cfg.IMAGE.RESIZE_SCALE
resize_dims = (int(original_width * resize_scale), int(original_height * resize_scale))
resized_width, resized_height = resize_dims
crop_h = self.cfg.IMAGE.TOP_CROP
crop_w = int(max(0, (resized_width - final_width) / 2))
# Left, top, right, bottom crops.
crop = (crop_w, crop_h, crop_w + final_width, crop_h + final_height)
if resized_width != final_width:
print('Zero padding left and right parts of the image.')
if crop_h + final_height != resized_height:
print('Zero padding bottom part of the image.')
return {'scale_width': resize_scale,
'scale_height': resize_scale,
'resize_dims': resize_dims,
'crop': crop,
}
def get_input_data(self, rec):
"""
Parameters
----------
rec: nuscenes identifier for a given timestamp
Returns
-------
        images: torch.Tensor<float> (1, N, 3, H, W)
        intrinsics: torch.Tensor<float> (1, N, 3, 3)
        extrinsics: torch.Tensor<float> (1, N, 4, 4)
"""
images = []
intrinsics = []
extrinsics = []
cameras = self.cfg.IMAGE.NAMES
# The extrinsics we want are from the camera sensor to "flat egopose" as defined
# https://github.com/nutonomy/nuscenes-devkit/blob/9b492f76df22943daf1dc991358d3d606314af27/python-sdk/nuscenes/nuscenes.py#L279
# which corresponds to the position of the lidar.
# This is because the labels are generated by projecting the 3D bounding box in this lidar's reference frame.
# From lidar egopose to world.
lidar_sample = self.nusc.get('sample_data', rec['data']['LIDAR_TOP'])
lidar_pose = self.nusc.get('ego_pose', lidar_sample['ego_pose_token'])
yaw = Quaternion(lidar_pose['rotation']).yaw_pitch_roll[0]
lidar_rotation = Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)])
lidar_translation = np.array(lidar_pose['translation'])[:, None]
lidar_to_world = np.vstack([
np.hstack((lidar_rotation.rotation_matrix, lidar_translation)),
np.array([0, 0, 0, 1])
])
for cam in cameras:
camera_sample = self.nusc.get('sample_data', rec['data'][cam])
# Transformation from world to egopose
car_egopose = self.nusc.get('ego_pose', camera_sample['ego_pose_token'])
egopose_rotation = Quaternion(car_egopose['rotation']).inverse
egopose_translation = -np.array(car_egopose['translation'])[:, None]
world_to_car_egopose = np.vstack([
np.hstack((egopose_rotation.rotation_matrix, egopose_rotation.rotation_matrix @ egopose_translation)),
np.array([0, 0, 0, 1])
])
# From egopose to sensor
sensor_sample = self.nusc.get('calibrated_sensor', camera_sample['calibrated_sensor_token'])
intrinsic = torch.Tensor(sensor_sample['camera_intrinsic'])
sensor_rotation = Quaternion(sensor_sample['rotation'])
sensor_translation = np.array(sensor_sample['translation'])[:, None]
car_egopose_to_sensor = np.vstack([
np.hstack((sensor_rotation.rotation_matrix, sensor_translation)),
np.array([0, 0, 0, 1])
])
car_egopose_to_sensor = np.linalg.inv(car_egopose_to_sensor)
# Combine all the transformation.
# From sensor to lidar.
lidar_to_sensor = car_egopose_to_sensor @ world_to_car_egopose @ lidar_to_world
sensor_to_lidar = torch.from_numpy(np.linalg.inv(lidar_to_sensor)).float()
# Load image
image_filename = os.path.join(self.dataroot, camera_sample['filename'])
img = Image.open(image_filename)
# Resize and crop
img = resize_and_crop_image(
img, resize_dims=self.augmentation_parameters['resize_dims'], crop=self.augmentation_parameters['crop']
)
# Normalise image
normalised_img = self.normalise_image(img)
# Combine resize/cropping in the intrinsics
top_crop = self.augmentation_parameters['crop'][1]
left_crop = self.augmentation_parameters['crop'][0]
intrinsic = update_intrinsics(
intrinsic, top_crop, left_crop,
scale_width=self.augmentation_parameters['scale_width'],
scale_height=self.augmentation_parameters['scale_height']
)
images.append(normalised_img.unsqueeze(0).unsqueeze(0))
intrinsics.append(intrinsic.unsqueeze(0).unsqueeze(0))
extrinsics.append(sensor_to_lidar.unsqueeze(0).unsqueeze(0))
images, intrinsics, extrinsics = (torch.cat(images, dim=1),
torch.cat(intrinsics, dim=1),
torch.cat(extrinsics, dim=1)
)
return images, intrinsics, extrinsics
def _get_top_lidar_pose(self, rec):
egopose = self.nusc.get('ego_pose', self.nusc.get('sample_data', rec['data']['LIDAR_TOP'])['ego_pose_token'])
trans = -np.array(egopose['translation'])
yaw = Quaternion(egopose['rotation']).yaw_pitch_roll[0]
rot = Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse
return trans, rot
def get_birds_eye_view_label(self, rec, instance_map):
translation, rotation = self._get_top_lidar_pose(rec)
segmentation = np.zeros((self.bev_dimension[0], self.bev_dimension[1]))
# Background is ID 0
instance = np.zeros((self.bev_dimension[0], self.bev_dimension[1]))
z_position = np.zeros((self.bev_dimension[0], self.bev_dimension[1]))
attribute_label = np.zeros((self.bev_dimension[0], self.bev_dimension[1]))
for annotation_token in rec['anns']:
# Filter out all non vehicle instances
annotation = self.nusc.get('sample_annotation', annotation_token)
if not self.is_lyft:
# NuScenes filter
if 'vehicle' not in annotation['category_name']:
continue
if self.cfg.DATASET.FILTER_INVISIBLE_VEHICLES and int(annotation['visibility_token']) == 1:
continue
else:
# Lyft filter
if annotation['category_name'] not in ['bus', 'car', 'construction_vehicle', 'trailer', 'truck']:
continue
if annotation['instance_token'] not in instance_map:
instance_map[annotation['instance_token']] = len(instance_map) + 1
instance_id = instance_map[annotation['instance_token']]
if not self.is_lyft:
instance_attribute = int(annotation['visibility_token'])
else:
instance_attribute = 0
poly_region, z = self._get_poly_region_in_image(annotation, translation, rotation)
cv2.fillPoly(instance, [poly_region], instance_id)
cv2.fillPoly(segmentation, [poly_region], 1.0)
cv2.fillPoly(z_position, [poly_region], z)
cv2.fillPoly(attribute_label, [poly_region], instance_attribute)
return segmentation, instance, z_position, instance_map, attribute_label
def _get_poly_region_in_image(self, instance_annotation, ego_translation, ego_rotation):
box = Box(
instance_annotation['translation'], instance_annotation['size'], Quaternion(instance_annotation['rotation'])
)
box.translate(ego_translation)
box.rotate(ego_rotation)
pts = box.bottom_corners()[:2].T
pts = np.round((pts - self.bev_start_position[:2] + self.bev_resolution[:2] / 2.0) / self.bev_resolution[:2]).astype(np.int32)
pts[:, [1, 0]] = pts[:, [0, 1]]
z = box.bottom_corners()[2, 0]
return pts, z
def get_label(self, rec, instance_map):
segmentation_np, instance_np, z_position_np, instance_map, attribute_label_np = \
self.get_birds_eye_view_label(rec, instance_map)
segmentation = torch.from_numpy(segmentation_np).long().unsqueeze(0).unsqueeze(0)
instance = torch.from_numpy(instance_np).long().unsqueeze(0)
z_position = torch.from_numpy(z_position_np).float().unsqueeze(0).unsqueeze(0)
attribute_label = torch.from_numpy(attribute_label_np).long().unsqueeze(0).unsqueeze(0)
return segmentation, instance, z_position, instance_map, attribute_label
def get_future_egomotion(self, rec, index):
rec_t0 = rec
# Identity
future_egomotion = np.eye(4, dtype=np.float32)
if index < len(self.ixes) - 1:
rec_t1 = self.ixes[index + 1]
if rec_t0['scene_token'] == rec_t1['scene_token']:
egopose_t0 = self.nusc.get(
'ego_pose', self.nusc.get('sample_data', rec_t0['data']['LIDAR_TOP'])['ego_pose_token']
)
egopose_t1 = self.nusc.get(
'ego_pose', self.nusc.get('sample_data', rec_t1['data']['LIDAR_TOP'])['ego_pose_token']
)
egopose_t0 = convert_egopose_to_matrix_numpy(egopose_t0)
egopose_t1 = convert_egopose_to_matrix_numpy(egopose_t1)
future_egomotion = invert_matrix_egopose_numpy(egopose_t1).dot(egopose_t0)
future_egomotion[3, :3] = 0.0
future_egomotion[3, 3] = 1.0
future_egomotion = torch.Tensor(future_egomotion).float()
# Convert to 6DoF vector
future_egomotion = mat2pose_vec(future_egomotion)
return future_egomotion.unsqueeze(0)
def __len__(self):
return len(self.indices)
def __getitem__(self, index):
"""
Returns
-------
data: dict with the following keys:
image: torch.Tensor<float> (T, N, 3, H, W)
normalised cameras images with T the sequence length, and N the number of cameras.
intrinsics: torch.Tensor<float> (T, N, 3, 3)
intrinsics containing resizing and cropping parameters.
extrinsics: torch.Tensor<float> (T, N, 4, 4)
6 DoF pose from world coordinates to camera coordinates.
segmentation: torch.Tensor<int64> (T, 1, H_bev, W_bev)
(H_bev, W_bev) are the pixel dimensions in bird's-eye view.
instance: torch.Tensor<int64> (T, 1, H_bev, W_bev)
centerness: torch.Tensor<float> (T, 1, H_bev, W_bev)
offset: torch.Tensor<float> (T, 2, H_bev, W_bev)
flow: torch.Tensor<float> (T, 2, H_bev, W_bev)
future_egomotion: torch.Tensor<float> (T, 6)
6 DoF egomotion t -> t+1
sample_token: List<str> (T,)
            z_position: torch.Tensor<float> (T, 1, H_bev, W_bev)
            attribute: torch.Tensor<int64> (T, 1, H_bev, W_bev)
"""
data = {}
keys = ['image', 'intrinsics', 'extrinsics',
'segmentation', 'instance', 'centerness', 'offset', 'flow', 'future_egomotion',
'sample_token',
'z_position', 'attribute'
]
for key in keys:
data[key] = []
instance_map = {}
# Loop over all the frames in the sequence.
for index_t in self.indices[index]:
rec = self.ixes[index_t]
images, intrinsics, extrinsics = self.get_input_data(rec)
segmentation, instance, z_position, instance_map, attribute_label = self.get_label(rec, instance_map)
future_egomotion = self.get_future_egomotion(rec, index_t)
data['image'].append(images)
data['intrinsics'].append(intrinsics)
data['extrinsics'].append(extrinsics)
data['segmentation'].append(segmentation)
data['instance'].append(instance)
data['future_egomotion'].append(future_egomotion)
data['sample_token'].append(rec['token'])
data['z_position'].append(z_position)
data['attribute'].append(attribute_label)
for key, value in data.items():
if key in ['sample_token', 'centerness', 'offset', 'flow']:
continue
data[key] = torch.cat(value, dim=0)
        # For Lyft we need to subsample the sequence and update future_egomotions accordingly.
if self.cfg.MODEL.SUBSAMPLE:
for key, value in data.items():
if key in ['future_egomotion', 'sample_token', 'centerness', 'offset', 'flow']:
continue
data[key] = data[key][::2].clone()
data['sample_token'] = data['sample_token'][::2]
# Update future egomotions
future_egomotions_matrix = pose_vec2mat(data['future_egomotion'])
future_egomotion_accum = torch.zeros_like(future_egomotions_matrix)
future_egomotion_accum[:-1] = future_egomotions_matrix[:-1] @ future_egomotions_matrix[1:]
future_egomotion_accum = mat2pose_vec(future_egomotion_accum)
data['future_egomotion'] = future_egomotion_accum[::2].clone()
instance_centerness, instance_offset, instance_flow = convert_instance_mask_to_center_and_offset_label(
data['instance'], data['future_egomotion'],
num_instances=len(instance_map), ignore_index=self.cfg.DATASET.IGNORE_INDEX, subtract_egomotion=True,
spatial_extent=self.spatial_extent,
)
data['centerness'] = instance_centerness
data['offset'] = instance_offset
data['flow'] = instance_flow
return data
def prepare_dataloaders(cfg, return_dataset=False):
version = cfg.DATASET.VERSION
train_on_training_data = True
if cfg.DATASET.NAME == 'nuscenes':
# 28130 train and 6019 val
dataroot = os.path.join(cfg.DATASET.DATAROOT, version)
nusc = NuScenes(version='v1.0-{}'.format(cfg.DATASET.VERSION), dataroot=dataroot, verbose=False)
elif cfg.DATASET.NAME == 'lyft':
# train contains 22680 samples
# we split in 16506 6174
dataroot = os.path.join(cfg.DATASET.DATAROOT, 'trainval')
nusc = LyftDataset(data_path=dataroot,
json_path=os.path.join(dataroot, 'train_data'),
verbose=True)
traindata = FuturePredictionDataset(nusc, train_on_training_data, cfg)
valdata = FuturePredictionDataset(nusc, False, cfg)
if cfg.DATASET.VERSION == 'mini':
traindata.indices = traindata.indices[:10]
valdata.indices = valdata.indices[:10]
nworkers = cfg.N_WORKERS
trainloader = torch.utils.data.DataLoader(
traindata, batch_size=cfg.BATCHSIZE, shuffle=True, num_workers=nworkers, pin_memory=True, drop_last=True
)
valloader = torch.utils.data.DataLoader(
valdata, batch_size=cfg.BATCHSIZE, shuffle=False, num_workers=nworkers, pin_memory=True, drop_last=False)
if return_dataset:
return trainloader, valloader, traindata, valdata
else:
return trainloader, valloader
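# --- Usage sketch (not part of the original file; assumes a nuScenes 'mini' split extracted
# --- under ./nuscenes/mini) ---
#   from fiery.config import get_cfg
#   cfg = get_cfg(cfg_dict={'DATASET': {'VERSION': 'mini'}, 'BATCHSIZE': 1, 'N_WORKERS': 0})
#   trainloader, valloader = prepare_dataloaders(cfg)
#   batch = next(iter(valloader))   # dict with 'image', 'intrinsics', 'extrinsics', 'segmentation', ...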
| 19,735
| 41.62635
| 136
|
py
|
fiery
|
fiery-master/fiery/config.py
|
import argparse
from fvcore.common.config import CfgNode as _CfgNode
def convert_to_dict(cfg_node, key_list=[]):
"""Convert a config node to dictionary."""
_VALID_TYPES = {tuple, list, str, int, float, bool}
if not isinstance(cfg_node, _CfgNode):
if type(cfg_node) not in _VALID_TYPES:
print(
'Key {} with value {} is not a valid type; valid types: {}'.format(
'.'.join(key_list), type(cfg_node), _VALID_TYPES
),
)
return cfg_node
else:
cfg_dict = dict(cfg_node)
for k, v in cfg_dict.items():
cfg_dict[k] = convert_to_dict(v, key_list + [k])
return cfg_dict
class CfgNode(_CfgNode):
"""Remove once https://github.com/rbgirshick/yacs/issues/19 is merged."""
def convert_to_dict(self):
return convert_to_dict(self)
CN = CfgNode
_C = CN()
_C.LOG_DIR = 'tensorboard_logs'
_C.TAG = 'default'
_C.GPUS = [0] # gpus to use
_C.PRECISION = 32 # 16bit or 32bit
_C.BATCHSIZE = 3
_C.EPOCHS = 20
_C.N_WORKERS = 5
_C.VIS_INTERVAL = 5000
_C.LOGGING_INTERVAL = 500
_C.PRETRAINED = CN()
_C.PRETRAINED.LOAD_WEIGHTS = False
_C.PRETRAINED.PATH = ''
_C.DATASET = CN()
_C.DATASET.DATAROOT = './nuscenes/'
_C.DATASET.VERSION = 'trainval'
_C.DATASET.NAME = 'nuscenes'
_C.DATASET.IGNORE_INDEX = 255 # Ignore index when creating flow/offset labels
_C.DATASET.FILTER_INVISIBLE_VEHICLES = True # Filter vehicles that are not visible from the cameras
_C.TIME_RECEPTIVE_FIELD = 3 # how many frames of temporal context (1 for single timeframe)
_C.N_FUTURE_FRAMES = 4 # how many time steps into the future to predict
_C.IMAGE = CN()
_C.IMAGE.FINAL_DIM = (224, 480)
_C.IMAGE.RESIZE_SCALE = 0.3
_C.IMAGE.TOP_CROP = 46
_C.IMAGE.ORIGINAL_HEIGHT = 900 # Original input RGB camera height
_C.IMAGE.ORIGINAL_WIDTH = 1600 # Original input RGB camera width
_C.IMAGE.NAMES = ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT']
_C.LIFT = CN() # image to BEV lifting
_C.LIFT.X_BOUND = [-50.0, 50.0, 0.5] # Forward
_C.LIFT.Y_BOUND = [-50.0, 50.0, 0.5] # Sides
_C.LIFT.Z_BOUND = [-10.0, 10.0, 20.0] # Height
_C.LIFT.D_BOUND = [2.0, 50.0, 1.0]
_C.MODEL = CN()
_C.MODEL.ENCODER = CN()
_C.MODEL.ENCODER.DOWNSAMPLE = 8
_C.MODEL.ENCODER.NAME = 'efficientnet-b4'
_C.MODEL.ENCODER.OUT_CHANNELS = 64
_C.MODEL.ENCODER.USE_DEPTH_DISTRIBUTION = True
_C.MODEL.TEMPORAL_MODEL = CN()
_C.MODEL.TEMPORAL_MODEL.NAME = 'temporal_block' # type of temporal model
_C.MODEL.TEMPORAL_MODEL.START_OUT_CHANNELS = 64
_C.MODEL.TEMPORAL_MODEL.EXTRA_IN_CHANNELS = 0
_C.MODEL.TEMPORAL_MODEL.INBETWEEN_LAYERS = 0
_C.MODEL.TEMPORAL_MODEL.PYRAMID_POOLING = True
_C.MODEL.TEMPORAL_MODEL.INPUT_EGOPOSE = True
_C.MODEL.DISTRIBUTION = CN()
_C.MODEL.DISTRIBUTION.LATENT_DIM = 32
_C.MODEL.DISTRIBUTION.MIN_LOG_SIGMA = -5.0
_C.MODEL.DISTRIBUTION.MAX_LOG_SIGMA = 5.0
_C.MODEL.FUTURE_PRED = CN()
_C.MODEL.FUTURE_PRED.N_GRU_BLOCKS = 3
_C.MODEL.FUTURE_PRED.N_RES_LAYERS = 3
_C.MODEL.DECODER = CN()
_C.MODEL.BN_MOMENTUM = 0.1
_C.MODEL.SUBSAMPLE = False # Subsample frames for Lyft
_C.SEMANTIC_SEG = CN()
_C.SEMANTIC_SEG.WEIGHTS = [1.0, 2.0] # per class cross entropy weights (bg, dynamic)
_C.SEMANTIC_SEG.USE_TOP_K = True # backprop only top-k hardest pixels
_C.SEMANTIC_SEG.TOP_K_RATIO = 0.25
_C.INSTANCE_SEG = CN()
_C.INSTANCE_FLOW = CN()
_C.INSTANCE_FLOW.ENABLED = True
_C.PROBABILISTIC = CN()
_C.PROBABILISTIC.ENABLED = True # learn a distribution over futures
_C.PROBABILISTIC.WEIGHT = 100.0
_C.PROBABILISTIC.FUTURE_DIM = 6 # number of dimension added (future flow, future centerness, offset, seg)
_C.FUTURE_DISCOUNT = 0.95
_C.OPTIMIZER = CN()
_C.OPTIMIZER.LR = 3e-4
_C.OPTIMIZER.WEIGHT_DECAY = 1e-7
_C.GRAD_NORM_CLIP = 5
def get_parser():
parser = argparse.ArgumentParser(description='Fiery training')
# TODO: remove below?
parser.add_argument('--config-file', default='', metavar='FILE', help='path to config file')
parser.add_argument(
'opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER,
)
return parser
def get_cfg(args=None, cfg_dict=None):
""" First get default config. Then merge cfg_dict. Then merge according to args. """
cfg = _C.clone()
if cfg_dict is not None:
cfg.merge_from_other_cfg(CfgNode(cfg_dict))
if args is not None:
if args.config_file:
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
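# --- Usage sketch (not part of the original file; override values are illustrative) ---
if __name__ == '__main__':
    cfg = get_cfg(cfg_dict={'BATCHSIZE': 1, 'GPUS': [0]})
    hparams = cfg.convert_to_dict()            # plain dict, safe to checkpoint (see TrainingModule)
    cfg_roundtrip = get_cfg(cfg_dict=hparams)  # rebuilds an equivalent frozen CfgNode
    print(cfg_roundtrip.BATCHSIZE)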
| 4,568
| 29.46
| 114
|
py
|
fiery
|
fiery-master/fiery/metrics.py
|
from typing import Optional
import torch
from pytorch_lightning.metrics.metric import Metric
from pytorch_lightning.metrics.functional.classification import stat_scores_multiple_classes
from pytorch_lightning.metrics.functional.reduction import reduce
class IntersectionOverUnion(Metric):
"""Computes intersection-over-union."""
def __init__(
self,
n_classes: int,
ignore_index: Optional[int] = None,
absent_score: float = 0.0,
reduction: str = 'none',
compute_on_step: bool = False,
):
super().__init__(compute_on_step=compute_on_step)
self.n_classes = n_classes
self.ignore_index = ignore_index
self.absent_score = absent_score
self.reduction = reduction
self.add_state('true_positive', default=torch.zeros(n_classes), dist_reduce_fx='sum')
self.add_state('false_positive', default=torch.zeros(n_classes), dist_reduce_fx='sum')
self.add_state('false_negative', default=torch.zeros(n_classes), dist_reduce_fx='sum')
self.add_state('support', default=torch.zeros(n_classes), dist_reduce_fx='sum')
def update(self, prediction: torch.Tensor, target: torch.Tensor):
tps, fps, _, fns, sups = stat_scores_multiple_classes(prediction, target, self.n_classes)
self.true_positive += tps
self.false_positive += fps
self.false_negative += fns
self.support += sups
def compute(self):
scores = torch.zeros(self.n_classes, device=self.true_positive.device, dtype=torch.float32)
for class_idx in range(self.n_classes):
if class_idx == self.ignore_index:
continue
tp = self.true_positive[class_idx]
fp = self.false_positive[class_idx]
fn = self.false_negative[class_idx]
sup = self.support[class_idx]
# If this class is absent in the target (no support) AND absent in the pred (no true or false
# positives), then use the absent_score for this class.
if sup + tp + fp == 0:
scores[class_idx] = self.absent_score
continue
denominator = tp + fp + fn
score = tp.to(torch.float) / denominator
scores[class_idx] = score
# Remove the ignored class index from the scores.
if (self.ignore_index is not None) and (0 <= self.ignore_index < self.n_classes):
scores = torch.cat([scores[:self.ignore_index], scores[self.ignore_index+1:]])
return reduce(scores, reduction=self.reduction)
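# Usage sketch (not part of the original file; values are illustrative):
#   iou = IntersectionOverUnion(n_classes=2)
#   iou(torch.tensor([[0, 1], [1, 1]]), torch.tensor([[0, 1], [0, 1]]))   # accumulates tp/fp/fn
#   iou.compute()   # per-class IoU, here tensor([0.5000, 0.6667])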
class PanopticMetric(Metric):
def __init__(
self,
n_classes: int,
temporally_consistent: bool = True,
vehicles_id: int = 1,
compute_on_step: bool = False,
):
super().__init__(compute_on_step=compute_on_step)
self.n_classes = n_classes
self.temporally_consistent = temporally_consistent
self.vehicles_id = vehicles_id
self.keys = ['iou', 'true_positive', 'false_positive', 'false_negative']
self.add_state('iou', default=torch.zeros(n_classes), dist_reduce_fx='sum')
self.add_state('true_positive', default=torch.zeros(n_classes), dist_reduce_fx='sum')
self.add_state('false_positive', default=torch.zeros(n_classes), dist_reduce_fx='sum')
self.add_state('false_negative', default=torch.zeros(n_classes), dist_reduce_fx='sum')
def update(self, pred_instance, gt_instance):
"""
Update state with predictions and targets.
Parameters
----------
pred_instance: (b, s, h, w)
Temporally consistent instance segmentation prediction.
gt_instance: (b, s, h, w)
Ground truth instance segmentation.
"""
batch_size, sequence_length = gt_instance.shape[:2]
# Process labels
assert gt_instance.min() == 0, 'ID 0 of gt_instance must be background'
pred_segmentation = (pred_instance > 0).long()
gt_segmentation = (gt_instance > 0).long()
for b in range(batch_size):
unique_id_mapping = {}
for t in range(sequence_length):
result = self.panoptic_metrics(
pred_segmentation[b, t].detach(),
pred_instance[b, t].detach(),
gt_segmentation[b, t],
gt_instance[b, t],
unique_id_mapping,
)
self.iou += result['iou']
self.true_positive += result['true_positive']
self.false_positive += result['false_positive']
self.false_negative += result['false_negative']
def compute(self):
denominator = torch.maximum(
(self.true_positive + self.false_positive / 2 + self.false_negative / 2),
torch.ones_like(self.true_positive)
)
pq = self.iou / denominator
sq = self.iou / torch.maximum(self.true_positive, torch.ones_like(self.true_positive))
rq = self.true_positive / denominator
return {'pq': pq,
'sq': sq,
'rq': rq,
# If 0, it means there wasn't any detection.
'denominator': (self.true_positive + self.false_positive / 2 + self.false_negative / 2),
}
def panoptic_metrics(self, pred_segmentation, pred_instance, gt_segmentation, gt_instance, unique_id_mapping):
"""
Computes panoptic quality metric components.
Parameters
----------
pred_segmentation: [H, W] range {0, ..., n_classes-1} (>= n_classes is void)
pred_instance: [H, W] range {0, ..., n_instances} (zero means background)
gt_segmentation: [H, W] range {0, ..., n_classes-1} (>= n_classes is void)
gt_instance: [H, W] range {0, ..., n_instances} (zero means background)
unique_id_mapping: instance id mapping to check consistency
"""
n_classes = self.n_classes
result = {key: torch.zeros(n_classes, dtype=torch.float32, device=gt_instance.device) for key in self.keys}
assert pred_segmentation.dim() == 2
assert pred_segmentation.shape == pred_instance.shape == gt_segmentation.shape == gt_instance.shape
n_instances = int(torch.cat([pred_instance, gt_instance]).max().item())
n_all_things = n_instances + n_classes # Classes + instances.
n_things_and_void = n_all_things + 1
        # After combining: 0 is void (not used), 1 is background, 2 would be the vehicle semantic
        # class (absent here because it always overlaps with instances), and instance ids start at 3.
prediction, pred_to_cls = self.combine_mask(pred_segmentation, pred_instance, n_classes, n_all_things)
target, target_to_cls = self.combine_mask(gt_segmentation, gt_instance, n_classes, n_all_things)
# Compute ious between all stuff and things
# hack for bincounting 2 arrays together
x = prediction + n_things_and_void * target
bincount_2d = torch.bincount(x.long(), minlength=n_things_and_void ** 2)
if bincount_2d.shape[0] != n_things_and_void ** 2:
raise ValueError('Incorrect bincount size.')
conf = bincount_2d.reshape((n_things_and_void, n_things_and_void))
# Drop void class
conf = conf[1:, 1:]
# Confusion matrix contains intersections between all combinations of classes
union = conf.sum(0).unsqueeze(0) + conf.sum(1).unsqueeze(1) - conf
iou = torch.where(union > 0, (conf.float() + 1e-9) / (union.float() + 1e-9), torch.zeros_like(union).float())
# In the iou matrix, first dimension is target idx, second dimension is pred idx.
# Mapping will contain a tuple that maps prediction idx to target idx for segments matched by iou.
mapping = (iou > 0.5).nonzero(as_tuple=False)
# Check that classes match.
is_matching = pred_to_cls[mapping[:, 1]] == target_to_cls[mapping[:, 0]]
mapping = mapping[is_matching]
tp_mask = torch.zeros_like(conf, dtype=torch.bool)
tp_mask[mapping[:, 0], mapping[:, 1]] = True
# First ids correspond to "stuff" i.e. semantic seg.
# Instance ids are offset accordingly
for target_id, pred_id in mapping:
cls_id = pred_to_cls[pred_id]
if self.temporally_consistent and cls_id == self.vehicles_id:
if target_id.item() in unique_id_mapping and unique_id_mapping[target_id.item()] != pred_id.item():
# Not temporally consistent
result['false_negative'][target_to_cls[target_id]] += 1
result['false_positive'][pred_to_cls[pred_id]] += 1
unique_id_mapping[target_id.item()] = pred_id.item()
continue
result['true_positive'][cls_id] += 1
result['iou'][cls_id] += iou[target_id][pred_id]
unique_id_mapping[target_id.item()] = pred_id.item()
for target_id in range(n_classes, n_all_things):
# If this is a true positive do nothing.
if tp_mask[target_id, n_classes:].any():
continue
            # If this target instance didn't match any prediction and was present, count it as a false negative.
if target_to_cls[target_id] != -1:
result['false_negative'][target_to_cls[target_id]] += 1
for pred_id in range(n_classes, n_all_things):
# If this is a true positive do nothing.
if tp_mask[n_classes:, pred_id].any():
continue
            # If this predicted instance didn't match any target and is actually present in the prediction, count it as a false positive.
if pred_to_cls[pred_id] != -1 and (conf[:, pred_id] > 0).any():
result['false_positive'][pred_to_cls[pred_id]] += 1
return result
def combine_mask(self, segmentation: torch.Tensor, instance: torch.Tensor, n_classes: int, n_all_things: int):
"""Shifts all things ids by num_classes and combines things and stuff into a single mask
Returns a combined mask + a mapping from id to segmentation class.
"""
instance = instance.view(-1)
instance_mask = instance > 0
instance = instance - 1 + n_classes
segmentation = segmentation.clone().view(-1)
segmentation_mask = segmentation < n_classes # Remove void pixels.
# Build an index from instance id to class id.
instance_id_to_class_tuples = torch.cat(
(
instance[instance_mask & segmentation_mask].unsqueeze(1),
segmentation[instance_mask & segmentation_mask].unsqueeze(1),
),
dim=1,
)
instance_id_to_class = -instance_id_to_class_tuples.new_ones((n_all_things,))
instance_id_to_class[instance_id_to_class_tuples[:, 0]] = instance_id_to_class_tuples[:, 1]
instance_id_to_class[torch.arange(n_classes, device=segmentation.device)] = torch.arange(
n_classes, device=segmentation.device
)
segmentation[instance_mask] = instance[instance_mask]
segmentation += 1 # Shift all legit classes by 1.
segmentation[~segmentation_mask] = 0 # Shift void class to zero.
return segmentation, instance_id_to_class
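# --- Usage sketch (not part of the original file; the tiny tensors are arbitrary) ---
if __name__ == '__main__':
    gt = torch.zeros(1, 1, 4, 4, dtype=torch.long)   # (batch, time, H, W) instance ids, 0 = background
    gt[0, 0, :2, :2] = 1                             # one ground-truth vehicle instance
    pred = gt.clone()                                # perfect, temporally consistent prediction
    metric = PanopticMetric(n_classes=2)
    metric(pred, gt)
    print(metric.compute())                          # pq/sq/rq of 1.0 for the matched classes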
| 11,415
| 43.59375
| 117
|
py
|
fiery
|
fiery-master/fiery/trainer.py
|
import torch
import torch.nn as nn
import pytorch_lightning as pl
from fiery.config import get_cfg
from fiery.models.fiery import Fiery
from fiery.losses import ProbabilisticLoss, SpatialRegressionLoss, SegmentationLoss
from fiery.metrics import IntersectionOverUnion, PanopticMetric
from fiery.utils.geometry import cumulative_warp_features_reverse
from fiery.utils.instance import predict_instance_segmentation_and_trajectories
from fiery.utils.visualisation import visualise_output
class TrainingModule(pl.LightningModule):
def __init__(self, hparams):
super().__init__()
# see config.py for details
self.hparams = hparams
        # pytorch lightning does not support saving a YACS CfgNode, so hparams is kept as a plain dict
cfg = get_cfg(cfg_dict=self.hparams)
self.cfg = cfg
self.n_classes = len(self.cfg.SEMANTIC_SEG.WEIGHTS)
# Bird's-eye view extent in meters
assert self.cfg.LIFT.X_BOUND[1] > 0 and self.cfg.LIFT.Y_BOUND[1] > 0
self.spatial_extent = (self.cfg.LIFT.X_BOUND[1], self.cfg.LIFT.Y_BOUND[1])
# Model
self.model = Fiery(cfg)
# Losses
self.losses_fn = nn.ModuleDict()
self.losses_fn['segmentation'] = SegmentationLoss(
class_weights=torch.Tensor(self.cfg.SEMANTIC_SEG.WEIGHTS),
use_top_k=self.cfg.SEMANTIC_SEG.USE_TOP_K,
top_k_ratio=self.cfg.SEMANTIC_SEG.TOP_K_RATIO,
future_discount=self.cfg.FUTURE_DISCOUNT,
)
# Uncertainty weighting
self.model.segmentation_weight = nn.Parameter(torch.tensor(0.0), requires_grad=True)
self.metric_iou_val = IntersectionOverUnion(self.n_classes)
self.losses_fn['instance_center'] = SpatialRegressionLoss(
norm=2, future_discount=self.cfg.FUTURE_DISCOUNT
)
self.losses_fn['instance_offset'] = SpatialRegressionLoss(
norm=1, future_discount=self.cfg.FUTURE_DISCOUNT, ignore_index=self.cfg.DATASET.IGNORE_INDEX
)
# Uncertainty weighting
self.model.centerness_weight = nn.Parameter(torch.tensor(0.0), requires_grad=True)
self.model.offset_weight = nn.Parameter(torch.tensor(0.0), requires_grad=True)
self.metric_panoptic_val = PanopticMetric(n_classes=self.n_classes)
if self.cfg.INSTANCE_FLOW.ENABLED:
self.losses_fn['instance_flow'] = SpatialRegressionLoss(
norm=1, future_discount=self.cfg.FUTURE_DISCOUNT, ignore_index=self.cfg.DATASET.IGNORE_INDEX
)
# Uncertainty weighting
self.model.flow_weight = nn.Parameter(torch.tensor(0.0), requires_grad=True)
if self.cfg.PROBABILISTIC.ENABLED:
self.losses_fn['probabilistic'] = ProbabilisticLoss()
self.training_step_count = 0
def shared_step(self, batch, is_train):
image = batch['image']
intrinsics = batch['intrinsics']
extrinsics = batch['extrinsics']
future_egomotion = batch['future_egomotion']
# Warp labels
labels, future_distribution_inputs = self.prepare_future_labels(batch)
# Forward pass
output = self.model(
image, intrinsics, extrinsics, future_egomotion, future_distribution_inputs
)
#####
# Loss computation
#####
loss = {}
segmentation_factor = 1 / torch.exp(self.model.segmentation_weight)
loss['segmentation'] = segmentation_factor * self.losses_fn['segmentation'](
output['segmentation'], labels['segmentation']
)
loss['segmentation_uncertainty'] = 0.5 * self.model.segmentation_weight
centerness_factor = 1 / (2*torch.exp(self.model.centerness_weight))
loss['instance_center'] = centerness_factor * self.losses_fn['instance_center'](
output['instance_center'], labels['centerness']
)
offset_factor = 1 / (2*torch.exp(self.model.offset_weight))
loss['instance_offset'] = offset_factor * self.losses_fn['instance_offset'](
output['instance_offset'], labels['offset']
)
loss['centerness_uncertainty'] = 0.5 * self.model.centerness_weight
loss['offset_uncertainty'] = 0.5 * self.model.offset_weight
if self.cfg.INSTANCE_FLOW.ENABLED:
flow_factor = 1 / (2*torch.exp(self.model.flow_weight))
loss['instance_flow'] = flow_factor * self.losses_fn['instance_flow'](
output['instance_flow'], labels['flow']
)
loss['flow_uncertainty'] = 0.5 * self.model.flow_weight
if self.cfg.PROBABILISTIC.ENABLED:
loss['probabilistic'] = self.cfg.PROBABILISTIC.WEIGHT * self.losses_fn['probabilistic'](output)
# Metrics
if not is_train:
seg_prediction = output['segmentation'].detach()
seg_prediction = torch.argmax(seg_prediction, dim=2, keepdims=True)
self.metric_iou_val(seg_prediction, labels['segmentation'])
pred_consistent_instance_seg = predict_instance_segmentation_and_trajectories(
output, compute_matched_centers=False
)
self.metric_panoptic_val(pred_consistent_instance_seg, labels['instance'])
return output, labels, loss
def prepare_future_labels(self, batch):
labels = {}
future_distribution_inputs = []
segmentation_labels = batch['segmentation']
instance_center_labels = batch['centerness']
instance_offset_labels = batch['offset']
instance_flow_labels = batch['flow']
gt_instance = batch['instance']
future_egomotion = batch['future_egomotion']
# Warp labels to present's reference frame
segmentation_labels = cumulative_warp_features_reverse(
segmentation_labels[:, (self.model.receptive_field - 1):].float(),
future_egomotion[:, (self.model.receptive_field - 1):],
mode='nearest', spatial_extent=self.spatial_extent,
).long().contiguous()
labels['segmentation'] = segmentation_labels
future_distribution_inputs.append(segmentation_labels)
# Warp instance labels to present's reference frame
gt_instance = cumulative_warp_features_reverse(
gt_instance[:, (self.model.receptive_field - 1):].float().unsqueeze(2),
future_egomotion[:, (self.model.receptive_field - 1):],
mode='nearest', spatial_extent=self.spatial_extent,
).long().contiguous()[:, :, 0]
labels['instance'] = gt_instance
instance_center_labels = cumulative_warp_features_reverse(
instance_center_labels[:, (self.model.receptive_field - 1):],
future_egomotion[:, (self.model.receptive_field - 1):],
mode='nearest', spatial_extent=self.spatial_extent,
).contiguous()
labels['centerness'] = instance_center_labels
instance_offset_labels = cumulative_warp_features_reverse(
instance_offset_labels[:, (self.model.receptive_field- 1):],
future_egomotion[:, (self.model.receptive_field - 1):],
mode='nearest', spatial_extent=self.spatial_extent,
).contiguous()
labels['offset'] = instance_offset_labels
future_distribution_inputs.append(instance_center_labels)
future_distribution_inputs.append(instance_offset_labels)
if self.cfg.INSTANCE_FLOW.ENABLED:
instance_flow_labels = cumulative_warp_features_reverse(
instance_flow_labels[:, (self.model.receptive_field - 1):],
future_egomotion[:, (self.model.receptive_field - 1):],
mode='nearest', spatial_extent=self.spatial_extent,
).contiguous()
labels['flow'] = instance_flow_labels
future_distribution_inputs.append(instance_flow_labels)
if len(future_distribution_inputs) > 0:
future_distribution_inputs = torch.cat(future_distribution_inputs, dim=2)
return labels, future_distribution_inputs
def visualise(self, labels, output, batch_idx, prefix='train'):
visualisation_video = visualise_output(labels, output, self.cfg)
name = f'{prefix}_outputs'
if prefix == 'val':
name = name + f'_{batch_idx}'
self.logger.experiment.add_video(name, visualisation_video, global_step=self.training_step_count, fps=2)
def training_step(self, batch, batch_idx):
output, labels, loss = self.shared_step(batch, True)
self.training_step_count += 1
for key, value in loss.items():
self.logger.experiment.add_scalar(key, value, global_step=self.training_step_count)
if self.training_step_count % self.cfg.VIS_INTERVAL == 0:
self.visualise(labels, output, batch_idx, prefix='train')
return sum(loss.values())
def validation_step(self, batch, batch_idx):
output, labels, loss = self.shared_step(batch, False)
for key, value in loss.items():
self.log('val_' + key, value)
if batch_idx == 0:
self.visualise(labels, output, batch_idx, prefix='val')
def shared_epoch_end(self, step_outputs, is_train):
# log per class iou metrics
class_names = ['background', 'dynamic']
if not is_train:
scores = self.metric_iou_val.compute()
for key, value in zip(class_names, scores):
self.logger.experiment.add_scalar('val_iou_' + key, value, global_step=self.training_step_count)
self.metric_iou_val.reset()
if not is_train:
scores = self.metric_panoptic_val.compute()
for key, value in scores.items():
for instance_name, score in zip(['background', 'vehicles'], value):
if instance_name != 'background':
self.logger.experiment.add_scalar(f'val_{key}_{instance_name}', score.item(),
global_step=self.training_step_count)
self.metric_panoptic_val.reset()
self.logger.experiment.add_scalar('segmentation_weight',
1 / (torch.exp(self.model.segmentation_weight)),
global_step=self.training_step_count)
self.logger.experiment.add_scalar('centerness_weight',
1 / (2 * torch.exp(self.model.centerness_weight)),
global_step=self.training_step_count)
self.logger.experiment.add_scalar('offset_weight', 1 / (2 * torch.exp(self.model.offset_weight)),
global_step=self.training_step_count)
if self.cfg.INSTANCE_FLOW.ENABLED:
self.logger.experiment.add_scalar('flow_weight', 1 / (2 * torch.exp(self.model.flow_weight)),
global_step=self.training_step_count)
def training_epoch_end(self, step_outputs):
self.shared_epoch_end(step_outputs, True)
def validation_epoch_end(self, step_outputs):
self.shared_epoch_end(step_outputs, False)
def configure_optimizers(self):
params = self.model.parameters()
optimizer = torch.optim.Adam(
params, lr=self.cfg.OPTIMIZER.LR, weight_decay=self.cfg.OPTIMIZER.WEIGHT_DECAY
)
return optimizer
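    # Note on the uncertainty weighting used in shared_step (sketch of the learned-weighting
    # scheme as implemented above): for a learnable log-variance w, each task contributes
    #   factor * task_loss + 0.5 * w,
    # with factor = 1/exp(w) for segmentation and 1/(2*exp(w)) for the regression heads, so
    # high-uncertainty tasks are down-weighted while the 0.5*w term keeps w from growing unboundedly.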
| 11,419
| 42.754789
| 112
|
py
|
fiery
|
fiery-master/fiery/models/distributions.py
|
import torch
import torch.nn as nn
from fiery.layers.convolutions import Bottleneck
class DistributionModule(nn.Module):
"""
A convolutional net that parametrises a diagonal Gaussian distribution.
"""
def __init__(
self, in_channels, latent_dim, min_log_sigma, max_log_sigma):
super().__init__()
self.compress_dim = in_channels // 2
self.latent_dim = latent_dim
self.min_log_sigma = min_log_sigma
self.max_log_sigma = max_log_sigma
self.encoder = DistributionEncoder(
in_channels,
self.compress_dim,
)
self.last_conv = nn.Sequential(
nn.AdaptiveAvgPool2d(1), nn.Conv2d(self.compress_dim, out_channels=2 * self.latent_dim, kernel_size=1)
)
def forward(self, s_t):
b, s = s_t.shape[:2]
assert s == 1
encoding = self.encoder(s_t[:, 0])
mu_log_sigma = self.last_conv(encoding).view(b, 1, 2 * self.latent_dim)
mu = mu_log_sigma[:, :, :self.latent_dim]
log_sigma = mu_log_sigma[:, :, self.latent_dim:]
# clip the log_sigma value for numerical stability
log_sigma = torch.clamp(log_sigma, self.min_log_sigma, self.max_log_sigma)
return mu, log_sigma
class DistributionEncoder(nn.Module):
"""Encodes s_t or (s_t, y_{t+1}, ..., y_{t+H}).
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.model = nn.Sequential(
Bottleneck(in_channels, out_channels=out_channels, downsample=True),
Bottleneck(out_channels, out_channels=out_channels, downsample=True),
Bottleneck(out_channels, out_channels=out_channels, downsample=True),
Bottleneck(out_channels, out_channels=out_channels, downsample=True),
)
def forward(self, s_t):
return self.model(s_t)
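# --- Usage sketch (not part of the original file; sizes are illustrative, and the actual
# --- sampling in Fiery happens inside the model's forward pass, not here) ---
if __name__ == '__main__':
    module = DistributionModule(in_channels=64, latent_dim=32, min_log_sigma=-5.0, max_log_sigma=5.0)
    s_t = torch.randn(2, 1, 64, 25, 25)                        # (batch, 1, channels, H, W)
    mu, log_sigma = module(s_t)                                # each (batch, 1, latent_dim)
    sample = mu + torch.exp(log_sigma) * torch.randn_like(mu)  # reparameterised draw
    print(sample.shape)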
| 1,871
| 31.842105
| 114
|
py
|
fiery
|
fiery-master/fiery/models/future_prediction.py
|
import torch
from fiery.layers.convolutions import Bottleneck
from fiery.layers.temporal import SpatialGRU
class FuturePrediction(torch.nn.Module):
def __init__(self, in_channels, latent_dim, n_gru_blocks=3, n_res_layers=3):
super().__init__()
self.n_gru_blocks = n_gru_blocks
# Convolutional recurrent model with z_t as an initial hidden state and inputs the sample
# from the probabilistic model. The architecture of the model is:
# [Spatial GRU - [Bottleneck] x n_res_layers] x n_gru_blocks
self.spatial_grus = []
self.res_blocks = []
for i in range(self.n_gru_blocks):
gru_in_channels = latent_dim if i == 0 else in_channels
self.spatial_grus.append(SpatialGRU(gru_in_channels, in_channels))
self.res_blocks.append(torch.nn.Sequential(*[Bottleneck(in_channels)
for _ in range(n_res_layers)]))
self.spatial_grus = torch.nn.ModuleList(self.spatial_grus)
self.res_blocks = torch.nn.ModuleList(self.res_blocks)
def forward(self, x, hidden_state):
# x has shape (b, n_future, c, h, w), hidden_state (b, c, h, w)
for i in range(self.n_gru_blocks):
x = self.spatial_grus[i](x, hidden_state, flow=None)
b, n_future, c, h, w = x.shape
x = self.res_blocks[i](x.view(b * n_future, c, h, w))
x = x.view(b, n_future, c, h, w)
return x
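# --- Shape sketch (not part of the original file; all sizes below are illustrative) ---
if __name__ == '__main__':
    fp = FuturePrediction(in_channels=64, latent_dim=32, n_gru_blocks=3, n_res_layers=3)
    x = torch.randn(1, 4, 32, 50, 50)        # (batch, n_future, latent_dim, H, W) latent sample
    hidden = torch.randn(1, 64, 50, 50)      # present state (batch, in_channels, H, W)
    print(fp(x, hidden).shape)               # expected: torch.Size([1, 4, 64, 50, 50])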
| 1,488
| 39.243243
| 97
|
py
|
fiery
|
fiery-master/fiery/models/fiery.py
|
import torch
import torch.nn as nn
from fiery.models.encoder import Encoder
from fiery.models.temporal_model import TemporalModelIdentity, TemporalModel
from fiery.models.distributions import DistributionModule
from fiery.models.future_prediction import FuturePrediction
from fiery.models.decoder import Decoder
from fiery.utils.network import pack_sequence_dim, unpack_sequence_dim, set_bn_momentum
from fiery.utils.geometry import cumulative_warp_features, calculate_birds_eye_view_parameters, VoxelsSumming
class Fiery(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
bev_resolution, bev_start_position, bev_dimension = calculate_birds_eye_view_parameters(
self.cfg.LIFT.X_BOUND, self.cfg.LIFT.Y_BOUND, self.cfg.LIFT.Z_BOUND
)
self.bev_resolution = nn.Parameter(bev_resolution, requires_grad=False)
self.bev_start_position = nn.Parameter(bev_start_position, requires_grad=False)
self.bev_dimension = nn.Parameter(bev_dimension, requires_grad=False)
self.encoder_downsample = self.cfg.MODEL.ENCODER.DOWNSAMPLE
self.encoder_out_channels = self.cfg.MODEL.ENCODER.OUT_CHANNELS
self.frustum = self.create_frustum()
self.depth_channels, _, _, _ = self.frustum.shape
if self.cfg.TIME_RECEPTIVE_FIELD == 1:
assert self.cfg.MODEL.TEMPORAL_MODEL.NAME == 'identity'
# temporal block
self.receptive_field = self.cfg.TIME_RECEPTIVE_FIELD
self.n_future = self.cfg.N_FUTURE_FRAMES
self.latent_dim = self.cfg.MODEL.DISTRIBUTION.LATENT_DIM
if self.cfg.MODEL.SUBSAMPLE:
assert self.cfg.DATASET.NAME == 'lyft'
self.receptive_field = 3
self.n_future = 5
# Spatial extent in bird's-eye view, in meters
self.spatial_extent = (self.cfg.LIFT.X_BOUND[1], self.cfg.LIFT.Y_BOUND[1])
self.bev_size = (self.bev_dimension[0].item(), self.bev_dimension[1].item())
# Encoder
self.encoder = Encoder(cfg=self.cfg.MODEL.ENCODER, D=self.depth_channels)
# Temporal model
temporal_in_channels = self.encoder_out_channels
if self.cfg.MODEL.TEMPORAL_MODEL.INPUT_EGOPOSE:
temporal_in_channels += 6
if self.cfg.MODEL.TEMPORAL_MODEL.NAME == 'identity':
self.temporal_model = TemporalModelIdentity(temporal_in_channels, self.receptive_field)
elif cfg.MODEL.TEMPORAL_MODEL.NAME == 'temporal_block':
self.temporal_model = TemporalModel(
temporal_in_channels,
self.receptive_field,
input_shape=self.bev_size,
start_out_channels=self.cfg.MODEL.TEMPORAL_MODEL.START_OUT_CHANNELS,
extra_in_channels=self.cfg.MODEL.TEMPORAL_MODEL.EXTRA_IN_CHANNELS,
n_spatial_layers_between_temporal_layers=self.cfg.MODEL.TEMPORAL_MODEL.INBETWEEN_LAYERS,
use_pyramid_pooling=self.cfg.MODEL.TEMPORAL_MODEL.PYRAMID_POOLING,
)
else:
raise NotImplementedError(f'Temporal module {self.cfg.MODEL.TEMPORAL_MODEL.NAME}.')
self.future_pred_in_channels = self.temporal_model.out_channels
if self.n_future > 0:
# probabilistic sampling
if self.cfg.PROBABILISTIC.ENABLED:
# Distribution networks
self.present_distribution = DistributionModule(
self.future_pred_in_channels,
self.latent_dim,
min_log_sigma=self.cfg.MODEL.DISTRIBUTION.MIN_LOG_SIGMA,
max_log_sigma=self.cfg.MODEL.DISTRIBUTION.MAX_LOG_SIGMA,
)
future_distribution_in_channels = (self.future_pred_in_channels
+ self.n_future * self.cfg.PROBABILISTIC.FUTURE_DIM
)
self.future_distribution = DistributionModule(
future_distribution_in_channels,
self.latent_dim,
min_log_sigma=self.cfg.MODEL.DISTRIBUTION.MIN_LOG_SIGMA,
max_log_sigma=self.cfg.MODEL.DISTRIBUTION.MAX_LOG_SIGMA,
)
# Future prediction
self.future_prediction = FuturePrediction(
in_channels=self.future_pred_in_channels,
latent_dim=self.latent_dim,
n_gru_blocks=self.cfg.MODEL.FUTURE_PRED.N_GRU_BLOCKS,
n_res_layers=self.cfg.MODEL.FUTURE_PRED.N_RES_LAYERS,
)
# Decoder
self.decoder = Decoder(
in_channels=self.future_pred_in_channels,
n_classes=len(self.cfg.SEMANTIC_SEG.WEIGHTS),
predict_future_flow=self.cfg.INSTANCE_FLOW.ENABLED,
)
set_bn_momentum(self, self.cfg.MODEL.BN_MOMENTUM)
def create_frustum(self):
# Create grid in image plane
h, w = self.cfg.IMAGE.FINAL_DIM
downsampled_h, downsampled_w = h // self.encoder_downsample, w // self.encoder_downsample
# Depth grid
depth_grid = torch.arange(*self.cfg.LIFT.D_BOUND, dtype=torch.float)
depth_grid = depth_grid.view(-1, 1, 1).expand(-1, downsampled_h, downsampled_w)
n_depth_slices = depth_grid.shape[0]
# x and y grids
x_grid = torch.linspace(0, w - 1, downsampled_w, dtype=torch.float)
x_grid = x_grid.view(1, 1, downsampled_w).expand(n_depth_slices, downsampled_h, downsampled_w)
y_grid = torch.linspace(0, h - 1, downsampled_h, dtype=torch.float)
y_grid = y_grid.view(1, downsampled_h, 1).expand(n_depth_slices, downsampled_h, downsampled_w)
# Dimension (n_depth_slices, downsampled_h, downsampled_w, 3)
# containing data points in the image: left-right, top-bottom, depth
frustum = torch.stack((x_grid, y_grid, depth_grid), -1)
return nn.Parameter(frustum, requires_grad=False)
def forward(self, image, intrinsics, extrinsics, future_egomotion, future_distribution_inputs=None, noise=None):
output = {}
# Only process features from the past and present
image = image[:, :self.receptive_field].contiguous()
intrinsics = intrinsics[:, :self.receptive_field].contiguous()
extrinsics = extrinsics[:, :self.receptive_field].contiguous()
future_egomotion = future_egomotion[:, :self.receptive_field].contiguous()
# Lift features and project them to bird's-eye view
x = self.calculate_birds_eye_view_features(image, intrinsics, extrinsics)
# Warp past features to the present's reference frame
x = cumulative_warp_features(
x.clone(), future_egomotion,
mode='bilinear', spatial_extent=self.spatial_extent,
)
if self.cfg.MODEL.TEMPORAL_MODEL.INPUT_EGOPOSE:
b, s, c = future_egomotion.shape
h, w = x.shape[-2:]
future_egomotions_spatial = future_egomotion.view(b, s, c, 1, 1).expand(b, s, c, h, w)
# at time 0 there is no past egomotion, so feed a zero vector
future_egomotions_spatial = torch.cat([torch.zeros_like(future_egomotions_spatial[:, :1]),
future_egomotions_spatial[:, :(self.receptive_field-1)]], dim=1)
x = torch.cat([x, future_egomotions_spatial], dim=-3)
# Temporal model
states = self.temporal_model(x)
if self.n_future > 0:
present_state = states[:, :1].contiguous()
if self.cfg.PROBABILISTIC.ENABLED:
# Do probabilistic computation
sample, output_distribution = self.distribution_forward(
present_state, future_distribution_inputs, noise
)
output = {**output, **output_distribution}
# Prepare future prediction input
b, _, _, h, w = present_state.shape
hidden_state = present_state[:, 0]
if self.cfg.PROBABILISTIC.ENABLED:
future_prediction_input = sample.expand(-1, self.n_future, -1, -1, -1)
else:
future_prediction_input = hidden_state.new_zeros(b, self.n_future, self.latent_dim, h, w)
# Recursively predict future states
future_states = self.future_prediction(future_prediction_input, hidden_state)
# Concatenate present state
future_states = torch.cat([present_state, future_states], dim=1)
# Predict bird's-eye view outputs
if self.n_future > 0:
bev_output = self.decoder(future_states)
else:
bev_output = self.decoder(states[:, -1:])
output = {**output, **bev_output}
return output
def get_geometry(self, intrinsics, extrinsics):
"""Calculate the (x, y, z) 3D position of the features.
"""
rotation, translation = extrinsics[..., :3, :3], extrinsics[..., :3, 3]
B, N, _ = translation.shape
# Add batch, camera dimension, and a dummy dimension at the end
points = self.frustum.unsqueeze(0).unsqueeze(0).unsqueeze(-1)
# Camera to ego reference frame
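# The frustum holds (u, v, d) pixel coordinates and depth. Scaling (u, v) by the depth gives
# (u*d, v*d, d), so that multiplying by the inverse intrinsics K^-1 recovers the 3D point in the
# camera frame; the extrinsic rotation and translation then map it into the ego frame.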
points = torch.cat((points[:, :, :, :, :, :2] * points[:, :, :, :, :, 2:3], points[:, :, :, :, :, 2:3]), 5)
combined_transformation = rotation.matmul(torch.inverse(intrinsics))
points = combined_transformation.view(B, N, 1, 1, 1, 3, 3).matmul(points).squeeze(-1)
points += translation.view(B, N, 1, 1, 1, 3)
# The 3 dimensions in the ego reference frame are: (forward, sides, height)
return points
def encoder_forward(self, x):
# batch, n_cameras, channels, height, width
b, n, c, h, w = x.shape
x = x.view(b * n, c, h, w)
x = self.encoder(x)
x = x.view(b, n, *x.shape[1:])
x = x.permute(0, 1, 3, 4, 5, 2)
return x
def projection_to_birds_eye_view(self, x, geometry):
""" Adapted from https://github.com/nv-tlabs/lift-splat-shoot/blob/master/src/models.py#L200"""
# batch, n_cameras, depth, height, width, channels
batch, n, d, h, w, c = x.shape
output = torch.zeros(
(batch, c, self.bev_dimension[0], self.bev_dimension[1]), dtype=torch.float, device=x.device
)
# Number of 3D points
N = n * d * h * w
for b in range(batch):
# flatten x
x_b = x[b].reshape(N, c)
# Convert positions to integer indices
geometry_b = ((geometry[b] - (self.bev_start_position - self.bev_resolution / 2.0)) / self.bev_resolution)
geometry_b = geometry_b.view(N, 3).long()
# Mask out points that are outside the considered spatial extent.
mask = (
(geometry_b[:, 0] >= 0)
& (geometry_b[:, 0] < self.bev_dimension[0])
& (geometry_b[:, 1] >= 0)
& (geometry_b[:, 1] < self.bev_dimension[1])
& (geometry_b[:, 2] >= 0)
& (geometry_b[:, 2] < self.bev_dimension[2])
)
x_b = x_b[mask]
geometry_b = geometry_b[mask]
# Sort tensors so that those within the same voxel are consecutive.
ranks = (
geometry_b[:, 0] * (self.bev_dimension[1] * self.bev_dimension[2])
+ geometry_b[:, 1] * (self.bev_dimension[2])
+ geometry_b[:, 2]
)
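# 'ranks' is a unique flattened voxel index (x * Y * Z + y * Z + z); sorting by it groups points
# that fall into the same voxel together so they can be summed.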
ranks_indices = ranks.argsort()
x_b, geometry_b, ranks = x_b[ranks_indices], geometry_b[ranks_indices], ranks[ranks_indices]
# Project to bird's-eye view by summing voxels.
x_b, geometry_b = VoxelsSumming.apply(x_b, geometry_b, ranks)
bev_feature = torch.zeros((self.bev_dimension[2], self.bev_dimension[0], self.bev_dimension[1], c),
device=x_b.device)
bev_feature[geometry_b[:, 2], geometry_b[:, 0], geometry_b[:, 1]] = x_b
# Put channel in second position and remove z dimension
bev_feature = bev_feature.permute((0, 3, 1, 2))
bev_feature = bev_feature.squeeze(0)
output[b] = bev_feature
return output
def calculate_birds_eye_view_features(self, x, intrinsics, extrinsics):
b, s, n, c, h, w = x.shape
# Reshape
x = pack_sequence_dim(x)
intrinsics = pack_sequence_dim(intrinsics)
extrinsics = pack_sequence_dim(extrinsics)
geometry = self.get_geometry(intrinsics, extrinsics)
x = self.encoder_forward(x)
x = self.projection_to_birds_eye_view(x, geometry)
x = unpack_sequence_dim(x, b, s)
return x
def distribution_forward(self, present_features, future_distribution_inputs=None, noise=None):
"""
Parameters
----------
present_features: 5-D output from dynamics module with shape (b, 1, c, h, w)
future_distribution_inputs: 5-D tensor containing labels, shape (b, s, cfg.PROBABILISTIC.FUTURE_DIM, h, w)
noise: a sample from a standard normal N(0, 1) with shape (b, s, latent_dim). If None, a sample is drawn inside the function (zeros at test time)
Returns
-------
sample: sample taken from present/future distribution, broadcast to shape (b, s, latent_dim, h, w)
present_distribution_mu: shape (b, s, latent_dim)
present_distribution_log_sigma: shape (b, s, latent_dim)
future_distribution_mu: shape (b, s, latent_dim)
future_distribution_log_sigma: shape (b, s, latent_dim)
"""
b, s, _, h, w = present_features.size()
assert s == 1
present_mu, present_log_sigma = self.present_distribution(present_features)
future_mu, future_log_sigma = None, None
if future_distribution_inputs is not None:
# Concatenate future labels to z_t
future_features = future_distribution_inputs[:, 1:].contiguous().view(b, 1, -1, h, w)
future_features = torch.cat([present_features, future_features], dim=2)
future_mu, future_log_sigma = self.future_distribution(future_features)
if noise is None:
if self.training:
noise = torch.randn_like(present_mu)
else:
noise = torch.zeros_like(present_mu)
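# During training, sample from the future (posterior) distribution conditioned on the labels;
# at test time, fall back to the present (prior) distribution and use its mean, since noise is zero.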
if self.training:
mu = future_mu
sigma = torch.exp(future_log_sigma)
else:
mu = present_mu
sigma = torch.exp(present_log_sigma)
sample = mu + sigma * noise
# Spatially broadcast sample to the dimensions of present_features
sample = sample.view(b, s, self.latent_dim, 1, 1).expand(b, s, self.latent_dim, h, w)
output_distribution = {
'present_mu': present_mu,
'present_log_sigma': present_log_sigma,
'future_mu': future_mu,
'future_log_sigma': future_log_sigma,
}
return sample, output_distribution
| 15,090
| 43.385294
| 118
|
py
|
fiery
|
fiery-master/fiery/models/temporal_model.py
|
import torch.nn as nn
from fiery.layers.temporal import Bottleneck3D, TemporalBlock
class TemporalModel(nn.Module):
def __init__(
self, in_channels, receptive_field, input_shape, start_out_channels=64, extra_in_channels=0,
n_spatial_layers_between_temporal_layers=0, use_pyramid_pooling=True):
super().__init__()
self.receptive_field = receptive_field
n_temporal_layers = receptive_field - 1
h, w = input_shape
modules = []
block_in_channels = in_channels
block_out_channels = start_out_channels
for _ in range(n_temporal_layers):
if use_pyramid_pooling:
pool_sizes = [(2, h, w)]
else:
pool_sizes = None
temporal = TemporalBlock(
block_in_channels,
block_out_channels,
use_pyramid_pooling=use_pyramid_pooling,
pool_sizes=pool_sizes,
)
spatial = [
Bottleneck3D(block_out_channels, block_out_channels, kernel_size=(1, 3, 3))
for _ in range(n_spatial_layers_between_temporal_layers)
]
temporal_spatial_layers = nn.Sequential(temporal, *spatial)
modules.extend(temporal_spatial_layers)
block_in_channels = block_out_channels
block_out_channels += extra_in_channels
self.out_channels = block_in_channels
self.model = nn.Sequential(*modules)
def forward(self, x):
# Reshape input tensor to (batch, C, time, H, W)
x = x.permute(0, 2, 1, 3, 4)
x = self.model(x)
x = x.permute(0, 2, 1, 3, 4).contiguous()
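# Keep only the present frame (index receptive_field - 1): earlier frames do not have a full
# temporal receptive field after the causal temporal convolutions.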
return x[:, (self.receptive_field - 1):]
class TemporalModelIdentity(nn.Module):
def __init__(self, in_channels, receptive_field):
super().__init__()
self.receptive_field = receptive_field
self.out_channels = in_channels
def forward(self, x):
return x[:, (self.receptive_field - 1):]
| 2,120
| 32.666667
| 104
|
py
|
fiery
|
fiery-master/fiery/models/encoder.py
|
import torch.nn as nn
from efficientnet_pytorch import EfficientNet
from fiery.layers.convolutions import UpsamplingConcat
class Encoder(nn.Module):
def __init__(self, cfg, D):
super().__init__()
self.D = D
self.C = cfg.OUT_CHANNELS
self.use_depth_distribution = cfg.USE_DEPTH_DISTRIBUTION
self.downsample = cfg.DOWNSAMPLE
self.version = cfg.NAME.split('-')[1]
self.backbone = EfficientNet.from_pretrained(cfg.NAME)
self.delete_unused_layers()
if self.downsample == 16:
if self.version == 'b0':
upsampling_in_channels = 320 + 112
elif self.version == 'b4':
upsampling_in_channels = 448 + 160
upsampling_out_channels = 512
elif self.downsample == 8:
if self.version == 'b0':
upsampling_in_channels = 112 + 40
elif self.version == 'b4':
upsampling_in_channels = 160 + 56
upsampling_out_channels = 128
else:
raise ValueError(f'Downsample factor {self.downsample} not handled.')
self.upsampling_layer = UpsamplingConcat(upsampling_in_channels, upsampling_out_channels)
if self.use_depth_distribution:
self.depth_layer = nn.Conv2d(upsampling_out_channels, self.C + self.D, kernel_size=1, padding=0)
else:
self.depth_layer = nn.Conv2d(upsampling_out_channels, self.C, kernel_size=1, padding=0)
def delete_unused_layers(self):
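# Remove the EfficientNet classification head (and, when downsampling by 8, the deeper blocks),
# since only intermediate feature maps are needed; this saves memory and compute.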
indices_to_delete = []
for idx in range(len(self.backbone._blocks)):
if self.downsample == 8:
if self.version == 'b0' and idx > 10:
indices_to_delete.append(idx)
if self.version == 'b4' and idx > 21:
indices_to_delete.append(idx)
for idx in reversed(indices_to_delete):
del self.backbone._blocks[idx]
del self.backbone._conv_head
del self.backbone._bn1
del self.backbone._avg_pooling
del self.backbone._dropout
del self.backbone._fc
def get_features(self, x):
# Adapted from https://github.com/lukemelas/EfficientNet-PyTorch/blob/master/efficientnet_pytorch/model.py#L231
endpoints = dict()
# Stem
x = self.backbone._swish(self.backbone._bn0(self.backbone._conv_stem(x)))
prev_x = x
# Blocks
for idx, block in enumerate(self.backbone._blocks):
drop_connect_rate = self.backbone._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self.backbone._blocks)
x = block(x, drop_connect_rate=drop_connect_rate)
if prev_x.size(2) > x.size(2):
endpoints['reduction_{}'.format(len(endpoints) + 1)] = prev_x
prev_x = x
if self.downsample == 8:
if self.version == 'b0' and idx == 10:
break
if self.version == 'b4' and idx == 21:
break
# Head
endpoints['reduction_{}'.format(len(endpoints) + 1)] = x
if self.downsample == 16:
input_1, input_2 = endpoints['reduction_5'], endpoints['reduction_4']
elif self.downsample == 8:
input_1, input_2 = endpoints['reduction_4'], endpoints['reduction_3']
x = self.upsampling_layer(input_1, input_2)
return x
def forward(self, x):
x = self.get_features(x) # get feature vector
x = self.depth_layer(x) # feature and depth head
if self.use_depth_distribution:
depth = x[:, : self.D].softmax(dim=1)
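# 'depth' is a categorical distribution over the D depth bins for every pixel; the outer product
# below places the C feature channels at each depth bin, giving a (B*N, C, D, H, W) tensor.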
x = depth.unsqueeze(1) * x[:, self.D : (self.D + self.C)].unsqueeze(2) # outer product depth and features
else:
x = x.unsqueeze(2).repeat(1, 1, self.D, 1, 1)
return x
| 3,910
| 36.247619
| 119
|
py
|
fiery
|
fiery-master/fiery/models/decoder.py
|
import torch.nn as nn
from torchvision.models.resnet import resnet18
from fiery.layers.convolutions import UpsamplingAdd
class Decoder(nn.Module):
def __init__(self, in_channels, n_classes, predict_future_flow):
super().__init__()
backbone = resnet18(pretrained=False, zero_init_residual=True)
self.first_conv = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = backbone.bn1
self.relu = backbone.relu
self.layer1 = backbone.layer1
self.layer2 = backbone.layer2
self.layer3 = backbone.layer3
self.predict_future_flow = predict_future_flow
shared_out_channels = in_channels
self.up3_skip = UpsamplingAdd(256, 128, scale_factor=2)
self.up2_skip = UpsamplingAdd(128, 64, scale_factor=2)
self.up1_skip = UpsamplingAdd(64, shared_out_channels, scale_factor=2)
self.segmentation_head = nn.Sequential(
nn.Conv2d(shared_out_channels, shared_out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(shared_out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(shared_out_channels, n_classes, kernel_size=1, padding=0),
)
self.instance_offset_head = nn.Sequential(
nn.Conv2d(shared_out_channels, shared_out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(shared_out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(shared_out_channels, 2, kernel_size=1, padding=0),
)
self.instance_center_head = nn.Sequential(
nn.Conv2d(shared_out_channels, shared_out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(shared_out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(shared_out_channels, 1, kernel_size=1, padding=0),
nn.Sigmoid(),
)
if self.predict_future_flow:
self.instance_future_head = nn.Sequential(
nn.Conv2d(shared_out_channels, shared_out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(shared_out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(shared_out_channels, 2, kernel_size=1, padding=0),
)
def forward(self, x):
b, s, c, h, w = x.shape
x = x.view(b * s, c, h, w)
# (H, W)
skip_x = {'1': x}
x = self.first_conv(x)
x = self.bn1(x)
x = self.relu(x)
# (H/4, W/4)
x = self.layer1(x)
skip_x['2'] = x
x = self.layer2(x)
skip_x['3'] = x
# (H/8, W/8)
x = self.layer3(x)
# First upsample to (H/4, W/4)
x = self.up3_skip(x, skip_x['3'])
# Second upsample to (H/2, W/2)
x = self.up2_skip(x, skip_x['2'])
# Third upsample to (H, W)
x = self.up1_skip(x, skip_x['1'])
segmentation_output = self.segmentation_head(x)
instance_center_output = self.instance_center_head(x)
instance_offset_output = self.instance_offset_head(x)
instance_future_output = self.instance_future_head(x) if self.predict_future_flow else None
return {
'segmentation': segmentation_output.view(b, s, *segmentation_output.shape[1:]),
'instance_center': instance_center_output.view(b, s, *instance_center_output.shape[1:]),
'instance_offset': instance_offset_output.view(b, s, *instance_offset_output.shape[1:]),
'instance_flow': instance_future_output.view(b, s, *instance_future_output.shape[1:])
if instance_future_output is not None else None,
}
| 3,676
| 38.967391
| 106
|
py
|
fiery
|
fiery-master/fiery/layers/convolutions.py
|
from collections import OrderedDict
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvBlock(nn.Module):
"""2D convolution followed by
- an optional normalisation (batch norm or instance norm)
- an optional activation (ReLU, LeakyReLU, or tanh)
"""
def __init__(
self,
in_channels,
out_channels=None,
kernel_size=3,
stride=1,
norm='bn',
activation='relu',
bias=False,
transpose=False,
):
super().__init__()
out_channels = out_channels or in_channels
padding = int((kernel_size - 1) / 2)
self.conv = nn.Conv2d if not transpose else partial(nn.ConvTranspose2d, output_padding=1)
self.conv = self.conv(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias)
if norm == 'bn':
self.norm = nn.BatchNorm2d(out_channels)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(out_channels)
elif norm == 'none':
self.norm = None
else:
raise ValueError('Invalid norm {}'.format(norm))
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.1, inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()  # nn.Tanh takes no inplace argument
elif activation == 'none':
self.activation = None
else:
raise ValueError('Invalid activation {}'.format(activation))
def forward(self, x):
x = self.conv(x)
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class Bottleneck(nn.Module):
"""
Defines a bottleneck module with a residual connection
"""
def __init__(
self,
in_channels,
out_channels=None,
kernel_size=3,
dilation=1,
groups=1,
upsample=False,
downsample=False,
dropout=0.0,
):
super().__init__()
self._downsample = downsample
bottleneck_channels = int(in_channels / 2)
out_channels = out_channels or in_channels
padding_size = ((kernel_size - 1) * dilation + 1) // 2
# Define the main conv operation
assert dilation == 1
if upsample:
assert not downsample, 'downsample and upsample not possible simultaneously.'
bottleneck_conv = nn.ConvTranspose2d(
bottleneck_channels,
bottleneck_channels,
kernel_size=kernel_size,
bias=False,
dilation=1,
stride=2,
output_padding=padding_size,
padding=padding_size,
groups=groups,
)
elif downsample:
bottleneck_conv = nn.Conv2d(
bottleneck_channels,
bottleneck_channels,
kernel_size=kernel_size,
bias=False,
dilation=dilation,
stride=2,
padding=padding_size,
groups=groups,
)
else:
bottleneck_conv = nn.Conv2d(
bottleneck_channels,
bottleneck_channels,
kernel_size=kernel_size,
bias=False,
dilation=dilation,
padding=padding_size,
groups=groups,
)
self.layers = nn.Sequential(
OrderedDict(
[
# First projection with 1x1 kernel
('conv_down_project', nn.Conv2d(in_channels, bottleneck_channels, kernel_size=1, bias=False)),
('abn_down_project', nn.Sequential(nn.BatchNorm2d(bottleneck_channels),
nn.ReLU(inplace=True))),
# Second conv block
('conv', bottleneck_conv),
('abn', nn.Sequential(nn.BatchNorm2d(bottleneck_channels), nn.ReLU(inplace=True))),
# Final projection with 1x1 kernel
('conv_up_project', nn.Conv2d(bottleneck_channels, out_channels, kernel_size=1, bias=False)),
('abn_up_project', nn.Sequential(nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True))),
# Regulariser
('dropout', nn.Dropout2d(p=dropout)),
]
)
)
if out_channels == in_channels and not downsample and not upsample:
self.projection = None
else:
projection = OrderedDict()
if upsample:
projection.update({'upsample_skip_proj': Interpolate(scale_factor=2)})
elif downsample:
projection.update({'upsample_skip_proj': nn.MaxPool2d(kernel_size=2, stride=2)})
projection.update(
{
'conv_skip_proj': nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
'bn_skip_proj': nn.BatchNorm2d(out_channels),
}
)
self.projection = nn.Sequential(projection)
# pylint: disable=arguments-differ
def forward(self, *args):
(x,) = args
x_residual = self.layers(x)
if self.projection is not None:
if self._downsample:
# pad h/w dimensions if they are odd to prevent shape mismatch with residual layer
x = nn.functional.pad(x, (0, x.shape[-1] % 2, 0, x.shape[-2] % 2), value=0)
return x_residual + self.projection(x)
return x_residual + x
class Interpolate(nn.Module):
def __init__(self, scale_factor: int = 2):
super().__init__()
self._interpolate = nn.functional.interpolate
self._scale_factor = scale_factor
# pylint: disable=arguments-differ
def forward(self, x):
return self._interpolate(x, scale_factor=self._scale_factor, mode='bilinear', align_corners=False)
class UpsamplingConcat(nn.Module):
def __init__(self, in_channels, out_channels, scale_factor=2):
super().__init__()
self.upsample = nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
)
def forward(self, x_to_upsample, x):
x_to_upsample = self.upsample(x_to_upsample)
x_to_upsample = torch.cat([x, x_to_upsample], dim=1)
return self.conv(x_to_upsample)
class UpsamplingAdd(nn.Module):
def __init__(self, in_channels, out_channels, scale_factor=2):
super().__init__()
self.upsample_layer = nn.Sequential(
nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False),
nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(out_channels),
)
def forward(self, x, x_skip):
x = self.upsample_layer(x)
return x + x_skip
| 7,593
| 34.32093
| 114
|
py
|
fiery
|
fiery-master/fiery/layers/temporal.py
|
from collections import OrderedDict
import torch
import torch.nn as nn
from fiery.layers.convolutions import ConvBlock
from fiery.utils.geometry import warp_features
class SpatialGRU(nn.Module):
"""A GRU cell that takes an input tensor [BxTxCxHxW] and an optional previous state and passes a
convolutional gated recurrent unit over the data"""
def __init__(self, input_size, hidden_size, gru_bias_init=0.0, norm='bn', activation='relu'):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.gru_bias_init = gru_bias_init
self.conv_update = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size=3, bias=True, padding=1)
self.conv_reset = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size=3, bias=True, padding=1)
self.conv_state_tilde = ConvBlock(
input_size + hidden_size, hidden_size, kernel_size=3, bias=False, norm=norm, activation=activation
)
def forward(self, x, state=None, flow=None, mode='bilinear'):
# pylint: disable=unused-argument, arguments-differ
# Check size
assert len(x.size()) == 5, 'Input tensor must be BxTxCxHxW.'
b, timesteps, c, h, w = x.size()
assert c == self.input_size, f'feature sizes must match, got input {c} for layer with size {self.input_size}'
# recurrent layers
rnn_output = []
rnn_state = torch.zeros(b, self.hidden_size, h, w, device=x.device) if state is None else state
for t in range(timesteps):
x_t = x[:, t]
if flow is not None:
rnn_state = warp_features(rnn_state, flow[:, t], mode=mode)
# propagate rnn state
rnn_state = self.gru_cell(x_t, rnn_state)
rnn_output.append(rnn_state)
# reshape rnn output to batch tensor
return torch.stack(rnn_output, dim=1)
def gru_cell(self, x, state):
# Compute gates
x_and_state = torch.cat([x, state], dim=1)
update_gate = self.conv_update(x_and_state)
reset_gate = self.conv_reset(x_and_state)
# Add bias to initialise gate as close to identity function
update_gate = torch.sigmoid(update_gate + self.gru_bias_init)
reset_gate = torch.sigmoid(reset_gate + self.gru_bias_init)
# Compute proposal state; the activation is set by the ConvBlock's activation argument (ReLU by default)
state_tilde = self.conv_state_tilde(torch.cat([x, (1.0 - reset_gate) * state], dim=1))
output = (1.0 - update_gate) * state + update_gate * state_tilde
return output
class CausalConv3d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=(2, 3, 3), dilation=(1, 1, 1), bias=False):
super().__init__()
assert len(kernel_size) == 3, 'kernel_size must be a 3-tuple.'
time_pad = (kernel_size[0] - 1) * dilation[0]
height_pad = ((kernel_size[1] - 1) * dilation[1]) // 2
width_pad = ((kernel_size[2] - 1) * dilation[2]) // 2
# Pad temporally on the left
self.pad = nn.ConstantPad3d(padding=(width_pad, width_pad, height_pad, height_pad, time_pad, 0), value=0)
self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, dilation=dilation, stride=1, padding=0, bias=bias)
self.norm = nn.BatchNorm3d(out_channels)
self.activation = nn.ReLU(inplace=True)
def forward(self, *inputs):
(x,) = inputs
x = self.pad(x)
x = self.conv(x)
x = self.norm(x)
x = self.activation(x)
return x
class CausalMaxPool3d(nn.Module):
def __init__(self, kernel_size=(2, 3, 3)):
super().__init__()
assert len(kernel_size) == 3, 'kernel_size must be a 3-tuple.'
time_pad = kernel_size[0] - 1
height_pad = (kernel_size[1] - 1) // 2
width_pad = (kernel_size[2] - 1) // 2
# Pad temporally on the left
self.pad = nn.ConstantPad3d(padding=(width_pad, width_pad, height_pad, height_pad, time_pad, 0), value=0)
self.max_pool = nn.MaxPool3d(kernel_size, stride=1)
def forward(self, *inputs):
(x,) = inputs
x = self.pad(x)
x = self.max_pool(x)
return x
def conv_1x1x1_norm_activated(in_channels, out_channels):
"""1x1x1 3D convolution, normalization and activation layer."""
return nn.Sequential(
OrderedDict(
[
('conv', nn.Conv3d(in_channels, out_channels, kernel_size=1, bias=False)),
('norm', nn.BatchNorm3d(out_channels)),
('activation', nn.ReLU(inplace=True)),
]
)
)
class Bottleneck3D(nn.Module):
"""
Defines a bottleneck module with a residual connection
"""
def __init__(self, in_channels, out_channels=None, kernel_size=(2, 3, 3), dilation=(1, 1, 1)):
super().__init__()
bottleneck_channels = in_channels // 2
out_channels = out_channels or in_channels
self.layers = nn.Sequential(
OrderedDict(
[
# First projection with 1x1 kernel
('conv_down_project', conv_1x1x1_norm_activated(in_channels, bottleneck_channels)),
# Second conv block
(
'conv',
CausalConv3d(
bottleneck_channels,
bottleneck_channels,
kernel_size=kernel_size,
dilation=dilation,
bias=False,
),
),
# Final projection with 1x1 kernel
('conv_up_project', conv_1x1x1_norm_activated(bottleneck_channels, out_channels)),
]
)
)
if out_channels != in_channels:
self.projection = nn.Sequential(
nn.Conv3d(in_channels, out_channels, kernel_size=1, bias=False),
nn.BatchNorm3d(out_channels),
)
else:
self.projection = None
def forward(self, *args):
(x,) = args
x_residual = self.layers(x)
x_features = self.projection(x) if self.projection is not None else x
return x_residual + x_features
class PyramidSpatioTemporalPooling(nn.Module):
""" Spatio-temporal pyramid pooling.
Performs 3D average pooling followed by 1x1x1 convolution to reduce the number of channels and upsampling.
pool_sizes is a list of kernel sizes: usually [(2, h, w), (2, h//2, w//2), (2, h//4, w//4)]
"""
def __init__(self, in_channels, reduction_channels, pool_sizes):
super().__init__()
self.features = []
for pool_size in pool_sizes:
assert pool_size[0] == 2, (
"Time kernel should be 2 as PyTorch raises an error when" "padding with more than half the kernel size"
)
stride = (1, *pool_size[1:])
padding = (pool_size[0] - 1, 0, 0)
self.features.append(
nn.Sequential(
OrderedDict(
[
# Pad the input tensor, but exclude the zero padding from the average.
(
'avgpool',
torch.nn.AvgPool3d(
kernel_size=pool_size, stride=stride, padding=padding, count_include_pad=False
),
),
('conv_bn_relu', conv_1x1x1_norm_activated(in_channels, reduction_channels)),
]
)
)
)
self.features = nn.ModuleList(self.features)
def forward(self, *inputs):
(x,) = inputs
b, _, t, h, w = x.shape
# Do not include current tensor when concatenating
out = []
for f in self.features:
# Remove unnecessary padded values (time dimension) on the right
x_pool = f(x)[:, :, :-1].contiguous()
c = x_pool.shape[1]
x_pool = nn.functional.interpolate(
x_pool.view(b * t, c, *x_pool.shape[-2:]), (h, w), mode='bilinear', align_corners=False
)
x_pool = x_pool.view(b, c, t, h, w)
out.append(x_pool)
out = torch.cat(out, 1)
return out
class TemporalBlock(nn.Module):
""" Temporal block with the following layers:
- 2x3x3, 1x3x3, spatio-temporal pyramid pooling
- dropout
- skip connection.
"""
def __init__(self, in_channels, out_channels=None, use_pyramid_pooling=False, pool_sizes=None):
super().__init__()
self.in_channels = in_channels
self.half_channels = in_channels // 2
self.out_channels = out_channels or self.in_channels
self.kernels = [(2, 3, 3), (1, 3, 3)]
# Flag for spatio-temporal pyramid pooling
self.use_pyramid_pooling = use_pyramid_pooling
# 3 convolution paths: 2x3x3, 1x3x3, 1x1x1
self.convolution_paths = []
for kernel_size in self.kernels:
self.convolution_paths.append(
nn.Sequential(
conv_1x1x1_norm_activated(self.in_channels, self.half_channels),
CausalConv3d(self.half_channels, self.half_channels, kernel_size=kernel_size),
)
)
self.convolution_paths.append(conv_1x1x1_norm_activated(self.in_channels, self.half_channels))
self.convolution_paths = nn.ModuleList(self.convolution_paths)
agg_in_channels = len(self.convolution_paths) * self.half_channels
if self.use_pyramid_pooling:
assert pool_sizes is not None, "pool_sizes must contain the list of kernel sizes, but is None."
reduction_channels = self.in_channels // 3
self.pyramid_pooling = PyramidSpatioTemporalPooling(self.in_channels, reduction_channels, pool_sizes)
agg_in_channels += len(pool_sizes) * reduction_channels
# Feature aggregation
self.aggregation = nn.Sequential(
conv_1x1x1_norm_activated(agg_in_channels, self.out_channels),)
if self.out_channels != self.in_channels:
self.projection = nn.Sequential(
nn.Conv3d(self.in_channels, self.out_channels, kernel_size=1, bias=False),
nn.BatchNorm3d(self.out_channels),
)
else:
self.projection = None
def forward(self, *inputs):
(x,) = inputs
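# Run the parallel convolution paths (2x3x3, 1x3x3 and 1x1x1), optionally add pyramid-pooled
# context, aggregate with a 1x1x1 convolution, and add the (possibly projected) input as a residual.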
x_paths = []
for conv in self.convolution_paths:
x_paths.append(conv(x))
x_residual = torch.cat(x_paths, dim=1)
if self.use_pyramid_pooling:
x_pool = self.pyramid_pooling(x)
x_residual = torch.cat([x_residual, x_pool], dim=1)
x_residual = self.aggregation(x_residual)
if self.out_channels != self.in_channels:
x = self.projection(x)
x = x + x_residual
return x
| 11,152
| 38.549645
| 120
|
py
|
fiery
|
fiery-master/fiery/utils/visualisation.py
|
import numpy as np
import torch
import matplotlib.pylab
from fiery.utils.instance import predict_instance_segmentation_and_trajectories
DEFAULT_COLORMAP = matplotlib.pylab.cm.jet
def flow_to_image(flow: np.ndarray, autoscale: bool = False) -> np.ndarray:
"""
Applies a colour map to the flow field, which should be a 2-channel array of shape 2xHxW. Returns an HxWx3 numpy image
Code adapted from: https://github.com/liruoteng/FlowNet/blob/master/models/flownet/scripts/flowlib.py
"""
u = flow[0, :, :]
v = flow[1, :, :]
# Convert to polar coordinates
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = np.max(rad)
# Normalise flow maps
if autoscale:
u /= maxrad + np.finfo(float).eps
v /= maxrad + np.finfo(float).eps
# visualise flow with cmap
return np.uint8(compute_color(u, v) * 255)
def _normalise(image: np.ndarray) -> np.ndarray:
lower = np.min(image)
delta = np.max(image) - lower
if delta == 0:
delta = 1
image = (image.astype(np.float32) - lower) / delta
return image
def apply_colour_map(
image: np.ndarray, cmap: matplotlib.colors.LinearSegmentedColormap = DEFAULT_COLORMAP, autoscale: bool = False
) -> np.ndarray:
"""
Applies a colour map to the given 1- or 2-channel numpy image (3-channel RGB images are passed through, optionally normalised).
If multi-channel, the image must be CxHxW. Returns an HxWx3 numpy image
"""
if image.ndim == 2 or (image.ndim == 3 and image.shape[0] == 1):
if image.ndim == 3:
image = image[0]
# grayscale scalar image
if autoscale:
image = _normalise(image)
return cmap(image)[:, :, :3]
if image.shape[0] == 2:
# 2 dimensional UV
return flow_to_image(image, autoscale=autoscale)
if image.shape[0] == 3:
# normalise rgb channels
if autoscale:
image = _normalise(image)
return np.transpose(image, axes=[1, 2, 0])
raise Exception('Image must be 1, 2 or 3 channel to convert to colour_map (CxHxW)')
def heatmap_image(
image: np.ndarray, cmap: matplotlib.colors.LinearSegmentedColormap = DEFAULT_COLORMAP, autoscale: bool = True
) -> np.ndarray:
"""Colorize an 1 or 2 channel image with a colourmap."""
if not issubclass(image.dtype.type, np.floating):
raise ValueError(f"Expected a ndarray of float type, but got dtype {image.dtype}")
if not (image.ndim == 2 or (image.ndim == 3 and image.shape[0] in [1, 2])):
raise ValueError(f"Expected a ndarray of shape [H, W] or [1, H, W] or [2, H, W], but got shape {image.shape}")
heatmap_np = apply_colour_map(image, cmap=cmap, autoscale=autoscale)
heatmap_np = np.uint8(heatmap_np * 255)
return heatmap_np
def compute_color(u: np.ndarray, v: np.ndarray) -> np.ndarray:
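# Maps each flow vector to a colour: the angle of (u, v) selects a hue on the colour wheel, while
# the magnitude controls saturation (small flows fade towards white); NaN pixels are rendered black.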
assert u.shape == v.shape
[h, w] = u.shape
img = np.zeros([h, w, 3])
nan_mask = np.isnan(u) | np.isnan(v)
u[nan_mask] = 0
v[nan_mask] = 0
colorwheel = make_color_wheel()
ncols = np.size(colorwheel, 0)
rad = np.sqrt(u ** 2 + v ** 2)
a = np.arctan2(-v, -u) / np.pi
f_k = (a + 1) / 2 * (ncols - 1) + 1
k_0 = np.floor(f_k).astype(int)
k_1 = k_0 + 1
k_1[k_1 == ncols + 1] = 1
f = f_k - k_0
for i in range(0, np.size(colorwheel, 1)):
tmp = colorwheel[:, i]
col0 = tmp[k_0 - 1] / 255
col1 = tmp[k_1 - 1] / 255
col = (1 - f) * col0 + f * col1
idx = rad <= 1
col[idx] = 1 - rad[idx] * (1 - col[idx])
notidx = np.logical_not(idx)
col[notidx] *= 0.75
img[:, :, i] = col * (1 - nan_mask)
return img
def make_color_wheel() -> np.ndarray:
"""
Create colour wheel.
Code adapted from https://github.com/liruoteng/FlowNet/blob/master/models/flownet/scripts/flowlib.py
"""
red_yellow = 15
yellow_green = 6
green_cyan = 4
cyan_blue = 11
blue_magenta = 13
magenta_red = 6
ncols = red_yellow + yellow_green + green_cyan + cyan_blue + blue_magenta + magenta_red
colorwheel = np.zeros([ncols, 3])
col = 0
# red_yellow
colorwheel[0:red_yellow, 0] = 255
colorwheel[0:red_yellow, 1] = np.transpose(np.floor(255 * np.arange(0, red_yellow) / red_yellow))
col += red_yellow
# yellow_green
colorwheel[col : col + yellow_green, 0] = 255 - np.transpose(
np.floor(255 * np.arange(0, yellow_green) / yellow_green)
)
colorwheel[col : col + yellow_green, 1] = 255
col += yellow_green
# green_cyan
colorwheel[col : col + green_cyan, 1] = 255
colorwheel[col : col + green_cyan, 2] = np.transpose(np.floor(255 * np.arange(0, green_cyan) / green_cyan))
col += green_cyan
# cyan_blue
colorwheel[col : col + cyan_blue, 1] = 255 - np.transpose(np.floor(255 * np.arange(0, cyan_blue) / cyan_blue))
colorwheel[col : col + cyan_blue, 2] = 255
col += cyan_blue
# blue_magenta
colorwheel[col : col + blue_magenta, 2] = 255
colorwheel[col : col + blue_magenta, 0] = np.transpose(np.floor(255 * np.arange(0, blue_magenta) / blue_magenta))
col += blue_magenta
# magenta_red
colorwheel[col : col + magenta_red, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, magenta_red) / magenta_red))
colorwheel[col : col + magenta_red, 0] = 255
return colorwheel
def make_contour(img, colour=[0, 0, 0], double_line=False):
h, w = img.shape[:2]
out = img.copy()
# Vertical lines
out[np.arange(h), np.repeat(0, h)] = colour
out[np.arange(h), np.repeat(w - 1, h)] = colour
# Horizontal lines
out[np.repeat(0, w), np.arange(w)] = colour
out[np.repeat(h - 1, w), np.arange(w)] = colour
if double_line:
out[np.arange(h), np.repeat(1, h)] = colour
out[np.arange(h), np.repeat(w - 2, h)] = colour
# Horizontal lines
out[np.repeat(1, w), np.arange(w)] = colour
out[np.repeat(h - 2, w), np.arange(w)] = colour
return out
def plot_instance_map(instance_image, instance_map, instance_colours=None, bg_image=None):
if isinstance(instance_image, torch.Tensor):
instance_image = instance_image.cpu().numpy()
assert isinstance(instance_image, np.ndarray)
if instance_colours is None:
instance_colours = generate_instance_colours(instance_map)
if len(instance_image.shape) > 2:
instance_image = instance_image.reshape((instance_image.shape[-2], instance_image.shape[-1]))
if bg_image is None:
plot_image = 255 * np.ones((instance_image.shape[0], instance_image.shape[1], 3), dtype=np.uint8)
else:
plot_image = bg_image
for key, value in instance_colours.items():
plot_image[instance_image == key] = value
return plot_image
def visualise_output(labels, output, cfg):
semantic_colours = np.array([[255, 255, 255], [0, 0, 0]], dtype=np.uint8)
consistent_instance_seg = predict_instance_segmentation_and_trajectories(
output, compute_matched_centers=False
)
sequence_length = consistent_instance_seg.shape[1]
b = 0
video = []
for t in range(sequence_length):
out_t = []
# Ground truth
unique_ids = torch.unique(labels['instance'][b, t]).cpu().numpy()[1:]
instance_map = dict(zip(unique_ids, unique_ids))
instance_plot = plot_instance_map(labels['instance'][b, t].cpu(), instance_map)[::-1, ::-1]
instance_plot = make_contour(instance_plot)
semantic_seg = labels['segmentation'].squeeze(2).cpu().numpy()
semantic_plot = semantic_colours[semantic_seg[b, t][::-1, ::-1]]
semantic_plot = make_contour(semantic_plot)
if cfg.INSTANCE_FLOW.ENABLED:
future_flow_plot = labels['flow'][b, t].cpu().numpy()
future_flow_plot[:, semantic_seg[b, t] != 1] = 0
future_flow_plot = flow_to_image(future_flow_plot)[::-1, ::-1]
future_flow_plot = make_contour(future_flow_plot)
else:
future_flow_plot = np.zeros_like(semantic_plot)
center_plot = heatmap_image(labels['centerness'][b, t, 0].cpu().numpy())[::-1, ::-1]
center_plot = make_contour(center_plot)
offset_plot = labels['offset'][b, t].cpu().numpy()
offset_plot[:, semantic_seg[b, t] != 1] = 0
offset_plot = flow_to_image(offset_plot)[::-1, ::-1]
offset_plot = make_contour(offset_plot)
out_t.append(np.concatenate([instance_plot, future_flow_plot,
semantic_plot, center_plot, offset_plot], axis=0))
# Predictions
unique_ids = torch.unique(consistent_instance_seg[b, t]).cpu().numpy()[1:]
instance_map = dict(zip(unique_ids, unique_ids))
instance_plot = plot_instance_map(consistent_instance_seg[b, t].cpu(), instance_map)[::-1, ::-1]
instance_plot = make_contour(instance_plot)
semantic_seg = output['segmentation'].argmax(dim=2).detach().cpu().numpy()
semantic_plot = semantic_colours[semantic_seg[b, t][::-1, ::-1]]
semantic_plot = make_contour(semantic_plot)
if cfg.INSTANCE_FLOW.ENABLED:
future_flow_plot = output['instance_flow'][b, t].detach().cpu().numpy()
future_flow_plot[:, semantic_seg[b, t] != 1] = 0
future_flow_plot = flow_to_image(future_flow_plot)[::-1, ::-1]
future_flow_plot = make_contour(future_flow_plot)
else:
future_flow_plot = np.zeros_like(semantic_plot)
center_plot = heatmap_image(output['instance_center'][b, t, 0].detach().cpu().numpy())[::-1, ::-1]
center_plot = make_contour(center_plot)
offset_plot = output['instance_offset'][b, t].detach().cpu().numpy()
offset_plot[:, semantic_seg[b, t] != 1] = 0
offset_plot = flow_to_image(offset_plot)[::-1, ::-1]
offset_plot = make_contour(offset_plot)
out_t.append(np.concatenate([instance_plot, future_flow_plot,
semantic_plot, center_plot, offset_plot], axis=0))
out_t = np.concatenate(out_t, axis=1)
# Shape (C, H, W)
out_t = out_t.transpose((2, 0, 1))
video.append(out_t)
# Shape (B, T, C, H, W)
video = np.stack(video)[None]
return video
def convert_figure_numpy(figure):
""" Convert figure to numpy image """
figure_np = np.frombuffer(figure.canvas.tostring_rgb(), dtype=np.uint8)
figure_np = figure_np.reshape(figure.canvas.get_width_height()[::-1] + (3,))
return figure_np
def generate_instance_colours(instance_map):
# Most distinct 22 colours (Kelly colours, from
# https://stackoverflow.com/questions/470690/how-to-automatically-generate-n-distinct-colors)
# plus some colours from ADE20k
INSTANCE_COLOURS = np.asarray([
[0, 0, 0],
[255, 179, 0],
[128, 62, 117],
[255, 104, 0],
[166, 189, 215],
[193, 0, 32],
[206, 162, 98],
[129, 112, 102],
[0, 125, 52],
[246, 118, 142],
[0, 83, 138],
[255, 122, 92],
[83, 55, 122],
[255, 142, 0],
[179, 40, 81],
[244, 200, 0],
[127, 24, 13],
[147, 170, 0],
[89, 51, 21],
[241, 58, 19],
[35, 44, 22],
[112, 224, 255],
[70, 184, 160],
[153, 0, 255],
[71, 255, 0],
[255, 0, 163],
[255, 204, 0],
[0, 255, 235],
[255, 0, 235],
[255, 0, 122],
[255, 245, 0],
[10, 190, 212],
[214, 255, 0],
[0, 204, 255],
[20, 0, 255],
[255, 255, 0],
[0, 153, 255],
[0, 255, 204],
[41, 255, 0],
[173, 0, 255],
[0, 245, 255],
[71, 0, 255],
[0, 255, 184],
[0, 92, 255],
[184, 255, 0],
[255, 214, 0],
[25, 194, 194],
[92, 0, 255],
[220, 220, 220],
[255, 9, 92],
[112, 9, 255],
[8, 255, 214],
[255, 184, 6],
[10, 255, 71],
[255, 41, 10],
[7, 255, 255],
[224, 255, 8],
[102, 8, 255],
[255, 61, 6],
[255, 194, 7],
[0, 255, 20],
[255, 8, 41],
[255, 5, 153],
[6, 51, 255],
[235, 12, 255],
[160, 150, 20],
[0, 163, 255],
[140, 140, 140],
[250, 10, 15],
[20, 255, 0],
])
return {instance_id: INSTANCE_COLOURS[global_instance_id % len(INSTANCE_COLOURS)] for
instance_id, global_instance_id in instance_map.items()
}
| 12,488
| 32.572581
| 121
|
py
|
fiery
|
fiery-master/fiery/utils/network.py
|
import torch
import torch.nn as nn
import torchvision
def pack_sequence_dim(x):
b, s = x.shape[:2]
return x.view(b * s, *x.shape[2:])
def unpack_sequence_dim(x, b, s):
return x.view(b, s, *x.shape[1:])
def preprocess_batch(batch, device, unsqueeze=False):
for key, value in batch.items():
if key != 'sample_token':
batch[key] = value.to(device)
if unsqueeze:
batch[key] = batch[key].unsqueeze(0)
def set_module_grad(module, requires_grad=False):
for p in module.parameters():
p.requires_grad = requires_grad
def set_bn_momentum(model, momentum=0.1):
for m in model.modules():
if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
m.momentum = momentum
class NormalizeInverse(torchvision.transforms.Normalize):
# https://discuss.pytorch.org/t/simple-way-to-inverse-transform-normalization/4821/8
def __init__(self, mean, std):
mean = torch.as_tensor(mean)
std = torch.as_tensor(std)
std_inv = 1 / (std + 1e-7)
mean_inv = -mean * std_inv
super().__init__(mean=mean_inv, std=std_inv)
def __call__(self, tensor):
return super().__call__(tensor.clone())
| 1,236
| 27.113636
| 89
|
py
|
fiery
|
fiery-master/fiery/utils/geometry.py
|
import PIL
import numpy as np
import torch
from pyquaternion import Quaternion
def resize_and_crop_image(img, resize_dims, crop):
# Bilinear resizing followed by cropping
img = img.resize(resize_dims, resample=PIL.Image.BILINEAR)
img = img.crop(crop)
return img
def update_intrinsics(intrinsics, top_crop=0.0, left_crop=0.0, scale_width=1.0, scale_height=1.0):
"""
Parameters
----------
intrinsics: torch.Tensor (3, 3)
top_crop: float
left_crop: float
scale_width: float
scale_height: float
"""
updated_intrinsics = intrinsics.clone()
# Adjust intrinsics scale due to resizing
updated_intrinsics[0, 0] *= scale_width
updated_intrinsics[0, 2] *= scale_width
updated_intrinsics[1, 1] *= scale_height
updated_intrinsics[1, 2] *= scale_height
# Adjust principal point due to cropping
updated_intrinsics[0, 2] -= left_crop
updated_intrinsics[1, 2] -= top_crop
return updated_intrinsics
def calculate_birds_eye_view_parameters(x_bounds, y_bounds, z_bounds):
"""
Parameters
----------
x_bounds: Forward direction in the ego-car.
y_bounds: Sides
z_bounds: Height
Returns
-------
bev_resolution: Bird's-eye view bev_resolution
bev_start_position Bird's-eye view first element
bev_dimension Bird's-eye view tensor spatial dimension
"""
bev_resolution = torch.tensor([row[2] for row in [x_bounds, y_bounds, z_bounds]])
bev_start_position = torch.tensor([row[0] + row[2] / 2.0 for row in [x_bounds, y_bounds, z_bounds]])
bev_dimension = torch.tensor([(row[1] - row[0]) / row[2] for row in [x_bounds, y_bounds, z_bounds]],
dtype=torch.long)
return bev_resolution, bev_start_position, bev_dimension
def convert_egopose_to_matrix_numpy(egopose):
transformation_matrix = np.zeros((4, 4), dtype=np.float32)
rotation = Quaternion(egopose['rotation']).rotation_matrix
translation = np.array(egopose['translation'])
transformation_matrix[:3, :3] = rotation
transformation_matrix[:3, 3] = translation
transformation_matrix[3, 3] = 1.0
return transformation_matrix
def invert_matrix_egopose_numpy(egopose):
""" Compute the inverse transformation of a 4x4 egopose numpy matrix."""
inverse_matrix = np.zeros((4, 4), dtype=np.float32)
rotation = egopose[:3, :3]
translation = egopose[:3, 3]
inverse_matrix[:3, :3] = rotation.T
inverse_matrix[:3, 3] = -np.dot(rotation.T, translation)
inverse_matrix[3, 3] = 1.0
return inverse_matrix
def mat2pose_vec(matrix: torch.Tensor):
"""
Converts a 4x4 pose matrix into a 6-dof pose vector
Args:
matrix (torch.Tensor): (..., 4, 4) pose matrix
Returns:
vector (torch.Tensor): (..., 6) 6-dof pose vector comprising translation components (tx, ty, tz) and
rotation components (rx, ry, rz)
"""
# M[1, 2] = -sinx*cosy, M[2, 2] = +cosx*cosy
rotx = torch.atan2(-matrix[..., 1, 2], matrix[..., 2, 2])
# M[0, 2] = +siny, M[1, 2] = -sinx*cosy, M[2, 2] = +cosx*cosy
cosy = torch.sqrt(matrix[..., 1, 2] ** 2 + matrix[..., 2, 2] ** 2)
roty = torch.atan2(matrix[..., 0, 2], cosy)
# M[0, 0] = +cosy*cosz, M[0, 1] = -cosy*sinz
rotz = torch.atan2(-matrix[..., 0, 1], matrix[..., 0, 0])
rotation = torch.stack((rotx, roty, rotz), dim=-1)
# Extract translation params
translation = matrix[..., :3, 3]
return torch.cat((translation, rotation), dim=-1)
def euler2mat(angle: torch.Tensor):
"""Convert euler angles to rotation matrix.
Reference: https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174
Args:
angle: rotation angle along 3 axis (in radians) [Bx3]
Returns:
Rotation matrix corresponding to the euler angles [Bx3x3]
"""
shape = angle.shape
angle = angle.view(-1, 3)
x, y, z = angle[:, 0], angle[:, 1], angle[:, 2]
cosz = torch.cos(z)
sinz = torch.sin(z)
zeros = torch.zeros_like(z)
ones = torch.ones_like(z)
zmat = torch.stack([cosz, -sinz, zeros, sinz, cosz, zeros, zeros, zeros, ones], dim=1).view(-1, 3, 3)
cosy = torch.cos(y)
siny = torch.sin(y)
ymat = torch.stack([cosy, zeros, siny, zeros, ones, zeros, -siny, zeros, cosy], dim=1).view(-1, 3, 3)
cosx = torch.cos(x)
sinx = torch.sin(x)
xmat = torch.stack([ones, zeros, zeros, zeros, cosx, -sinx, zeros, sinx, cosx], dim=1).view(-1, 3, 3)
rot_mat = xmat.bmm(ymat).bmm(zmat)
rot_mat = rot_mat.view(*shape[:-1], 3, 3)
return rot_mat
def pose_vec2mat(vec: torch.Tensor):
"""
Convert 6DoF parameters to transformation matrix.
Args:
vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz [B,6]
Returns:
A transformation matrix [B,4,4]
"""
translation = vec[..., :3].unsqueeze(-1) # [...x3x1]
rot = vec[..., 3:].contiguous() # [...x3]
rot_mat = euler2mat(rot) # [...,3,3]
transform_mat = torch.cat([rot_mat, translation], dim=-1) # [...,3,4]
transform_mat = torch.nn.functional.pad(transform_mat, [0, 0, 0, 1], value=0) # [...,4,4]
transform_mat[..., 3, 3] = 1.0
return transform_mat
def invert_pose_matrix(x):
"""
Parameters
----------
x: [B, 4, 4] batch of pose matrices
Returns
-------
out: [B, 4, 4] batch of inverse pose matrices
"""
assert len(x.shape) == 3 and x.shape[1:] == (4, 4), 'Only works for batch of pose matrices.'
transposed_rotation = torch.transpose(x[:, :3, :3], 1, 2)
translation = x[:, :3, 3:]
inverse_mat = torch.cat([transposed_rotation, -torch.bmm(transposed_rotation, translation)], dim=-1) # [B,3,4]
inverse_mat = torch.nn.functional.pad(inverse_mat, [0, 0, 0, 1], value=0) # [B,4,4]
inverse_mat[..., 3, 3] = 1.0
return inverse_mat
def warp_features(x, flow, mode='nearest', spatial_extent=None):
""" Applies a rotation and translation to feature map x.
Args:
x: (b, c, h, w) feature map
flow: (b, 6) 6DoF egomotion vector (only the x-y translation and yaw components are used)
mode: use 'nearest' when dealing with categorical inputs
Returns:
in plane transformed feature map
"""
if flow is None:
return x
b, c, h, w = x.shape
# z-rotation
angle = flow[:, 5].clone() # torch.atan2(flow[:, 1, 0], flow[:, 0, 0])
# x-y translation
translation = flow[:, :2].clone() # flow[:, :2, 3]
# Normalise the translation: divide by the number of metres spanned by half of the image,
# because a normalised translation of 1.0 corresponds to a shift of half of the image.
translation[:, 0] /= spatial_extent[0]
translation[:, 1] /= spatial_extent[1]
# forward axis is inverted
translation[:, 0] *= -1
cos_theta = torch.cos(angle)
sin_theta = torch.sin(angle)
# output = Rot.input + translation
# tx and ty are inverted as is the case when going from real coordinates to numpy coordinates
# translation_pos_0 -> positive value makes the image move to the left
# translation_pos_1 -> positive value makes the image move to the top
# Angle -> positive value in rad makes the image move in the trigonometric way
transformation = torch.stack([cos_theta, -sin_theta, translation[:, 1],
sin_theta, cos_theta, translation[:, 0]], dim=-1).view(b, 2, 3)
# Note that a rotation will preserve distances only if height = width. Otherwise there's
# resizing going on. e.g. rotation of pi/2 of a 100x200 image will make what's in the center of the image
# elongated.
grid = torch.nn.functional.affine_grid(transformation, size=x.shape, align_corners=False)
warped_x = torch.nn.functional.grid_sample(x, grid.float(), mode=mode, padding_mode='zeros', align_corners=False)
return warped_x
def cumulative_warp_features(x, flow, mode='nearest', spatial_extent=None):
""" Warps a sequence of feature maps by accumulating incremental 2d flow.
x[:, -1] remains unchanged
x[:, -2] is warped using flow[:, -2]
x[:, -3] is warped using flow[:, -3] @ flow[:, -2]
...
x[:, 0] is warped using flow[:, 0] @ ... @ flow[:, -3] @ flow[:, -2]
Args:
x: (b, t, c, h, w) sequence of feature maps
flow: (b, t, 6) sequence of 6 DoF pose
from t to t+1 (only uses the xy portion)
"""
sequence_length = x.shape[1]
if sequence_length == 1:
return x
flow = pose_vec2mat(flow)
out = [x[:, -1]]
cum_flow = flow[:, -2]
for t in reversed(range(sequence_length - 1)):
out.append(warp_features(x[:, t], mat2pose_vec(cum_flow), mode=mode, spatial_extent=spatial_extent))
# @ is the equivalent of torch.bmm
cum_flow = flow[:, t - 1] @ cum_flow
return torch.stack(out[::-1], 1)
def cumulative_warp_features_reverse(x, flow, mode='nearest', spatial_extent=None):
""" Warps a sequence of feature maps by accumulating incremental 2d flow.
x[:, 0] remains unchanged
x[:, 1] is warped using flow[:, 0].inverse()
x[:, 2] is warped using flow[:, 0].inverse() @ flow[:, 1].inverse()
...
Args:
x: (b, t, c, h, w) sequence of feature maps
flow: (b, t, 6) sequence of 6 DoF pose
from t to t+1 (only uses the xy portion)
"""
flow = pose_vec2mat(flow)
out = [x[:,0]]
for i in range(1, x.shape[1]):
if i==1:
cum_flow = invert_pose_matrix(flow[:, 0])
else:
cum_flow = cum_flow @ invert_pose_matrix(flow[:,i-1])
out.append( warp_features(x[:,i], mat2pose_vec(cum_flow), mode, spatial_extent=spatial_extent))
return torch.stack(out, 1)
class VoxelsSumming(torch.autograd.Function):
"""Adapted from https://github.com/nv-tlabs/lift-splat-shoot/blob/master/src/tools.py#L193"""
@staticmethod
def forward(ctx, x, geometry, ranks):
"""The features `x` and `geometry` are ranked by voxel positions."""
# Cumulative sum of all features.
x = x.cumsum(0)
# Indicates the change of voxel.
mask = torch.ones(x.shape[0], device=x.device, dtype=torch.bool)
mask[:-1] = ranks[1:] != ranks[:-1]
x, geometry = x[mask], geometry[mask]
# Calculate sum of features within a voxel.
x = torch.cat((x[:1], x[1:] - x[:-1]))
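# Taking differences of the cumulative sum at the voxel boundaries yields the per-voxel sums
# without an explicit scatter-add loop.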
ctx.save_for_backward(mask)
ctx.mark_non_differentiable(geometry)
return x, geometry
@staticmethod
def backward(ctx, grad_x, grad_geometry):
(mask,) = ctx.saved_tensors
# Since the operation is summing, we simply need to send gradient
# to all elements that were part of the summation process.
indices = torch.cumsum(mask, 0)
indices[mask] -= 1
output_grad = grad_x[indices]
return output_grad, None, None
| 10,875
| 33.526984
| 117
|
py
|
fiery
|
fiery-master/fiery/utils/instance.py
|
from typing import Tuple
import torch
import torch.nn.functional as F
import numpy as np
from scipy.optimize import linear_sum_assignment
from fiery.utils.geometry import mat2pose_vec, pose_vec2mat, warp_features
# set ignore index to 0 for vis
def convert_instance_mask_to_center_and_offset_label(instance_img, future_egomotion, num_instances, ignore_index=255,
subtract_egomotion=True, sigma=3, spatial_extent=None):
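# Builds dense supervision from the instance masks: a centreness heatmap (gaussian around each
# instance centroid), per-pixel (x, y) offsets to the instance centroid, and the future displacement
# of each instance centroid between consecutive frames (optionally compensating for egomotion).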
seq_len, h, w = instance_img.shape
center_label = torch.zeros(seq_len, 1, h, w)
offset_label = ignore_index * torch.ones(seq_len, 2, h, w)
future_displacement_label = ignore_index * torch.ones(seq_len, 2, h, w)
# x is vertical displacement, y is horizontal displacement
x, y = torch.meshgrid(torch.arange(h, dtype=torch.float), torch.arange(w, dtype=torch.float))
if subtract_egomotion:
future_egomotion_inv = mat2pose_vec(pose_vec2mat(future_egomotion).inverse())
# Compute warped instance segmentation
warped_instance_seg = {}
for t in range(1, seq_len):
warped_inst_t = warp_features(instance_img[t].unsqueeze(0).unsqueeze(1).float(),
future_egomotion_inv[t - 1].unsqueeze(0), mode='nearest',
spatial_extent=spatial_extent)
warped_instance_seg[t] = warped_inst_t[0, 0]
# Ignore id 0 which is the background
for instance_id in range(1, num_instances+1):
prev_xc = None
prev_yc = None
prev_mask = None
for t in range(seq_len):
instance_mask = (instance_img[t] == instance_id)
if instance_mask.sum() == 0:
# this instance is not in this frame
prev_xc = None
prev_yc = None
prev_mask = None
continue
xc = x[instance_mask].mean().round().long()
yc = y[instance_mask].mean().round().long()
off_x = xc - x
off_y = yc - y
g = torch.exp(-(off_x ** 2 + off_y ** 2) / sigma ** 2)
center_label[t, 0] = torch.maximum(center_label[t, 0], g)
offset_label[t, 0, instance_mask] = off_x[instance_mask]
offset_label[t, 1, instance_mask] = off_y[instance_mask]
if prev_xc is not None:
# old method
# cur_pt = torch.stack((xc, yc)).unsqueeze(0).float()
# if subtract_egomotion:
# cur_pt = warp_points(cur_pt, future_egomotion_inv[t - 1])
# cur_pt = cur_pt.squeeze(0)
warped_instance_mask = warped_instance_seg[t] == instance_id
if warped_instance_mask.sum() > 0:
warped_xc = x[warped_instance_mask].mean().round()
warped_yc = y[warped_instance_mask].mean().round()
delta_x = warped_xc - prev_xc
delta_y = warped_yc - prev_yc
future_displacement_label[t - 1, 0, prev_mask] = delta_x
future_displacement_label[t - 1, 1, prev_mask] = delta_y
prev_xc = xc
prev_yc = yc
prev_mask = instance_mask
return center_label, offset_label, future_displacement_label
def find_instance_centers(center_prediction: torch.Tensor, conf_threshold: float = 0.1, nms_kernel_size: float = 3):
assert len(center_prediction.shape) == 3
center_prediction = F.threshold(center_prediction, threshold=conf_threshold, value=-1)
nms_padding = (nms_kernel_size - 1) // 2
maxpooled_center_prediction = F.max_pool2d(
center_prediction, kernel_size=nms_kernel_size, stride=1, padding=nms_padding
)
# Filter all elements that are not the maximum (i.e. the center of the heatmap instance)
center_prediction[center_prediction != maxpooled_center_prediction] = -1
return torch.nonzero(center_prediction > 0)[:, 1:]
def group_pixels(centers: torch.Tensor, offset_predictions: torch.Tensor) -> torch.Tensor:
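# Each pixel votes for its instance centre by adding its predicted offset to its own coordinates,
# and is assigned the id (starting at 1) of the closest detected centre; the caller masks out
# background pixels afterwards using the foreground mask.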
width, height = offset_predictions.shape[-2:]
x_grid = (
torch.arange(width, dtype=offset_predictions.dtype, device=offset_predictions.device)
.view(1, width, 1)
.repeat(1, 1, height)
)
y_grid = (
torch.arange(height, dtype=offset_predictions.dtype, device=offset_predictions.device)
.view(1, 1, height)
.repeat(1, width, 1)
)
pixel_grid = torch.cat((x_grid, y_grid), dim=0)
center_locations = (pixel_grid + offset_predictions).view(2, width * height, 1).permute(2, 1, 0)
centers = centers.view(-1, 1, 2)
distances = torch.norm(centers - center_locations, dim=-1)
instance_id = torch.argmin(distances, dim=0).reshape(1, width, height) + 1
return instance_id
def get_instance_segmentation_and_centers(
center_predictions: torch.Tensor,
offset_predictions: torch.Tensor,
foreground_mask: torch.Tensor,
conf_threshold: float = 0.1,
nms_kernel_size: float = 3,
max_n_instance_centers: int = 100,
) -> Tuple[torch.Tensor, torch.Tensor]:
width, height = center_predictions.shape[-2:]
center_predictions = center_predictions.view(1, width, height)
offset_predictions = offset_predictions.view(2, width, height)
foreground_mask = foreground_mask.view(1, width, height)
centers = find_instance_centers(center_predictions, conf_threshold=conf_threshold, nms_kernel_size=nms_kernel_size)
if not len(centers):
return torch.zeros(center_predictions.shape, dtype=torch.int64, device=center_predictions.device), \
torch.zeros((0, 2), device=centers.device)
if len(centers) > max_n_instance_centers:
print(f'There are a lot of detected instance centers: {centers.shape}')
centers = centers[:max_n_instance_centers].clone()
instance_ids = group_pixels(centers, offset_predictions)
instance_seg = (instance_ids * foreground_mask.float()).long()
# Make the indices of instance_seg consecutive
instance_seg = make_instance_seg_consecutive(instance_seg)
return instance_seg.long(), centers
def update_instance_ids(instance_seg, old_ids, new_ids):
"""
Parameters
----------
instance_seg: torch.Tensor arbitrary shape
old_ids: 1D tensor containing the list of old ids, must be all present in instance_seg.
new_ids: 1D tensor with the new ids, aligned with old_ids
Returns
new_instance_seg: torch.Tensor same shape as instance_seg with new ids
"""
indices = torch.arange(old_ids.max() + 1, device=instance_seg.device)
for old_id, new_id in zip(old_ids, new_ids):
indices[old_id] = new_id
return indices[instance_seg].long()
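# --- Added illustrative sketch (not part of the original fiery code) ---
# Relabelling example: ids {0, 2, 5} are remapped to {0, 1, 2} through the lookup
# table built above; every id occurring in instance_seg must appear in old_ids.
def _example_update_instance_ids():
    seg = torch.tensor([[0, 2], [2, 5]])
    old_ids = torch.tensor([0, 2, 5])
    new_ids = torch.tensor([0, 1, 2])
    assert update_instance_ids(seg, old_ids, new_ids).tolist() == [[0, 1], [1, 2]]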
def make_instance_seg_consecutive(instance_seg):
# Make the indices of instance_seg consecutive
unique_ids = torch.unique(instance_seg)
new_ids = torch.arange(len(unique_ids), device=instance_seg.device)
instance_seg = update_instance_ids(instance_seg, unique_ids, new_ids)
return instance_seg
def make_instance_id_temporally_consistent(pred_inst, future_flow, matching_threshold=3.0):
"""
Parameters
----------
pred_inst: torch.Tensor (1, seq_len, h, w)
future_flow: torch.Tensor(1, seq_len, 2, h, w)
matching_threshold: distance threshold for a match to be valid.
Returns
-------
consistent_instance_seg: torch.Tensor(1, seq_len, h, w)
1. time t. Loop over all detected instances. Use flow to compute new centers at time t+1.
2. Store those centers
3. time t+1. Re-identify instances by comparing position of actual centers, and flow-warped centers.
Make the labels at t+1 consistent with the matching
4. Repeat
"""
assert pred_inst.shape[0] == 1, 'Assumes batch size = 1'
# Initialise instance segmentations with prediction corresponding to the present
consistent_instance_seg = [pred_inst[0, 0]]
largest_instance_id = consistent_instance_seg[0].max().item()
_, seq_len, h, w = pred_inst.shape
device = pred_inst.device
for t in range(seq_len - 1):
# Compute predicted future instance means
grid = torch.stack(torch.meshgrid(
torch.arange(h, dtype=torch.float, device=device), torch.arange(w, dtype=torch.float, device=device)
))
# Add future flow
grid = grid + future_flow[0, t]
warped_centers = []
# Go through all ids, except the background
t_instance_ids = torch.unique(consistent_instance_seg[-1])[1:].cpu().numpy()
if len(t_instance_ids) == 0:
# No instance so nothing to update
consistent_instance_seg.append(pred_inst[0, t + 1])
continue
for instance_id in t_instance_ids:
instance_mask = (consistent_instance_seg[-1] == instance_id)
warped_centers.append(grid[:, instance_mask].mean(dim=1))
warped_centers = torch.stack(warped_centers)
# Compute actual future instance means
centers = []
grid = torch.stack(torch.meshgrid(
torch.arange(h, dtype=torch.float, device=device), torch.arange(w, dtype=torch.float, device=device)
))
n_instances = int(pred_inst[0, t + 1].max().item())
if n_instances == 0:
# No instance, so nothing to update.
consistent_instance_seg.append(pred_inst[0, t + 1])
continue
for instance_id in range(1, n_instances + 1):
instance_mask = (pred_inst[0, t + 1] == instance_id)
centers.append(grid[:, instance_mask].mean(dim=1))
centers = torch.stack(centers)
# Compute distance matrix between warped centers and actual centers
distances = torch.norm(centers.unsqueeze(0) - warped_centers.unsqueeze(1), dim=-1).cpu().numpy()
# outputs (row, col) with row: index in frame t, col: index in frame t+1
# the missing ids in col must be added (correspond to new instances)
ids_t, ids_t_one = linear_sum_assignment(distances)
matching_distances = distances[ids_t, ids_t_one]
# Offset by one as id=0 is the background
ids_t += 1
ids_t_one += 1
# swap ids_t with real ids. as those ids correspond to the position in the distance matrix.
id_mapping = dict(zip(np.arange(1, len(t_instance_ids) + 1), t_instance_ids))
ids_t = np.vectorize(id_mapping.__getitem__, otypes=[np.int64])(ids_t)
# Filter low quality match
ids_t = ids_t[matching_distances < matching_threshold]
ids_t_one = ids_t_one[matching_distances < matching_threshold]
# Elements that are in t+1, but weren't matched
remaining_ids = set(torch.unique(pred_inst[0, t + 1]).cpu().numpy()).difference(set(ids_t_one))
# remove background
remaining_ids.remove(0)
# Set remaining_ids to a new unique id
for remaining_id in list(remaining_ids):
largest_instance_id += 1
ids_t = np.append(ids_t, largest_instance_id)
ids_t_one = np.append(ids_t_one, remaining_id)
consistent_instance_seg.append(update_instance_ids(pred_inst[0, t + 1], old_ids=ids_t_one, new_ids=ids_t))
consistent_instance_seg = torch.stack(consistent_instance_seg).unsqueeze(0)
return consistent_instance_seg
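# --- Added illustrative sketch (not part of the original fiery code) ---
# A single static 2x2 instance with zero flow: the id assigned at t=0 should simply be
# propagated to t=1. Assumes torch, numpy (np) and scipy's linear_sum_assignment are
# imported at the top of this module, as the function above already requires.
def _example_make_instance_id_temporally_consistent():
    pred_inst = torch.zeros(1, 2, 6, 6, dtype=torch.long)
    pred_inst[0, :, 2:4, 2:4] = 1
    future_flow = torch.zeros(1, 2, 2, 6, 6)
    out = make_instance_id_temporally_consistent(pred_inst, future_flow)
    assert out.shape == (1, 2, 6, 6)
    # The instance keeps the same id in both frames.
    assert torch.equal(out[0, 0], out[0, 1])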
def predict_instance_segmentation_and_trajectories(
output, compute_matched_centers=False, make_consistent=True, vehicles_id=1,
):
preds = output['segmentation'].detach()
preds = torch.argmax(preds, dim=2, keepdims=True)
foreground_masks = preds.squeeze(2) == vehicles_id
batch_size, seq_len = preds.shape[:2]
pred_inst = []
for b in range(batch_size):
pred_inst_batch = []
for t in range(seq_len):
pred_instance_t, _ = get_instance_segmentation_and_centers(
output['instance_center'][b, t].detach(),
output['instance_offset'][b, t].detach(),
foreground_masks[b, t].detach()
)
pred_inst_batch.append(pred_instance_t)
pred_inst.append(torch.stack(pred_inst_batch, dim=0))
pred_inst = torch.stack(pred_inst).squeeze(2)
if make_consistent:
if output['instance_flow'] is None:
            print("Using zero flow because output['instance_flow'] is None")
output['instance_flow'] = torch.zeros_like(output['instance_offset'])
consistent_instance_seg = []
for b in range(batch_size):
consistent_instance_seg.append(
make_instance_id_temporally_consistent(pred_inst[b:b+1],
output['instance_flow'][b:b+1].detach())
)
consistent_instance_seg = torch.cat(consistent_instance_seg, dim=0)
else:
consistent_instance_seg = pred_inst
if compute_matched_centers:
assert batch_size == 1
# Generate trajectories
matched_centers = {}
_, seq_len, h, w = consistent_instance_seg.shape
grid = torch.stack(torch.meshgrid(
torch.arange(h, dtype=torch.float, device=preds.device),
torch.arange(w, dtype=torch.float, device=preds.device)
))
for instance_id in torch.unique(consistent_instance_seg[0, 0])[1:].cpu().numpy():
for t in range(seq_len):
instance_mask = consistent_instance_seg[0, t] == instance_id
if instance_mask.sum() > 0:
matched_centers[instance_id] = matched_centers.get(instance_id, []) + [
grid[:, instance_mask].mean(dim=-1)]
for key, value in matched_centers.items():
matched_centers[key] = torch.stack(value).cpu().numpy()[:, ::-1]
return consistent_instance_seg, matched_centers
return consistent_instance_seg
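# --- Added illustrative sketch (not part of the original fiery code) ---
# Hypothetical smoke test on an all-background prediction. The tensor shapes are
# assumptions inferred from how the keys are indexed above: 'segmentation'
# (b, s, classes, h, w), 'instance_center' (b, s, 1, h, w), 'instance_offset' and
# 'instance_flow' (b, s, 2, h, w).
def _example_predict_instance_segmentation_and_trajectories():
    b, s, h, w = 1, 2, 8, 8
    output = {
        'segmentation': torch.zeros(b, s, 2, h, w),
        'instance_center': torch.zeros(b, s, 1, h, w),
        'instance_offset': torch.zeros(b, s, 2, h, w),
        'instance_flow': torch.zeros(b, s, 2, h, w),
    }
    consistent_instance_seg = predict_instance_segmentation_and_trajectories(output)
    # No foreground pixels, so the result is an all-zero (background) segmentation.
    assert consistent_instance_seg.shape == (b, s, h, w)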
| 13,871
| 40.657658
| 119
|
py
|
fiery
|
fiery-master/fiery/utils/lyft_splits.py
|
TRAIN_LYFT_INDICES = [1, 3, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 16,
17, 18, 19, 20, 21, 23, 24, 27, 28, 29, 30, 31, 32,
33, 35, 36, 37, 39, 41, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 55, 56, 59, 60, 62, 63, 65, 68, 69,
70, 71, 72, 73, 74, 75, 76, 78, 79, 81, 82, 83, 84,
86, 87, 88, 89, 93, 95, 97, 98, 99, 103, 104, 107, 108,
109, 110, 111, 113, 114, 115, 116, 117, 118, 119, 121, 122, 124,
127, 128, 130, 131, 132, 134, 135, 136, 137, 138, 139, 143, 144,
146, 147, 148, 149, 150, 151, 152, 153, 154, 156, 157, 158, 159,
161, 162, 165, 166, 167, 171, 172, 173, 174, 175, 176, 177, 178,
179]
VAL_LYFT_INDICES = [0, 2, 4, 13, 22, 25, 26, 34, 38, 40, 42, 54, 57,
58, 61, 64, 66, 67, 77, 80, 85, 90, 91, 92, 94, 96,
100, 101, 102, 105, 106, 112, 120, 123, 125, 126, 129, 133, 140,
141, 142, 145, 155, 160, 163, 164, 168, 169, 170]
| 1,113
| 64.529412
| 86
|
py
|
public-apis
|
public-apis-master/scripts/validate/format.py
|
# -*- coding: utf-8 -*-
import re
import sys
from string import punctuation
from typing import List, Tuple, Dict
# Temporary replacement
# Descriptions that end with '()' will need to be adapted to the new policy later
punctuation = punctuation.replace('()', '')
anchor = '###'
auth_keys = ['apiKey', 'OAuth', 'X-Mashape-Key', 'User-Agent', 'No']
https_keys = ['Yes', 'No']
cors_keys = ['Yes', 'No', 'Unknown']
index_title = 0
index_desc = 1
index_auth = 2
index_https = 3
index_cors = 4
num_segments = 5
min_entries_per_category = 3
max_description_length = 100
anchor_re = re.compile(anchor + r'\s(.+)')
category_title_in_index_re = re.compile(r'\*\s\[(.*)\]')
link_re = re.compile(r'\[(.+)\]\((http.*)\)')
# Type aliases
APIList = List[str]
Categories = Dict[str, APIList]
CategoriesLineNumber = Dict[str, int]
def error_message(line_number: int, message: str) -> str:
line = line_number + 1
return f'(L{line:03d}) {message}'
def get_categories_content(contents: List[str]) -> Tuple[Categories, CategoriesLineNumber]:
categories = {}
category_line_num = {}
for line_num, line_content in enumerate(contents):
if line_content.startswith(anchor):
category = line_content.split(anchor)[1].strip()
categories[category] = []
category_line_num[category] = line_num
continue
if not line_content.startswith('|') or line_content.startswith('|---'):
continue
raw_title = [
raw_content.strip() for raw_content in line_content.split('|')[1:-1]
][0]
title_match = link_re.match(raw_title)
if title_match:
title = title_match.group(1).upper()
categories[category].append(title)
return (categories, category_line_num)
def check_alphabetical_order(lines: List[str]) -> List[str]:
err_msgs = []
categories, category_line_num = get_categories_content(contents=lines)
for category, api_list in categories.items():
if sorted(api_list) != api_list:
err_msg = error_message(
category_line_num[category],
                f'{category} category is not in alphabetical order'
)
err_msgs.append(err_msg)
return err_msgs
def check_title(line_num: int, raw_title: str) -> List[str]:
err_msgs = []
title_match = link_re.match(raw_title)
# url should be wrapped in "[TITLE](LINK)" Markdown syntax
if not title_match:
err_msg = error_message(line_num, 'Title syntax should be "[TITLE](LINK)"')
err_msgs.append(err_msg)
else:
# do not allow "... API" in the entry title
title = title_match.group(1)
if title.upper().endswith(' API'):
err_msg = error_message(line_num, 'Title should not end with "... API". Every entry is an API here!')
err_msgs.append(err_msg)
return err_msgs
def check_description(line_num: int, description: str) -> List[str]:
err_msgs = []
first_char = description[0]
if first_char.upper() != first_char:
err_msg = error_message(line_num, 'first character of description is not capitalized')
err_msgs.append(err_msg)
last_char = description[-1]
if last_char in punctuation:
err_msg = error_message(line_num, f'description should not end with {last_char}')
err_msgs.append(err_msg)
desc_length = len(description)
if desc_length > max_description_length:
err_msg = error_message(line_num, f'description should not exceed {max_description_length} characters (currently {desc_length})')
err_msgs.append(err_msg)
return err_msgs
def check_auth(line_num: int, auth: str) -> List[str]:
err_msgs = []
backtick = '`'
if auth != 'No' and (not auth.startswith(backtick) or not auth.endswith(backtick)):
err_msg = error_message(line_num, 'auth value is not enclosed with `backticks`')
err_msgs.append(err_msg)
if auth.replace(backtick, '') not in auth_keys:
err_msg = error_message(line_num, f'{auth} is not a valid Auth option')
err_msgs.append(err_msg)
return err_msgs
def check_https(line_num: int, https: str) -> List[str]:
err_msgs = []
if https not in https_keys:
err_msg = error_message(line_num, f'{https} is not a valid HTTPS option')
err_msgs.append(err_msg)
return err_msgs
def check_cors(line_num: int, cors: str) -> List[str]:
err_msgs = []
if cors not in cors_keys:
err_msg = error_message(line_num, f'{cors} is not a valid CORS option')
err_msgs.append(err_msg)
return err_msgs
def check_entry(line_num: int, segments: List[str]) -> List[str]:
raw_title = segments[index_title]
description = segments[index_desc]
auth = segments[index_auth]
https = segments[index_https]
cors = segments[index_cors]
title_err_msgs = check_title(line_num, raw_title)
desc_err_msgs = check_description(line_num, description)
auth_err_msgs = check_auth(line_num, auth)
https_err_msgs = check_https(line_num, https)
cors_err_msgs = check_cors(line_num, cors)
err_msgs = [
*title_err_msgs,
*desc_err_msgs,
*auth_err_msgs,
*https_err_msgs,
*cors_err_msgs
]
return err_msgs
def check_file_format(lines: List[str]) -> List[str]:
err_msgs = []
category_title_in_index = []
alphabetical_err_msgs = check_alphabetical_order(lines)
err_msgs.extend(alphabetical_err_msgs)
num_in_category = min_entries_per_category + 1
category = ''
category_line = 0
for line_num, line_content in enumerate(lines):
category_title_match = category_title_in_index_re.match(line_content)
if category_title_match:
category_title_in_index.append(category_title_match.group(1))
# check each category for the minimum number of entries
if line_content.startswith(anchor):
category_match = anchor_re.match(line_content)
if category_match:
if category_match.group(1) not in category_title_in_index:
err_msg = error_message(line_num, f'category header ({category_match.group(1)}) not added to Index section')
err_msgs.append(err_msg)
else:
err_msg = error_message(line_num, 'category header is not formatted correctly')
err_msgs.append(err_msg)
if num_in_category < min_entries_per_category:
err_msg = error_message(category_line, f'{category} category does not have the minimum {min_entries_per_category} entries (only has {num_in_category})')
err_msgs.append(err_msg)
            category = line_content.split(anchor)[1].strip()
category_line = line_num
num_in_category = 0
continue
# skips lines that we do not care about
if not line_content.startswith('|') or line_content.startswith('|---'):
continue
num_in_category += 1
segments = line_content.split('|')[1:-1]
if len(segments) < num_segments:
err_msg = error_message(line_num, f'entry does not have all the required columns (have {len(segments)}, need {num_segments})')
err_msgs.append(err_msg)
continue
for segment in segments:
# every line segment should start and end with exactly 1 space
if len(segment) - len(segment.lstrip()) != 1 or len(segment) - len(segment.rstrip()) != 1:
err_msg = error_message(line_num, 'each segment must start and end with exactly 1 space')
err_msgs.append(err_msg)
segments = [segment.strip() for segment in segments]
entry_err_msgs = check_entry(line_num, segments)
err_msgs.extend(entry_err_msgs)
return err_msgs
def main(filename: str) -> None:
with open(filename, mode='r', encoding='utf-8') as file:
lines = list(line.rstrip() for line in file)
file_format_err_msgs = check_file_format(lines)
if file_format_err_msgs:
for err_msg in file_format_err_msgs:
print(err_msg)
sys.exit(1)
if __name__ == '__main__':
num_args = len(sys.argv)
if num_args < 2:
print('No .md file passed (file should contain Markdown table syntax)')
sys.exit(1)
filename = sys.argv[1]
main(filename)
| 8,464
| 29.44964
| 168
|
py
|
public-apis
|
public-apis-master/scripts/validate/__init__.py
|
# -*- coding: utf-8 -*-
from validate import format
from validate import links
| 80
| 15.2
| 27
|
py
|
public-apis
|
public-apis-master/scripts/validate/links.py
|
# -*- coding: utf-8 -*-
import re
import sys
import random
from typing import List, Tuple
import requests
from requests.models import Response
def find_links_in_text(text: str) -> List[str]:
"""Find links in a text and return a list of URLs."""
link_pattern = re.compile(r'((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'\".,<>?«»“”‘’]))')
raw_links = re.findall(link_pattern, text)
links = [
str(raw_link[0]) for raw_link in raw_links
]
return links
def find_links_in_file(filename: str) -> List[str]:
"""Find links in a file and return a list of URLs from text file."""
with open(filename, mode='r', encoding='utf-8') as file:
readme = file.read()
index_section = readme.find('## Index')
if index_section == -1:
index_section = 0
content = readme[index_section:]
links = find_links_in_text(content)
return links
def check_duplicate_links(links: List[str]) -> Tuple[bool, List]:
"""Check for duplicated links.
Returns a tuple with True or False and duplicate list.
"""
seen = {}
duplicates = []
has_duplicate = False
for link in links:
link = link.rstrip('/')
if link not in seen:
seen[link] = 1
else:
if seen[link] == 1:
duplicates.append(link)
if duplicates:
has_duplicate = True
return (has_duplicate, duplicates)
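# --- Added illustrative sketch (not part of the original script) ---
# Trailing slashes are stripped before comparison, so the first two links below are
# counted as one duplicate pair.
def _example_check_duplicate_links():
    links = ['https://a.dev', 'https://a.dev/', 'https://b.dev']
    has_duplicate, duplicates = check_duplicate_links(links)
    assert has_duplicate and duplicates == ['https://a.dev']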
def fake_user_agent() -> str:
"""Faking user agent as some hosting services block not-whitelisted UA."""
user_agents = [
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1467.0 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko)',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
]
return random.choice(user_agents)
def get_host_from_link(link: str) -> str:
host = link.split('://', 1)[1] if '://' in link else link
# Remove routes, arguments and anchors
if '/' in host:
host = host.split('/', 1)[0]
elif '?' in host:
host = host.split('?', 1)[0]
elif '#' in host:
host = host.split('#', 1)[0]
return host
def has_cloudflare_protection(resp: Response) -> bool:
"""Checks if there is any cloudflare protection in the response.
Cloudflare implements multiple network protections on a given link,
this script tries to detect if any of them exist in the response from request.
Common protections have the following HTTP code as a response:
- 403: When host header is missing or incorrect (and more)
- 503: When DDOS protection exists
See more about it at:
- https://support.cloudflare.com/hc/en-us/articles/115003014512-4xx-Client-Error
- https://support.cloudflare.com/hc/en-us/articles/115003011431-Troubleshooting-Cloudflare-5XX-errors
- https://www.cloudflare.com/ddos/
- https://superuser.com/a/888526
Discussions in issues and pull requests:
- https://github.com/public-apis/public-apis/pull/2409
- https://github.com/public-apis/public-apis/issues/2960
"""
code = resp.status_code
server = resp.headers.get('Server') or resp.headers.get('server')
cloudflare_flags = [
'403 Forbidden',
'cloudflare',
'Cloudflare',
'Security check',
'Please Wait... | Cloudflare',
'We are checking your browser...',
'Please stand by, while we are checking your browser...',
'Checking your browser before accessing',
'This process is automatic.',
'Your browser will redirect to your requested content shortly.',
'Please allow up to 5 seconds',
'DDoS protection by',
'Ray ID:',
'Cloudflare Ray ID:',
'_cf_chl',
'_cf_chl_opt',
'__cf_chl_rt_tk',
'cf-spinner-please-wait',
'cf-spinner-redirecting'
]
if code in [403, 503] and server == 'cloudflare':
html = resp.text
flags_found = [flag in html for flag in cloudflare_flags]
any_flag_found = any(flags_found)
if any_flag_found:
return True
return False
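# --- Added illustrative sketch (not part of the original script) ---
# A minimal fake response object (mirroring the FakeResponse helper used in the test
# suite) showing when the Cloudflare heuristics above fire.
class _ExampleResponse:
    def __init__(self, status_code, headers, text):
        self.status_code = status_code
        self.headers = headers
        self.text = text
def _example_has_cloudflare_protection():
    blocked = _ExampleResponse(503, {'Server': 'cloudflare'}, 'DDoS protection by Cloudflare, Ray ID: abc')
    plain = _ExampleResponse(404, {'Server': 'nginx'}, 'Not found')
    assert has_cloudflare_protection(blocked) is True
    assert has_cloudflare_protection(plain) is False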
def check_if_link_is_working(link: str) -> Tuple[bool, str]:
"""Checks if a link is working.
If an error is identified when the request for the link occurs,
the return will be a tuple with the first value True and the second
value a string containing the error message.
If no errors are identified, the return will be a tuple with the
first value False and the second an empty string.
"""
has_error = False
error_message = ''
try:
resp = requests.get(link, timeout=25, headers={
'User-Agent': fake_user_agent(),
'host': get_host_from_link(link)
})
code = resp.status_code
if code >= 400 and not has_cloudflare_protection(resp):
has_error = True
error_message = f'ERR:CLT: {code} : {link}'
except requests.exceptions.SSLError as error:
has_error = True
error_message = f'ERR:SSL: {error} : {link}'
except requests.exceptions.ConnectionError as error:
has_error = True
error_message = f'ERR:CNT: {error} : {link}'
except (TimeoutError, requests.exceptions.ConnectTimeout):
has_error = True
error_message = f'ERR:TMO: {link}'
except requests.exceptions.TooManyRedirects as error:
has_error = True
error_message = f'ERR:TMR: {error} : {link}'
except (Exception, requests.exceptions.RequestException) as error:
has_error = True
error_message = f'ERR:UKN: {error} : {link}'
return (has_error, error_message)
def check_if_list_of_links_are_working(list_of_links: List[str]) -> List[str]:
error_messages = []
for link in list_of_links:
has_error, error_message = check_if_link_is_working(link)
if has_error:
error_messages.append(error_message)
return error_messages
def start_duplicate_links_checker(links: List[str]) -> None:
print('Checking for duplicate links...')
has_duplicate_link, duplicates_links = check_duplicate_links(links)
if has_duplicate_link:
        print('Found duplicate links:')
for duplicate_link in duplicates_links:
print(duplicate_link)
sys.exit(1)
else:
print('No duplicate links.')
def start_links_working_checker(links: List[str]) -> None:
print(f'Checking if {len(links)} links are working...')
errors = check_if_list_of_links_are_working(links)
if errors:
num_errors = len(errors)
print(f'Apparently {num_errors} links are not working properly. See in:')
for error_message in errors:
print(error_message)
sys.exit(1)
def main(filename: str, only_duplicate_links_checker: bool) -> None:
links = find_links_in_file(filename)
start_duplicate_links_checker(links)
if not only_duplicate_links_checker:
start_links_working_checker(links)
if __name__ == '__main__':
num_args = len(sys.argv)
only_duplicate_links_checker = False
if num_args < 2:
print('No .md file passed')
sys.exit(1)
elif num_args == 3:
third_arg = sys.argv[2].lower()
if third_arg == '-odlc' or third_arg == '--only_duplicate_links_checker':
only_duplicate_links_checker = True
else:
            print(f'Invalid third argument. Usage: python {__file__} [-odlc | --only_duplicate_links_checker]')
sys.exit(1)
filename = sys.argv[1]
main(filename, only_duplicate_links_checker)
| 8,022
| 28.281022
| 211
|
py
|
public-apis
|
public-apis-master/scripts/tests/test_validate_links.py
|
# -*- coding: utf-8 -*-
import unittest
from validate.links import find_links_in_text
from validate.links import check_duplicate_links
from validate.links import fake_user_agent
from validate.links import get_host_from_link
from validate.links import has_cloudflare_protection
class FakeResponse():
def __init__(self, code: int, headers: dict, text: str) -> None:
self.status_code = code
self.headers = headers
self.text = text
class TestValidateLinks(unittest.TestCase):
def setUp(self):
self.duplicate_links = [
'https://www.example.com',
'https://www.example.com',
'https://www.example.com',
'https://www.anotherexample.com',
]
self.no_duplicate_links = [
'https://www.firstexample.com',
'https://www.secondexample.com',
'https://www.anotherexample.com',
]
self.code_200 = 200
self.code_403 = 403
self.code_503 = 503
self.cloudflare_headers = {'Server': 'cloudflare'}
self.no_cloudflare_headers = {'Server': 'google'}
self.text_with_cloudflare_flags = '403 Forbidden Cloudflare We are checking your browser...'
self.text_without_cloudflare_flags = 'Lorem Ipsum'
def test_find_link_in_text(self):
text = """
# this is valid
        http://example.com?param1=1&param2=2#anchor
        https://www.example.com?param1=1&param2=2#anchor
        https://www.example.com.br
        https://www.example.com.gov.br
        [Example](https://www.example.com?param1=1&param2=2#anchor)
        lorem ipsum https://www.example.com?param1=1&param2=2#anchor
        https://www.example.com?param1=1&param2=2#anchor lorem ipsum
# this not is valid
example.com
https:example.com
https:/example.com
https//example.com
https//.com
"""
links = find_links_in_text(text)
self.assertIsInstance(links, list)
self.assertEqual(len(links), 7)
for link in links:
with self.subTest():
self.assertIsInstance(link, str)
def test_find_link_in_text_with_invalid_argument(self):
with self.assertRaises(TypeError):
find_links_in_text()
find_links_in_text(1)
find_links_in_text(True)
def test_if_check_duplicate_links_has_the_correct_return(self):
result_1 = check_duplicate_links(self.duplicate_links)
result_2 = check_duplicate_links(self.no_duplicate_links)
self.assertIsInstance(result_1, tuple)
self.assertIsInstance(result_2, tuple)
has_duplicate_links, links = result_1
no_duplicate_links, no_links = result_2
self.assertTrue(has_duplicate_links)
self.assertFalse(no_duplicate_links)
self.assertIsInstance(links, list)
self.assertIsInstance(no_links, list)
self.assertEqual(len(links), 2)
self.assertEqual(len(no_links), 0)
def test_if_fake_user_agent_has_a_str_as_return(self):
user_agent = fake_user_agent()
self.assertIsInstance(user_agent, str)
def test_get_host_from_link(self):
links = [
'example.com',
'https://example.com',
'https://www.example.com',
'https://www.example.com.br',
'https://www.example.com/route',
'https://www.example.com?p=1&q=2',
'https://www.example.com#anchor'
]
for link in links:
host = get_host_from_link(link)
with self.subTest():
self.assertIsInstance(host, str)
self.assertNotIn('://', host)
self.assertNotIn('/', host)
self.assertNotIn('?', host)
self.assertNotIn('#', host)
with self.assertRaises(TypeError):
get_host_from_link()
def test_has_cloudflare_protection_with_code_403_and_503_in_response(self):
resp_with_cloudflare_protection_code_403 = FakeResponse(
code=self.code_403,
headers=self.cloudflare_headers,
text=self.text_with_cloudflare_flags
)
resp_with_cloudflare_protection_code_503 = FakeResponse(
code=self.code_503,
headers=self.cloudflare_headers,
text=self.text_with_cloudflare_flags
)
result1 = has_cloudflare_protection(resp_with_cloudflare_protection_code_403)
result2 = has_cloudflare_protection(resp_with_cloudflare_protection_code_503)
self.assertTrue(result1)
self.assertTrue(result2)
def test_has_cloudflare_protection_when_there_is_no_protection(self):
resp_without_cloudflare_protection1 = FakeResponse(
code=self.code_200,
headers=self.no_cloudflare_headers,
text=self.text_without_cloudflare_flags
)
resp_without_cloudflare_protection2 = FakeResponse(
code=self.code_403,
headers=self.no_cloudflare_headers,
text=self.text_without_cloudflare_flags
)
resp_without_cloudflare_protection3 = FakeResponse(
code=self.code_503,
headers=self.no_cloudflare_headers,
text=self.text_without_cloudflare_flags
)
result1 = has_cloudflare_protection(resp_without_cloudflare_protection1)
result2 = has_cloudflare_protection(resp_without_cloudflare_protection2)
result3 = has_cloudflare_protection(resp_without_cloudflare_protection3)
self.assertFalse(result1)
self.assertFalse(result2)
self.assertFalse(result3)
| 5,725
| 32.098266
| 100
|
py
|
public-apis
|
public-apis-master/scripts/tests/__init__.py
|
# -*- coding: utf-8 -*-
| 24
| 11.5
| 23
|
py
|
public-apis
|
public-apis-master/scripts/tests/test_validate_format.py
|
# -*- coding: utf-8 -*-
import unittest
from validate.format import error_message
from validate.format import get_categories_content
from validate.format import check_alphabetical_order
from validate.format import check_title
from validate.format import check_description, max_description_length
from validate.format import check_auth, auth_keys
from validate.format import check_https, https_keys
from validate.format import check_cors, cors_keys
from validate.format import check_entry
from validate.format import check_file_format, min_entries_per_category, num_segments
class TestValidateFormat(unittest.TestCase):
def test_error_message_return_and_return_type(self):
line_num_unity = 1
line_num_ten = 10
line_num_hundred = 100
line_num_thousand = 1000
msg = 'This is a unit test'
err_msg_unity = error_message(line_num_unity, msg)
err_msg_ten = error_message(line_num_ten, msg)
err_msg_hundred = error_message(line_num_hundred, msg)
err_msg_thousand = error_message(line_num_thousand, msg)
self.assertIsInstance(err_msg_unity, str)
self.assertIsInstance(err_msg_ten, str)
self.assertIsInstance(err_msg_hundred, str)
self.assertIsInstance(err_msg_thousand, str)
self.assertEqual(err_msg_unity, '(L002) This is a unit test')
self.assertEqual(err_msg_ten, '(L011) This is a unit test')
self.assertEqual(err_msg_hundred, '(L101) This is a unit test')
self.assertEqual(err_msg_thousand, '(L1001) This is a unit test')
def test_if_get_categories_content_return_correct_data_of_categories(self):
fake_contents = [
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'',
'### B',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [BA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [BB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |'
]
result = get_categories_content(fake_contents)
self.assertIsInstance(result, tuple)
categories, category_line_num = result
self.assertIsInstance(categories, dict)
self.assertIsInstance(category_line_num, dict)
expected_result = ({'A': ['AA', 'AB'], 'B': ['BA', 'BB']}, {'A': 0, 'B': 6})
for res, ex_res in zip(result, expected_result):
with self.subTest():
self.assertEqual(res, ex_res)
def test_if_check_alphabetical_order_return_correct_msg_error(self):
correct_lines = [
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'',
'### B',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [BA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [BB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |'
]
incorrect_lines = [
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'',
'### B',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [BB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [BA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |'
]
err_msgs_1 = check_alphabetical_order(correct_lines)
err_msgs_2 = check_alphabetical_order(incorrect_lines)
self.assertIsInstance(err_msgs_1, list)
self.assertIsInstance(err_msgs_2, list)
self.assertEqual(len(err_msgs_1), 0)
self.assertEqual(len(err_msgs_2), 2)
expected_err_msgs = [
            '(L001) A category is not in alphabetical order',
            '(L007) B category is not in alphabetical order'
]
for err_msg, ex_err_msg in zip(err_msgs_2, expected_err_msgs):
with self.subTest():
self.assertEqual(err_msg, ex_err_msg)
def test_check_title_with_correct_title(self):
raw_title = '[A](https://www.ex.com)'
err_msgs = check_title(0, raw_title)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 0)
self.assertEqual(err_msgs, [])
def test_check_title_with_markdown_syntax_incorrect(self):
raw_title = '[A(https://www.ex.com)'
err_msgs = check_title(0, raw_title)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = '(L001) Title syntax should be "[TITLE](LINK)"'
self.assertEqual(err_msg, expected_err_msg)
def test_check_title_with_api_at_the_end_of_the_title(self):
raw_title = '[A API](https://www.ex.com)'
err_msgs = check_title(0, raw_title)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = '(L001) Title should not end with "... API". Every entry is an API here!'
self.assertEqual(err_msg, expected_err_msg)
def test_check_description_with_correct_description(self):
desc = 'This is a fake description'
err_msgs = check_description(0, desc)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 0)
self.assertEqual(err_msgs, [])
def test_check_description_with_first_char_is_not_capitalized(self):
desc = 'this is a fake description'
err_msgs = check_description(0, desc)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = '(L001) first character of description is not capitalized'
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msg, expected_err_msg)
def test_check_description_with_punctuation_in_the_end(self):
base_desc = 'This is a fake description'
punctuation = r"""!"#$%&'*+,-./:;<=>?@[\]^_`{|}~"""
desc_with_punc = [base_desc + punc for punc in punctuation]
for desc in desc_with_punc:
with self.subTest():
err_msgs = check_description(0, desc)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = f'(L001) description should not end with {desc[-1]}'
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msg, expected_err_msg)
def test_check_description_that_exceeds_the_character_limit(self):
long_desc = 'Desc' * max_description_length
long_desc_length = len(long_desc)
err_msgs = check_description(0, long_desc)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = f'(L001) description should not exceed {max_description_length} characters (currently {long_desc_length})'
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msg, expected_err_msg)
def test_check_auth_with_valid_auth(self):
auth_valid = [f'`{auth}`' for auth in auth_keys if auth != 'No']
auth_valid.append('No')
for auth in auth_valid:
with self.subTest():
err_msgs = check_auth(0, auth)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 0)
self.assertEqual(err_msgs, [])
def test_check_auth_without_backtick(self):
auth_without_backtick = [auth for auth in auth_keys if auth != 'No']
for auth in auth_without_backtick:
with self.subTest():
err_msgs = check_auth(0, auth)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = '(L001) auth value is not enclosed with `backticks`'
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msg, expected_err_msg)
def test_check_auth_with_invalid_auth(self):
auth_invalid_without_backtick = ['Yes', 'yes', 'no', 'random', 'Unknown']
auth_invalid_with_backtick = ['`Yes`', '`yes`', '`no`', '`random`', '`Unknown`']
for auth in auth_invalid_without_backtick:
with self.subTest():
err_msgs = check_auth(0, auth)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 2)
err_msg_1 = err_msgs[0]
err_msg_2 = err_msgs[1]
expected_err_msg_1 = f'(L001) auth value is not enclosed with `backticks`'
expected_err_msg_2 = f'(L001) {auth} is not a valid Auth option'
self.assertIsInstance(err_msg_1, str)
self.assertIsInstance(err_msg_2, str)
self.assertEqual(err_msg_1, expected_err_msg_1)
self.assertEqual(err_msg_2, expected_err_msg_2)
for auth in auth_invalid_with_backtick:
with self.subTest():
err_msgs = check_auth(0, auth)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = f'(L001) {auth} is not a valid Auth option'
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msg, expected_err_msg)
def test_check_https_with_valid_https(self):
for https in https_keys:
with self.subTest():
err_msgs = check_https(0, https)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 0)
self.assertEqual(err_msgs, [])
def test_check_https_with_invalid_https(self):
invalid_https_keys = ['yes', 'no', 'Unknown', 'https', 'http']
for https in invalid_https_keys:
with self.subTest():
err_msgs = check_https(0, https)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = f'(L001) {https} is not a valid HTTPS option'
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msg, expected_err_msg)
def test_check_cors_with_valid_cors(self):
for cors in cors_keys:
with self.subTest():
err_msgs = check_cors(0, cors)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 0)
self.assertEqual(err_msgs, [])
def test_check_cors_with_invalid_cors(self):
invalid_cors_keys = ['yes', 'no', 'unknown', 'cors']
for cors in invalid_cors_keys:
with self.subTest():
err_msgs = check_cors(0, cors)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = f'(L001) {cors} is not a valid CORS option'
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msg, expected_err_msg)
def test_check_entry_with_correct_segments(self):
correct_segments = ['[A](https://www.ex.com)', 'Desc', '`apiKey`', 'Yes', 'Yes']
err_msgs = check_entry(0, correct_segments)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 0)
self.assertEqual(err_msgs, [])
def test_check_entry_with_incorrect_segments(self):
incorrect_segments = ['[A API](https://www.ex.com)', 'desc.', 'yes', 'yes', 'yes']
err_msgs = check_entry(0, incorrect_segments)
expected_err_msgs = [
'(L001) Title should not end with "... API". Every entry is an API here!',
'(L001) first character of description is not capitalized',
'(L001) description should not end with .',
'(L001) auth value is not enclosed with `backticks`',
'(L001) yes is not a valid Auth option',
'(L001) yes is not a valid HTTPS option',
'(L001) yes is not a valid CORS option'
]
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 7)
for err_msg in err_msgs:
with self.subTest():
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msgs, expected_err_msgs)
def test_check_file_format_with_correct_format(self):
correct_format = [
'## Index',
'* [A](#a)',
'* [B](#b)',
'',
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'',
'### B',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [BA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [BB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [BC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |'
]
err_msgs = check_file_format(lines=correct_format)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 0)
self.assertEqual(err_msgs, [])
def test_check_file_format_with_category_header_not_added_to_index(self):
incorrect_format = [
'## Index',
'',
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
]
err_msgs = check_file_format(lines=incorrect_format)
expected_err_msg = '(L003) category header (A) not added to Index section'
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
self.assertEqual(err_msg, expected_err_msg)
def test_check_file_format_with_category_without_min_entries(self):
incorrect_format = [
'## Index',
'* [A](#a)',
'* [B](#b)',
'',
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'',
'### B',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [BA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [BB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [BC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |'
]
category_with_err = 'A'
num_in_category = 1
err_msgs = check_file_format(lines=incorrect_format)
expected_err_msg = f'(L005) {category_with_err} category does not have the minimum {min_entries_per_category} entries (only has {num_in_category})'
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
self.assertEqual(err_msg, expected_err_msg)
def test_check_file_format_entry_without_all_necessary_columns(self):
incorrect_format = [
'## Index',
'* [A](#a)',
'',
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AB](https://www.ex.com) | Desc | `apiKey` |', # missing https and cors
'| [AC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
]
current_segments_num = 3
err_msgs = check_file_format(lines=incorrect_format)
expected_err_msg = f'(L008) entry does not have all the required columns (have {current_segments_num}, need {num_segments})'
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
self.assertEqual(err_msg, expected_err_msg)
def test_check_file_format_without_1_space_between_the_segments(self):
incorrect_format = [
'## Index',
'* [A](#a)',
'',
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AA](https://www.ex.com) | Desc |`apiKey`| Yes | Yes |', # space between segment of auth column missing
'| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
]
err_msgs = check_file_format(lines=incorrect_format)
expected_err_msg = f'(L007) each segment must start and end with exactly 1 space'
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
self.assertEqual(err_msg, expected_err_msg)
| 18,154
| 37.875803
| 155
|
py
|
LiDAR2INS
|
LiDAR2INS-master/eigen3/debug/gdb/printers.py
|
# -*- coding: utf-8 -*-
# This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2009 Benjamin Schindler <bschindler@inf.ethz.ch>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Pretty printers for Eigen::Matrix
# This is still pretty basic as the python extension to gdb is still pretty basic.
# It cannot handle complex eigen types and it doesn't support many of the other eigen types
# This code supports fixed size as well as dynamic size matrices
# To use it:
#
# * Create a directory and put the file as well as an empty __init__.py in
# that directory.
# * Create a ~/.gdbinit file, that contains the following:
# python
# import sys
# sys.path.insert(0, '/path/to/eigen/printer/directory')
# from printers import register_eigen_printers
# register_eigen_printers (None)
# end
import gdb
import re
import itertools
from bisect import bisect_left
# Basic row/column iteration code for use with Sparse and Dense matrices
class _MatrixEntryIterator(object):
def __init__ (self, rows, cols, rowMajor):
self.rows = rows
self.cols = cols
self.currentRow = 0
self.currentCol = 0
self.rowMajor = rowMajor
def __iter__ (self):
return self
def next(self):
return self.__next__() # Python 2.x compatibility
def __next__(self):
row = self.currentRow
col = self.currentCol
if self.rowMajor == 0:
if self.currentCol >= self.cols:
raise StopIteration
self.currentRow = self.currentRow + 1
if self.currentRow >= self.rows:
self.currentRow = 0
self.currentCol = self.currentCol + 1
else:
if self.currentRow >= self.rows:
raise StopIteration
self.currentCol = self.currentCol + 1
if self.currentCol >= self.cols:
self.currentCol = 0
self.currentRow = self.currentRow + 1
return (row, col)
class EigenMatrixPrinter:
"Print Eigen Matrix or Array of some kind"
def __init__(self, variety, val):
"Extract all the necessary information"
# Save the variety (presumably "Matrix" or "Array") for later usage
self.variety = variety
# The gdb extension does not support value template arguments - need to extract them by hand
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
self.type = type.unqualified().strip_typedefs()
tag = self.type.tag
        regex = re.compile(r'\<.*\>')
m = regex.findall(tag)[0][1:-1]
template_params = m.split(',')
template_params = [x.replace(" ", "") for x in template_params]
if template_params[1] == '-0x00000000000000001' or template_params[1] == '-0x000000001' or template_params[1] == '-1':
self.rows = val['m_storage']['m_rows']
else:
self.rows = int(template_params[1])
if template_params[2] == '-0x00000000000000001' or template_params[2] == '-0x000000001' or template_params[2] == '-1':
self.cols = val['m_storage']['m_cols']
else:
self.cols = int(template_params[2])
self.options = 0 # default value
if len(template_params) > 3:
            self.options = template_params[3]
self.rowMajor = (int(self.options) & 0x1)
self.innerType = self.type.template_argument(0)
self.val = val
# Fixed size matrices have a struct as their storage, so we need to walk through this
self.data = self.val['m_storage']['m_data']
if self.data.type.code == gdb.TYPE_CODE_STRUCT:
self.data = self.data['array']
self.data = self.data.cast(self.innerType.pointer())
class _iterator(_MatrixEntryIterator):
def __init__ (self, rows, cols, dataPtr, rowMajor):
super(EigenMatrixPrinter._iterator, self).__init__(rows, cols, rowMajor)
self.dataPtr = dataPtr
def __next__(self):
row, col = super(EigenMatrixPrinter._iterator, self).__next__()
item = self.dataPtr.dereference()
self.dataPtr = self.dataPtr + 1
if (self.cols == 1): #if it's a column vector
return ('[%d]' % (row,), item)
elif (self.rows == 1): #if it's a row vector
return ('[%d]' % (col,), item)
return ('[%d,%d]' % (row, col), item)
def children(self):
return self._iterator(self.rows, self.cols, self.data, self.rowMajor)
def to_string(self):
return "Eigen::%s<%s,%d,%d,%s> (data ptr: %s)" % (self.variety, self.innerType, self.rows, self.cols, "RowMajor" if self.rowMajor else "ColMajor", self.data)
class EigenSparseMatrixPrinter:
"Print an Eigen SparseMatrix"
def __init__(self, val):
"Extract all the necessary information"
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
self.type = type.unqualified().strip_typedefs()
tag = self.type.tag
        regex = re.compile(r'\<.*\>')
m = regex.findall(tag)[0][1:-1]
template_params = m.split(',')
template_params = [x.replace(" ", "") for x in template_params]
self.options = 0
if len(template_params) > 1:
            self.options = template_params[1]
self.rowMajor = (int(self.options) & 0x1)
self.innerType = self.type.template_argument(0)
self.val = val
self.data = self.val['m_data']
self.data = self.data.cast(self.innerType.pointer())
class _iterator(_MatrixEntryIterator):
def __init__ (self, rows, cols, val, rowMajor):
super(EigenSparseMatrixPrinter._iterator, self).__init__(rows, cols, rowMajor)
self.val = val
def __next__(self):
row, col = super(EigenSparseMatrixPrinter._iterator, self).__next__()
# repeat calculations from SparseMatrix.h:
outer = row if self.rowMajor else col
inner = col if self.rowMajor else row
start = self.val['m_outerIndex'][outer]
end = ((start + self.val['m_innerNonZeros'][outer]) if self.val['m_innerNonZeros'] else
self.val['m_outerIndex'][outer+1])
# and from CompressedStorage.h:
data = self.val['m_data']
if start >= end:
item = 0
elif (end > start) and (inner == data['m_indices'][end-1]):
item = data['m_values'][end-1]
else:
# create Python index list from the target range within m_indices
indices = [data['m_indices'][x] for x in range(int(start), int(end)-1)]
# find the index with binary search
idx = int(start) + bisect_left(indices, inner)
if ((idx < end) and (data['m_indices'][idx] == inner)):
item = data['m_values'][idx]
else:
item = 0
return ('[%d,%d]' % (row, col), item)
def children(self):
if self.data:
return self._iterator(self.rows(), self.cols(), self.val, self.rowMajor)
return iter([]) # empty matrix, for now
def rows(self):
return self.val['m_outerSize'] if self.rowMajor else self.val['m_innerSize']
def cols(self):
return self.val['m_innerSize'] if self.rowMajor else self.val['m_outerSize']
def to_string(self):
if self.data:
status = ("not compressed" if self.val['m_innerNonZeros'] else "compressed")
else:
status = "empty"
dimensions = "%d x %d" % (self.rows(), self.cols())
layout = "row" if self.rowMajor else "column"
return "Eigen::SparseMatrix<%s>, %s, %s major, %s" % (
self.innerType, dimensions, layout, status )
class EigenQuaternionPrinter:
"Print an Eigen Quaternion"
def __init__(self, val):
"Extract all the necessary information"
# The gdb extension does not support value template arguments - need to extract them by hand
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
self.type = type.unqualified().strip_typedefs()
self.innerType = self.type.template_argument(0)
self.val = val
# Quaternions have a struct as their storage, so we need to walk through this
self.data = self.val['m_coeffs']['m_storage']['m_data']['array']
self.data = self.data.cast(self.innerType.pointer())
class _iterator:
def __init__ (self, dataPtr):
self.dataPtr = dataPtr
self.currentElement = 0
self.elementNames = ['x', 'y', 'z', 'w']
def __iter__ (self):
return self
def next(self):
return self.__next__() # Python 2.x compatibility
def __next__(self):
element = self.currentElement
            if self.currentElement >= 4: #there are 4 elements in a quaternion
raise StopIteration
self.currentElement = self.currentElement + 1
item = self.dataPtr.dereference()
self.dataPtr = self.dataPtr + 1
return ('[%s]' % (self.elementNames[element],), item)
def children(self):
return self._iterator(self.data)
def to_string(self):
return "Eigen::Quaternion<%s> (data ptr: %s)" % (self.innerType, self.data)
def build_eigen_dictionary ():
pretty_printers_dict[re.compile('^Eigen::Quaternion<.*>$')] = lambda val: EigenQuaternionPrinter(val)
pretty_printers_dict[re.compile('^Eigen::Matrix<.*>$')] = lambda val: EigenMatrixPrinter("Matrix", val)
pretty_printers_dict[re.compile('^Eigen::SparseMatrix<.*>$')] = lambda val: EigenSparseMatrixPrinter(val)
pretty_printers_dict[re.compile('^Eigen::Array<.*>$')] = lambda val: EigenMatrixPrinter("Array", val)
def register_eigen_printers(obj):
"Register eigen pretty-printers with objfile Obj"
    if obj is None:
obj = gdb
obj.pretty_printers.append(lookup_function)
def lookup_function(val):
"Look-up and return a pretty-printer that can print va."
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
type = type.unqualified().strip_typedefs()
typename = type.tag
    if typename is None:
return None
for function in pretty_printers_dict:
if function.search(typename):
return pretty_printers_dict[function](val)
return None
pretty_printers_dict = {}
build_eigen_dictionary ()
| 9,617
| 29.533333
| 160
|
py
|
LiDAR2INS
|
LiDAR2INS-master/eigen3/debug/gdb/__init__.py
|
# Intentionally empty
| 22
| 10.5
| 21
|
py
|
LiDAR2INS
|
LiDAR2INS-master/eigen3/scripts/relicense.py
|
# This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2012 Keir Mierle <mierle@gmail.com>
#
# This Source Code Form is subject to the terms of the Mozilla
# Public License v. 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: mierle@gmail.com (Keir Mierle)
#
# Make the long-awaited conversion to MPL.
lgpl3_header = '''
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
'''
mpl2_header = """
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
import os
import sys
exclusions = set(['relicense.py'])
def update(text):
if text.find(lgpl3_header) == -1:
return text, False
return text.replace(lgpl3_header, mpl2_header), True
rootdir = sys.argv[1]
for root, sub_folders, files in os.walk(rootdir):
  for basename in files:
    filename = os.path.join(root, basename)
    if basename in exclusions:
      print('SKIPPED', filename)
      continue
    with open(filename) as fo:
      text = fo.read()
    text, updated = update(text)
    if updated:
      with open(filename, 'w') as fo:
        fo.write(text)
      print('UPDATED', filename)
    else:
      print('       ', filename)
| 2,368
| 32.842857
| 77
|
py
|
LiDAR2INS
|
LiDAR2INS-master/ceres/scripts/make_docs.py
|
#!/usr/bin/python
# encoding: utf-8
#
# Ceres Solver - A fast non-linear least squares minimizer
# Copyright 2015 Google Inc. All rights reserved.
# http://ceres-solver.org/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Google Inc. nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: sameeragarwal@google.com (Sameer Agarwal)
#
# Note: You will need Sphinx and Pygments installed for this to work.
from __future__ import print_function
import glob
import io
import os
import sys
# Number of arguments
N = len(sys.argv)
if N < 3:
print('make_docs.py src_root destination_root')
sys.exit(1)
src_dir = sys.argv[1] + '/docs/source'
build_root = sys.argv[2]
cache_dir = build_root + '/doctrees'
html_dir = build_root + '/html'
# Called from Command Line
if N == 3:
sphinx_exe = 'sphinx-build'
# Called from CMake (using the SPHINX_EXECUTABLE found)
elif N == 4:
sphinx_exe = sys.argv[3]
# Run Sphinx to build the documentation.
os.system('%s -b html -d %s %s %s' %(sphinx_exe, cache_dir, src_dir, html_dir))
replacements = [
  # By default MathJax does not use TeX fonts. This simple search
  # and replace fixes that.
('''config=TeX-AMS-MML_HTMLorMML"></script>''',
'''config=TeX-AMS_HTML">
MathJax.Hub.Config({
"HTML-CSS": {
availableFonts: ["TeX"]
}
});
</script>'''),
# The title for the homepage is not ideal, so change it.
('<title>Ceres Solver — Ceres Solver</title>',
'<title>Ceres Solver — A Large Scale Non-linear Optimization Library</title>')
]
# This is a nasty hack to strip the breadcrumb navigation. A better strategy is
# to fork the upstream template, but that is no fun either. Whitespace matters!
# This doesn't use regular expressions since the escaping makes it untenable.
breadcrumb_start_other = \
'''<div role="navigation" aria-label="breadcrumbs navigation">
<ul class="wy-breadcrumbs">
<li><a href="index.html">Docs</a> »</li>
<li>'''
# The index page has a slightly different breadcrumb.
breadcrumb_start_index = breadcrumb_start_other.replace('index.html', '#')
breadcrumb_end = \
'''</li>
<li class="wy-breadcrumbs-aside">
</li>
</ul>
<hr/>
</div>'''
for name in glob.glob('%s/*.html' % html_dir):
print('Postprocessing: ', name)
with io.open(name, encoding="utf-8") as fptr:
out = fptr.read()
for input_pattern, output_pattern in replacements:
out = out.replace(input_pattern, output_pattern)
try:
breadcrumb_start = breadcrumb_start_index \
if name.endswith('index.html') \
else breadcrumb_start_other
pre_breadcrumb_start, post_breadcrumb_start = out.split(breadcrumb_start)
title, post_breadcrumb_end = post_breadcrumb_start.split(breadcrumb_end)
print('Stripping breadcrumb for -', title)
out = pre_breadcrumb_start + post_breadcrumb_end
except ValueError:
print('Skipping breadcrumb strip for', name)
with io.open(name, 'w', encoding="utf-8") as fptr:
fptr.write(out)
| 4,409
| 34.28
| 87
|
py
|
LiDAR2INS
|
LiDAR2INS-master/ceres/docs/source/conf.py
|
# -*- coding: utf-8 -*-
#
# Ceres Solver documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 20 20:34:07 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Ceres Solver'
copyright = u'2018 Google Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.14'
# The full version, including alpha/beta/rc tags.
release = '1.14.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes",]
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Ceres Solver"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CeresSolverdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'CeresSolver.tex', u'Ceres Solver',
u'Sameer Agarwal, Keir Mierle & Others', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ceressolver', u'Ceres Solver',
[u'Sameer Agarwal, Keir Mierle & Others'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'CeresSolver', u'Ceres Solver',
u'Sameer Agarwal, Keir Mierle & Others', 'CeresSolver', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 7,957
| 31.748971
| 94
|
py
|
LiDAR2INS
|
LiDAR2INS-master/ceres/internal/ceres/schur_eliminator_template.py
|
# Ceres Solver - A fast non-linear least squares minimizer
# Copyright 2017 Google Inc. All rights reserved.
# http://ceres-solver.org/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Google Inc. nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: sameeragarwal@google.com (Sameer Agarwal)
#
# Script for explicitly generating template specializations of the
# SchurEliminator class. It is a rather large class and the number of
# explicit instantiations is also large. Explicitly generating these
# instantiations in separate .cc files splits the compilation into
# separate compilation units rather than one large .cc file, which
# takes 2+ GB of RAM to compile.
#
# This script creates two sets of files.
#
# 1. schur_eliminator_x_x_x.cc
#    where the x's indicate the template parameters, and
#
# 2. schur_eliminator.cc
#
# that contains a factory function for instantiating these classes
# based on runtime parameters.
#
# The list of tuples, ``SPECIALIZATIONS``, indicates the set of
# specializations that is generated; that list lives in
# generate_template_specializations.py, which imports this module.
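#
# For illustration only (not part of the generator): with the naming scheme
# used by generate_template_specializations.py, the specialization (2, 3, 4)
# ends up in generated/schur_eliminator_2_3_4.cc, and "Eigen::Dynamic" sizes
# are shortened to "d", so (2, 3, "Eigen::Dynamic") becomes
# generated/schur_eliminator_2_3_d.cc.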
HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Template specialization of SchurEliminator.
//
// ========================================
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_template_specializations.py.
"""
DYNAMIC_FILE = """
#include "ceres/schur_eliminator_impl.h"
#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
template class SchurEliminator<%s, %s, %s>;
} // namespace internal
} // namespace ceres
"""
SPECIALIZATION_FILE = """
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
template class SchurEliminator<%s, %s, %s>;
} // namespace internal
} // namespace ceres
#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
"""
FACTORY_FILE_HEADER = """
#include "ceres/linear_solver.h"
#include "ceres/schur_eliminator.h"
#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
SchurEliminatorBase*
SchurEliminatorBase::Create(const LinearSolver::Options& options) {
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
"""
FACTORY = """ return new SchurEliminator<%s, %s, %s>(options);"""
FACTORY_FOOTER = """
#endif
VLOG(1) << "Template specializations not found for <"
<< options.row_block_size << ","
<< options.e_block_size << ","
<< options.f_block_size << ">";
return new SchurEliminator<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(options);
}
} // namespace internal
} // namespace ceres
"""
| 5,928
| 37.00641
| 86
|
py
|
LiDAR2INS
|
LiDAR2INS-master/ceres/internal/ceres/generate_bundle_adjustment_tests.py
|
# Ceres Solver - A fast non-linear least squares minimizer
# Copyright 2018 Google Inc. All rights reserved.
# http://ceres-solver.org/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Google Inc. nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: keir@google.com (Keir Mierle)
#
# Generate bundle adjustment tests as separate binaries. Since the bundle
# adjustment tests are fairly processing intensive, serializing them makes the
# tests take forever to run. Splitting them into separate binaries makes it
# easier to parallelize in continuous integration systems, and makes local
# processing on multi-core workstations much faster.
# The product of ORDERINGS, THREAD_CONFIGS, and SOLVER_CONFIGS is the full set
# of tests to generate; the expected count is worked out just after
# SOLVER_CONFIGS below.
ORDERINGS = ["kAutomaticOrdering", "kUserOrdering"]
SINGLE_THREADED = "1"
MULTI_THREADED = "4"
THREAD_CONFIGS = [SINGLE_THREADED, MULTI_THREADED]
SOLVER_CONFIGS = [
# Linear solver Sparse backend Preconditioner
('DENSE_SCHUR', 'NO_SPARSE', 'IDENTITY'),
('ITERATIVE_SCHUR', 'NO_SPARSE', 'JACOBI'),
('ITERATIVE_SCHUR', 'NO_SPARSE', 'SCHUR_JACOBI'),
('ITERATIVE_SCHUR', 'SUITE_SPARSE', 'CLUSTER_JACOBI'),
('ITERATIVE_SCHUR', 'EIGEN_SPARSE', 'CLUSTER_JACOBI'),
('ITERATIVE_SCHUR', 'CX_SPARSE', 'CLUSTER_JACOBI'),
('ITERATIVE_SCHUR', 'ACCELERATE_SPARSE','CLUSTER_JACOBI'),
('ITERATIVE_SCHUR', 'SUITE_SPARSE', 'CLUSTER_TRIDIAGONAL'),
('ITERATIVE_SCHUR', 'EIGEN_SPARSE', 'CLUSTER_TRIDIAGONAL'),
('ITERATIVE_SCHUR', 'CX_SPARSE', 'CLUSTER_TRIDIAGONAL'),
('ITERATIVE_SCHUR', 'ACCELERATE_SPARSE','CLUSTER_TRIDIAGONAL'),
('SPARSE_NORMAL_CHOLESKY', 'SUITE_SPARSE', 'IDENTITY'),
('SPARSE_NORMAL_CHOLESKY', 'EIGEN_SPARSE', 'IDENTITY'),
('SPARSE_NORMAL_CHOLESKY', 'CX_SPARSE', 'IDENTITY'),
('SPARSE_NORMAL_CHOLESKY', 'ACCELERATE_SPARSE','IDENTITY'),
('SPARSE_SCHUR', 'SUITE_SPARSE', 'IDENTITY'),
('SPARSE_SCHUR', 'EIGEN_SPARSE', 'IDENTITY'),
('SPARSE_SCHUR', 'CX_SPARSE', 'IDENTITY'),
('SPARSE_SCHUR', 'ACCELERATE_SPARSE','IDENTITY'),
]
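# Illustration (added for clarity, not used by the generator below): the full
# test matrix is the cartesian product of the three lists above, so the
# expected number of generated test files is
#   len(SOLVER_CONFIGS) * len(ORDERINGS) * len(THREAD_CONFIGS) = 19 * 2 * 2 = 76
EXPECTED_NUM_TESTS = len(SOLVER_CONFIGS) * len(ORDERINGS) * len(THREAD_CONFIGS)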
FILENAME_SHORTENING_MAP = dict(
DENSE_SCHUR='denseschur',
ITERATIVE_SCHUR='iterschur',
SPARSE_NORMAL_CHOLESKY='sparsecholesky',
SPARSE_SCHUR='sparseschur',
NO_SPARSE='', # Omit sparse reference entirely for dense tests.
SUITE_SPARSE='suitesparse',
EIGEN_SPARSE='eigensparse',
CX_SPARSE='cxsparse',
ACCELERATE_SPARSE='acceleratesparse',
IDENTITY='identity',
JACOBI='jacobi',
SCHUR_JACOBI='schurjacobi',
CLUSTER_JACOBI='clustjacobi',
CLUSTER_TRIDIAGONAL='clusttri',
kAutomaticOrdering='auto',
kUserOrdering='user',
)
COPYRIGHT_HEADER = (
"""// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2018 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// ========================================
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// ========================================
//
// This file is generated using generate_bundle_adjustment_tests.py.""")
BUNDLE_ADJUSTMENT_TEST_TEMPLATE = (COPYRIGHT_HEADER + """
#include "bundle_adjustment_test_util.h"
%(preprocessor_conditions_begin)s
namespace ceres {
namespace internal {
TEST_F(BundleAdjustmentTest,
%(test_class_name)s) { // NOLINT
BundleAdjustmentProblem bundle_adjustment_problem;
Solver::Options* options =
bundle_adjustment_problem.mutable_solver_options();
options->num_threads = %(num_threads)s;
options->linear_solver_type = %(linear_solver)s;
options->sparse_linear_algebra_library_type = %(sparse_backend)s;
options->preconditioner_type = %(preconditioner)s;
if (%(ordering)s) {
options->linear_solver_ordering.reset();
}
Problem* problem = bundle_adjustment_problem.mutable_problem();
RunSolverForConfigAndExpectResidualsMatch(*options, problem);
}
} // namespace internal
} // namespace ceres
%(preprocessor_conditions_end)s
""")
def camelcasify(token):
"""Convert capitalized underscore tokens to camel case"""
return ''.join([x.lower().capitalize() for x in token.split('_')])
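# Example (for reference only, derived from the helper above):
#   camelcasify('DENSE_SCHUR') == 'DenseSchur'
#   camelcasify('SPARSE_NORMAL_CHOLESKY') == 'SparseNormalCholesky'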
def generate_bundle_test(linear_solver,
sparse_backend,
preconditioner,
ordering,
thread_config):
"""Generate a bundle adjustment test executable configured appropriately"""
# Preconditioner only makes sense for iterative schur; drop it otherwise.
preconditioner_tag = preconditioner
if linear_solver != 'ITERATIVE_SCHUR':
preconditioner_tag = ''
# Omit references to the sparse backend when one is not in use.
sparse_backend_tag = sparse_backend
if sparse_backend == 'NO_SPARSE':
sparse_backend_tag = ''
# Use a double underscore; otherwise the names are harder to understand.
test_class_name = '_'.join(filter(lambda x: x, [
camelcasify(linear_solver),
camelcasify(sparse_backend_tag),
camelcasify(preconditioner_tag),
ordering[1:], # Strip 'k'
'Threads' if thread_config == MULTI_THREADED else '']))
# Initial template parameters (augmented more below).
template_parameters = dict(
linear_solver=linear_solver,
sparse_backend=sparse_backend,
preconditioner=preconditioner,
ordering=ordering,
num_threads=thread_config,
test_class_name=test_class_name)
# Accumulate appropriate #ifdef/#ifndefs for the solver's sparse backend.
preprocessor_conditions_begin = []
preprocessor_conditions_end = []
if sparse_backend == 'SUITE_SPARSE':
preprocessor_conditions_begin.append('#ifndef CERES_NO_SUITESPARSE')
preprocessor_conditions_end.insert(0, '#endif // CERES_NO_SUITESPARSE')
elif sparse_backend == 'CX_SPARSE':
preprocessor_conditions_begin.append('#ifndef CERES_NO_CXSPARSE')
preprocessor_conditions_end.insert(0, '#endif // CERES_NO_CXSPARSE')
elif sparse_backend == 'ACCELERATE_SPARSE':
preprocessor_conditions_begin.append('#ifndef CERES_NO_ACCELERATE_SPARSE')
preprocessor_conditions_end.insert(0, '#endif // CERES_NO_ACCELERATE_SPARSE')
elif sparse_backend == 'EIGEN_SPARSE':
preprocessor_conditions_begin.append('#ifdef CERES_USE_EIGEN_SPARSE')
preprocessor_conditions_end.insert(0, '#endif // CERES_USE_EIGEN_SPARSE')
# Accumulate appropriate #ifdef/#ifndefs for threading conditions.
if thread_config == MULTI_THREADED:
preprocessor_conditions_begin.append('#ifndef CERES_NO_THREADS')
preprocessor_conditions_end.insert(0, '#endif // CERES_NO_THREADS')
# If there are #ifdefs, put newlines around them.
if preprocessor_conditions_begin:
preprocessor_conditions_begin.insert(0, '')
preprocessor_conditions_begin.append('')
preprocessor_conditions_end.insert(0, '')
preprocessor_conditions_end.append('')
# Put #ifdef/#ifndef stacks into the template parameters.
template_parameters['preprocessor_conditions_begin'] = '\n'.join(
preprocessor_conditions_begin)
template_parameters['preprocessor_conditions_end'] = '\n'.join(
preprocessor_conditions_end)
# Substitute variables into the test template, and write the result to a file.
filename_tag = '_'.join(FILENAME_SHORTENING_MAP.get(x) for x in [
linear_solver,
sparse_backend_tag,
preconditioner_tag,
ordering]
if FILENAME_SHORTENING_MAP.get(x))
if (thread_config == MULTI_THREADED):
filename_tag += '_threads'
filename = ('generated_bundle_adjustment_tests/ba_%s_test.cc' %
filename_tag.lower())
with open(filename, 'w') as fd:
fd.write(BUNDLE_ADJUSTMENT_TEST_TEMPLATE % template_parameters)
# All done.
  print('Generated', filename)
return filename
if __name__ == '__main__':
# Iterate over all the possible configurations and generate the tests.
generated_files = []
for linear_solver, sparse_backend, preconditioner in SOLVER_CONFIGS:
for ordering in ORDERINGS:
for thread_config in THREAD_CONFIGS:
generated_files.append(
generate_bundle_test(linear_solver,
sparse_backend,
preconditioner,
ordering,
thread_config))
# Generate the CMakeLists.txt as well.
with open('generated_bundle_adjustment_tests/CMakeLists.txt', 'w') as fd:
fd.write(COPYRIGHT_HEADER.replace('//', '#').replace('http:#', 'http://'))
fd.write('\n')
fd.write('\n')
for generated_file in generated_files:
fd.write('ceres_test(%s)\n' %
generated_file.split('/')[1].replace('_test.cc', ''))
| 11,723
| 42.910112
| 82
|
py
|
LiDAR2INS
|
LiDAR2INS-master/ceres/internal/ceres/partitioned_matrix_view_template.py
|
# Ceres Solver - A fast non-linear least squares minimizer
# Copyright 2015 Google Inc. All rights reserved.
# http://ceres-solver.org/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Google Inc. nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: sameeragarwal@google.com (Sameer Agarwal)
#
# Script for explicitly generating template specializations of the
# PartitionedMatrixView class. Explicitly generating these
# instantiations in separate .cc files splits the compilation into
# separate compilation units rather than one large .cc file.
#
# This script creates two sets of files.
#
# 1. partitioned_matrix_view_x_x_x.cc
# where the x indicates the template parameters and
#
# 2. partitioned_matrix_view.cc
#
# that contains a factory function for instantiating these classes
# based on runtime parameters.
#
# The list of tuples, ``SPECIALIZATIONS``, indicates the set of
# specializations that is generated (see generate_template_specializations.py).
HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Template specialization of PartitionedMatrixView.
//
// ========================================
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_template_specializations.py.
"""
DYNAMIC_FILE = """
#include "ceres/partitioned_matrix_view_impl.h"
#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
template class PartitionedMatrixView<%s, %s, %s>;
} // namespace internal
} // namespace ceres
"""
SPECIALIZATION_FILE = """
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
template class PartitionedMatrixView<%s, %s, %s>;
} // namespace internal
} // namespace ceres
#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
"""
FACTORY_FILE_HEADER = """
#include "ceres/linear_solver.h"
#include "ceres/partitioned_matrix_view.h"
#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
PartitionedMatrixViewBase*
PartitionedMatrixViewBase::Create(const LinearSolver::Options& options,
const BlockSparseMatrix& matrix) {
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
"""
FACTORY = """ return new PartitionedMatrixView<%s, %s, %s>(matrix, options.elimination_groups[0]);"""
FACTORY_FOOTER = """
#endif
VLOG(1) << "Template specializations not found for <"
<< options.row_block_size << ","
<< options.e_block_size << ","
<< options.f_block_size << ">";
return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
matrix, options.elimination_groups[0]);
};
} // namespace internal
} // namespace ceres
"""
| 5,983
| 38.111111
| 101
|
py
|
LiDAR2INS
|
LiDAR2INS-master/ceres/internal/ceres/generate_template_specializations.py
|
# Ceres Solver - A fast non-linear least squares minimizer
# Copyright 2015 Google Inc. All rights reserved.
# http://ceres-solver.org/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Google Inc. nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: sameeragarwal@google.com (Sameer Agarwal)
#
# Script for explicitly generating template specializations of the
# SchurEliminator class. It is a rather large class and the number of
# explicit instantiations is also large. Explicitly generating these
# instantiations in separate .cc files splits the compilation into
# separate compilation units rather than one large .cc file, which
# takes 2+ GB of RAM to compile.
#
# This script creates three sets of files.
#
# 1. schur_eliminator_x_x_x.cc and partitioned_matrix_view_x_x_x.cc
#    where the x's indicate the template parameters, and
#
# 2. schur_eliminator.cc & partitioned_matrix_view.cc
#
# that contains a factory function for instantiating these classes
# based on runtime parameters.
#
# 3. schur_templates.cc
#
# that contains a function which can be queried to determine what
# template specializations are available.
#
# The following list of tuples, ``SPECIALIZATIONS``, indicates the set of
# specializations that is generated.
SPECIALIZATIONS = [(2, 2, 2),
(2, 2, 3),
(2, 2, 4),
(2, 2, "Eigen::Dynamic"),
(2, 3, 3),
(2, 3, 4),
(2, 3, 6),
(2, 3, 9),
(2, 3, "Eigen::Dynamic"),
(2, 4, 3),
(2, 4, 4),
(2, 4, 6),
(2, 4, 8),
(2, 4, 9),
(2, 4, "Eigen::Dynamic"),
(2, "Eigen::Dynamic", "Eigen::Dynamic"),
(3, 3, 3),
(4, 4, 2),
(4, 4, 3),
(4, 4, 4),
(4, 4, "Eigen::Dynamic")]
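# For reference (worked out from the list above): SPECIALIZATIONS has 21
# entries. Specialize() below writes one .cc file per entry plus the fully
# dynamic <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic> fallback, i.e. 22
# generated files for each of SchurEliminator and PartitionedMatrixView, plus
# the factory/query files schur_eliminator.cc, partitioned_matrix_view.cc and
# schur_templates.cc.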
import schur_eliminator_template
import partitioned_matrix_view_template
import os
import glob
def SuffixForSize(size):
if size == "Eigen::Dynamic":
return "d"
return str(size)
def SpecializationFilename(prefix, row_block_size, e_block_size, f_block_size):
  return "_".join([prefix] + list(map(SuffixForSize,
                                      (row_block_size, e_block_size, f_block_size))))
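# Example (for reference only):
#   SpecializationFilename("generated/schur_eliminator", 2, 3, "Eigen::Dynamic")
#   returns "generated/schur_eliminator_2_3_d".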
def GenerateFactoryConditional(row_block_size, e_block_size, f_block_size):
conditionals = []
if (row_block_size != "Eigen::Dynamic"):
conditionals.append("(options.row_block_size == %s)" % row_block_size)
if (e_block_size != "Eigen::Dynamic"):
conditionals.append("(options.e_block_size == %s)" % e_block_size)
if (f_block_size != "Eigen::Dynamic"):
conditionals.append("(options.f_block_size == %s)" % f_block_size)
if (len(conditionals) == 0):
return "%s"
if (len(conditionals) == 1):
return " if " + conditionals[0] + "{\n %s\n }\n"
return " if (" + " &&\n ".join(conditionals) + ") {\n %s\n }\n"
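# Example (for reference only): GenerateFactoryConditional(2, 3, 4) returns a
# format string that expands to roughly
#   if ((options.row_block_size == 2) &&
#       (options.e_block_size == 3) &&
#       (options.f_block_size == 4)) {
#     <factory or query action>
#   }
# whereas the all-dynamic case returns the bare "%s" fallthrough.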
def Specialize(name, data):
"""
Generate specialization code and the conditionals to instantiate it.
"""
# Specialization files
for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
output = SpecializationFilename("generated/" + name,
row_block_size,
e_block_size,
f_block_size) + ".cc"
with open(output, "w") as f:
f.write(data["HEADER"])
f.write(data["SPECIALIZATION_FILE"] %
(row_block_size, e_block_size, f_block_size))
# Generate the _d_d_d specialization.
output = SpecializationFilename("generated/" + name,
"Eigen::Dynamic",
"Eigen::Dynamic",
"Eigen::Dynamic") + ".cc"
with open(output, "w") as f:
f.write(data["HEADER"])
f.write(data["DYNAMIC_FILE"] %
("Eigen::Dynamic", "Eigen::Dynamic", "Eigen::Dynamic"))
# Factory
with open(name + ".cc", "w") as f:
f.write(data["HEADER"])
f.write(data["FACTORY_FILE_HEADER"])
for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
factory_conditional = GenerateFactoryConditional(
row_block_size, e_block_size, f_block_size)
factory = data["FACTORY"] % (row_block_size, e_block_size, f_block_size)
      f.write(factory_conditional % factory)
f.write(data["FACTORY_FOOTER"])
QUERY_HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// What template specializations are available.
//
// ========================================
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_template_specializations.py.
"""
QUERY_FILE_HEADER = """
#include "ceres/internal/eigen.h"
#include "ceres/schur_templates.h"
namespace ceres {
namespace internal {
void GetBestSchurTemplateSpecialization(int* row_block_size,
int* e_block_size,
int* f_block_size) {
LinearSolver::Options options;
options.row_block_size = *row_block_size;
options.e_block_size = *e_block_size;
options.f_block_size = *f_block_size;
*row_block_size = Eigen::Dynamic;
*e_block_size = Eigen::Dynamic;
*f_block_size = Eigen::Dynamic;
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
"""
QUERY_FOOTER = """
#endif
return;
}
} // namespace internal
} // namespace ceres
"""
QUERY_ACTION = """ *row_block_size = %s;
*e_block_size = %s;
*f_block_size = %s;
return;"""
def GenerateQueryFile():
"""
Generate file that allows querying for available template specializations.
"""
with open("schur_templates.cc", "w") as f:
f.write(QUERY_HEADER)
f.write(QUERY_FILE_HEADER)
for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
factory_conditional = GenerateFactoryConditional(
row_block_size, e_block_size, f_block_size)
action = QUERY_ACTION % (row_block_size, e_block_size, f_block_size)
f.write(factory_conditional % action)
f.write(QUERY_FOOTER)
if __name__ == "__main__":
for f in glob.glob("generated/*"):
os.remove(f)
Specialize("schur_eliminator",
schur_eliminator_template.__dict__)
Specialize("partitioned_matrix_view",
partitioned_matrix_view_template.__dict__)
GenerateQueryFile()
| 9,677
| 38.182186
| 80
|
py
|
xgboost
|
xgboost-master/tests/ci_build/test_r_package.py
|
"""Utilities for packaging R code and running tests."""
import argparse
import os
import shutil
import subprocess
from pathlib import Path
from platform import system
from test_utils import R_PACKAGE, ROOT, DirectoryExcursion, cd, print_time, record_time
def get_mingw_bin() -> str:
return os.path.join("c:/rtools40/mingw64/", "bin")
@cd(ROOT)
@record_time
def pack_rpackage() -> Path:
"""Compose the directory used for creating R package tar ball."""
dest = Path("xgboost")
def pkgroot(path: str) -> None:
"""Change makefiles according to the package layout."""
with open(Path("R-package") / "src" / path, "r") as fd:
makefile = fd.read()
makefile = makefile.replace("PKGROOT=../../", "PKGROOT=.", 1)
with open(dest / "src" / path, "w") as fd:
fd.write(makefile)
output = subprocess.run(["git", "clean", "-xdf", "--dry-run"], capture_output=True)
if output.returncode != 0:
raise ValueError("Failed to check git repository status.", output)
would_remove = output.stdout.decode("utf-8").strip().split("\n")
if would_remove and not all(f.find("tests/ci_build") != -1 for f in would_remove):
raise ValueError(
"\n".join(would_remove) + "\nPlease cleanup the working git repository."
)
shutil.copytree("R-package", dest)
os.remove(dest / "demo" / "runall.R")
# core
shutil.copytree("src", dest / "src" / "src")
shutil.copytree("include", dest / "src" / "include")
shutil.copytree("amalgamation", dest / "src" / "amalgamation")
# rabit
rabit = Path("rabit")
os.mkdir(dest / "src" / rabit)
shutil.copytree(rabit / "src", dest / "src" / "rabit" / "src")
shutil.copytree(rabit / "include", dest / "src" / "rabit" / "include")
# dmlc-core
dmlc_core = Path("dmlc-core")
os.mkdir(dest / "src" / dmlc_core)
shutil.copytree(dmlc_core / "include", dest / "src" / "dmlc-core" / "include")
shutil.copytree(dmlc_core / "src", dest / "src" / "dmlc-core" / "src")
# makefile & license
shutil.copyfile("LICENSE", dest / "LICENSE")
osxmakef = dest / "src" / "Makevars.win-e"
if os.path.exists(osxmakef):
os.remove(osxmakef)
pkgroot("Makevars.in")
pkgroot("Makevars.win")
# misc
rwsp = Path("R-package") / "remove_warning_suppression_pragma.sh"
if system() != "Windows":
subprocess.check_call(rwsp)
rwsp = dest / "remove_warning_suppression_pragma.sh"
if system() != "Windows":
subprocess.check_call(rwsp)
os.remove(rwsp)
os.remove(dest / "CMakeLists.txt")
shutil.rmtree(dest / "tests" / "helper_scripts")
return dest
@cd(ROOT)
@record_time
def build_rpackage(path: str) -> str:
def find_tarball() -> str:
found = []
for root, subdir, files in os.walk("."):
for f in files:
if f.endswith(".tar.gz") and f.startswith("xgboost"):
found.append(os.path.join(root, f))
if not found:
raise ValueError("Failed to find output tar ball.")
if len(found) > 1:
raise ValueError("Found more than one packages:", found)
return found[0]
env = os.environ.copy()
print("Ncpus:", f"{os.cpu_count()}")
env.update({"MAKEFLAGS": f"-j{os.cpu_count()}"})
subprocess.check_call([R, "CMD", "build", path], env=env)
tarball = find_tarball()
return tarball
@cd(ROOT)
@record_time
def check_rpackage(path: str) -> None:
env = os.environ.copy()
print("Ncpus:", f"{os.cpu_count()}")
env.update(
{
"MAKEFLAGS": f"-j{os.cpu_count()}",
# cran specific environment variables
"_R_CHECK_EXAMPLE_TIMING_CPU_TO_ELAPSED_THRESHOLD_": str(2.5),
}
)
    # Actually we don't run this check on Windows due to a dependency issue.
if system() == "Windows":
# make sure compiler from rtools is used.
mingw_bin = get_mingw_bin()
CXX = os.path.join(mingw_bin, "g++.exe")
CC = os.path.join(mingw_bin, "gcc.exe")
env.update({"CC": CC, "CXX": CXX})
status = subprocess.run([R, "CMD", "check", "--as-cran", path], env=env)
with open(Path("xgboost.Rcheck") / "00check.log", "r") as fd:
check_log = fd.read()
with open(Path("xgboost.Rcheck") / "00install.out", "r") as fd:
install_log = fd.read()
msg = f"""
----------------------- Install ----------------------
{install_log}
----------------------- Check -----------------------
{check_log}
"""
if status.returncode != 0:
print(msg)
raise ValueError("Failed r package check.")
if check_log.find("WARNING") != -1:
print(msg)
raise ValueError("Has unresolved warnings.")
if check_log.find("Examples with CPU time") != -1:
print(msg)
raise ValueError("Suspicious NOTE.")
@cd(R_PACKAGE)
@record_time
def check_rmarkdown() -> None:
assert system() != "Windows", "Document test doesn't support Windows."
env = os.environ.copy()
env.update({"MAKEFLAGS": f"-j{os.cpu_count()}"})
print("Checking R documentation.")
bin_dir = os.path.dirname(R)
rscript = os.path.join(bin_dir, "Rscript")
subprocess.check_call([rscript, "-e", "roxygen2::roxygenize()"], env=env)
output = subprocess.run(["git", "diff", "--name-only"], capture_output=True)
if len(output.stdout.decode("utf-8").strip()) != 0:
output = subprocess.run(["git", "diff"], capture_output=True)
raise ValueError(
"Please run `roxygen2::roxygenize()`. Diff:\n",
output.stdout.decode("utf-8"),
)
@cd(R_PACKAGE)
@record_time
def test_with_autotools() -> None:
    """Windows-only test. No `--as-cran` check, only unit tests. We don't want to
    manage the dependencies on the Windows machine.
"""
assert system() == "Windows"
mingw_bin = get_mingw_bin()
CXX = os.path.join(mingw_bin, "g++.exe")
CC = os.path.join(mingw_bin, "gcc.exe")
cmd = [R, "CMD", "INSTALL", str(os.path.curdir)]
env = os.environ.copy()
env.update({"CC": CC, "CXX": CXX, "MAKEFLAGS": f"-j{os.cpu_count()}"})
subprocess.check_call(cmd, env=env)
subprocess.check_call(
["R.exe", "-q", "-e", "library(testthat); setwd('tests'); source('testthat.R')"]
)
subprocess.check_call(["R.exe", "-q", "-e", "demo(runall, package = 'xgboost')"])
@record_time
def test_with_cmake(args: argparse.Namespace) -> None:
os.mkdir("build")
with DirectoryExcursion("build"):
if args.compiler == "mingw":
mingw_bin = get_mingw_bin()
CXX = os.path.join(mingw_bin, "g++.exe")
CC = os.path.join(mingw_bin, "gcc.exe")
env = os.environ.copy()
env.update({"CC": CC, "CXX": CXX})
subprocess.check_call(
[
"cmake",
os.path.pardir,
"-DUSE_OPENMP=ON",
"-DR_LIB=ON",
"-DCMAKE_CONFIGURATION_TYPES=Release",
"-G",
"Unix Makefiles",
],
env=env,
)
subprocess.check_call(["make", "-j", "install"])
elif args.compiler == "msvc":
subprocess.check_call(
[
"cmake",
os.path.pardir,
"-DUSE_OPENMP=ON",
"-DR_LIB=ON",
"-DCMAKE_CONFIGURATION_TYPES=Release",
"-A",
"x64",
]
)
subprocess.check_call(
[
"cmake",
"--build",
os.path.curdir,
"--target",
"install",
"--config",
"Release",
]
)
else:
raise ValueError("Wrong compiler")
with DirectoryExcursion(R_PACKAGE):
subprocess.check_call(
[
R,
"-q",
"-e",
"library(testthat); setwd('tests'); source('testthat.R')",
]
)
subprocess.check_call([R, "-q", "-e", "demo(runall, package = 'xgboost')"])
@record_time
def main(args: argparse.Namespace) -> None:
if args.task == "pack":
pack_rpackage()
elif args.task == "build":
src_dir = pack_rpackage()
build_rpackage(src_dir)
elif args.task == "doc":
check_rmarkdown()
elif args.task == "check":
if args.build_tool == "autotools" and system() != "Windows":
src_dir = pack_rpackage()
tarball = build_rpackage(src_dir)
check_rpackage(tarball)
elif args.build_tool == "autotools":
test_with_autotools()
else:
test_with_cmake(args)
else:
raise ValueError("Unexpected task.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=(
"Helper script for making R package and running R tests on CI. There are"
" also other helper scripts in the R tests directory for installing"
" dependencies and running linter."
)
)
parser.add_argument(
"--task",
type=str,
choices=["pack", "build", "check", "doc"],
default="check",
required=False,
)
parser.add_argument(
"--compiler",
type=str,
choices=["mingw", "msvc"],
help="Compiler used for compiling CXX code. Only relevant for windows build",
default="mingw",
required=False,
)
parser.add_argument(
"--build-tool",
type=str,
choices=["cmake", "autotools"],
help="Build tool for compiling CXX code and install R package.",
default="autotools",
required=False,
)
parser.add_argument(
"--r",
type=str,
default="R" if system() != "Windows" else "R.exe",
help="Path to the R executable.",
)
args = parser.parse_args()
R = args.r
try:
main(args)
finally:
print_time()
| 10,217
| 31.438095
| 88
|
py
|
xgboost
|
xgboost-master/tests/ci_build/tidy.py
|
#!/usr/bin/env python
import argparse
import json
import os
import re
import shutil
import subprocess
import sys
from multiprocessing import Pool, cpu_count
from time import time
import yaml
def call(args):
'''Subprocess run wrapper.'''
completed = subprocess.run(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
error_msg = completed.stdout.decode('utf-8')
# `workspace` is a name used in Jenkins CI. Normally we should keep the
# dir as `xgboost`.
matched = re.search('(workspace|xgboost)/.*(src|tests|include)/.*warning:',
error_msg,
re.MULTILINE)
if matched is None:
return_code = 0
else:
return_code = 1
return (completed.returncode, return_code, error_msg, args)
class ClangTidy(object):
''' clang tidy wrapper.
Args:
args: Command line arguments.
cpp_lint: Run linter on C++ source code.
cuda_lint: Run linter on CUDA source code.
use_dmlc_gtest: Whether to use gtest bundled in dmlc-core.
'''
def __init__(self, args):
self.cpp_lint = args.cpp
self.cuda_lint = args.cuda
self.use_dmlc_gtest: bool = args.use_dmlc_gtest
self.cuda_archs = args.cuda_archs.copy() if args.cuda_archs else []
if args.tidy_version:
self.exe = 'clang-tidy-' + str(args.tidy_version)
else:
self.exe = 'clang-tidy'
print('Run linter on CUDA: ', self.cuda_lint)
print('Run linter on C++:', self.cpp_lint)
print('Use dmlc gtest:', self.use_dmlc_gtest)
print('CUDA archs:', ' '.join(self.cuda_archs))
if not self.cpp_lint and not self.cuda_lint:
raise ValueError('Both --cpp and --cuda are set to 0.')
self.root_path = os.path.abspath(os.path.curdir)
print('Project root:', self.root_path)
self.cdb_path = os.path.join(self.root_path, 'cdb')
def __enter__(self):
self.start = time()
if os.path.exists(self.cdb_path):
shutil.rmtree(self.cdb_path)
self._generate_cdb()
return self
def __exit__(self, *args):
if os.path.exists(self.cdb_path):
shutil.rmtree(self.cdb_path)
self.end = time()
print('Finish running clang-tidy:', self.end - self.start)
def _generate_cdb(self):
'''Run CMake to generate compilation database.'''
os.mkdir(self.cdb_path)
os.chdir(self.cdb_path)
cmake_args = ['cmake', '..', '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',
'-DGOOGLE_TEST=ON']
if self.use_dmlc_gtest:
cmake_args.append('-DUSE_DMLC_GTEST=ON')
else:
cmake_args.append('-DUSE_DMLC_GTEST=OFF')
if self.cuda_lint:
cmake_args.extend(['-DUSE_CUDA=ON', '-DUSE_NCCL=ON'])
if self.cuda_archs:
arch_list = ';'.join(self.cuda_archs)
cmake_args.append(f'-DGPU_COMPUTE_VER={arch_list}')
subprocess.run(cmake_args)
os.chdir(self.root_path)
def convert_nvcc_command_to_clang(self, command):
'''Convert nvcc flags to corresponding clang flags.'''
components = command.split()
compiler: str = components[0]
if compiler.find('nvcc') != -1:
compiler = 'clang++'
components[0] = compiler
# check each component in a command
converted_components = [compiler]
for i in range(1, len(components)):
if components[i] == '-lineinfo':
continue
elif components[i] == '-fuse-ld=gold':
continue
elif components[i] == '-rdynamic':
continue
elif components[i] == "-Xfatbin=-compress-all":
continue
elif components[i] == "-forward-unknown-to-host-compiler":
continue
elif (components[i] == '-x' and
components[i+1] == 'cu'):
# -x cu -> -x cuda
converted_components.append('-x')
converted_components.append('cuda')
components[i+1] = ''
continue
elif components[i].find('-Xcompiler') != -1:
continue
elif components[i].find('--expt') != -1:
continue
elif components[i].find('-ccbin') != -1:
continue
elif components[i].find('--generate-code') != -1:
keyword = 'code=sm'
pos = components[i].find(keyword)
capability = components[i][pos + len(keyword) + 1:
pos + len(keyword) + 3]
if pos != -1:
converted_components.append(
'--cuda-gpu-arch=sm_' + capability)
elif components[i].find('--std=c++14') != -1:
converted_components.append('-std=c++14')
elif components[i].startswith('-isystem='):
converted_components.extend(components[i].split('='))
else:
converted_components.append(components[i])
converted_components.append('-isystem /usr/local/cuda/include/')
command = ''
for c in converted_components:
command = command + ' ' + c
command = command.strip()
return command
def _configure_flags(self, path, command):
src = os.path.join(self.root_path, 'src')
src = src.replace('/', '\\/')
include = os.path.join(self.root_path, 'include')
include = include.replace('/', '\\/')
header_filter = '(' + src + '|' + include + ')'
common_args = [self.exe,
"-header-filter=" + header_filter,
'-config='+self.clang_tidy]
common_args.append(path)
common_args.append('--')
command = self.convert_nvcc_command_to_clang(command)
command = command.split()[1:] # remove clang/c++/g++
if '-c' in command:
index = command.index('-c')
del command[index+1]
command.remove('-c')
if '-o' in command:
index = command.index('-o')
del command[index+1]
command.remove('-o')
common_args.extend(command)
# Two passes, one for device code another for host code.
if path.endswith('cu'):
args = [common_args.copy(), common_args.copy()]
args[0].append('--cuda-host-only')
args[1].append('--cuda-device-only')
else:
args = [common_args.copy()]
for a in args:
a.append('-Wno-unused-command-line-argument')
return args
def _configure(self):
'''Load and configure compile_commands and clang_tidy.'''
def should_lint(path):
if not self.cpp_lint and path.endswith('.cc'):
return False
isxgb = path.find('rabit') == -1
isxgb = isxgb and path.find('dmlc-core') == -1
isxgb = isxgb and (not path.startswith(self.cdb_path))
if isxgb:
print(path)
return True
cdb_file = os.path.join(self.cdb_path, 'compile_commands.json')
with open(cdb_file, 'r') as fd:
self.compile_commands = json.load(fd)
tidy_file = os.path.join(self.root_path, '.clang-tidy')
with open(tidy_file) as fd:
self.clang_tidy = yaml.safe_load(fd)
self.clang_tidy = str(self.clang_tidy)
all_files = []
for entry in self.compile_commands:
path = entry['file']
if should_lint(path):
args = self._configure_flags(path, entry['command'])
all_files.extend(args)
return all_files
def run(self):
'''Run clang-tidy.'''
all_files = self._configure()
passed = True
BAR = '-'*32
with Pool(cpu_count()) as pool:
results = pool.map(call, all_files)
for i, (process_status, tidy_status, msg, args) in enumerate(results):
                # Don't enforce clang-tidy to pass for now, because the
                # namespace for cub in thrust is not correct.
if tidy_status == 1:
passed = False
print(BAR, '\n'
'Command args:', ' '.join(args), ', ',
'Process return code:', process_status, ', ',
'Tidy result code:', tidy_status, ', ',
'Message:\n', msg,
BAR, '\n')
if not passed:
print('Errors in `thrust` namespace can be safely ignored.',
'Please address rest of the clang-tidy warnings.')
return passed
def test_tidy(args):
    '''See if clang-tidy and our regex are working correctly. There are
    many subtleties we need to be careful about. For instance:
    * Is the string redirected to the pipe encoded as UTF-8, or is it
    bytes?
    * On Jenkins there's no 'xgboost' directory; are we catching the
    right keywords?
    * Should we use re.DOTALL?
    * Should we use re.MULTILINE?
    Tests here are not thorough, but at least we want to guarantee tidy
    is not missing anything on Jenkins.
    '''
root_path = os.path.abspath(os.path.curdir)
tidy_file = os.path.join(root_path, '.clang-tidy')
test_file_path = os.path.join(root_path,
'tests', 'ci_build', 'test_tidy.cc')
with open(tidy_file) as fd:
tidy_config = fd.read()
tidy_config = str(tidy_config)
tidy_config = '-config='+tidy_config
if not args.tidy_version:
tidy = 'clang-tidy'
else:
tidy = 'clang-tidy-' + str(args.tidy_version)
args = [tidy, tidy_config, test_file_path]
(proc_code, tidy_status, error_msg, _) = call(args)
assert proc_code == 0
assert tidy_status == 1
print('clang-tidy is working.')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run clang-tidy.")
parser.add_argument("--cpp", type=int, default=1)
parser.add_argument(
"--tidy-version",
type=int,
default=None,
help="Specify the version of preferred clang-tidy.",
)
parser.add_argument("--cuda", type=int, default=1)
parser.add_argument(
"--use-dmlc-gtest",
action="store_true",
help="Whether to use gtest bundled in dmlc-core.",
)
parser.add_argument(
"--cuda-archs", action="append", help="List of CUDA archs to build"
)
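    # Example invocation (hypothetical values, for reference only):
    #   python tests/ci_build/tidy.py --cpp=1 --cuda=0 --use-dmlc-gtest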
args = parser.parse_args()
test_tidy(args)
with ClangTidy(args) as linter:
passed = linter.run()
if not passed:
sys.exit(1)
| 10,858
| 34.486928
| 82
|
py
|
xgboost
|
xgboost-master/tests/ci_build/change_version.py
|
"""
1. Modify ``CMakeLists.txt`` in the source tree and ``python-package/xgboost/VERSION``
   if needed, then run CMake.
   If this is an RC release, the Python version has the form <major>.<minor>.<patch>rc1
2. Modify ``DESCRIPTION`` and ``configure.ac`` in R-package. Run ``autoreconf``.
3. Run ``mvn`` in ``jvm-packages``.
   If this is an RC release, the version for JVM packages has the form
   <major>.<minor>.<patch>-RC1
"""
import argparse
import datetime
import os
import re
import subprocess
import sys
import tempfile
from test_utils import JVM_PACKAGES, PY_PACKAGE, R_PACKAGE, ROOT, cd
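# Worked example of the version strings described above (derived from the
# functions below, for reference only): with --major=2 --minor=0 --patch=0
# --rc=1 --is-rc=1 --is-dev=0, the Python package version becomes "2.0.0rc1",
# the JVM packages get "2.0.0-RC1", CMake and configure.ac get "2.0.0", and
# the R DESCRIPTION gets "2.0.0.1".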
@cd(ROOT)
def cmake(major: int, minor: int, patch: int) -> None:
version = f"{major}.{minor}.{patch}"
with open("CMakeLists.txt", "r") as fd:
cmakelist = fd.read()
pattern = r"project\(xgboost LANGUAGES .* VERSION ([0-9]+\.[0-9]+\.[0-9]+)\)"
matched = re.search(pattern, cmakelist)
assert matched, "Couldn't find the version string in CMakeLists.txt."
print(matched.start(1), matched.end(1))
cmakelist = cmakelist[: matched.start(1)] + version + cmakelist[matched.end(1) :]
with open("CMakeLists.txt", "w") as fd:
fd.write(cmakelist)
with tempfile.TemporaryDirectory() as tmpdir:
subprocess.call(["cmake", "-S", ".", "-B", tmpdir])
@cd(PY_PACKAGE)
def pypkg(
major: int, minor: int, patch: int, rc: int, is_rc: bool, is_dev: bool
) -> None:
version = f"{major}.{minor}.{patch}"
pyver = version
if is_rc:
pyver = pyver + f"rc{rc}"
if is_dev:
pyver = pyver + "-dev"
pyver_path = os.path.join("xgboost", "VERSION")
with open(pyver_path, "w") as fd:
fd.write(pyver + "\n")
pyprj_path = os.path.join("pyproject.toml")
with open(pyprj_path, "r") as fd:
pyprj = fd.read()
matched = re.search('version = "' + r"([0-9]+\.[0-9]+\.[0-9]+.*)" + '"', pyprj)
assert matched, "Couldn't find version string in pyproject.toml."
pyprj = pyprj[: matched.start(1)] + pyver + pyprj[matched.end(1) :]
with open(pyprj_path, "w") as fd:
fd.write(pyprj)
@cd(R_PACKAGE)
def rpkg(major: int, minor: int, patch: int) -> None:
version = f"{major}.{minor}.{patch}.1"
# Version: 2.0.0.1
desc_path = "DESCRIPTION"
with open(desc_path, "r") as fd:
description = fd.read()
pattern = r"Version:\ ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)"
matched = re.search(pattern, description)
assert matched, "Couldn't find version string in DESCRIPTION."
description = (
description[: matched.start(1)] + version + description[matched.end(1) :]
)
pattern = r"Date:\ ([0-9]+\-[0-9]+\-[0-9]+)"
today = datetime.date.today()
matched = re.search(pattern, description)
assert matched, "Couldn't find date string in DESCRIPTION."
description = (
description[: matched.start(1)] + str(today) + description[matched.end(1) :]
)
with open(desc_path, "w") as fd:
fd.write(description)
config_path = "configure.ac"
# AC_INIT([xgboost],[2.0.0],[],[xgboost],[])
version = f"{major}.{minor}.{patch}"
with open(config_path, "r") as fd:
config = fd.read()
pattern = (
r"AC_INIT\(\[xgboost\],\[([0-9]+\.[0-9]+\.[0-9]+)\],\[\],\[xgboost\],\[\]\)"
)
matched = re.search(pattern, config)
assert matched, "Couldn't find version string in configure.ac"
config = config[: matched.start(1)] + version + config[matched.end(1) :]
with open(config_path, "w") as fd:
fd.write(config)
subprocess.check_call(["autoreconf"])
@cd(JVM_PACKAGES)
def jvmpkgs(
major: int, minor: int, patch: int, rc: int, is_rc: bool, is_dev: bool
) -> None:
version = f"{major}.{minor}.{patch}"
if is_dev:
version += "-SNAPSHOT"
if is_rc:
version += f"-RC{rc}"
subprocess.check_call(["mvn", "versions:set", f"-DnewVersion={version}"])
@cd(ROOT)
def main(args: argparse.Namespace) -> None:
major = args.major
minor = args.minor
patch = args.patch
rc = args.rc
is_rc = args.is_rc == 1
is_dev = args.is_dev == 1
if is_rc and is_dev:
raise ValueError("It cannot be both a rc and a dev branch.")
if is_rc:
assert rc >= 1, "RC version starts from 1."
else:
assert rc == 0, "RC is not used."
cmake(major, minor, patch)
pypkg(major, minor, patch, rc, is_rc, is_dev)
rpkg(major, minor, patch)
jvmpkgs(major, minor, patch, rc, is_rc, is_dev)
print(
"""
Please examine the changes and commit. Be aware that mvn might leave backup files in the
source tree.
"""
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--major", type=int)
parser.add_argument("--minor", type=int)
parser.add_argument("--patch", type=int)
parser.add_argument("--rc", type=int, default=0)
parser.add_argument("--is-rc", type=int, choices=[0, 1])
parser.add_argument("--is-dev", type=int, choices=[0, 1])
args = parser.parse_args()
try:
main(args)
except Exception as e:
print("Error:", e, file=sys.stderr)
exit(-1)
| 5,198 | 31.49375 | 88 | py |
| xgboost | xgboost-master/tests/ci_build/lint_python.py |
import argparse
import os
import pathlib
import subprocess
import sys
from collections import Counter
from multiprocessing import Pool, cpu_count
from typing import Dict, List, Tuple
from test_utils import PY_PACKAGE, ROOT, cd, print_time, record_time
class LintersPaths:
"""The paths each linter run on."""
BLACK = (
# core
"python-package/",
# tests
"tests/python/test_config.py",
"tests/python/test_data_iterator.py",
"tests/python/test_dt.py",
"tests/python/test_predict.py",
"tests/python/test_quantile_dmatrix.py",
"tests/python/test_tree_regularization.py",
"tests/python/test_shap.py",
"tests/python-gpu/test_gpu_data_iterator.py",
"tests/python-gpu/test_gpu_prediction.py",
"tests/python-gpu/load_pickle.py",
"tests/python-gpu/test_gpu_pickling.py",
"tests/python-gpu/test_gpu_eval_metrics.py",
"tests/test_distributed/test_with_spark/",
"tests/test_distributed/test_gpu_with_spark/",
# demo
"demo/dask/",
"demo/json-model/json_parser.py",
"demo/guide-python/cat_in_the_dat.py",
"demo/guide-python/categorical.py",
"demo/guide-python/feature_weights.py",
"demo/guide-python/sklearn_parallel.py",
"demo/guide-python/spark_estimator_examples.py",
"demo/guide-python/individual_trees.py",
"demo/guide-python/quantile_regression.py",
"demo/guide-python/multioutput_regression.py",
"demo/guide-python/learning_to_rank.py",
"demo/guide-python/quantile_data_iterator.py",
"demo/guide-python/update_process.py",
"demo/aft_survival/aft_survival_viz_demo.py",
# CI
"tests/ci_build/lint_python.py",
"tests/ci_build/test_r_package.py",
"tests/ci_build/test_utils.py",
"tests/ci_build/change_version.py",
)
ISORT = (
# core
"python-package/",
# tests
"tests/test_distributed/",
"tests/python/",
"tests/python-gpu/",
"tests/ci_build/",
# demo
"demo/",
# misc
"dev/",
"doc/",
)
MYPY = (
# core
"python-package/",
# tests
"tests/python/test_dt.py",
"tests/python/test_data_iterator.py",
"tests/python-gpu/test_gpu_data_iterator.py",
"tests/python-gpu/load_pickle.py",
"tests/test_distributed/test_with_spark/test_data.py",
"tests/test_distributed/test_gpu_with_spark/test_data.py",
"tests/test_distributed/test_gpu_with_dask/test_gpu_with_dask.py",
# demo
"demo/json-model/json_parser.py",
"demo/guide-python/external_memory.py",
"demo/guide-python/cat_in_the_dat.py",
"demo/guide-python/feature_weights.py",
"demo/guide-python/individual_trees.py",
"demo/guide-python/quantile_regression.py",
"demo/guide-python/multioutput_regression.py",
"demo/guide-python/learning_to_rank.py",
"demo/aft_survival/aft_survival_viz_demo.py",
# CI
"tests/ci_build/lint_python.py",
"tests/ci_build/test_r_package.py",
"tests/ci_build/test_utils.py",
"tests/ci_build/change_version.py",
)
def check_cmd_print_failure_assistance(cmd: List[str]) -> bool:
if subprocess.run(cmd).returncode == 0:
return True
subprocess.run([cmd[0], "--version"])
msg = """
Please run the following command on your machine to address the error:
"""
msg += " ".join(cmd)
print(msg, file=sys.stderr)
return False
@record_time
@cd(PY_PACKAGE)
def run_black(rel_path: str, fix: bool) -> bool:
cmd = ["black", "-q", os.path.join(ROOT, rel_path)]
if not fix:
cmd += ["--check"]
return check_cmd_print_failure_assistance(cmd)
@record_time
@cd(PY_PACKAGE)
def run_isort(rel_path: str, fix: bool) -> bool:
# isort gets confused when trying to find the config file, so specify it explicitly.
cmd = [
"isort",
"--settings-path",
PY_PACKAGE,
f"--src={PY_PACKAGE}",
os.path.join(ROOT, rel_path),
]
if not fix:
cmd += ["--check"]
return check_cmd_print_failure_assistance(cmd)
@record_time
@cd(PY_PACKAGE)
def run_mypy(rel_path: str) -> bool:
cmd = ["mypy", os.path.join(ROOT, rel_path)]
return check_cmd_print_failure_assistance(cmd)
class PyLint:
"""A helper for running pylint, mostly copied from dmlc-core/scripts."""
MESSAGE_CATEGORIES = {
"Fatal",
"Error",
"Warning",
"Convention",
"Refactor",
"Information",
}
MESSAGE_PREFIX_TO_CATEGORY = {
category[0]: category for category in MESSAGE_CATEGORIES
}
@classmethod
@cd(PY_PACKAGE)
def get_summary(cls, path: str) -> Tuple[str, Dict[str, int], str, str, bool]:
"""Get the summary of pylint's errors, warnings, etc."""
ret = subprocess.run(["pylint", path], capture_output=True)
stdout = ret.stdout.decode("utf-8")
emap: Dict[str, int] = Counter()
for line in stdout.splitlines():
if ":" in line and (
category := cls.MESSAGE_PREFIX_TO_CATEGORY.get(
line.split(":")[-2].strip()[0]
)
):
emap[category] += 1
return path, emap, stdout, ret.stderr.decode("utf-8"), ret.returncode == 0
@staticmethod
def print_summary_map(result_map: Dict[str, Dict[str, int]]) -> int:
"""Print summary of certain result map."""
if len(result_map) == 0:
return 0
ftype = "Python"
nfail = sum(map(bool, result_map.values()))
print(
f"====={len(result_map) - nfail}/{len(result_map)} {ftype} files passed check====="
)
for fname, emap in result_map.items():
if emap:
print(
f"{fname}: {sum(emap.values())} Errors of {len(emap)} Categories map={emap}"
)
return nfail
@classmethod
def run(cls) -> bool:
"""Run pylint with parallelization on a batch of paths."""
all_errors: Dict[str, Dict[str, int]] = {}
with Pool(cpu_count()) as pool:
error_maps = pool.map(
cls.get_summary,
(os.fspath(file) for file in pathlib.Path(PY_PACKAGE).glob("**/*.py")),
)
for path, emap, out, err, succeeded in error_maps:
all_errors[path] = emap
if succeeded:
continue
print(out)
if len(err) != 0:
print(err)
nerr = cls.print_summary_map(all_errors)
return nerr == 0
@record_time
def run_pylint() -> bool:
return PyLint.run()
@record_time
def main(args: argparse.Namespace) -> None:
if args.format == 1:
black_results = [run_black(path, args.fix) for path in LintersPaths.BLACK]
if not all(black_results):
sys.exit(-1)
isort_results = [run_isort(path, args.fix) for path in LintersPaths.ISORT]
if not all(isort_results):
sys.exit(-1)
if args.type_check == 1:
mypy_results = [run_mypy(path) for path in LintersPaths.MYPY]
if not all(mypy_results):
sys.exit(-1)
if args.pylint == 1:
if not run_pylint():
sys.exit(-1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=(
"Run static checkers for XGBoost, see `python_lint.yml' "
"conda env file for a list of dependencies."
)
)
parser.add_argument("--format", type=int, choices=[0, 1], default=1)
parser.add_argument("--type-check", type=int, choices=[0, 1], default=1)
parser.add_argument("--pylint", type=int, choices=[0, 1], default=1)
parser.add_argument(
"--fix",
action="store_true",
help="Fix the formatting issues instead of emitting an error.",
)
args = parser.parse_args()
try:
main(args)
finally:
print_time()
| 8,177 | 29.401487 | 96 | py |
| xgboost | xgboost-master/tests/ci_build/rename_whl.py |
import os
import sys
from contextlib import contextmanager
@contextmanager
def cd(path):
path = os.path.normpath(path)
cwd = os.getcwd()
os.chdir(path)
print("cd " + path)
try:
yield path
finally:
os.chdir(cwd)
if len(sys.argv) != 4:
print('Usage: {} [wheel to rename] [commit id] [platform tag]'.format(sys.argv[0]))
sys.exit(1)
whl_path = sys.argv[1]
commit_id = sys.argv[2]
platform_tag = sys.argv[3]
dirname, basename = os.path.dirname(whl_path), os.path.basename(whl_path)
with cd(dirname):
tokens = basename.split('-')
assert len(tokens) == 5
version = tokens[1].split('+')[0]
keywords = {'pkg_name': tokens[0],
'version': version,
'commit_id': commit_id,
'platform_tag': platform_tag}
new_name = '{pkg_name}-{version}+{commit_id}-py3-none-{platform_tag}.whl'.format(**keywords)
print('Renaming {} to {}...'.format(basename, new_name))
if os.path.isfile(new_name):
os.remove(new_name)
os.rename(basename, new_name)
filesize = os.path.getsize(new_name) / 1024 / 1024 # MB
msg = f"Limit of wheel size set by PyPI is exceeded. {new_name}: {filesize}"
assert filesize <= 300, msg
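# A minimal sketch (not part of the original script) of the renaming performed
# above, using a made-up wheel name, commit id, and platform tag.
_tokens = "xgboost-2.0.0-py3-none-any.whl".split("-")
_renamed = "{}-{}+{}-py3-none-{}.whl".format(
    _tokens[0], _tokens[1].split("+")[0], "abc123", "manylinux2014_x86_64")
assert _renamed == "xgboost-2.0.0+abc123-py3-none-manylinux2014_x86_64.whl"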
| 1,240 | 25.978261 | 96 | py |
| xgboost | xgboost-master/tests/ci_build/test_utils.py |
"""Utilities for the CI."""
import os
from datetime import datetime, timedelta
from functools import wraps
from typing import Any, Callable, Dict, TypedDict, TypeVar, Union
class DirectoryExcursion:
def __init__(self, path: Union[os.PathLike, str]) -> None:
self.path = path
self.curdir = os.path.normpath(os.path.abspath(os.path.curdir))
def __enter__(self) -> None:
os.chdir(self.path)
def __exit__(self, *args: Any) -> None:
os.chdir(self.curdir)
R = TypeVar("R")
def cd(path: Union[os.PathLike, str]) -> Callable:
"""Decorator for changing directory temporarily."""
def chdir(func: Callable[..., R]) -> Callable[..., R]:
@wraps(func)
def inner(*args: Any, **kwargs: Any) -> R:
with DirectoryExcursion(path):
return func(*args, **kwargs)
return inner
return chdir
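# A minimal usage sketch (not part of the original module): `cd` runs the
# wrapped function with the working directory temporarily switched. os.curdir
# is used here only to avoid assuming any particular path exists.
@cd(os.curdir)
def _example_listing() -> list:
    return sorted(os.listdir(os.curdir))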
Record = TypedDict("Record", {"count": int, "total": timedelta})
timer: Dict[str, Record] = {}
def record_time(func: Callable[..., R]) -> Callable[..., R]:
"""Decorator for recording function runtime."""
global timer
@wraps(func)
def inner(*args: Any, **kwargs: Any) -> R:
if func.__name__ not in timer:
timer[func.__name__] = {"count": 0, "total": timedelta(0)}
s = datetime.now()
try:
r = func(*args, **kwargs)
finally:
e = datetime.now()
timer[func.__name__]["count"] += 1
timer[func.__name__]["total"] += e - s
return r
return inner
def print_time() -> None:
"""Print all recorded items by :py:func:`record_time`."""
global timer
for k, v in timer.items():
print(
"Name:",
k,
"Called:",
v["count"],
"Elapsed:",
f"{v['total'].seconds} secs",
)
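# A minimal usage sketch (not part of the original module): functions decorated
# with `record_time` accumulate call counts and elapsed time, which `print_time`
# then reports. `_example_work` is a made-up function for illustration.
@record_time
def _example_work() -> int:
    return sum(range(1000))

_example_work()
print_time()  # reports the call count and elapsed seconds for _example_work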
ROOT = os.path.normpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.pardir, os.path.pardir
)
)
R_PACKAGE = os.path.join(ROOT, "R-package")
JVM_PACKAGES = os.path.join(ROOT, "jvm-packages")
PY_PACKAGE = os.path.join(ROOT, "python-package")
| 2,141 | 24.807229 | 82 | py |
| xgboost | xgboost-master/tests/python/test_data_iterator.py |
from typing import Callable, Dict, List
import numpy as np
import pytest
from hypothesis import given, settings, strategies
from scipy.sparse import csr_matrix
import xgboost as xgb
from xgboost import testing as tm
from xgboost.data import SingleBatchInternalIter as SingleBatch
from xgboost.testing import IteratorForTest, make_batches, non_increasing
pytestmark = tm.timeout(30)
def test_single_batch(tree_method: str = "approx") -> None:
from sklearn.datasets import load_breast_cancer
n_rounds = 10
X, y = load_breast_cancer(return_X_y=True)
X = X.astype(np.float32)
y = y.astype(np.float32)
Xy = xgb.DMatrix(SingleBatch(data=X, label=y))
from_it = xgb.train({"tree_method": tree_method}, Xy, num_boost_round=n_rounds)
Xy = xgb.DMatrix(X, y)
from_dmat = xgb.train({"tree_method": tree_method}, Xy, num_boost_round=n_rounds)
assert from_it.get_dump() == from_dmat.get_dump()
X, y = load_breast_cancer(return_X_y=True, as_frame=True)
X = X.astype(np.float32)
Xy = xgb.DMatrix(SingleBatch(data=X, label=y))
from_pd = xgb.train({"tree_method": tree_method}, Xy, num_boost_round=n_rounds)
# Remove feature info to generate the exact same text representation.
from_pd.feature_names = None
from_pd.feature_types = None
assert from_pd.get_dump() == from_it.get_dump()
X, y = load_breast_cancer(return_X_y=True)
X = csr_matrix(X)
Xy = xgb.DMatrix(SingleBatch(data=X, label=y))
from_it = xgb.train({"tree_method": tree_method}, Xy, num_boost_round=n_rounds)
X, y = load_breast_cancer(return_X_y=True)
Xy = xgb.DMatrix(SingleBatch(data=X, label=y), missing=0.0)
from_np = xgb.train({"tree_method": tree_method}, Xy, num_boost_round=n_rounds)
assert from_np.get_dump() == from_it.get_dump()
def run_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
tree_method: str,
subsample: bool,
use_cupy: bool,
) -> None:
n_rounds = 2
# The test is more difficult to pass if the subsample rate is smaller, as the root_sum
# is accumulated in parallel. Reductions with different numbers of entries lead to
# different floating point errors.
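# A minimal illustration of the comment above (not part of the original test):
# summing the same float32 values serially versus with NumPy's pairwise
# reduction can yield slightly different results, hence the loose tolerances.
_vals = np.random.RandomState(0).random(4096).astype(np.float32)
_pairwise = float(_vals.sum())          # NumPy uses pairwise summation here
_serial = np.float32(0.0)
for _v in _vals:
    _serial += _v                       # plain left-to-right accumulation
print("pairwise vs. serial float32 sum difference:", _pairwise - float(_serial))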
subsample_rate = 0.8 if subsample else 1.0
it = IteratorForTest(
*make_batches(n_samples_per_batch, n_features, n_batches, use_cupy),
cache="cache"
)
if n_batches == 0:
with pytest.raises(ValueError, match="1 batch"):
Xy = xgb.DMatrix(it)
return
Xy = xgb.DMatrix(it)
assert Xy.num_row() == n_samples_per_batch * n_batches
assert Xy.num_col() == n_features
parameters = {
"tree_method": tree_method,
"max_depth": 2,
"subsample": subsample_rate,
"seed": 0,
}
if tree_method == "gpu_hist":
parameters["sampling_method"] = "gradient_based"
results_from_it: Dict[str, Dict[str, List[float]]] = {}
from_it = xgb.train(
parameters,
Xy,
num_boost_round=n_rounds,
evals=[(Xy, "Train")],
evals_result=results_from_it,
verbose_eval=False,
)
if not subsample:
assert non_increasing(results_from_it["Train"]["rmse"])
X, y, w = it.as_arrays()
if use_cupy:
_y = y.get()
else:
_y = y
np.testing.assert_allclose(Xy.get_label(), _y)
Xy = xgb.DMatrix(X, y, weight=w)
assert Xy.num_row() == n_samples_per_batch * n_batches
assert Xy.num_col() == n_features
results_from_arrays: Dict[str, Dict[str, List[float]]] = {}
from_arrays = xgb.train(
parameters,
Xy,
num_boost_round=n_rounds,
evals=[(Xy, "Train")],
evals_result=results_from_arrays,
verbose_eval=False,
)
arr_predt = from_arrays.predict(Xy)
if not subsample:
assert non_increasing(results_from_arrays["Train"]["rmse"])
rtol = 1e-2
# CPU sketching is more memory efficient but less consistent due to small chunks
it_predt = from_it.predict(Xy)
arr_predt = from_arrays.predict(Xy)
np.testing.assert_allclose(it_predt, arr_predt, rtol=rtol)
np.testing.assert_allclose(
results_from_it["Train"]["rmse"],
results_from_arrays["Train"]["rmse"],
rtol=rtol,
)
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 13),
strategies.booleans(),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
) -> None:
run_data_iterator(
n_samples_per_batch, n_features, n_batches, "approx", subsample, False
)
run_data_iterator(
n_samples_per_batch, n_features, n_batches, "hist", subsample, False
)
class IterForCacheTest(xgb.DataIter):
def __init__(self, x: np.ndarray, y: np.ndarray, w: np.ndarray) -> None:
self.kwargs = {"data": x, "label": y, "weight": w}
super().__init__(release_data=False)
def next(self, input_data: Callable) -> int:
if self.it == 1:
return 0
self.it += 1
input_data(**self.kwargs)
return 1
def reset(self) -> None:
self.it = 0
def test_data_cache() -> None:
n_batches = 1
n_features = 2
n_samples_per_batch = 16
data = make_batches(n_samples_per_batch, n_features, n_batches, False)
batches = [v[0] for v in data]
it = IterForCacheTest(*batches)
xgb.QuantileDMatrix(it)
assert it._input_id == id(batches[0])
| 5,556 | 29.201087 | 89 | py |
| xgboost | xgboost-master/tests/python/test_dmatrix.py |
import os
import tempfile
import numpy as np
import pytest
import scipy.sparse
from hypothesis import given, settings, strategies
from scipy.sparse import csr_matrix, rand
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing.data import np_dtypes
rng = np.random.RandomState(1)
dpath = 'demo/data/'
rng = np.random.RandomState(1994)
def set_base_margin_info(DType, DMatrixT, tm: str):
rng = np.random.default_rng()
X = DType(rng.normal(0, 1.0, size=100).astype(np.float32).reshape(50, 2))
if hasattr(X, "iloc"):
y = X.iloc[:, 0]
else:
y = X[:, 0]
base_margin = X
# no error at set
Xy = DMatrixT(X, y, base_margin=base_margin)
# Error at train, caused by check in predictor.
with pytest.raises(ValueError, match=r".*base_margin.*"):
xgb.train({"tree_method": tm}, Xy)
if not hasattr(X, "iloc"):
# column major matrix
got = DType(Xy.get_base_margin().reshape(50, 2))
assert (got == base_margin).all()
assert base_margin.T.flags.c_contiguous is False
assert base_margin.T.flags.f_contiguous is True
Xy.set_info(base_margin=base_margin.T)
got = DType(Xy.get_base_margin().reshape(2, 50))
assert (got == base_margin.T).all()
# Row vs col vec.
base_margin = y
Xy.set_base_margin(base_margin)
bm_col = Xy.get_base_margin()
Xy.set_base_margin(base_margin.reshape(1, base_margin.size))
bm_row = Xy.get_base_margin()
assert (bm_row == bm_col).all()
# type
base_margin = base_margin.astype(np.float64)
Xy.set_base_margin(base_margin)
bm_f64 = Xy.get_base_margin()
assert (bm_f64 == bm_col).all()
# too many dimensions
base_margin = X.reshape(2, 5, 2, 5)
with pytest.raises(ValueError, match=r".*base_margin.*"):
Xy.set_base_margin(base_margin)
class TestDMatrix:
def test_warn_missing(self):
from xgboost import data
with pytest.warns(UserWarning):
data._warn_unused_missing('uri', 4)
with pytest.warns(None) as record:
data._warn_unused_missing('uri', None)
data._warn_unused_missing('uri', np.nan)
assert len(record) == 0
with pytest.warns(None) as record:
x = rng.randn(10, 10)
y = rng.randn(10)
xgb.DMatrix(x, y, missing=4)
assert len(record) == 0
def test_dmatrix_numpy_init(self):
data = np.random.randn(5, 5)
dm = xgb.DMatrix(data)
assert dm.num_row() == 5
assert dm.num_col() == 5
data = np.array([[1, 2], [3, 4]])
dm = xgb.DMatrix(data)
assert dm.num_row() == 2
assert dm.num_col() == 2
# 0d array
with pytest.raises(ValueError):
xgb.DMatrix(np.array(1))
# 1d array
with pytest.raises(ValueError):
xgb.DMatrix(np.array([1, 2, 3]))
# 3d array
data = np.random.randn(5, 5, 5)
with pytest.raises(ValueError):
xgb.DMatrix(data)
# object dtype
data = np.array([['a', 'b'], ['c', 'd']])
with pytest.raises(ValueError):
xgb.DMatrix(data)
def test_csr(self):
indptr = np.array([0, 2, 3, 6])
indices = np.array([0, 2, 2, 0, 1, 2])
data = np.array([1, 2, 3, 4, 5, 6])
X = scipy.sparse.csr_matrix((data, indices, indptr), shape=(3, 3))
dtrain = xgb.DMatrix(X)
assert dtrain.num_row() == 3
assert dtrain.num_col() == 3
def test_csc(self):
row = np.array([0, 2, 2, 0, 1, 2])
col = np.array([0, 0, 1, 2, 2, 2])
data = np.array([1, 2, 3, 4, 5, 6])
X = scipy.sparse.csc_matrix((data, (row, col)), shape=(3, 3))
dtrain = xgb.DMatrix(X)
assert dtrain.num_row() == 3
assert dtrain.num_col() == 3
indptr = np.array([0, 3, 5])
data = np.array([0, 1, 2, 3, 4])
row_idx = np.array([0, 1, 2, 0, 2])
X = scipy.sparse.csc_matrix((data, row_idx, indptr), shape=(3, 2))
assert tm.predictor_equal(xgb.DMatrix(X.tocsr()), xgb.DMatrix(X))
def test_coo(self):
row = np.array([0, 2, 2, 0, 1, 2])
col = np.array([0, 0, 1, 2, 2, 2])
data = np.array([1, 2, 3, 4, 5, 6])
X = scipy.sparse.coo_matrix((data, (row, col)), shape=(3, 3))
dtrain = xgb.DMatrix(X)
assert dtrain.num_row() == 3
assert dtrain.num_col() == 3
def test_np_view(self):
# Sliced Float32 array
y = np.array([12, 34, 56], np.float32)[::2]
from_view = xgb.DMatrix(np.array([[]]), label=y).get_label()
from_array = xgb.DMatrix(np.array([[]]), label=y + 0).get_label()
assert (from_view.shape == from_array.shape)
assert (from_view == from_array).all()
# Sliced UInt array
z = np.array([12, 34, 56], np.uint32)[::2]
dmat = xgb.DMatrix(np.array([[]]))
dmat.set_uint_info('group', z)
from_view = dmat.get_uint_info('group_ptr')
dmat = xgb.DMatrix(np.array([[]]))
dmat.set_uint_info('group', z + 0)
from_array = dmat.get_uint_info('group_ptr')
assert (from_view.shape == from_array.shape)
assert (from_view == from_array).all()
def test_slice(self):
X = rng.randn(100, 100)
y = rng.randint(low=0, high=3, size=100).astype(np.float32)
d = xgb.DMatrix(X, y)
np.testing.assert_equal(d.get_label(), y)
fw = rng.uniform(size=100).astype(np.float32)
d.set_info(feature_weights=fw)
# base margin is per-class in multi-class classifier
base_margin = rng.randn(100, 3).astype(np.float32)
d.set_base_margin(base_margin)
np.testing.assert_allclose(d.get_base_margin().reshape(100, 3), base_margin)
ridxs = [1, 2, 3, 4, 5, 6]
sliced = d.slice(ridxs)
# Slicing works with label and other meta info fields
np.testing.assert_equal(sliced.get_label(), y[1:7])
np.testing.assert_equal(sliced.get_float_info('feature_weights'), fw)
np.testing.assert_equal(sliced.get_base_margin(), base_margin[1:7, :].flatten())
np.testing.assert_equal(sliced.get_base_margin(), sliced.get_float_info('base_margin'))
# Slicing a DMatrix results in a DMatrix that's equivalent to a DMatrix
# constructed from the corresponding NumPy slice.
d2 = xgb.DMatrix(X[1:7, :], y[1:7])
d2.set_base_margin(base_margin[1:7, :])
eval_res = {}
_ = xgb.train(
{'num_class': 3, 'objective': 'multi:softprob',
'eval_metric': 'mlogloss'},
d,
num_boost_round=2, evals=[(d2, 'd2'), (sliced, 'sliced')], evals_result=eval_res)
np.testing.assert_equal(eval_res['d2']['mlogloss'], eval_res['sliced']['mlogloss'])
ridxs_arr = np.array(ridxs)[1:] # handles numpy slice correctly
sliced = d.slice(ridxs_arr)
np.testing.assert_equal(sliced.get_label(), y[2:7])
def test_feature_names_slice(self):
data = np.random.randn(5, 5)
# different length
with pytest.raises(ValueError):
xgb.DMatrix(data, feature_names=list('abcdef'))
# contains duplicates
with pytest.raises(ValueError):
xgb.DMatrix(data, feature_names=['a', 'b', 'c', 'd', 'd'])
# contains symbol
with pytest.raises(ValueError):
xgb.DMatrix(data, feature_names=['a', 'b', 'c', 'd', 'e<1'])
dm = xgb.DMatrix(data)
dm.feature_names = list('abcde')
assert dm.feature_names == list('abcde')
assert dm.slice([0, 1]).num_col() == dm.num_col()
assert dm.slice([0, 1]).feature_names == dm.feature_names
dm.feature_types = 'q'
assert dm.feature_types == list('qqqqq')
dm.feature_types = list('qiqiq')
assert dm.feature_types == list('qiqiq')
with pytest.raises(ValueError):
dm.feature_types = list('abcde')
# reset
dm.feature_names = None
assert dm.feature_names is None
assert dm.feature_types is None
def test_feature_names(self):
data = np.random.randn(100, 5)
target = np.array([0, 1] * 50)
cases = [['Feature1', 'Feature2', 'Feature3', 'Feature4', 'Feature5'],
[u'要因1', u'要因2', u'要因3', u'要因4', u'要因5']]
for features in cases:
dm = xgb.DMatrix(data, label=target,
feature_names=features)
assert dm.feature_names == features
assert dm.num_row() == 100
assert dm.num_col() == 5
params = {'objective': 'multi:softprob',
'eval_metric': 'mlogloss',
'eta': 0.3,
'num_class': 3}
bst = xgb.train(params, dm, num_boost_round=10)
scores = bst.get_fscore()
assert list(sorted(k for k in scores)) == features
dummy = np.random.randn(5, 5)
dm = xgb.DMatrix(dummy, feature_names=features)
bst.predict(dm)
# a different feature name must raise an error
dm = xgb.DMatrix(dummy, feature_names=list('abcde'))
with pytest.raises(ValueError):
bst.predict(dm)
@pytest.mark.skipif(**tm.no_pandas())
def test_save_binary(self):
import pandas as pd
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, 'm.dmatrix')
data = pd.DataFrame({
"a": [0, 1],
"b": [2, 3],
"c": [4, 5]
})
m0 = xgb.DMatrix(data.loc[:, ["a", "b"]], data["c"])
assert m0.feature_names == ['a', 'b']
m0.save_binary(path)
m1 = xgb.DMatrix(path)
assert m0.feature_names == m1.feature_names
assert m0.feature_types == m1.feature_types
def test_get_info(self):
dtrain, _ = tm.load_agaricus(__file__)
dtrain.get_float_info('label')
dtrain.get_float_info('weight')
dtrain.get_float_info('base_margin')
dtrain.get_uint_info('group_ptr')
group_len = np.array([2, 3, 4])
dtrain.set_group(group_len)
np.testing.assert_equal(group_len, dtrain.get_group())
def test_qid(self):
rows = 100
cols = 10
X, y = rng.randn(rows, cols), rng.randn(rows)
qid = rng.randint(low=0, high=10, size=rows, dtype=np.uint32)
qid = np.sort(qid)
Xy = xgb.DMatrix(X, y)
Xy.set_info(qid=qid)
group_ptr = Xy.get_uint_info('group_ptr')
assert group_ptr[0] == 0
assert group_ptr[-1] == rows
def test_feature_weights(self):
kRows = 10
kCols = 50
rng = np.random.RandomState(1994)
fw = rng.uniform(size=kCols)
X = rng.randn(kRows, kCols)
m = xgb.DMatrix(X)
m.set_info(feature_weights=fw)
np.testing.assert_allclose(fw, m.get_float_info('feature_weights'))
# Handle empty
m.set_info(feature_weights=np.empty((0, )))
assert m.get_float_info('feature_weights').shape[0] == 0
fw -= 1
with pytest.raises(ValueError):
m.set_info(feature_weights=fw)
def test_sparse_dmatrix_csr(self):
nrow = 100
ncol = 1000
x = rand(nrow, ncol, density=0.0005, format='csr', random_state=rng)
assert x.indices.max() < ncol
x.data[:] = 1
dtrain = xgb.DMatrix(x, label=rng.binomial(1, 0.3, nrow))
assert (dtrain.num_row(), dtrain.num_col()) == (nrow, ncol)
watchlist = [(dtrain, 'train')]
param = {'max_depth': 3, 'objective': 'binary:logistic', 'verbosity': 0}
bst = xgb.train(param, dtrain, 5, watchlist)
bst.predict(dtrain)
i32 = csr_matrix((x.data.astype(np.int32), x.indices, x.indptr), shape=x.shape)
f32 = csr_matrix(
(i32.data.astype(np.float32), x.indices, x.indptr), shape=x.shape
)
di32 = xgb.DMatrix(i32)
df32 = xgb.DMatrix(f32)
dense = xgb.DMatrix(f32.toarray(), missing=0)
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, "f32.dmatrix")
df32.save_binary(path)
with open(path, "rb") as fd:
df32_buffer = np.array(fd.read())
path = os.path.join(tmpdir, "f32.dmatrix")
di32.save_binary(path)
with open(path, "rb") as fd:
di32_buffer = np.array(fd.read())
path = os.path.join(tmpdir, "dense.dmatrix")
dense.save_binary(path)
with open(path, "rb") as fd:
dense_buffer = np.array(fd.read())
np.testing.assert_equal(df32_buffer, di32_buffer)
np.testing.assert_equal(df32_buffer, dense_buffer)
def test_sparse_dmatrix_csc(self):
nrow = 1000
ncol = 100
x = rand(nrow, ncol, density=0.0005, format='csc', random_state=rng)
assert x.indices.max() < nrow - 1
x.data[:] = 1
dtrain = xgb.DMatrix(x, label=rng.binomial(1, 0.3, nrow))
assert (dtrain.num_row(), dtrain.num_col()) == (nrow, ncol)
watchlist = [(dtrain, 'train')]
param = {'max_depth': 3, 'objective': 'binary:logistic', 'verbosity': 0}
bst = xgb.train(param, dtrain, 5, watchlist)
bst.predict(dtrain)
def test_unknown_data(self):
class Data:
pass
with pytest.raises(TypeError):
with pytest.warns(UserWarning):
d = Data()
xgb.DMatrix(d)
from scipy import sparse
rng = np.random.RandomState(1994)
X = rng.rand(10, 10)
y = rng.rand(10)
X = sparse.dok_matrix(X)
Xy = xgb.DMatrix(X, y)
assert Xy.num_row() == 10
assert Xy.num_col() == 10
@pytest.mark.skipif(**tm.no_pandas())
def test_np_categorical(self):
n_features = 10
X, y = tm.make_categorical(10, n_features, n_categories=4, onehot=False)
X = X.values.astype(np.float32)
feature_types = ['c'] * n_features
assert isinstance(X, np.ndarray)
Xy = xgb.DMatrix(X, y, feature_types=feature_types)
np.testing.assert_equal(np.array(Xy.feature_types), np.array(feature_types))
def test_scipy_categorical(self):
from scipy import sparse
n_features = 10
X, y = tm.make_categorical(10, n_features, n_categories=4, onehot=False)
X = X.values.astype(np.float32)
feature_types = ['c'] * n_features
X[1, 3] = np.NAN
X[2, 4] = np.NAN
X = sparse.csr_matrix(X)
Xy = xgb.DMatrix(X, y, feature_types=feature_types)
np.testing.assert_equal(np.array(Xy.feature_types), np.array(feature_types))
X = sparse.csc_matrix(X)
Xy = xgb.DMatrix(X, y, feature_types=feature_types)
np.testing.assert_equal(np.array(Xy.feature_types), np.array(feature_types))
X = sparse.coo_matrix(X)
Xy = xgb.DMatrix(X, y, feature_types=feature_types)
np.testing.assert_equal(np.array(Xy.feature_types), np.array(feature_types))
def test_uri_categorical(self):
path = os.path.join(dpath, 'agaricus.txt.train')
feature_types = ["q"] * 5 + ["c"] + ["q"] * 120
Xy = xgb.DMatrix(
path + "?indexing_mode=1&format=libsvm", feature_types=feature_types
)
np.testing.assert_equal(np.array(Xy.feature_types), np.array(feature_types))
def test_base_margin(self):
set_base_margin_info(np.asarray, xgb.DMatrix, "hist")
@given(
strategies.integers(0, 1000),
strategies.integers(0, 100),
strategies.fractions(0, 1),
)
@settings(deadline=None, print_blob=True)
def test_to_csr(self, n_samples, n_features, sparsity) -> None:
if n_samples == 0 or n_features == 0 or sparsity == 1.0:
csr = scipy.sparse.csr_matrix(np.empty((0, 0)))
else:
csr = tm.make_sparse_regression(n_samples, n_features, sparsity, False)[
0
].astype(np.float32)
m = xgb.DMatrix(data=csr)
ret = m.get_data()
np.testing.assert_equal(csr.indptr, ret.indptr)
np.testing.assert_equal(csr.data, ret.data)
np.testing.assert_equal(csr.indices, ret.indices)
def test_dtypes(self) -> None:
n_samples = 128
n_features = 16
for orig, x in np_dtypes(n_samples, n_features):
m0 = xgb.DMatrix(orig)
m1 = xgb.DMatrix(x)
assert tm.predictor_equal(m0, m1)
| 16,765 | 34.748401 | 95 | py |
| xgboost | xgboost-master/tests/python/test_survival.py |
import json
import os
from typing import List, Optional, Tuple, cast
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
dpath = tm.data_dir(__file__)
@pytest.fixture(scope="module")
def toy_data() -> Tuple[xgb.DMatrix, np.ndarray, np.ndarray]:
X = np.array([1, 2, 3, 4, 5]).reshape((-1, 1))
INF = np.inf
y_lower = np.array([10, 15, -INF, 30, 100])
y_upper = np.array([INF, INF, 20, 50, INF])
dmat = xgb.DMatrix(X)
dmat.set_float_info("label_lower_bound", y_lower)
dmat.set_float_info("label_upper_bound", y_upper)
return dmat, y_lower, y_upper
def test_default_metric(toy_data: Tuple[xgb.DMatrix, np.ndarray, np.ndarray]) -> None:
Xy, y_lower, y_upper = toy_data
def run(evals: Optional[list]) -> None:
# test with or without actual evaluation.
booster = xgb.train(
{"objective": "survival:aft", "aft_loss_distribution": "extreme"},
Xy,
num_boost_round=1,
evals=evals,
)
config = json.loads(booster.save_config())
metrics = config["learner"]["metrics"]
assert len(metrics) == 1
assert metrics[0]["aft_loss_param"]["aft_loss_distribution"] == "extreme"
booster = xgb.train(
{"objective": "survival:aft"},
Xy,
num_boost_round=1,
evals=evals,
)
config = json.loads(booster.save_config())
metrics = config["learner"]["metrics"]
assert len(metrics) == 1
assert metrics[0]["aft_loss_param"]["aft_loss_distribution"] == "normal"
run([(Xy, "Train")])
run(None)
def test_aft_survival_toy_data(
toy_data: Tuple[xgb.DMatrix, np.ndarray, np.ndarray]
) -> None:
# See demo/aft_survival/aft_survival_viz_demo.py
X = np.array([1, 2, 3, 4, 5]).reshape((-1, 1))
dmat, y_lower, y_upper = toy_data
# "Accuracy" = the number of data points whose ranged label (y_lower, y_upper)
# includes the corresponding predicted label (y_pred)
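# A minimal illustration of this definition (not part of the original test):
# with two of three toy predictions inside their label ranges, the "accuracy"
# is 2/3. The arrays below are made up for illustration.
_lo = np.array([0.0, 0.0, 5.0])
_hi = np.array([1.0, 1.0, 6.0])
_pred = np.array([0.5, 2.0, 5.5])
assert np.isclose(np.mean((_pred >= _lo) & (_pred <= _hi)), 2 / 3)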
acc_rec = []
class Callback(xgb.callback.TrainingCallback):
def __init__(self):
super().__init__()
def after_iteration(
self,
model: xgb.Booster,
epoch: int,
evals_log: xgb.callback.TrainingCallback.EvalsLog,
):
y_pred = model.predict(dmat)
acc = np.sum(np.logical_and(y_pred >= y_lower, y_pred <= y_upper) / len(X))
acc_rec.append(acc)
return False
evals_result: xgb.callback.TrainingCallback.EvalsLog = {}
params = {
"max_depth": 3,
"objective": "survival:aft",
"min_child_weight": 0,
"tree_method": "exact",
}
bst = xgb.train(
params,
dmat,
15,
[(dmat, "train")],
evals_result=evals_result,
callbacks=[Callback()],
)
nloglik_rec = cast(List[float], evals_result["train"]["aft-nloglik"])
# AFT metric (negative log likelihood) improves monotonically
assert all(p >= q for p, q in zip(nloglik_rec, nloglik_rec[:1]))
# "Accuracy" improve monotonically.
# Over time, XGBoost model makes predictions that fall within given label ranges.
assert all(p <= q for p, q in zip(acc_rec, acc_rec[1:]))
assert acc_rec[-1] == 1.0
def gather_split_thresholds(tree):
if "split_condition" in tree:
return (
gather_split_thresholds(tree["children"][0])
| gather_split_thresholds(tree["children"][1])
| {tree["split_condition"]}
)
return set()
# Only 2.5, 3.5, and 4.5 are used as split thresholds.
model_json = [json.loads(e) for e in bst.get_dump(dump_format="json")]
for i, tree in enumerate(model_json):
assert gather_split_thresholds(tree).issubset({2.5, 3.5, 4.5})
def test_aft_empty_dmatrix():
X = np.array([]).reshape((0, 2))
y_lower, y_upper = np.array([]), np.array([])
dtrain = xgb.DMatrix(X)
dtrain.set_info(label_lower_bound=y_lower, label_upper_bound=y_upper)
bst = xgb.train({'objective': 'survival:aft', 'tree_method': 'hist'},
dtrain, num_boost_round=2, evals=[(dtrain, 'train')])
@pytest.mark.skipif(**tm.no_pandas())
def test_aft_survival_demo_data():
import pandas as pd
df = pd.read_csv(os.path.join(dpath, 'veterans_lung_cancer.csv'))
y_lower_bound = df['Survival_label_lower_bound']
y_upper_bound = df['Survival_label_upper_bound']
X = df.drop(['Survival_label_lower_bound', 'Survival_label_upper_bound'], axis=1)
dtrain = xgb.DMatrix(X)
dtrain.set_float_info('label_lower_bound', y_lower_bound)
dtrain.set_float_info('label_upper_bound', y_upper_bound)
base_params = {'verbosity': 0,
'objective': 'survival:aft',
'eval_metric': 'aft-nloglik',
'tree_method': 'hist',
'learning_rate': 0.05,
'aft_loss_distribution_scale': 1.20,
'max_depth': 6,
'lambda': 0.01,
'alpha': 0.02}
nloglik_rec = {}
dists = ['normal', 'logistic', 'extreme']
for dist in dists:
params = base_params
params.update({'aft_loss_distribution': dist})
evals_result = {}
bst = xgb.train(params, dtrain, num_boost_round=500, evals=[(dtrain, 'train')],
evals_result=evals_result)
nloglik_rec[dist] = evals_result['train']['aft-nloglik']
# AFT metric (negative log likelihood) improves monotonically
assert all(p >= q for p, q in zip(nloglik_rec[dist], nloglik_rec[dist][:1]))
# For this data, the normal distribution works best
assert nloglik_rec['normal'][-1] < 4.9
assert nloglik_rec['logistic'][-1] > 4.9
assert nloglik_rec['extreme'][-1] > 4.9
| 5,895 | 33.887574 | 87 | py |
| xgboost | xgboost-master/tests/python/test_interaction_constraints.py |
import numpy as np
import pytest
import xgboost
from xgboost import testing as tm
dpath = 'demo/data/'
rng = np.random.RandomState(1994)
class TestInteractionConstraints:
def run_interaction_constraints(
self, tree_method, feature_names=None, interaction_constraints='[[0, 1]]'
):
x1 = np.random.normal(loc=1.0, scale=1.0, size=1000)
x2 = np.random.normal(loc=1.0, scale=1.0, size=1000)
x3 = np.random.choice([1, 2, 3], size=1000, replace=True)
y = x1 + x2 + x3 + x1 * x2 * x3 \
+ np.random.normal(
loc=0.001, scale=1.0, size=1000) + 3 * np.sin(x1)
X = np.column_stack((x1, x2, x3))
dtrain = xgboost.DMatrix(X, label=y, feature_names=feature_names)
params = {
'max_depth': 3,
'eta': 0.1,
'nthread': 2,
'interaction_constraints': interaction_constraints,
'tree_method': tree_method
}
num_boost_round = 12
# Fit a model that only allows interaction between x1 and x2
bst = xgboost.train(
params, dtrain, num_boost_round, evals=[(dtrain, 'train')])
# Set all observations to have the same x3 value, then increment
# it by the same amount
def f(x):
tmat = xgboost.DMatrix(
np.column_stack((x1, x2, np.repeat(x, 1000))), feature_names=feature_names)
return bst.predict(tmat)
preds = [f(x) for x in [1, 2, 3]]
# Check incrementing x3 has the same effect on all observations
# since x3 is constrained to be independent of x1 and x2
# and all observations start off from the same x3 value
diff1 = preds[1] - preds[0]
assert np.all(np.abs(diff1 - diff1[0]) < 1e-4)
diff2 = preds[2] - preds[1]
assert np.all(np.abs(diff2 - diff2[0]) < 1e-4)
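# A small aside (not part of the original test): the default '[[0, 1]]' used
# above is simply a JSON-encoded list of allowed interaction groups, keyed by
# feature index, and could be built with json.dumps.
import json
assert json.dumps([[0, 1]]) == "[[0, 1]]"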
def test_exact_interaction_constraints(self):
self.run_interaction_constraints(tree_method='exact')
def test_hist_interaction_constraints(self):
self.run_interaction_constraints(tree_method='hist')
def test_approx_interaction_constraints(self):
self.run_interaction_constraints(tree_method='approx')
def test_interaction_constraints_feature_names(self):
with pytest.raises(ValueError):
constraints = [('feature_0', 'feature_1')]
self.run_interaction_constraints(tree_method='exact',
interaction_constraints=constraints)
with pytest.raises(ValueError):
constraints = [('feature_0', 'feature_3')]
feature_names = ['feature_0', 'feature_1', 'feature_2']
self.run_interaction_constraints(tree_method='exact',
feature_names=feature_names,
interaction_constraints=constraints)
constraints = [('feature_0', 'feature_1')]
feature_names = ['feature_0', 'feature_1', 'feature_2']
self.run_interaction_constraints(tree_method='exact',
feature_names=feature_names,
interaction_constraints=constraints)
constraints = [['feature_0', 'feature_1'], ['feature_2']]
feature_names = ['feature_0', 'feature_1', 'feature_2']
self.run_interaction_constraints(tree_method='exact',
feature_names=feature_names,
interaction_constraints=constraints)
@pytest.mark.skipif(**tm.no_sklearn())
def training_accuracy(self, tree_method):
"""Test accuracy, reused by GPU tests."""
from sklearn.metrics import accuracy_score
dtrain = xgboost.DMatrix(
dpath + "agaricus.txt.train?indexing_mode=1&format=libsvm"
)
dtest = xgboost.DMatrix(
dpath + "agaricus.txt.test?indexing_mode=1&format=libsvm"
)
params = {
'eta': 1,
'max_depth': 6,
'objective': 'binary:logistic',
'tree_method': tree_method,
'interaction_constraints': '[[1,2], [2,3,4]]'
}
num_boost_round = 5
params['grow_policy'] = 'lossguide'
bst = xgboost.train(params, dtrain, num_boost_round)
pred_dtest = (bst.predict(dtest) < 0.5)
assert accuracy_score(dtest.get_label(), pred_dtest) < 0.1
params['grow_policy'] = 'depthwise'
bst = xgboost.train(params, dtrain, num_boost_round)
pred_dtest = (bst.predict(dtest) < 0.5)
assert accuracy_score(dtest.get_label(), pred_dtest) < 0.1
@pytest.mark.parametrize("tree_method", ["hist", "approx", "exact"])
def test_hist_training_accuracy(self, tree_method):
self.training_accuracy(tree_method=tree_method)
| 4,864 | 39.882353 | 91 | py |