repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
cppflow
|
cppflow-master/examples/multi_input_output/create_model.py
|
#!/usr/bin/env python
"""
Example for a multiple inputs and outputs functionality.
"""
# MIT License
#
# Copyright (c) 2020 Sergio Izquierdo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# @file create_model.py
#
# @brief Creates and saves a simple multi input multi output Keras model.
#
# @section Creates and saves a simple multi input multi output Keras model.
#
# @section author_create_model Author(s)
# - Created by Sergio Izquierdo
# Imports
import tensorflow as tf
# Build a toy two-branch model: each input feeds its own hidden layer
# and produces its own sigmoid output, so the saved model exposes two
# named inputs and two named outputs.
input_1 = tf.keras.Input(shape=(5,), name='my_input_1')
input_2 = tf.keras.Input(shape=(5,), name='my_input_2')

hidden_1 = tf.keras.layers.Dense(5, activation=tf.nn.relu)(input_1)
hidden_2 = tf.keras.layers.Dense(5, activation=tf.nn.relu)(input_2)

output_1 = tf.keras.layers.Dense(
    1, activation=tf.nn.sigmoid, name='my_outputs_1')(hidden_1)
output_2 = tf.keras.layers.Dense(
    1, activation=tf.nn.sigmoid, name='my_outputs_2')(hidden_2)

model = tf.keras.Model(inputs=[input_1, input_2],
                       outputs=[output_1, output_2])
model.compile()

# Export the model to the TensorFlow SavedModel format under ./model
model.save('model', save_format='tf')
| 2,144
| 36.631579
| 79
|
py
|
cppflow
|
cppflow-master/docs/source/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Workaround for issue https://github.com/sphinx-contrib/googleanalytics/issues/2
# Note that a warning still will be issued "unsupported object from its setup() function"
# Remove this workaround when the issue has been resolved upstream
import sphinx.application
import sphinx.errors

# Alias ExtensionError back onto sphinx.application: the googleanalytics
# extension imports it from there, while newer Sphinx only defines it in
# sphinx.errors (workaround for the upstream issue linked above).
sphinx.application.ExtensionError = sphinx.errors.ExtensionError

# -- Project information -----------------------------------------------------

project = 'cppflow'
copyright = '2020, Sergio Izquierdo'
author = 'Sergio Izquierdo'

# The full version, including alpha/beta/rc tags
release = '2.0'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones. breathe bridges the Doxygen XML output into Sphinx.
extensions = [
    'breathe',
    'sphinxcontrib.googleanalytics',
]

# Map the breathe project name to the Doxygen XML output directory.
breathe_projects = { 'cppflow': '../xml' }

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory; none are used for this project.
html_static_path = []

# -- Google Analytics --------------------------------------------------------

# Tracking ID consumed by sphinxcontrib.googleanalytics.
googleanalytics_id = 'UA-110983956-1'
| 2,455
| 34.085714
| 89
|
py
|
cppflow
|
cppflow-master/include/cppflow/ops_generator/generator.py
|
#!/usr/bin/env python
"""
Generates the raw_ops.h cpp code.
"""
# MIT License
#
# Copyright (c) 2020 Sergio Izquierdo
# Copyright (c) 2020 Jiannan Liu
# Copyright (c) 2022 Alfredo Rodriguez
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##
# @file generator.py
# @author Alfredo Rodriguez
# @author Jiannan Liu
# @author Sergio Izquierdo
# @date @showdate "%B %d, %Y" 2020-09-16
import tensorflow as tf
from tensorflow.core.framework import op_def_pb2
from google.protobuf import text_format
from termcolor import colored
import re
import textwrap
# Parse the TensorFlow op registry dump (protobuf text format) into an OpList.
ops = op_def_pb2.OpList()
# Use a context manager so the file handle is closed deterministically
# instead of being leaked by a bare open(...).read().
with open('ops.pbtxt') as ops_pbtxt:
    text_format.Merge(ops_pbtxt.read(), ops)
class Attribute:
    """Class that describes the attribute.

    Wraps a single TensorFlow OpDef attribute and knows how to render it
    both as a C++ parameter declaration and as the TFE_OpSetAttr* call
    that applies it to an eager op.

    Attributes:
        attr: An attribute.
        name: The attribute's name.
        type: The attribute's type.
        islist: Whether a list attributes.
        number_attr: Number of attributes
        default: The attribute's default value.
    """

    def __init__(self, attr, number_attr_list):
        self.attr = attr
        self.name = self.attr.name
        # Function-valued attributes cannot be expressed as C++ arguments yet.
        if self.attr.type == 'func':
            raise Exception('Passing functions as arguments is '
                            'not yet supported')
        # List attributes are defined as 'list(attr)''
        self.type, self.islist = ((self.attr.type, False)
                                  if self.attr.type[:4] != 'list'
                                  else (self.attr.type[5:-1], True))
        # If this attribute names the length of an input list, it becomes
        # an 'n_attr' (derived from the list, never a user argument).
        self.number_attr = [i for n, i in number_attr_list if self.name == n]
        self.number_attr, self.type = ((self.number_attr[0].name, 'n_attr')
                                       if len(self.number_attr)
                                       else (None, self.type))
        # True when a C++ default value can be emitted for this attribute
        # (lists, shapes and tensors have no default-value syntax here).
        self.default = (bool(len(self.attr.default_value.ListFields())) and
                        not self.islist and
                        self.type not in ['shape', 'tensor'])

    def declaration(self):
        """Return the C++ parameter declaration ('type name[=default]') or ''."""
        # Basic T types attributes are not used
        if self.name == 'T': return ''
        # Number attributes are infered from others (no need for an argument)
        if self.number_attr is not None: return ''
        # Convert from TF types to C++ types
        cpptype = {
            'shape' : 'const std::vector<int64_t>&',
            'int'   : 'int64_t',
            'float' : 'float',
            'string': 'const std::string&',
            'type'  : 'datatype',  # Refers to cppflow::datatype
            'bool'  : 'bool',
            'tensor': 'const tensor&'
        }[self.type]
        # Warp list attributes in a C++ vector
        if self.islist:
            cpptype = cpptype.replace('&', '')  # Not inner reference types
            cpptype = ('const std::vector<{}>&'
                       .format(cpptype.replace('const', '')))
        # Get the default value for the attribute
        # Not yet supported for lists
        # Not supported for tensors or shape
        if (self.default and not self.islist and
                self.type not in ['shape', 'tensor']):
            cppdefault = '=' + {
                'int'    : str(self.attr.default_value.i),
                'bool'   : str(self.attr.default_value.b).lower(),
                # default_value.s is bytes; str() gives "b'...'", strip b''.
                'string' : '"' + str(self.attr.default_value.s)[2:-1] + '"',
                'float'  : ('{:.4e}'.format(self.attr.default_value.f)
                            .replace('inf',
                                     'std::numeric_limits<float>::infinity()')),
                'type'   : ('static_cast<datatype>({})'
                            .format(self.attr.default_value.type))
            }[self.type]
        else:
            cppdefault = ''
        # datatype name=defaultval
        return (cpptype + ' ' + self.name.replace('template', 'template_arg') +
                cppdefault)

    def code(self):
        """Return the C++ statements that set this attribute on the TFE op."""
        # Basic T types attributes are not used
        if self.name == 'T': return ''
        if self.islist:
            # {0} is the (sanitized) C++ argument name, {orig:} the TF name.
            return textwrap.dedent({
                'string' : '''
                    std::vector<std::size_t> {0}_sizes; {0}_sizes.reserve({0}.size());
                    std::transform({0}.begin(), {0}.end(), std::back_inserter({0}_sizes), [](const auto& s) {{ return s.size();}});
                    TFE_OpSetAttrStringList(op.get(), "{orig:}", reinterpret_cast<const void *const *>({0}.data()), {0}_sizes.data(), static_cast<int>({0}.size()));
                ''',
                'int'    : 'TFE_OpSetAttrIntList(op.get(), "{orig:}", {0}.data(), static_cast<int>({0}.size()));',
                'float'  : 'TFE_OpSetAttrFloatList(op.get(), "{orig:}", {0}.data(), static_cast<int>({0}.size()));',
                'bool'   : 'TFE_OpSetAttrBoolList(op.get(), "{orig:}", std::vector<unsigned char>({0}.begin(), {0}.end()).data(), {0}.size());',
                'type'   : 'TFE_OpSetAttrTypeList(op.get(), "{orig:}", reinterpret_cast<const enum TF_DataType *>({0}.data()), static_cast<int>({0}.size()));',
                'shape'  : '''
                    std::vector<const int64_t*> {0}_values; {0}_values.reserve({0}.size());
                    std::vector<int> {0}_ndims; {0}_ndims.reserve({0}.size());
                    std::transform({0}.begin(), {0}.end(), std::back_inserter({0}_values), [](const auto& v) {{ return v.data();}});
                    std::transform({0}.begin(), {0}.end(), std::back_inserter({0}_ndims), [](const auto& v) {{ return static_cast<int>(v.size());}});
                    TFE_OpSetAttrShapeList(op.get(), "{orig:}", {0}_values.data(), {0}_ndims.data(), static_cast<int>({0}.size()), context::get_status());
                    status_check(context::get_status());
                ''',
            }[self.type].format(self.name.replace('template', 'template_arg'),
                                orig=self.name)).replace('\n', '\n    ')
        else:
            return (textwrap.dedent({
                'shape' : '''
                    TFE_OpSetAttrShape(op.get(), "{orig:}", {0}.data(), static_cast<int>({0}.size()), context::get_status());
                    status_check(context::get_status());
                ''',
                'int'   : 'TFE_OpSetAttrInt(op.get(), "{orig:}", {0});',
                'float' : 'TFE_OpSetAttrFloat(op.get(), "{orig:}", {0});',
                'string': 'TFE_OpSetAttrString(op.get(), "{orig:}", (void*) {0}.c_str(), {0}.size());',
                'type'  : 'TFE_OpSetAttrType(op.get(), "{orig:}", {0});',
                'bool'  : 'TFE_OpSetAttrBool(op.get(), "{orig:}", (unsigned char){0});',
                'tensor': '''
                    TFE_OpSetAttrTensor(op.get(), "{orig:}", {0}.get_tensor().get(), context::get_status());
                    status_check(context::get_status());
                ''',
                # Length attributes are derived from the input list's size.
                'n_attr': 'TFE_OpSetAttrInt(op.get(), "{orig:}", {n_attr:}.size());'
            }[self.type].format(self.name.replace('template', 'template_arg'),
                                orig=self.name, n_attr=self.number_attr))
                    .replace('\n', '\n    '))
class Operation:
    """Class that describes the operation.

    Renders one TensorFlow OpDef as a complete C++ wrapper function for
    raw_ops.h (signature, input wiring, attribute setting, execution).

    Attributes:
        op: An operation.
        inputs: The operation's inputs.
        attr_list: The attribute's list.
    """

    def __init__(self, op):
        self.op = op
        # More than one output? Only single-output ops are supported.
        if len(self.op.output_arg) != 1:
            raise Exception('More than one or no output not yet supported')
        self.inputs = [inp for inp in op.input_arg]
        # Number attributes define the length of an input list
        number_attr = [
            (i.number_attr, i) for i in self.inputs if len(i.number_attr) > 0
        ]
        # Attributes, sorted so that defaulted parameters come last
        # (C++ requires defaulted parameters at the end of the signature).
        self.attr_list = sorted([
            Attribute(a, number_attr) for a in self.op.attr
        ], key=lambda a: a.default)

    def code(self):
        """Return the full C++ function definition for this operation."""
        # C++ function body template; doubled braces are literal braces.
        template = textwrap.dedent('''
        {}
        inline {} {}({}{}) {{

            // Define Op
            std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op(TFE_NewOp(context::get_context(), "{}", context::get_status()), &TFE_DeleteOp);
            status_check(context::get_status());

            // Required input arguments
            {}

            // Attributes
            {}

            // Execute Op
            int num_outputs_op = 1;
            TFE_TensorHandle* res[1] = {{nullptr}};
            TFE_Execute(op.get(), res, &num_outputs_op, context::get_status());
            status_check(context::get_status());
            return tensor(res[0]);
        }}
        ''')

        # Add single input template
        add_inputs = textwrap.dedent('''
            TFE_OpAddInput(op.get(), {}.tfe_handle.get(), context::get_status());
            status_check(context::get_status());
        ''').replace('\n', '\n    ')

        # Template for an input that is a list of tensors
        add_inputs_list = textwrap.dedent('''
            std::vector<TFE_TensorHandle*> {0}_handles; {0}_handles.reserve({0}.size());
            std::transform({0}.begin(), {0}.end(), std::back_inserter({0}_handles), [](const auto& t) {{ return t.tfe_handle.get();}});
            TFE_OpAddInputList(op.get(), {0}_handles.data(), static_cast<int>({0}.size()), context::get_status());
            status_check(context::get_status());
        ''').replace('\n', '\n    ')

        # Return type of the function
        out = 'tensor' if len(self.op.output_arg) else 'void'

        # snake_case name of the operation; 'const' is a C++ keyword,
        # so the Const op becomes const_tensor.
        snk = (re.sub(r'(?<!^)(?=[A-Z])', '_', self.op.name).lower()
               .replace('const', 'const_tensor'))

        # Required input arguments: list inputs become vectors of tensors;
        # 'tensor' is renamed to avoid clashing with the tensor type.
        inp = ', '.join([
            'const std::vector<tensor>&{}'.format(n.name)
            if len(n.number_attr) or len(n.type_list_attr)
            else 'const tensor& {}'.format(n.name.replace('tensor',
                                                          'input_tensor'))
            for i, n in enumerate(self.inputs)
        ])

        # Declaration of attributes
        atr = ', '.join(a.declaration() for a in self.attr_list
                        if len(a.declaration()))
        atr = (', ' + atr) if inp != '' and atr != '' else atr

        # Operation original name
        opn = self.op.name

        # Code for input arguments
        inp_code = '\n    '.join(add_inputs_list.format(n.name)
                                 if len(n.number_attr) or len(n.type_list_attr)
                                 else add_inputs.format(n.name.replace('tensor', 'input_tensor'))
                                 for n in self.inputs)

        # Code for attributes
        atr_code = '\n    '.join(a.code() for a in self.attr_list
                                 if len(a.code()))

        return template.format('', out, snk, inp, atr, opn, inp_code, atr_code)
# Skeleton of the generated raw_ops.h file; the single {} placeholder
# receives the concatenated C++ code of all generated operations.
ops_file = textwrap.dedent('''
// MIT License
//
// Copyright (c) 2020 Sergio Izquierdo
// Copyright (c) 2020 Jiannan Liu
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

/**
 * @file raw_ops.h
 * @brief TensorFlow raw_ops mappings
 * THIS FILE IS AUTOGENERATED - TO UPDATE USE "generator.py"
 * @author Jiannan Liu
 * @author Sergio Izquierdo
 */

#ifndef INCLUDE_CPPFLOW_RAW_OPS_H_
#define INCLUDE_CPPFLOW_RAW_OPS_H_

// C headers
#include <tensorflow/c/eager/c_api.h>
#include <tensorflow/c/tf_datatype.h>
#include <tensorflow/c/tf_tensor.h>

// C++ headers
#include <cstdint>
#include <vector>
#include <limits>
#include <algorithm>

// CppFlow headers
#include "cppflow/tensor.h"
#include "cppflow/datatype.h"

namespace cppflow {{

{}

}}  // namespace cppflow

#endif  // INCLUDE_CPPFLOW_RAW_OPS_H_
''')

ops_code = ''
num_ops = 0

# All TF C API operations correspond with tf.raw_ops
for op_name in sorted(dir(tf.raw_ops)):
    # Skip private/internal names
    if not op_name.startswith('_'):
        num_ops += 1
        #if num_ops == 51:
        #    break
        try:
            # Grab operation definition from the parsed registry
            op = [op for op in ops.op if op.name == op_name]
            if len(op) == 0: raise Exception('Operation not found')
            op = Operation(op[0])
            ops_code += op.code()
            # Everything was ok!
            print('{:<50} [{}]'.format(op_name, colored(' Ok ', 'green')))
        except Exception as err:
            # Unsupported ops (multi-output, func attrs, ...) are reported
            # and skipped rather than aborting the whole generation run.
            print('{:<50} [{}]'.format(op_name, colored('Failed', 'red')))
            print('   ', err)

# Write the generated header next to the generator package
with open('../raw_ops.h', 'w') as f:
    f.write(ops_file.format(ops_code))
| 14,554
| 36.707254
| 172
|
py
|
BOExplain
|
BOExplain-main/docs/source/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
# Make the package importable for autodoc: add the repository root
# (two levels up from docs/source) to sys.path.
sys.path.insert(0, os.path.abspath("../../"))

# -- Project information -----------------------------------------------------

project = 'BOExplain'
copyright = '2021, Brandon Lockhart'
author = 'Brandon Lockhart'

# The full version, including alpha/beta/rc tags
release = '0.1.0'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones. autodoc pulls docstrings from the package itself.
extensions = [ 'sphinx.ext.autodoc'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# -- Options for HTML output -------------------------------------------------

# Extra variables exposed to the HTML templates.
html_context = {
    'AUTHOR': author,
    'DESCRIPTION': 'BOExplain, documentation site.',
    'SITEMAP_BASE_URL': 'https://sfu-db.github.io/BOExplain/',  # Trailing slash is needed
    'VERSION': release,
}

# The theme to use for HTML and HTML Help pages.
html_theme = 'alabaster'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 2,173
| 32.96875
| 89
|
py
|
BOExplain
|
BOExplain-main/boexplain/__init__.py
|
# Re-export the optimization entry points from the implementation module
# and declare them as the package's public API.
from .files import fmin, fmax

__all__ = ["fmin", "fmax"]
| 57
| 18.333333
| 29
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/setup.py
|
import os
import sys
import pkg_resources
from setuptools import find_packages
from setuptools import setup
from typing import Dict
from typing import List
from typing import Optional
def get_version() -> str:
    """Return the package version parsed from ``optuna/version.py``.

    Scans the file for the ``__version__ = "..."`` assignment and strips
    the surrounding quotes from its value.

    Returns:
        The version string, e.g. ``"1.3.0"``.

    Raises:
        RuntimeError: If no ``__version__`` assignment is found.
    """
    version_filepath = os.path.join(os.path.dirname(__file__), "optuna", "version.py")
    with open(version_filepath) as f:
        for line in f:
            if line.startswith("__version__"):
                # Last whitespace-separated token is '"x.y.z"'; drop quotes.
                return line.strip().split()[-1][1:-1]
    # A bare `assert False` here would be stripped under `python -O`,
    # silently returning None; fail loudly instead.
    raise RuntimeError("__version__ not found in {}".format(version_filepath))
def get_long_description() -> str:
    """Read and return the contents of the README.md next to this file."""
    readme_path = os.path.join(os.path.dirname(__file__), "README.md")
    with open(readme_path) as readme:
        return readme.read()
def get_install_requires() -> List[str]:
    """Return the mandatory runtime dependency specifiers."""
    runtime_deps = (
        "alembic",
        "cliff",
        "cmaes>=0.5.0",
        "colorlog",
        "joblib",
        "numpy",
        "scipy!=1.4.0",
        "sqlalchemy>=1.1.0",
        "tqdm",
    )
    return list(runtime_deps)
def get_tests_require() -> List[str]:
    """Return the dependencies needed to run the test suite."""
    extras = get_extras_require()
    return extras["testing"]
def get_extras_require() -> Dict[str, List[str]]:
    """Return the optional dependency groups ("extras") keyed by name.

    Several entries depend on the platform and interpreter: torch wheels
    differ on macOS, and a few frameworks are only installable on
    Python < 3.8.
    """
    on_macos = sys.platform == "darwin"
    py = sys.version_info[:2]

    # macOS wheels have no '+cpu' local version suffix.
    torch_pkgs = [
        "torch==1.4.0" if on_macos else "torch==1.4.0+cpu",
        "torchvision==0.5.0" if on_macos else "torchvision==0.5.0+cpu",
    ]
    # Frameworks that only support 3.6 <= Python < 3.8.
    py36_37_only = (
        ["allennlp<1", "fastai<2", "pytorch-lightning>=0.7.1"]
        if (3, 5) < py < (3, 8)
        else []
    )
    # Newer `llvmlite` is not distributed with wheels for Python 3.5.
    llvmlite_pin = ["llvmlite<=0.31.0"] if (3, 5) == py else []

    example = [
        "catboost",
        "chainer",
        "lightgbm",
        "mlflow",
        "mpi4py",
        "mxnet",
        "nbval",
        "pytorch-ignite",
        "scikit-image",
        "scikit-learn",
        "thop",
        *torch_pkgs,
        "xgboost",
    ] + py36_37_only + llvmlite_pin + (
        ["dask[dataframe]", "dask-ml", "keras", "tensorflow>=2.0.0", "tensorflow-datasets"]
        if py < (3, 8)
        else []
    )

    testing = [
        # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
        # https://github.com/optuna/optuna/issues/1000.
        "bokeh<2.0.0",
        "chainer>=5.0.0",
        "cma",
        "fakeredis",
        "fanova",
        "lightgbm",
        "mlflow",
        "mpi4py",
        "mxnet",
        "pandas",
        "plotly>=4.0.0",
        "pytest",
        "pytorch-ignite",
        "scikit-learn>=0.19.0,<0.23.0",
        "scikit-optimize",
        *torch_pkgs,
        "xgboost",
    ] + py36_37_only + (
        ["keras", "tensorflow", "tensorflow-datasets"] if py < (3, 8) else []
    )

    return {
        "checking": ["black", "hacking", "mypy"],
        "codecov": ["codecov", "pytest-cov"],
        "doctest": [
            "cma",
            "pandas",
            "plotly>=4.0.0",
            "scikit-learn>=0.19.0,<0.23.0",
            "scikit-optimize",
            "mlflow",
        ],
        "document": ["sphinx", "sphinx_rtd_theme"],
        "example": example,
        "experimental": ["redis"],
        "testing": testing,
    }
def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:
    """Return the first installed distribution found among ``pkgs``, or None."""
    for candidate in pkgs:
        try:
            return pkg_resources.get_distribution(candidate)
        except pkg_resources.DistributionNotFound:
            continue
    return None
# Abort installation if the pre-rename PFNOpt package is still present,
# since both packages would otherwise shadow each other.
pfnopt_pkg = find_any_distribution(["pfnopt"])
if pfnopt_pkg is not None:
    msg = (
        "We detected that PFNOpt is installed in your environment.\n"
        "PFNOpt has been renamed Optuna. Please uninstall the old\n"
        "PFNOpt in advance (e.g. by executing `$ pip uninstall pfnopt`)."
    )
    print(msg)
    exit(1)

setup(
    name="optuna",
    version=get_version(),
    description="A hyperparameter optimization framework",
    long_description=get_long_description(),
    long_description_content_type="text/markdown",
    author="Takuya Akiba",
    author_email="akiba@preferred.jp",
    url="https://optuna.org/",
    packages=find_packages(),
    # Ship the alembic migration scripts used by the RDB storage backend.
    package_data={
        "optuna": [
            "storages/rdb/alembic.ini",
            "storages/rdb/alembic/*.*",
            "storages/rdb/alembic/versions/*.*",
        ]
    },
    install_requires=get_install_requires(),
    tests_require=get_tests_require(),
    extras_require=get_extras_require(),
    # CLI entry point plus the cliff sub-commands behind `optuna ...`.
    entry_points={
        "console_scripts": ["optuna = optuna.cli:main"],
        "optuna.command": [
            "create-study = optuna.cli:_CreateStudy",
            "delete-study = optuna.cli:_DeleteStudy",
            "study set-user-attr = optuna.cli:_StudySetUserAttribute",
            "studies = optuna.cli:_Studies",
            "dashboard = optuna.cli:_Dashboard",
            "study optimize = optuna.cli:_StudyOptimize",
            "storage upgrade = optuna.cli:_StorageUpgrade",
        ],
    },
)
| 5,283
| 28.032967
| 99
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/__init__.py
| 0
| 0
| 0
|
py
|
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/distributions.py
|
import abc
import decimal
import json
import warnings
# from optuna import logging
from . import logging
class BaseDistribution(object, metaclass=abc.ABCMeta):
    """Base class for distributions.

    Note that distribution classes are not supposed to be called by library users.
    They are used by :class:`~optuna.trial.Trial` and :class:`~optuna.samplers` internally.
    """

    def to_external_repr(self, param_value_in_internal_repr):
        # type: (float) -> Any
        """Convert internal representation of a parameter value into external representation.

        Args:
            param_value_in_internal_repr:
                Optuna's internal representation of a parameter value.

        Returns:
            Optuna's external representation of a parameter value.
        """
        return param_value_in_internal_repr

    def to_internal_repr(self, param_value_in_external_repr):
        # type: (Any) -> float
        """Convert external representation of a parameter value into internal representation.

        Args:
            param_value_in_external_repr:
                Optuna's external representation of a parameter value.

        Returns:
            Optuna's internal representation of a parameter value.
        """
        return param_value_in_external_repr

    @abc.abstractmethod
    def single(self):
        # type: () -> bool
        """Test whether the range of this distribution contains just a single value.

        When this method returns :obj:`True`, :mod:`~optuna.samplers` always sample
        the same value from the distribution.

        Returns:
            :obj:`True` if the range of this distribution contains just a single value,
            otherwise :obj:`False`.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def _contains(self, param_value_in_internal_repr):
        # type: (float) -> bool
        """Test if a parameter value is contained in the range of this distribution.

        Args:
            param_value_in_internal_repr:
                Optuna's internal representation of a parameter value.

        Returns:
            :obj:`True` if the parameter value is contained in the range of this distribution,
            otherwise :obj:`False`.
        """
        raise NotImplementedError

    def _asdict(self):
        # type: () -> Dict
        return self.__dict__

    def __eq__(self, other):
        # type: (Any) -> bool
        if not isinstance(other, BaseDistribution):
            return NotImplemented
        # PEP 8 (E714): use `is not` rather than `not ... is`. Exact type
        # equality is intentional: distributions of different subclasses
        # never compare equal even with identical attributes.
        if type(self) is not type(other):
            return False
        return self.__dict__ == other.__dict__

    def __hash__(self):
        # type: () -> int
        # Must stay consistent with __eq__: equal attribute dicts hash alike.
        return hash((self.__class__,) + tuple(sorted(self.__dict__.items())))

    def __repr__(self):
        # type: () -> str
        kwargs = ", ".join("{}={}".format(k, v) for k, v in sorted(self.__dict__.items()))
        return "{}({})".format(self.__class__.__name__, kwargs)
class UniformDistribution(BaseDistribution):
    """A uniform distribution in the linear domain.

    This object is instantiated by :func:`~optuna.trial.Trial.suggest_uniform`, and passed to
    :mod:`~optuna.samplers` in general.

    Attributes:
        low:
            Lower endpoint of the range of the distribution. ``low`` is included in the range.
        high:
            Upper endpoint of the range of the distribution. ``high`` is excluded from the range.
    """

    def __init__(self, low, high):
        # type: (float, float) -> None

        if low > high:
            raise ValueError(
                "The `low` value must be smaller than or equal to the `high` value "
                "(low={}, high={}).".format(low, high)
            )

        self.low = low
        self.high = high

    def single(self):
        # type: () -> bool

        return self.low == self.high

    def _contains(self, param_value_in_internal_repr):
        # type: (float) -> bool

        v = param_value_in_internal_repr
        # A degenerate range only contains its single endpoint.
        if self.single():
            return v == self.low
        return self.low <= v < self.high
class LogUniformDistribution(BaseDistribution):
    """A uniform distribution in the log domain.

    This object is instantiated by :func:`~optuna.trial.Trial.suggest_loguniform`, and passed to
    :mod:`~optuna.samplers` in general.

    Attributes:
        low:
            Lower endpoint of the range of the distribution. ``low`` is included in the range.
        high:
            Upper endpoint of the range of the distribution. ``high`` is excluded from the range.
    """

    def __init__(self, low, high):
        # type: (float, float) -> None

        if low > high:
            raise ValueError(
                "The `low` value must be smaller than or equal to the `high` value "
                "(low={}, high={}).".format(low, high)
            )
        # The log transform requires a strictly positive domain.
        if low <= 0.0:
            raise ValueError(
                "The `low` value must be larger than 0 for a log distribution "
                "(low={}, high={}).".format(low, high)
            )

        self.low = low
        self.high = high

    def single(self):
        # type: () -> bool

        return self.low == self.high

    def _contains(self, param_value_in_internal_repr):
        # type: (float) -> bool

        v = param_value_in_internal_repr
        if self.single():
            return v == self.low
        return self.low <= v < self.high
class DiscreteUniformDistribution(BaseDistribution):
    """A discretized uniform distribution in the linear domain.

    This object is instantiated by :func:`~optuna.trial.Trial.suggest_discrete_uniform`, and passed
    to :mod:`~optuna.samplers` in general.

    Attributes:
        low:
            Lower endpoint of the range of the distribution. ``low`` is included in the range.
        high:
            Upper endpoint of the range of the distribution. ``high`` is included in the range.
        q:
            A discretization step.
    """

    def __init__(self, low, high, q):
        # type: (float, float, float) -> None

        if low > high:
            raise ValueError(
                "The `low` value must be smaller than or equal to the `high` value "
                "(low={}, high={}, q={}).".format(low, high, q)
            )

        self.low = low
        self.high = high
        self.q = q

    def single(self):
        # type: () -> bool

        if self.low == self.high:
            return True
        # Compare in decimal to avoid binary floating-point rounding noise:
        # if the whole range is narrower than one step, only `low` fits.
        span = decimal.Decimal(str(self.high)) - decimal.Decimal(str(self.low))
        return span < decimal.Decimal(str(self.q))

    def _contains(self, param_value_in_internal_repr):
        # type: (float) -> bool

        return self.low <= param_value_in_internal_repr <= self.high
class IntUniformDistribution(BaseDistribution):
    """A uniform distribution on integers.

    This object is instantiated by :func:`~optuna.trial.Trial.suggest_int`, and passed to
    :mod:`~optuna.samplers` in general.

    Attributes:
        low:
            Lower endpoint of the range of the distribution. ``low`` is included in the range.
        high:
            Upper endpoint of the range of the distribution. ``high`` is included in the range.
        step:
            A step for spacing between values.
    """

    def __init__(self, low, high, step=1):
        # type: (int, int, int) -> None

        if low > high:
            raise ValueError(
                "The `low` value must be smaller than or equal to the `high` value "
                "(low={}, high={}).".format(low, high)
            )
        if step <= 0:
            raise ValueError(
                "The `step` value must be non-zero positive value, but step={}.".format(step)
            )

        self.low = low
        self.high = high
        self.step = step

    def to_external_repr(self, param_value_in_internal_repr):
        # type: (float) -> int

        return int(param_value_in_internal_repr)

    def to_internal_repr(self, param_value_in_external_repr):
        # type: (int) -> float

        return float(param_value_in_external_repr)

    def single(self):
        # type: () -> bool

        # Degenerate either when the endpoints coincide or when the range
        # is narrower than one step (only `low` is reachable).
        return self.low == self.high or (self.high - self.low) < self.step

    def _contains(self, param_value_in_internal_repr):
        # type: (float) -> bool

        return self.low <= param_value_in_internal_repr <= self.high
class IntLogUniformDistribution(BaseDistribution):
    """A uniform distribution on integers in the log domain.

    This object is instantiated by :func:`~optuna.trial.Trial.suggest_int`, and passed to
    :mod:`~optuna.samplers` in general.

    Attributes:
        low:
            Lower endpoint of the range of the distribution. ``low`` is included in the range.
        high:
            Upper endpoint of the range of the distribution. ``high`` is included in the range.
        step:
            A step for spacing between values.
    """

    def __init__(self, low, high, step=1):
        # type: (int, int, int) -> None

        if low > high:
            raise ValueError(
                "The `low` value must be smaller than or equal to the `high` value "
                "(low={}, high={}).".format(low, high)
            )
        if step <= 0:
            raise ValueError(
                "The `step` value must be non-zero positive value, but step={}.".format(step)
            )
        # The log transform requires a strictly positive domain.
        if low <= 0.0:
            raise ValueError(
                "The `low` value must be larger than 0 for a log distribution "
                "(low={}, high={}).".format(low, high)
            )

        self.low = low
        self.high = high
        self.step = step

    def to_external_repr(self, param_value_in_internal_repr):
        # type: (float) -> int

        return int(param_value_in_internal_repr)

    def to_internal_repr(self, param_value_in_external_repr):
        # type: (int) -> float

        return float(param_value_in_external_repr)

    def single(self):
        # type: () -> bool

        # Degenerate either when the endpoints coincide or when the range
        # is narrower than one step (only `low` is reachable).
        return self.low == self.high or (self.high - self.low) < self.step

    def _contains(self, param_value_in_internal_repr):
        # type: (float) -> bool

        return self.low <= param_value_in_internal_repr <= self.high
class CategoricalDistribution(BaseDistribution):
    """A categorical distribution.

    This object is instantiated by :func:`~optuna.trial.Trial.suggest_categorical`, and
    passed to :mod:`~optuna.samplers` in general.

    Args:
        choices:
            Parameter value candidates.

    .. note::
        Not all types are guaranteed to be compatible with all storages. It is recommended to
        restrict the types of the choices to :obj:`None`, :class:`bool`, :class:`int`,
        :class:`float` and :class:`str`.

    Attributes:
        choices:
            Parameter value candidates.
    """

    def __init__(self, choices):
        # type: (Sequence[CategoricalChoiceType]) -> None

        if len(choices) == 0:
            raise ValueError("The `choices` must contains one or more elements.")
        storable_types = (bool, int, float, str)
        for choice in choices:
            if choice is None or isinstance(choice, storable_types):
                continue
            # Warn (twice: stdlib warning + library logger) but still accept
            # the choice, matching the permissive historical behavior.
            message = (
                "Choices for a categorical distribution should be a tuple of None, bool, "
                "int, float and str for persistent storage but contains {} which is of type "
                "{}.".format(choice, type(choice).__name__)
            )
            warnings.warn(message)
            logger = logging._get_library_root_logger()
            logger.warning(message)

        self.choices = choices

    def to_external_repr(self, param_value_in_internal_repr):
        # type: (float) -> CategoricalChoiceType

        # Internally a categorical value is the float index into `choices`.
        return self.choices[int(param_value_in_internal_repr)]

    def to_internal_repr(self, param_value_in_external_repr):
        # type: (CategoricalChoiceType) -> float

        try:
            return self.choices.index(param_value_in_external_repr)
        except ValueError as e:
            raise ValueError(
                "'{}' not in {}.".format(param_value_in_external_repr, self.choices)
            ) from e

    def single(self):
        # type: () -> bool

        return len(self.choices) == 1

    def _contains(self, param_value_in_internal_repr):
        # type: (float) -> bool

        return 0 <= int(param_value_in_internal_repr) < len(self.choices)
# All concrete distribution classes; scanned by `json_to_distribution` when
# deserializing a distribution from its JSON representation.
DISTRIBUTION_CLASSES = (
    UniformDistribution,
    LogUniformDistribution,
    DiscreteUniformDistribution,
    IntUniformDistribution,
    IntLogUniformDistribution,
    CategoricalDistribution,
)
def json_to_distribution(json_str):
    # type: (str) -> BaseDistribution
    """Deserialize a distribution in JSON format.

    Args:
        json_str: A JSON-serialized distribution.

    Returns:
        A deserialized distribution.

    Raises:
        ValueError: If the serialized class name is unknown.
    """

    payload = json.loads(json_str)
    name = payload["name"]
    attributes = payload["attributes"]

    # Categorical choices are serialized as a JSON array; restore the tuple.
    if name == CategoricalDistribution.__name__:
        attributes["choices"] = tuple(attributes["choices"])

    for cls in DISTRIBUTION_CLASSES:
        if name == cls.__name__:
            return cls(**attributes)

    raise ValueError("Unknown distribution class: {}".format(name))
def distribution_to_json(dist):
    # type: (BaseDistribution) -> str
    """Serialize a distribution to JSON format.

    Args:
        dist: A distribution to be serialized.

    Returns:
        A JSON string of a given distribution.
    """

    # The class name identifies the distribution kind; `_asdict` supplies its
    # constructor arguments (see `json_to_distribution` for the inverse).
    payload = {"name": type(dist).__name__, "attributes": dist._asdict()}
    return json.dumps(payload)
def check_distribution_compatibility(dist_old, dist_new):
    # type: (BaseDistribution, BaseDistribution) -> None
    """A function to check compatibility of two distributions.

    Note that this method is not supposed to be called by library users.

    Args:
        dist_old: A distribution previously recorded in storage.
        dist_new: A distribution newly added to storage.

    Raises:
        ValueError: If the distributions are of different kinds, or if both are
            categorical with differing choice sets.
    """

    if dist_old.__class__ != dist_new.__class__:
        raise ValueError("Cannot set different distribution kind to the same parameter name.")

    # Only categorical distributions carry a value space that must not change
    # between recordings.
    if isinstance(dist_old, CategoricalDistribution) and isinstance(
        dist_new, CategoricalDistribution
    ):
        if dist_old.choices != dist_new.choices:
            raise ValueError(
                CategoricalDistribution.__name__ + " does not support dynamic value space."
            )
| 14,890
| 29.26626
| 99
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/progress_bar.py
|
import logging
from typing import Any
from typing import Optional
from tqdm.auto import tqdm
# from optuna import logging as optuna_logging
from . import logging as optuna_logging
# Module-global handle to the tqdm logging handler installed by
# `_ProgressBar._init_valid`; kept so `_ProgressBar.close` can remove it again.
_tqdm_handler = None  # type: Optional[_TqdmLoggingHandler]
# Reference: https://gist.github.com/hvy/8b80c2cedf02b15c24f85d1fa17ebe02
class _TqdmLoggingHandler(logging.StreamHandler):
def emit(self, record: Any) -> None:
try:
msg = self.format(record)
tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
class _ProgressBar(object):
    """Progress Bar implementation for `Study.optimize` on the top of `tqdm`.
    Args:
        is_valid:
            Whether to show progress bars in `Study.optimize`.
        n_trials:
            The number of trials.
        timeout:
            Stop study after the given number of second(s).
    """
    def __init__(
        self, is_valid: bool, n_trials: Optional[int] = None, timeout: Optional[float] = None,
    ) -> None:
        self._is_valid = is_valid
        self._n_trials = n_trials
        self._timeout = timeout
        # The bar (and the logging redirection) is only set up when enabled.
        if self._is_valid:
            self._init_valid()
    # TODO(hvy): Remove initialization indirection via this method when the progress bar is no
    # longer experimental.
    def _init_valid(self) -> None:
        # When the trial count is unknown, `None` is passed to tqdm —
        # presumably producing an open-ended bar; confirm against tqdm docs.
        self._progress_bar = tqdm(range(self._n_trials) if self._n_trials is not None else None)
        # Route library log records through `tqdm.write` so they do not break
        # the bar; the handler lives in a module global so `close` can find it.
        global _tqdm_handler
        _tqdm_handler = _TqdmLoggingHandler()
        _tqdm_handler.setLevel(logging.INFO)
        _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())
        optuna_logging.disable_default_handler()
        optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)
    def update(self, elapsed_seconds: Optional[float]) -> None:
        """Update the progress bars if ``is_valid`` is ``True``.
        Args:
            elapsed_seconds:
                The time past since `Study.optimize` started.
        """
        if self._is_valid:
            self._progress_bar.update(1)
            # With a timeout configured, show elapsed/total seconds as postfix.
            if self._timeout is not None and elapsed_seconds is not None:
                self._progress_bar.set_postfix_str(
                    "{:.02f}/{} seconds".format(elapsed_seconds, self._timeout)
                )
    def close(self) -> None:
        """Close progress bars."""
        if self._is_valid:
            self._progress_bar.close()
            assert _tqdm_handler is not None
            # Undo the logging redirection installed by `_init_valid`.
            optuna_logging._get_library_root_logger().removeHandler(_tqdm_handler)
            optuna_logging.enable_default_handler()
| 2,698
| 32.320988
| 96
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/exceptions.py
|
class OptunaError(Exception):
    """Root of the Optuna exception hierarchy.

    Catch this class to handle any Optuna-specific error.
    """
class TrialPruned(OptunaError):
    """Exception for pruned trials.
    This error tells a trainer that the current :class:`~optuna.trial.Trial` was pruned. It is
    supposed to be raised after :func:`optuna.trial.Trial.should_prune` as shown in the following
    example.
    Example:
        .. testsetup::
            import numpy as np
            from sklearn.model_selection import train_test_split
            np.random.seed(seed=0)
            X = np.random.randn(200).reshape(-1, 1)
            y = np.where(X[:, 0] < 0.5, 0, 1)
            X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
            classes = np.unique(y)
        .. testcode::
            import optuna
            from sklearn.linear_model import SGDClassifier
            def objective(trial):
                alpha = trial.suggest_uniform('alpha', 0.0, 1.0)
                clf = SGDClassifier(alpha=alpha)
                n_train_iter = 100
                for step in range(n_train_iter):
                    clf.partial_fit(X_train, y_train, classes=classes)
                    intermediate_value = clf.score(X_valid, y_valid)
                    trial.report(intermediate_value, step)
                    if trial.should_prune():
                        raise optuna.exceptions.TrialPruned()
                return clf.score(X_valid, y_valid)
            study = optuna.create_study(direction='maximize')
            study.optimize(objective, n_trials=20)
    """
    # Carries no state of its own; the exception type is the whole signal.
    pass
class CLIUsageError(OptunaError):
    """Exception raised by the command-line interface.

    The CLI raises this error when it receives an invalid configuration.
    """
class StorageInternalError(OptunaError):
    """Exception for storage operations.

    Raised when an operation fails inside the backend DB of a storage.
    """
class DuplicatedStudyError(OptunaError):
    """Exception for a duplicated study name.

    Raised when a specified study name already exists in the storage.
    """
class ExperimentalWarning(Warning):
    """Warning category for experimental features.

    This implementation exists here because the policy of `FutureWarning` has been changed
    since Python 3.7 was released. See the details in
    https://docs.python.org/3/library/warnings.html#warning-categories.
    """
| 2,423
| 25.637363
| 97
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/structs.py
|
import warnings
from optuna import _study_direction
from optuna import exceptions
from optuna import logging
from optuna import trial
# Module-level logger for this deprecated shim module.
_logger = logging.get_logger(__name__)
# Deprecation notice emitted once at import time, both as a warning and a log
# record: everything in `structs` now lives in dedicated modules.
_message = (
    "`structs` is deprecated. Classes have moved to the following modules. "
    "`structs.StudyDirection`->`study.StudyDirection`, "
    "`structs.StudySummary`->`study.StudySummary`, "
    "`structs.FrozenTrial`->`trial.FrozenTrial`, "
    "`structs.TrialState`->`trial.TrialState`, "
    "`structs.TrialPruned`->`exceptions.TrialPruned`."
)
warnings.warn(_message, DeprecationWarning)
_logger.warning(_message)
# The use of the structs.StudyDirection is deprecated and it is recommended that you use
# study.StudyDirection instead. See the API reference for more details.
StudyDirection = _study_direction.StudyDirection
# The use of the structs.TrialState is deprecated and it is recommended that you use
# trial.TrialState instead. See the API reference for more details.
TrialState = trial.TrialState
class FrozenTrial(object):
    """Status and results of a :class:`~optuna.trial.Trial`.
    .. deprecated:: 1.4.0
        This class was moved to :mod:`~optuna.trial`. Please use
        :class:`~optuna.trial.FrozenTrial` instead.
    Attributes:
        number:
            Unique and consecutive number of :class:`~optuna.trial.Trial` for each
            :class:`~optuna.study.Study`. Note that this field uses zero-based numbering.
        state:
            :class:`TrialState` of the :class:`~optuna.trial.Trial`.
        value:
            Objective value of the :class:`~optuna.trial.Trial`.
        datetime_start:
            Datetime where the :class:`~optuna.trial.Trial` started.
        datetime_complete:
            Datetime where the :class:`~optuna.trial.Trial` finished.
        params:
            Dictionary that contains suggested parameters.
        user_attrs:
            Dictionary that contains the attributes of the :class:`~optuna.trial.Trial` set with
            :func:`optuna.trial.Trial.set_user_attr`.
        intermediate_values:
            Intermediate objective values set with :func:`optuna.trial.Trial.report`.
    """
    def __init__(
        self,
        number,  # type: int
        state,  # type: TrialState
        value,  # type: Optional[float]
        datetime_start,  # type: Optional[datetime]
        datetime_complete,  # type: Optional[datetime]
        params,  # type: Dict[str, Any]
        distributions,  # type: Dict[str, BaseDistribution]
        user_attrs,  # type: Dict[str, Any]
        system_attrs,  # type: Dict[str, Any]
        intermediate_values,  # type: Dict[int, float]
        trial_id,  # type: int
    ):
        # type: (...) -> None
        # Emit the deprecation notice both as a warning and as a log record.
        message = (
            "The use of `structs.FrozenTrial` is deprecated. "
            "Please use `trial.FrozenTrial` instead."
        )
        warnings.warn(message, DeprecationWarning)
        _logger.warning(message)
        self.number = number
        self.state = state
        self.value = value
        self.datetime_start = datetime_start
        self.datetime_complete = datetime_complete
        self.params = params
        self.user_attrs = user_attrs
        self.system_attrs = system_attrs
        self.intermediate_values = intermediate_values
        # Stored under private names; exposed via the `distributions` property
        # and the `_trial_id` field referenced by `_ordered_fields`.
        self._distributions = distributions
        self._trial_id = trial_id
    # Ordered list of fields required for `__repr__`, `__hash__` and dataframe creation.
    # TODO(hvy): Remove this list in Python 3.6 as the order of `self.__dict__` is preserved.
    _ordered_fields = [
        "number",
        "value",
        "datetime_start",
        "datetime_complete",
        "params",
        "_distributions",
        "user_attrs",
        "system_attrs",
        "intermediate_values",
        "_trial_id",
        "state",
    ]
    def __eq__(self, other):
        # type: (Any) -> bool
        if not isinstance(other, FrozenTrial):
            return NotImplemented
        # Full attribute-dictionary comparison, i.e. every stored field counts.
        return other.__dict__ == self.__dict__
    def __lt__(self, other):
        # type: (Any) -> bool
        if not isinstance(other, FrozenTrial):
            return NotImplemented
        # Ordering is by trial number only.
        return self.number < other.number
    def __le__(self, other):
        # type: (Any) -> bool
        if not isinstance(other, FrozenTrial):
            return NotImplemented
        return self.number <= other.number
    def __hash__(self):
        # type: () -> int
        # NOTE(review): hash uses every field in `_ordered_fields`, so it only
        # works when all of them (e.g. `params` values) are hashable — confirm.
        return hash(tuple(getattr(self, field) for field in self._ordered_fields))
    def __repr__(self):
        # type: () -> str
        # Private fields are shown without their leading underscore.
        return "{cls}({kwargs})".format(
            cls=self.__class__.__name__,
            kwargs=", ".join(
                "{field}={value}".format(
                    field=field if not field.startswith("_") else field[1:],
                    value=repr(getattr(self, field)),
                )
                for field in self._ordered_fields
            ),
        )
    def _validate(self):
        # type: () -> None
        # Internal consistency checks; raises ValueError on the first problem.
        if self.datetime_start is None:
            raise ValueError("`datetime_start` is supposed to be set.")
        if self.state.is_finished():
            if self.datetime_complete is None:
                raise ValueError("`datetime_complete` is supposed to be set for a finished trial.")
        else:
            if self.datetime_complete is not None:
                raise ValueError(
                    "`datetime_complete` is supposed to be None for an unfinished trial."
                )
        if self.state == TrialState.COMPLETE and self.value is None:
            raise ValueError("`value` is supposed to be set for a complete trial.")
        if set(self.params.keys()) != set(self.distributions.keys()):
            raise ValueError(
                "Inconsistent parameters {} and distributions {}.".format(
                    set(self.params.keys()), set(self.distributions.keys())
                )
            )
        # Every suggested value must lie inside its recorded distribution.
        for param_name, param_value in self.params.items():
            distribution = self.distributions[param_name]
            param_value_in_internal_repr = distribution.to_internal_repr(param_value)
            if not distribution._contains(param_value_in_internal_repr):
                raise ValueError(
                    "The value {} of parameter '{}' isn't contained in the distribution "
                    "{}.".format(param_value, param_name, distribution)
                )
    @property
    def distributions(self):
        # type: () -> Dict[str, BaseDistribution]
        """Dictionary that contains the distributions of :attr:`params`."""
        return self._distributions
    @distributions.setter
    def distributions(self, value):
        # type: (Dict[str, BaseDistribution]) -> None
        self._distributions = value
    @property
    def last_step(self):
        # type: () -> Optional[int]
        # Largest reported step, or None when nothing was reported.
        if len(self.intermediate_values) == 0:
            return None
        else:
            return max(self.intermediate_values.keys())
    @property
    def duration(self):
        # type: () -> Optional[timedelta]
        """Return the elapsed time taken to complete the trial.
        Returns:
            The duration.
        """
        if self.datetime_start and self.datetime_complete:
            return self.datetime_complete - self.datetime_start
        else:
            return None
class TrialPruned(exceptions.TrialPruned):
    """Exception for pruned trials.
    .. deprecated:: 0.19.0
        This class was moved to :mod:`~optuna.exceptions`. Please use
        :class:`~optuna.exceptions.TrialPruned` instead.
    """
    def __init__(self, *args, **kwargs):
        # type: (Any, Any) -> None
        # Warn (and log) about the deprecated import location; otherwise this
        # behaves exactly like `exceptions.TrialPruned`.
        message = (
            "The use of `optuna.structs.TrialPruned` is deprecated. "
            "Please use `optuna.exceptions.TrialPruned` instead."
        )
        warnings.warn(message, DeprecationWarning)
        _logger.warning(message)
| 7,962
| 31.904959
| 99
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/logging.py
|
import logging
from logging import CRITICAL # NOQA
from logging import DEBUG # NOQA
from logging import ERROR # NOQA
from logging import FATAL # NOQA
from logging import INFO # NOQA
from logging import WARN # NOQA
from logging import WARNING # NOQA
import threading
import colorlog
# Guards (re)configuration of the library root logger across threads.
_lock = threading.Lock()
# The stderr handler installed by `_configure_library_root_logger`; None while
# unconfigured (or after `_reset_library_root_logger`).
_default_handler = None  # type: Optional[logging.Handler]
def create_default_formatter() -> colorlog.ColoredFormatter:
    """Create a default formatter of log messages.

    This function is not supposed to be directly accessed by library users.
    """
    # Single-letter level, timestamp, then the message; colored per level.
    fmt = "%(log_color)s[%(levelname)1.1s %(asctime)s]%(reset)s %(message)s"
    return colorlog.ColoredFormatter(fmt)
def _get_library_name() -> str:
return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
    # All library loggers hang off this single root logger.
    root_name = _get_library_name()
    return logging.getLogger(root_name)
def _configure_library_root_logger() -> None:
    # Idempotently install the default stderr handler on the library root
    # logger; guarded by `_lock` for thread safety.
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.setFormatter(create_default_formatter())

        # Apply our default configuration to the library root logger.
        root = _get_library_root_logger()
        root.addHandler(_default_handler)
        root.setLevel(logging.INFO)
        root.propagate = False
def _reset_library_root_logger() -> None:
    # Undo `_configure_library_root_logger`, restoring the pristine state.
    global _default_handler

    with _lock:
        if _default_handler is None:
            return
        root = _get_library_root_logger()
        root.removeHandler(_default_handler)
        root.setLevel(logging.NOTSET)
        _default_handler = None
def get_logger(name: str) -> logging.Logger:
    """Return a logger with the specified name.

    This function is not supposed to be directly accessed by library users.
    """
    # Make sure the root handler exists before handing out child loggers.
    _configure_library_root_logger()
    logger = logging.getLogger(name)
    return logger
def get_verbosity() -> int:
    """Return the current level for the Optuna's root logger.

    Returns:
        Logging level, e.g., ``optuna.logging.DEBUG`` and ``optuna.logging.INFO``.

    .. note::
        Optuna has following logging levels:

        - ``optuna.logging.CRITICAL``, ``optuna.logging.FATAL``
        - ``optuna.logging.ERROR``
        - ``optuna.logging.WARNING``, ``optuna.logging.WARN``
        - ``optuna.logging.INFO``
        - ``optuna.logging.DEBUG``
    """
    _configure_library_root_logger()
    root = _get_library_root_logger()
    return root.getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    """Set the level for the Optuna's root logger.

    Args:
        verbosity:
            Logging level, e.g., ``optuna.logging.DEBUG`` and ``optuna.logging.INFO``.
    """
    _configure_library_root_logger()
    root = _get_library_root_logger()
    root.setLevel(verbosity)
def disable_default_handler() -> None:
    """Disable the default handler of the Optuna's root logger.

    Example:
        Stop and then resume logging to :obj:`sys.stderr`.

        .. testsetup::

            def objective(trial):
                x = trial.suggest_uniform('x', -100, 100)
                y = trial.suggest_categorical('y', [-1, 0, 1])
                return x ** 2 + y

        .. testcode::

            import optuna

            study = optuna.create_study()

            # There are no logs in sys.stderr.
            optuna.logging.disable_default_handler()
            study.optimize(objective, n_trials=10)

            # There are logs in sys.stderr.
            optuna.logging.enable_default_handler()
            study.optimize(objective, n_trials=10)
            # [I 2020-02-23 17:00:54,314] Finished trial#10 with value: ...
            # [I 2020-02-23 17:00:54,356] Finished trial#11 with value: ...
            # ...
    """
    _configure_library_root_logger()

    # Configuration above guarantees the handler exists.
    handler = _default_handler
    assert handler is not None
    _get_library_root_logger().removeHandler(handler)
def enable_default_handler() -> None:
    """Enable the default handler of the Optuna's root logger.

    Please refer to the example shown in :func:`~optuna.logging.disable_default_handler()`.
    """
    _configure_library_root_logger()

    handler = _default_handler
    assert handler is not None
    _get_library_root_logger().addHandler(handler)
def disable_propagation() -> None:
    """Disable propagation of the library log outputs.

    Note that log propagation is disabled by default.
    """
    _configure_library_root_logger()
    root = _get_library_root_logger()
    root.propagate = False
def enable_propagation() -> None:
    """Enable propagation of the library log outputs.

    Please disable the Optuna's default handler to prevent double logging if the root logger has
    been configured.

    Example:
        Propagate all log output to the root logger in order to save them to the file.

        .. testsetup::

            def objective(trial):
                x = trial.suggest_uniform('x', -100, 100)
                y = trial.suggest_categorical('y', [-1, 0, 1])
                return x ** 2 + y

        .. testcode::

            import optuna
            import logging

            logger = logging.getLogger()
            logger.setLevel(logging.INFO)  # Setup the root logger.
            logger.addHandler(logging.FileHandler("foo.log", mode="w"))

            optuna.logging.enable_propagation()  # Propagate logs to the root logger.
            optuna.logging.disable_default_handler()  # Stop showing logs in sys.stderr.

            study = optuna.create_study()

            logger.info("Start optimization.")
            study.optimize(objective, n_trials=10)

            with open('foo.log') as f:
                assert f.readline() == "Start optimization.\\n"
                assert f.readline().startswith("Finished trial#0 with value:")
    """
    _configure_library_root_logger()
    root = _get_library_root_logger()
    root.propagate = True
| 6,076
| 27.134259
| 96
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/study.py
|
import collections
import datetime
import gc
import math
import threading
import warnings
import numpy as np
import pandas as pd # NOQA
# from optuna._study_direction import StudyDirection
# from optuna import exceptions
# from optuna import logging
# from optuna import progress_bar as pbar_module
# from optuna import pruners
# from optuna import samplers
# from optuna import storages
# from optuna import trial as trial_module
# from optuna.trial import FrozenTrial
# from optuna.trial import TrialState
from ._study_direction import StudyDirection
from . import exceptions
from . import logging
from . import progress_bar as pbar_module
from . import pruners
from . import samplers
from . import storages
from . import trial as trial_module
from .trial import FrozenTrial
from .trial import TrialState
# Module-level logger for this module.
_logger = logging.get_logger(__name__)
class BaseStudy(object):
    """Read-only view of a study: thin pass-throughs to the storage backend."""
    def __init__(self, study_id, storage):
        # type: (int, storages.BaseStorage) -> None
        self._study_id = study_id
        self._storage = storage
    @property
    def best_params(self):
        # type: () -> Dict[str, Any]
        """Return parameters of the best trial in the study.
        Returns:
            A dictionary containing parameters of the best trial.
        """
        return self.best_trial.params
    @property
    def best_value(self):
        # type: () -> float
        """Return the best objective value in the study.
        Returns:
            A float representing the best objective value.
        """
        best_value = self.best_trial.value
        assert best_value is not None
        return best_value
    @property
    def best_trial(self):
        # type: () -> FrozenTrial
        """Return the best trial in the study.
        Returns:
            A :class:`~optuna.FrozenTrial` object of the best trial.
        """
        return self._storage.get_best_trial(self._study_id)
    @property
    def direction(self):
        # type: () -> StudyDirection
        """Return the direction of the study.
        Returns:
            A :class:`~optuna.study.StudyDirection` object.
        """
        return self._storage.get_study_direction(self._study_id)
    @property
    def trials(self):
        # type: () -> List[FrozenTrial]
        """Return all trials in the study.
        The returned trials are ordered by trial number.
        This is a short form of ``self.get_trials(deepcopy=True)``.
        Returns:
            A list of :class:`~optuna.FrozenTrial` objects.
        """
        return self.get_trials()
    def get_trials(self, deepcopy=True):
        # type: (bool) -> List[FrozenTrial]
        """Return all trials in the study.
        The returned trials are ordered by trial number.
        For library users, it's recommended to use more handy
        :attr:`~optuna.study.Study.trials` property to get the trials instead.
        Args:
            deepcopy:
                Flag to control whether to apply ``copy.deepcopy()`` to the trials.
                Note that if you set the flag to :obj:`False`, you shouldn't mutate
                any fields of the returned trial. Otherwise the internal state of
                the study may corrupt and unexpected behavior may happen.
        Returns:
            A list of :class:`~optuna.FrozenTrial` objects.
        """
        return self._storage.get_all_trials(self._study_id, deepcopy=deepcopy)
    @property
    def storage(self):
        # type: () -> storages.BaseStorage
        """Return the storage object used by the study.
        .. deprecated:: 0.15.0
            The direct use of storage is deprecated.
            Please access to storage via study's public methods
            (e.g., :meth:`~optuna.study.Study.set_user_attr`).
        Returns:
            A storage object.
        """
        # Deprecation is announced both as a warning and as a log record.
        warnings.warn(
            "The direct use of storage is deprecated. "
            "Please access to storage via study's public methods "
            "(e.g., `Study.set_user_attr`)",
            DeprecationWarning,
        )
        _logger.warning(
            "The direct use of storage is deprecated. "
            "Please access to storage via study's public methods "
            "(e.g., `Study.set_user_attr`)"
        )
        return self._storage
class Study(BaseStudy):
"""A study corresponds to an optimization task, i.e., a set of trials.
This object provides interfaces to run a new :class:`~optuna.trial.Trial`, access trials'
history, set/get user-defined attributes of the study itself.
Note that the direct use of this constructor is not recommended.
To create and load a study, please refer to the documentation of
:func:`~optuna.study.create_study` and :func:`~optuna.study.load_study` respectively.
"""
def __init__(
self,
study_name, # type: str
storage, # type: Union[str, storages.BaseStorage]
sampler=None, # type: samplers.BaseSampler
pruner=None, # type: pruners.BasePruner
seed=None,
cat_preds=None,
):
# type: (...) -> None
self.add_on = 0
self.study_name = study_name
storage = storages.get_storage(storage)
study_id = storage.get_study_id_from_name(study_name)
super(Study, self).__init__(study_id, storage)
# use TPE sampler
self.sampler = sampler or samplers.TPESampler()
# don't use prunning
self.pruner = pruner or pruners.NopPruner()
self._optimize_lock = threading.Lock()
self._stop_flag = False
self.evaled = set()
self.rnd = np.random.RandomState(seed=seed)
self.cat_preds = cat_preds
try:
self.cat_preds_set = set(cat_preds.values())
except:
pass
self.info = {}
self.info["names"] = []
def __getstate__(self):
# type: () -> Dict[Any, Any]
state = self.__dict__.copy()
del state["_optimize_lock"]
return state
def __setstate__(self, state):
# type: (Dict[Any, Any]) -> None
self.__dict__.update(state)
self._optimize_lock = threading.Lock()
@property
def user_attrs(self):
# type: () -> Dict[str, Any]
"""Return user attributes.
Returns:
A dictionary containing all user attributes.
"""
return self._storage.get_study_user_attrs(self._study_id)
@property
def system_attrs(self):
# type: () -> Dict[str, Any]
"""Return system attributes.
Returns:
A dictionary containing all system attributes.
"""
return self._storage.get_study_system_attrs(self._study_id)
def optimize(
self,
func, # type: ObjectiveFuncType
n_trials=None, # type: Optional[int]
timeout=None, # type: Optional[float]
n_jobs=1, # type: int
catch=(), # type: Union[Tuple[()], Tuple[Type[Exception]]]
callbacks=None, # type: Optional[List[Callable[[Study, FrozenTrial], None]]]
gc_after_trial=True, # type: bool
show_progress_bar=False, # type: bool
**kwargs,
):
# type: (...) -> None
"""Optimize an objective function.
Optimization is done by choosing a suitable set of hyperparameter values from a given
range. Uses a sampler which implements the task of value suggestion based on a specified
distribution. The sampler is specified in :func:`~optuna.study.create_study` and the
default choice for the sampler is TPE.
See also :class:`~optuna.samplers.TPESampler` for more details on 'TPE'.
Args:
func:
A callable that implements objective function.
n_trials:
The number of trials. If this argument is set to :obj:`None`, there is no
limitation on the number of trials. If :obj:`timeout` is also set to :obj:`None`,
the study continues to create trials until it receives a termination signal such
as Ctrl+C or SIGTERM.
timeout:
Stop study after the given number of second(s). If this argument is set to
:obj:`None`, the study is executed without time limitation. If :obj:`n_trials` is
also set to :obj:`None`, the study continues to create trials until it receives a
termination signal such as Ctrl+C or SIGTERM.
n_jobs:
The number of parallel jobs. If this argument is set to :obj:`-1`, the number is
set to CPU count.
catch:
A study continues to run even when a trial raises one of the exceptions specified
in this argument. Default is an empty tuple, i.e. the study will stop for any
exception except for :class:`~optuna.exceptions.TrialPruned`.
callbacks:
List of callback functions that are invoked at the end of each trial. Each function
must accept two parameters with the following types in this order:
:class:`~optuna.study.Study` and :class:`~optuna.FrozenTrial`.
gc_after_trial:
Flag to execute garbage collection at the end of each trial. By default, garbage
collection is enabled, just in case. You can turn it off with this argument if
memory is safely managed in your objective function.
show_progress_bar:
Flag to show progress bars or not. To disable progress bar, set this ``False``.
Currently, progress bar is experimental feature and disabled
when ``n_jobs`` :math:`\\ne 1`.
"""
# self._progress_bar = pbar_module._ProgressBar(
# show_progress_bar and n_jobs == 1, n_trials, timeout
# )
self._stop_flag = False
# optimize one iteration at a time
self._optimize_sequential(func, n_trials, timeout, catch, callbacks, gc_after_trial, None, **kwargs)
# self._progress_bar.close()
# del self._progress_bar
def set_user_attr(self, key, value):
# type: (str, Any) -> None
"""Set a user attribute to the study.
Args:
key: A key string of the attribute.
value: A value of the attribute. The value should be JSON serializable.
"""
self._storage.set_study_user_attr(self._study_id, key, value)
def set_system_attr(self, key, value):
# type: (str, Any) -> None
"""Set a system attribute to the study.
Note that Optuna internally uses this method to save system messages. Please use
:func:`~optuna.study.Study.set_user_attr` to set users' attributes.
Args:
key: A key string of the attribute.
value: A value of the attribute. The value should be JSON serializable.
"""
self._storage.set_study_system_attr(self._study_id, key, value)
def trials_dataframe(
self,
attrs=(
"number",
"value",
"datetime_start",
"datetime_complete",
"duration",
"params",
"user_attrs",
"system_attrs",
"state",
), # type: Tuple[str, ...]
multi_index=False, # type: bool
):
# type: (...) -> pd.DataFrame
"""Export trials as a pandas DataFrame_.
The DataFrame_ provides various features to analyze studies. It is also useful to draw a
histogram of objective values and to export trials as a CSV file.
If there are no trials, an empty DataFrame_ is returned.
Example:
.. testcode::
import optuna
import pandas
def objective(trial):
x = trial.suggest_uniform('x', -1, 1)
return x ** 2
study = optuna.create_study()
study.optimize(objective, n_trials=3)
# Create a dataframe from the study.
df = study.trials_dataframe()
assert isinstance(df, pandas.DataFrame)
assert df.shape[0] == 3 # n_trials.
Args:
attrs:
Specifies field names of :class:`~optuna.FrozenTrial` to include them to a
DataFrame of trials.
multi_index:
Specifies whether the returned DataFrame_ employs MultiIndex_ or not. Columns that
are hierarchical by nature such as ``(params, x)`` will be flattened to
``params_x`` when set to :obj:`False`.
Returns:
A pandas DataFrame_ of trials in the :class:`~optuna.study.Study`.
.. _DataFrame: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
.. _MultiIndex: https://pandas.pydata.org/pandas-docs/stable/advanced.html
"""
trials = self.get_trials(deepcopy=False)
# If no trials, return an empty dataframe.
if not len(trials):
return pd.DataFrame()
assert all(isinstance(trial, FrozenTrial) for trial in trials)
attrs_to_df_columns = collections.OrderedDict() # type: Dict[str, str]
for attr in attrs:
if attr.startswith("_"):
# Python conventional underscores are omitted in the dataframe.
df_column = attr[1:]
else:
df_column = attr
attrs_to_df_columns[attr] = df_column
# column_agg is an aggregator of column names.
# Keys of column agg are attributes of `FrozenTrial` such as 'trial_id' and 'params'.
# Values are dataframe columns such as ('trial_id', '') and ('params', 'n_layers').
column_agg = collections.defaultdict(set) # type: Dict[str, Set]
non_nested_attr = ""
def _create_record_and_aggregate_column(trial):
# type: (FrozenTrial) -> Dict[Tuple[str, str], Any]
record = {}
for attr, df_column in attrs_to_df_columns.items():
value = getattr(trial, attr)
if isinstance(value, TrialState):
# Convert TrialState to str and remove the common prefix.
value = str(value).split(".")[-1]
if isinstance(value, dict):
for nested_attr, nested_value in value.items():
record[(df_column, nested_attr)] = nested_value
column_agg[attr].add((df_column, nested_attr))
else:
record[(df_column, non_nested_attr)] = value
column_agg[attr].add((df_column, non_nested_attr))
return record
records = list([_create_record_and_aggregate_column(trial) for trial in trials])
columns = sum(
(sorted(column_agg[k]) for k in attrs if k in column_agg), []
) # type: List[Tuple[str, str]]
df = pd.DataFrame(records, columns=pd.MultiIndex.from_tuples(columns))
if not multi_index:
# Flatten the `MultiIndex` columns where names are concatenated with underscores.
# Filtering is required to omit non-nested columns avoiding unwanted trailing
# underscores.
df.columns = [
"_".join(filter(lambda c: c, map(lambda c: str(c), col))) for col in columns
]
return df
def _optimize_sequential(
self,
func, # type: ObjectiveFuncType
n_trials, # type: Optional[int]
timeout, # type: Optional[float]
catch, # type: Union[Tuple[()], Tuple[Type[Exception]]]
callbacks, # type: Optional[List[Callable[[Study, FrozenTrial], None]]]
gc_after_trial, # type: bool
time_start, # type: Optional[datetime.datetime]
**kwargs,
):
# type: (...) -> None
# trial counter
i_trial = 0
# timer
if time_start is None:
time_start = datetime.datetime.now()
while True:
if self._stop_flag:
break
# check number of trials
if n_trials is not None:
if i_trial >= n_trials:
break
i_trial += 1
# check if alloted time has expired
if timeout is not None:
elapsed_seconds = (datetime.datetime.now() - time_start).total_seconds()
if elapsed_seconds - self.add_on >= timeout:
break
self.info["names"] = []
self._run_trial(func, catch, gc_after_trial, **kwargs)
# self._progress_bar.update((datetime.datetime.now() - time_start).total_seconds())
self._storage.remove_session()
def _run_trial(
    self,
    func,  # type: ObjectiveFuncType
    catch,  # type: Union[Tuple[()], Tuple[Type[Exception]]]
    gc_after_trial,  # type: bool
    **kwargs,
):
    # type: (...) -> trial_module.Trial
    """Create one trial, evaluate ``func`` on it, and persist the outcome.

    The trial is marked COMPLETE with the objective value when ``func``
    returns something castable to a non-NaN float; otherwise it is marked
    FAIL and the reason is recorded as the ``fail_reason`` system attribute.

    NOTE(review): ``catch`` is accepted for signature compatibility but is
    not used here; exceptions raised by ``func`` propagate to the caller.

    Args:
        func: Objective function, called as ``func(trial, **kwargs)``.
        catch: Unused in this implementation (see note above).
        gc_after_trial: Force ``gc.collect()`` right after the objective
            returns, to curb memory growth in constrained environments.
        **kwargs: Extra keyword arguments forwarded to ``func``.

    Returns:
        The trial object that was run.
    """
    # trial_id enumerates the trials 0, 1, 2, ...
    trial_id = self._storage.create_new_trial(self._study_id)
    # Create a new Trial handle for this study (see _trial.py).
    trial = trial_module.Trial(self, trial_id)
    # trial number is 0, 1, 2, ...
    trial_number = trial.number

    def _fail(message):
        # type: (str) -> None
        # Shared failure path: warn, record the reason, mark the trial FAILED.
        _logger.warning(message)
        self._storage.set_trial_system_attr(trial_id, "fail_reason", message)
        self._storage.set_trial_state(trial_id, TrialState.FAIL)

    # Evaluate the objective function.
    result = func(trial, **kwargs)

    # The following mitigates memory problems that can occur in some
    # environments (e.g., services that use computing containers such as CircleCI).
    if gc_after_trial:
        gc.collect()

    # Reject results that cannot be interpreted as a float.
    try:
        result = float(result)
    except (ValueError, TypeError):
        _fail(
            "Setting status of trial#{} as {} because the returned value from the "
            "objective function cannot be casted to float. Returned value is: "
            "{}".format(trial_number, TrialState.FAIL, repr(result))
        )
        return trial

    # Reject NaN results explicitly (float("nan") casts fine above).
    if math.isnan(result):
        _fail(
            "Setting status of trial#{} as {} because the objective function "
            "returned {}.".format(trial_number, TrialState.FAIL, result)
        )
        return trial

    # Log the value and mark the trial finished.
    self._storage.set_trial_value(trial_id, result)
    self._storage.set_trial_state(trial_id, TrialState.COMPLETE)
    # self._log_completed_trial(trial, result)
    return trial
def _log_completed_trial(self, trial, result):
    # type: (trial_module.Trial, float) -> None
    """Log an info-level summary of a finished trial and the current best."""
    template = (
        "Finished trial#{} with value: {} with parameters: {}. "
        "Best is trial#{} with value: {}."
    )
    best = self.best_trial
    message = template.format(
        trial.number, result, trial.params, best.number, self.best_value
    )
    _logger.info(message)
def create_study(
    storage=None,  # type: Union[None, str, storages.BaseStorage]
    sampler=None,  # type: samplers.BaseSampler
    pruner=None,  # type: pruners.BasePruner
    direction="minimize",  # type: str
    load_if_exists=False,  # type: bool
    seed=None,
    cat_preds=None,
):
    # type: (...) -> Study
    """Create a new :class:`~optuna.study.Study`.

    Args:
        storage:
            Database URL. If this argument is set to None, in-memory storage is used, and the
            :class:`~optuna.study.Study` will not be persistent.

            .. note::
                When a database URL is passed, Optuna internally uses `SQLAlchemy`_ to handle
                the database. Please refer to `SQLAlchemy's document`_ for further details.
                If you want to specify non-default options to `SQLAlchemy Engine`_, you can
                instantiate :class:`~optuna.storages.RDBStorage` with your desired options and
                pass it to the ``storage`` argument instead of a URL.

            .. _SQLAlchemy: https://www.sqlalchemy.org/
            .. _SQLAlchemy's document:
                https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
            .. _SQLAlchemy Engine: https://docs.sqlalchemy.org/en/latest/core/engines.html
        sampler:
            A sampler object that implements background algorithm for value suggestion.
            If :obj:`None` is specified, :class:`~optuna.samplers.TPESampler` is used
            as the default. See also :class:`~optuna.samplers`.
        pruner:
            A pruner object that decides early stopping of unpromising trials. See also
            :class:`~optuna.pruners`.
        direction:
            Direction of optimization. Set ``minimize`` for minimization and ``maximize`` for
            maximization.
        load_if_exists:
            Flag to control the behavior to handle a conflict of study names.
            In the case where a study named ``study_name`` already exists in the ``storage``,
            a :class:`~optuna.exceptions.DuplicatedStudyError` is raised if ``load_if_exists`` is
            set to :obj:`False`.
            Otherwise, the creation of the study is skipped, and the existing one is returned.
        seed:
            Forwarded to :class:`~optuna.study.Study`; presumably a random seed for
            the sampler — NOTE(review): confirm against the ``Study`` constructor.
        cat_preds:
            Forwarded to :class:`~optuna.study.Study`; semantics are defined there.

    Returns:
        A :class:`~optuna.study.Study` object.

    Raises:
        ValueError:
            If ``direction`` is neither ``'minimize'`` nor ``'maximize'``.
    """
    # Validate `direction` up front so an invalid value cannot leave an
    # orphan study behind in the storage (previously validation happened
    # only after the study had already been created).
    if direction == "minimize":
        _direction = StudyDirection.MINIMIZE
    elif direction == "maximize":
        _direction = StudyDirection.MAXIMIZE
    else:
        raise ValueError("Please set either 'minimize' or 'maximize' to direction.")

    # in memory or dbms (we will use only in memory data?)
    storage = storages.get_storage(storage)
    # study_id in our case is always 0, method in in_memory.py
    study_id = storage.create_new_study(None)
    # random string starting with "no-name-"
    study_name = storage.get_study_name_from_id(study_id)
    # study session
    study = Study(
        study_name=study_name,
        storage=storage,
        sampler=sampler,
        pruner=pruner,
        seed=seed,
        cat_preds=cat_preds,
    )

    # Record the optimization direction on the storage.
    study._storage.set_study_direction(study_id, _direction)

    return study
| 22,675
| 34.102167
| 108
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/__init__.py
|
import importlib
import types
from typing import Any
# from optuna import distributions # NOQA
# from optuna import exceptions # NOQA
# from optuna import logging # NOQA
# from optuna import pruners # NOQA
# from optuna import samplers # NOQA
# from optuna import storages # NOQA
# from optuna import study # NOQA
# from optuna import trial # NOQA
from . import distributions # NOQA
from . import exceptions # NOQA
from . import logging # NOQA
from . import pruners # NOQA
from . import samplers # NOQA
from . import storages # NOQA
from . import study # NOQA
from . import trial # NOQA
from .study import create_study # NOQA
from .study import Study # NOQA
from .trial import Trial # NOQA
# from study import create_study # NOQA
# from study import Study # NOQA
# from trial import Trial # NOQA
| 819
| 29.37037
| 42
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/_study_direction.py
|
import enum
class StudyDirection(enum.Enum):
    """Direction of a :class:`~optuna.study.Study`.

    Attributes:
        NOT_SET:
            Direction has not been set.
        MINIMIZE:
            :class:`~optuna.study.Study` minimizes the objective function.
        MAXIMIZE:
            :class:`~optuna.study.Study` maximizes the objective function.
    """

    # Sentinel: the direction has not been chosen yet.
    NOT_SET = 0
    # Lower objective values are considered better.
    MINIMIZE = 1
    # Higher objective values are considered better.
    MAXIMIZE = 2
| 418
| 21.052632
| 74
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/storages/base.py
|
import abc
import copy
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
# from optuna import study
# from optuna.trial import TrialState
from .. import study
from ..trial import TrialState
# Prefix used when auto-generating a study name (a UUID is appended to it).
DEFAULT_STUDY_NAME_PREFIX = "no-name-"
class BaseStorage(object, metaclass=abc.ABCMeta):
    """Base class for storages.

    This class is not supposed to be directly accessed by library users.

    A storage class abstracts a backend database and provides library internal interfaces to
    read/write histories of studies and trials.

    **Thread safety**

    A storage class can be shared among multiple threads, and must therefore be thread-safe.
    It must guarantee that return values such as `FrozenTrial`s are never modified.
    A storage class can assume that return values are never modified by its user.
    When a user modifies a return value from a storage class, the internal state of the storage
    may become inconsistent. Consequences are undefined.

    **Ownership of RUNNING trials**

    Trials in finished states are not allowed to be modified.
    Trials in the WAITING state are not allowed to be modified except for the `state` field.
    A storage class can assume that each RUNNING trial is only modified from a single process.
    When a user modifies a RUNNING trial from multiple processes, the internal state of the storage
    may become inconsistent. Consequences are undefined.
    A storage class is not intended for inter-process communication.
    Consequently, users using optuna with MPI or other multi-process programs must make sure that
    only one process is used to access the optuna interface.

    **Consistency models**

    A storage class must support the monotonic-reads consistency model, that is, if a
    process reads data `X`, any successive reads on data `X` cannot return older values.
    It must support read-your-writes, that is, if a process writes to data `X`,
    any successive reads on data `X` from the same process must read the written
    value or one of the more recent values.

    **Stronger consistency requirements for special data**

    TODO(ytsmiling) Add load method to storage class implementations.

    Under a multi-worker setting, a storage class must return the latest values of any attributes
    of a study, not necessarily for the attributes of a `Trial`.
    However, if the `load(study_id)` method is called, any successive reads on the `state`
    attribute of a `Trial` are guaranteed to return the same or more recent values than the value
    at the time of call to the `load` method.
    Let `T` be a `Trial`.
    Let `P` be the process that last updated the `state` attribute of `T`.
    Then, any reads on any attributes of `T` are guaranteed to return the same or
    more recent values than any writes by `P` on the attribute before `P` updated
    the `state` attribute of `T`.
    The same applies for `user_attrs', 'system_attrs' and 'intermediate_values` attributes.

    .. note::

        These attribute behaviors may become user customizable in the future.

    **Data persistence**

    A storage class does not guarantee that write operations are logged into a persistent
    storage, even when write methods succeed.
    Thus, when process failure occurs, some writes might be lost.
    As exceptions, when a persistent storage is available, any writes on any attributes
    of `Study` and writes on `state` of `Trial` are guaranteed to be persistent.
    Additionally, any preceding writes on any attributes of `Trial` are guaranteed to
    be written into a persistent storage before writes on `state` of `Trial` succeed.
    The same applies for `user_attrs', 'system_attrs' and 'intermediate_values` attributes.

    .. note::

        These attribute behaviors may become user customizable in the future.
    """

    # Basic study manipulation

    @abc.abstractmethod
    def create_new_study(self, study_name: Optional[str] = None) -> int:
        """Create a new study from a name.

        If no name is specified, the storage class generates a name.
        The returned study ID is unique among all current and deleted studies.

        Args:
            study_name:
                Name of the new study to create.

        Returns:
            ID of the created study.

        Raises:
            :exc:`optuna.exceptions.DuplicatedStudyError`:
                If a study with the same ``study_name`` already exists.
        """
        # TODO(ytsmiling) Fix RDB storage implementation to ensure unique `study_id`.
        raise NotImplementedError

    @abc.abstractmethod
    def delete_study(self, study_id: int) -> None:
        """Delete a study.

        Args:
            study_id:
                ID of the study.

        Raises:
            :exc:`KeyError`:
                If no study with the matching ``study_id`` exists.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def set_study_user_attr(self, study_id: int, key: str, value: Any) -> None:
        """Register a user-defined attribute to a study.

        This method overwrites any existing attribute.

        Args:
            study_id:
                ID of the study.
            key:
                Attribute key.
            value:
                Attribute value. It should be JSON serializable.

        Raises:
            :exc:`KeyError`:
                If no study with the matching ``study_id`` exists.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def set_study_system_attr(self, study_id: int, key: str, value: Any) -> None:
        """Register an optuna-internal attribute to a study.

        This method overwrites any existing attribute.

        Args:
            study_id:
                ID of the study.
            key:
                Attribute key.
            value:
                Attribute value. It should be JSON serializable.

        Raises:
            :exc:`KeyError`:
                If no study with the matching ``study_id`` exists.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def set_study_direction(self, study_id: int, direction: study.StudyDirection) -> None:
        """Register an optimization problem direction to a study.

        Args:
            study_id:
                ID of the study.
            direction:
                Either :obj:`~optuna.study.StudyDirection.MAXIMIZE` or
                :obj:`~optuna.study.StudyDirection.MINIMIZE`.

        Raises:
            :exc:`KeyError`:
                If no study with the matching ``study_id`` exists.
            :exc:`ValueError`:
                If the direction is already set and the passed ``direction`` is the opposite
                direction or :obj:`~optuna.study.StudyDirection.NOT_SET`.
        """
        raise NotImplementedError

    # Basic study access

    @abc.abstractmethod
    def get_study_id_from_name(self, study_name: str) -> int:
        """Read the ID of a study.

        Args:
            study_name:
                Name of the study.

        Returns:
            ID of the study.

        Raises:
            :exc:`KeyError`:
                If no study with the matching ``study_name`` exists.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_study_id_from_trial_id(self, trial_id: int) -> int:
        """Read the ID of a study to which a trial belongs.

        Args:
            trial_id:
                ID of the trial.

        Returns:
            ID of the study.

        Raises:
            :exc:`KeyError`:
                If no trial with the matching ``trial_id`` exists.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_study_name_from_id(self, study_id: int) -> str:
        """Read the study name of a study.

        Args:
            study_id:
                ID of the study.

        Returns:
            Name of the study.

        Raises:
            :exc:`KeyError`:
                If no study with the matching ``study_id`` exists.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_study_direction(self, study_id: int) -> study.StudyDirection:
        """Read whether a study maximizes or minimizes an objective.

        Args:
            study_id:
                ID of a study.

        Returns:
            Optimization direction of the study.

        Raises:
            :exc:`KeyError`:
                If no study with the matching ``study_id`` exists.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_study_user_attrs(self, study_id: int) -> Dict[str, Any]:
        """Read the user-defined attributes of a study.

        Args:
            study_id:
                ID of the study.

        Returns:
            Dictionary with the user attributes of the study.

        Raises:
            :exc:`KeyError`:
                If no study with the matching ``study_id`` exists.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_study_system_attrs(self, study_id: int) -> Dict[str, Any]:
        """Read the optuna-internal attributes of a study.

        Args:
            study_id:
                ID of the study.

        Returns:
            Dictionary with the optuna-internal attributes of the study.

        Raises:
            :exc:`KeyError`:
                If no study with the matching ``study_id`` exists.
        """
        raise NotImplementedError

    # Basic trial manipulation

    @abc.abstractmethod
    def create_new_trial(
        self, study_id: int, template_trial: Optional["FrozenTrial"] = None
    ) -> int:
        """Create and add a new trial to a study.

        The returned trial ID is unique among all current and deleted trials.

        Args:
            study_id:
                ID of the study.
            template_trial:
                Template :class:`~optuna.trial.FronzenTrial` with default user-attributes,
                system-attributes, intermediate-values, and a state.

        Returns:
            ID of the created trial.

        Raises:
            :exc:`KeyError`:
                If no study with the matching ``study_id`` exists.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_state(self, trial_id: int, state: TrialState) -> bool:
        """Update the state of a trial.

        Args:
            trial_id:
                ID of the trial.
            state:
                New state of the trial.

        Returns:
            :obj:`True` if the state is successfully updated.
            :obj:`False` if the state is kept the same.
            The latter happens when this method tries to update the state of
            :obj:`~optuna.trial.TrialState.RUNNING` trial to
            :obj:`~optuna.trial.TrialState.RUNNING`.

        Raises:
            :exc:`KeyError`:
                If no trial with the matching ``trial_id`` exists.
            :exc:`RuntimeError`:
                If the trial is already finished.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_param(
        self,
        trial_id: int,
        param_name: str,
        param_value_internal: float,
        distribution: "distributions.BaseDistribution",
    ) -> bool:
        """Add a parameter to a trial.

        Args:
            trial_id:
                ID of the trial.
            param_name:
                Name of the parameter.
            param_value_internal:
                Internal representation of the parameter value.
            distribution:
                Sampled distribution of the parameter.

        Returns:
            :obj:`False` when the parameter is already set to the trial, :obj:`True` otherwise.

        Raises:
            :exc:`KeyError`:
                If no trial with the matching ``trial_id`` exists.
            :exc:`RuntimeError`:
                If the trial is already finished.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_trial_number_from_id(self, trial_id: int) -> int:
        """Read the trial number of a trial.

        .. note::

            The trial number is only unique within a study, and is sequential.

        Args:
            trial_id:
                ID of the trial.

        Returns:
            Number of the trial.

        Raises:
            :exc:`KeyError`:
                If no trial with the matching ``trial_id`` exists.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_trial_param(self, trial_id: int, param_name: str) -> float:
        """Read the parameter of a trial.

        Args:
            trial_id:
                ID of the trial.
            param_name:
                Name of the parameter.

        Returns:
            Internal representation of the parameter.

        Raises:
            :exc:`KeyError`:
                If no trial with the matching ``trial_id`` exists.
                If no such parameter exists.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_value(self, trial_id: int, value: float) -> None:
        """Set a return value of an objective function.

        This method overwrites any existing trial value.

        Args:
            trial_id:
                ID of the trial.
            value:
                Value of the objective function.

        Raises:
            :exc:`KeyError`:
                If no trial with the matching ``trial_id`` exists.
            :exc:`RuntimeError`:
                If the trial is already finished.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_intermediate_value(
        self, trial_id: int, step: int, intermediate_value: float
    ) -> bool:
        """Report an intermediate value of an objective function.

        Args:
            trial_id:
                ID of the trial.
            step:
                Step of the trial (e.g., the epoch when training a neural network).
            intermediate_value:
                Intermediate value corresponding to the step.

        Returns:
            :obj:`False` when the step is already set, :obj:`True` otherwise.

        Raises:
            :exc:`KeyError`:
                If no trial with the matching ``trial_id`` exists.
            :exc:`RuntimeError`:
                If the trial is already finished.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_user_attr(self, trial_id: int, key: str, value: Any) -> None:
        """Set a user-defined attribute to a trial.

        This method overwrites any existing attribute.

        Args:
            trial_id:
                ID of the trial.
            key:
                Attribute key.
            value:
                Attribute value. It should be JSON serializable.

        Raises:
            :exc:`KeyError`:
                If no trial with the matching ``trial_id`` exists.
            :exc:`RuntimeError`:
                If the trial is already finished.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_system_attr(self, trial_id: int, key: str, value: Any) -> None:
        """Set an optuna-internal attribute to a trial.

        This method overwrites any existing attribute.

        Args:
            trial_id:
                ID of the trial.
            key:
                Attribute key.
            value:
                Attribute value. It should be JSON serializable.

        Raises:
            :exc:`KeyError`:
                If no trial with the matching ``trial_id`` exists.
            :exc:`RuntimeError`:
                If the trial is already finished.
        """
        raise NotImplementedError

    # Basic trial access

    @abc.abstractmethod
    def get_trial(self, trial_id: int) -> "FrozenTrial":
        """Read a trial.

        Args:
            trial_id:
                ID of the trial.

        Returns:
            Trial with a matching trial ID.

        Raises:
            :exc:`KeyError`:
                If no trial with the matching ``trial_id`` exists.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_all_trials(self, study_id: int, deepcopy: bool = True) -> List["FrozenTrial"]:
        """Read all trials in a study.

        Args:
            study_id:
                ID of the study.
            deepcopy:
                Whether to copy the list of trials before returning.
                Set to :obj:`True` if you intend to update the list or elements of the list.

        Returns:
            List of trials in the study.

        Raises:
            :exc:`KeyError`:
                If no study with the matching ``study_id`` exists.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_n_trials(self, study_id: int, state: Optional[TrialState] = None) -> int:
        """Count the number of trials in a study.

        Args:
            study_id:
                ID of the study.
            state:
                :class:`~optuna.trial.TrialState` to filter trials.

        Returns:
            Number of trials in the study.

        Raises:
            :exc:`KeyError`:
                If no study with the matching ``study_id`` exists.
        """
        raise NotImplementedError

    def get_best_trial(self, study_id: int) -> "FrozenTrial":
        """Return the trial with the best value in a study.

        Args:
            study_id:
                ID of the study.

        Returns:
            The trial with the best objective value among all finished trials in the study.

        Raises:
            :exc:`KeyError`:
                If no study with the matching ``study_id`` exists.
            :exc:`ValueError`:
                If no trials have been completed.
        """
        # Only COMPLETE trials have a meaningful objective value.
        all_trials = self.get_all_trials(study_id, deepcopy=False)
        all_trials = [t for t in all_trials if t.state is TrialState.COMPLETE]

        if len(all_trials) == 0:
            raise ValueError("No trials are completed yet.")

        # Pick max or min according to the study's registered direction.
        if self.get_study_direction(study_id) == study.StudyDirection.MAXIMIZE:
            best_trial = max(all_trials, key=lambda t: t.value)
        else:
            best_trial = min(all_trials, key=lambda t: t.value)
        # Deep-copy so callers cannot mutate internal storage state.
        return copy.deepcopy(best_trial)

    def get_trial_params(self, trial_id: int) -> Dict[str, Any]:
        """Read the parameter dictionary of a trial.

        Args:
            trial_id:
                ID of the trial.

        Returns:
            Dictionary of a parameters. Keys are parameter names and values are internal
            representations of the parameter values.

        Raises:
            :exc:`KeyError`:
                If no trial with the matching ``trial_id`` exists.
        """
        return self.get_trial(trial_id).params

    def get_trial_user_attrs(self, trial_id: int) -> Dict[str, Any]:
        """Read the user-defined attributes of a trial.

        Args:
            trial_id:
                ID of the trial.

        Returns:
            Dictionary with the user-defined attributes of the trial.

        Raises:
            :exc:`KeyError`:
                If no trial with the matching ``trial_id`` exists.
        """
        return self.get_trial(trial_id).user_attrs

    def get_trial_system_attrs(self, trial_id: int) -> Dict[str, Any]:
        """Read the optuna-internal attributes of a trial.

        Args:
            trial_id:
                ID of the trial.

        Returns:
            Dictionary with the optuna-internal attributes of the trial.

        Raises:
            :exc:`KeyError`:
                If no trial with the matching ``trial_id`` exists.
        """
        return self.get_trial(trial_id).system_attrs

    def remove_session(self) -> None:
        """Clean up all connections to a database."""
        # No-op by default; backends with real connections override this.
        pass

    def check_trial_is_updatable(self, trial_id: int, trial_state: TrialState) -> None:
        """Check whether a trial state is updatable.

        Args:
            trial_id:
                ID of the trial.
                Only used for an error message.
            trial_state:
                Trial state to check.

        Raises:
            :exc:`RuntimeError`:
                If the trial is already finished.
        """
        if trial_state.is_finished():
            trial = self.get_trial(trial_id)
            raise RuntimeError(
                "Trial#{} has already finished and can not be updated.".format(trial.number)
            )
| 20,677
| 30.377845
| 99
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/storages/in_memory.py
|
import copy
from datetime import datetime
import threading
import uuid
# from optuna import distributions # NOQA
# from optuna.exceptions import DuplicatedStudyError
# from optuna.storages import base
# from optuna.storages.base import DEFAULT_STUDY_NAME_PREFIX
# from optuna.study import StudyDirection
# from optuna.trial import FrozenTrial
# from optuna.trial import TrialState
from .. import distributions # NOQA
from ..exceptions import DuplicatedStudyError
from . import base
from .base import DEFAULT_STUDY_NAME_PREFIX
from ..study import StudyDirection
from ..trial import FrozenTrial
from ..trial import TrialState
class InMemoryStorage(base.BaseStorage):
    """Storage class that stores data in memory of the Python process.

    This class is not supposed to be directly accessed by library users.
    """

    def __init__(self):
        # type: () -> None
        # Maps a globally unique trial_id to (study_id, trial number).
        self._trial_id_to_study_id_and_number = {}  # type: Dict[int, Tuple[int, int]]
        # Maps a study name to its study_id.
        self._study_name_to_id = {}  # type: Dict[str, int]
        # Per-study bookkeeping (trials, attrs, direction, best trial).
        self._studies = {}  # type: Dict[int, _StudyInfo]

        self._max_study_id = -1
        self._max_trial_id = -1

        # Re-entrant lock guarding all shared state above.
        self._lock = threading.RLock()

    def __getstate__(self):
        # type: () -> Dict[Any, Any]
        # Locks cannot be pickled; drop it and re-create it on unpickle.
        state = self.__dict__.copy()
        del state["_lock"]
        return state

    def __setstate__(self, state):
        # type: (Dict[Any, Any]) -> None
        self.__dict__.update(state)
        self._lock = threading.RLock()

    def create_new_study(self, study_name=None):
        # type: (Optional[str]) -> int

        with self._lock:
            study_id = self._max_study_id + 1
            self._max_study_id += 1

            if study_name is not None:
                if study_name in self._study_name_to_id:
                    raise DuplicatedStudyError
            else:
                # Auto-generate a unique name when none is given.
                study_uuid = str(uuid.uuid4())
                study_name = DEFAULT_STUDY_NAME_PREFIX + study_uuid
            self._studies[study_id] = _StudyInfo(study_name)
            self._study_name_to_id[study_name] = study_id

            return study_id

    def delete_study(self, study_id):
        # type: (int) -> None

        with self._lock:
            self._check_study_id(study_id)

            # Remove every index entry that points at this study.
            for trial in self._studies[study_id].trials:
                del self._trial_id_to_study_id_and_number[trial._trial_id]
            study_name = self._studies[study_id].name
            del self._study_name_to_id[study_name]
            del self._studies[study_id]

    def set_study_direction(self, study_id, direction):
        # type: (int, StudyDirection) -> None

        with self._lock:
            self._check_study_id(study_id)
            study = self._studies[study_id]
            # A direction, once set, may only be re-set to the same value.
            if study.direction != StudyDirection.NOT_SET and study.direction != direction:
                raise ValueError(
                    "Cannot overwrite study direction from {} to {}.".format(
                        study.direction, direction
                    )
                )
            study.direction = direction

    def set_study_user_attr(self, study_id, key, value):
        # type: (int, str, Any) -> None

        with self._lock:
            self._check_study_id(study_id)
            self._studies[study_id].user_attrs[key] = value

    def set_study_system_attr(self, study_id, key, value):
        # type: (int, str, Any) -> None

        with self._lock:
            self._check_study_id(study_id)
            self._studies[study_id].system_attrs[key] = value

    def get_study_id_from_name(self, study_name):
        # type: (str) -> int

        with self._lock:
            if study_name not in self._study_name_to_id:
                raise KeyError("No such study {}.".format(study_name))

            return self._study_name_to_id[study_name]

    def get_study_id_from_trial_id(self, trial_id):
        # type: (int) -> int

        with self._lock:
            self._check_trial_id(trial_id)
            return self._trial_id_to_study_id_and_number[trial_id][0]

    def get_study_name_from_id(self, study_id):
        # type: (int) -> str

        with self._lock:
            self._check_study_id(study_id)
            return self._studies[study_id].name

    def get_study_direction(self, study_id):
        # type: (int) -> StudyDirection

        with self._lock:
            self._check_study_id(study_id)
            return self._studies[study_id].direction

    def get_study_user_attrs(self, study_id):
        # type: (int) -> Dict[str, Any]

        with self._lock:
            self._check_study_id(study_id)
            # Deep-copy so callers cannot mutate internal state.
            return copy.deepcopy(self._studies[study_id].user_attrs)

    def get_study_system_attrs(self, study_id):
        # type: (int) -> Dict[str, Any]

        with self._lock:
            self._check_study_id(study_id)
            return copy.deepcopy(self._studies[study_id].system_attrs)

    def create_new_trial(self, study_id, template_trial=None):
        # type: (int, Optional[FrozenTrial]) -> int

        with self._lock:
            self._check_study_id(study_id)

            if template_trial is None:
                trial = self._create_running_trial()
            else:
                trial = copy.deepcopy(template_trial)

            trial_id = self._max_trial_id + 1
            self._max_trial_id += 1
            # The trial's number is its index in the study's trial list.
            trial.number = len(self._studies[study_id].trials)
            trial._trial_id = trial_id
            self._trial_id_to_study_id_and_number[trial_id] = (study_id, trial.number)
            self._studies[study_id].trials.append(trial)
            # A COMPLETE template trial may immediately become the best trial.
            self._update_cache(trial_id, study_id)
            return trial_id

    @staticmethod
    def _create_running_trial():
        # type: () -> FrozenTrial
        # Fresh RUNNING trial; id/number are dummies filled in by the caller.
        return FrozenTrial(
            trial_id=-1,  # dummy value.
            number=-1,  # dummy value.
            state=TrialState.RUNNING,
            params={},
            distributions={},
            user_attrs={},
            system_attrs={},
            value=None,
            intermediate_values={},
            datetime_start=datetime.now(),
            datetime_complete=None,
        )

    def set_trial_state(self, trial_id, state):
        # type: (int, TrialState) -> bool

        with self._lock:
            trial = self._get_trial(trial_id)
            self.check_trial_is_updatable(trial_id, trial.state)
            # Copy-on-write: mutate a shallow copy, then swap it in.
            trial = copy.copy(trial)
            # NOTE(review): this second check is redundant — copy.copy does
            # not change `state`, so the check above already covered it.
            self.check_trial_is_updatable(trial_id, trial.state)

            # RUNNING may only be entered from WAITING.
            if state == TrialState.RUNNING and trial.state != TrialState.WAITING:
                return False

            trial.state = state

            if state.is_finished():
                trial.datetime_complete = datetime.now()
                self._set_trial(trial_id, trial)
                study_id = self._trial_id_to_study_id_and_number[trial_id][0]
                # Finished trials may update the cached best trial.
                self._update_cache(trial_id, study_id)
            else:
                self._set_trial(trial_id, trial)

            return True

    def clear_params_and_dists(self, trial_id):
        # Drop all sampled params/distributions from a trial so it can be
        # re-suggested (BOExplain-specific extension, not in upstream optuna).
        # NOTE(review): unlike the other mutators, this does not take
        # self._lock — confirm it is only called from a single thread.
        trial = self._get_trial(trial_id)
        trial = copy.copy(trial)
        for name in list(trial.params.keys()):
            # if "_min" not in name and "_len" not in name:
            del trial.params[name]
            del trial.distributions[name]
        self._set_trial(trial_id, trial)
        return

    def set_trial_param(self, trial_id, param_name, param_value_internal, distribution):
        # type: (int, str, float, distributions.BaseDistribution) -> bool

        with self._lock:
            trial = self._get_trial(trial_id)

            self.check_trial_is_updatable(trial_id, trial.state)

            study_id = self._trial_id_to_study_id_and_number[trial_id][0]
            # Check param distribution compatibility with previous trial(s).
            if param_name in self._studies[study_id].param_distribution:
                distributions.check_distribution_compatibility(
                    self._studies[study_id].param_distribution[param_name], distribution
                )

            # Check param has not been set; otherwise, return False.
            if param_name in trial.params:
                return False

            # Set param distribution.
            self._studies[study_id].param_distribution[param_name] = distribution

            # Set param (copy-on-write on the trial and its dicts).
            trial = copy.copy(trial)
            trial.params = copy.copy(trial.params)
            trial.params[param_name] = distribution.to_external_repr(param_value_internal)
            trial.distributions = copy.copy(trial.distributions)
            trial.distributions[param_name] = distribution
            self._set_trial(trial_id, trial)

            return True

    def get_trial_number_from_id(self, trial_id):
        # type: (int) -> int

        with self._lock:
            self._check_trial_id(trial_id)
            return self._trial_id_to_study_id_and_number[trial_id][1]

    def get_best_trial(self, study_id):
        # type: (int) -> FrozenTrial

        with self._lock:
            self._check_study_id(study_id)

            # O(1) via the cache maintained by _update_cache.
            best_trial_id = self._studies[study_id].best_trial_id
            if best_trial_id is None:
                raise ValueError("No trials are completed yet.")
            return self.get_trial(best_trial_id)

    def get_trial_param(self, trial_id, param_name):
        # type: (int, str) -> float

        with self._lock:
            trial = self._get_trial(trial_id)
            distribution = trial.distributions[param_name]
            return distribution.to_internal_repr(trial.params[param_name])

    def set_trial_value(self, trial_id, value):
        # type: (int, float) -> None

        with self._lock:
            trial = self._get_trial(trial_id)
            self.check_trial_is_updatable(trial_id, trial.state)
            trial = copy.copy(trial)
            # NOTE(review): redundant repeat of the check above.
            self.check_trial_is_updatable(trial_id, trial.state)

            trial.value = value
            self._set_trial(trial_id, trial)

    def _update_cache(self, trial_id: int, study_id: int) -> None:
        # Refresh the cached best_trial_id if this COMPLETE trial beats it.
        trial = self._get_trial(trial_id)

        if trial.state != TrialState.COMPLETE:
            return

        best_trial_id = self._studies[study_id].best_trial_id
        if best_trial_id is None:
            self._studies[study_id].best_trial_id = trial_id
            return
        best_trial = self._get_trial(best_trial_id)
        assert best_trial is not None
        best_value = best_trial.value
        new_value = trial.value
        if best_value is None:
            self._studies[study_id].best_trial_id = trial_id
            return
        # Complete trials do not have `None` values.
        assert new_value is not None

        if self.get_study_direction(study_id) == StudyDirection.MAXIMIZE:
            if best_value < new_value:
                self._studies[study_id].best_trial_id = trial_id
        else:
            if best_value > new_value:
                self._studies[study_id].best_trial_id = trial_id

    def set_trial_intermediate_value(self, trial_id, step, intermediate_value):
        # type: (int, int, float) -> bool

        with self._lock:
            trial = self._get_trial(trial_id)
            self.check_trial_is_updatable(trial_id, trial.state)
            # NOTE(review): redundant repeat of the check above.
            self.check_trial_is_updatable(trial_id, trial.state)

            trial = copy.copy(trial)
            values = copy.copy(trial.intermediate_values)
            # A value reported for a step cannot be overwritten.
            if step in values:
                return False

            values[step] = intermediate_value
            trial.intermediate_values = values
            self._set_trial(trial_id, trial)

            return True

    def set_trial_user_attr(self, trial_id, key, value):
        # type: (int, str, Any) -> None

        with self._lock:
            self._check_trial_id(trial_id)
            trial = self._get_trial(trial_id)
            self.check_trial_is_updatable(trial_id, trial.state)
            # NOTE(review): redundant repeat of the check above.
            self.check_trial_is_updatable(trial_id, trial.state)

            trial = copy.copy(trial)
            trial.user_attrs = copy.copy(trial.user_attrs)
            trial.user_attrs[key] = value
            self._set_trial(trial_id, trial)

    def set_trial_system_attr(self, trial_id, key, value):
        # type: (int, str, Any) -> None

        with self._lock:
            trial = self._get_trial(trial_id)
            self.check_trial_is_updatable(trial_id, trial.state)
            # NOTE(review): redundant repeat of the check above.
            self.check_trial_is_updatable(trial_id, trial.state)

            trial = copy.copy(trial)
            trial.system_attrs = copy.copy(trial.system_attrs)
            trial.system_attrs[key] = value
            self._set_trial(trial_id, trial)

    def get_trial(self, trial_id):
        # type: (int) -> FrozenTrial

        with self._lock:
            # Deep-copy so callers cannot mutate internal state.
            return copy.deepcopy(self._get_trial(trial_id))

    def _get_trial(self, trial_id: int) -> FrozenTrial:
        # Internal read without copying; callers must hold the lock or
        # treat the result as read-only.
        self._check_trial_id(trial_id)
        # study_id=0, trial_number=0,1,2,...
        study_id, trial_number = self._trial_id_to_study_id_and_number[trial_id]
        return self._studies[study_id].trials[trial_number]

    def _set_trial(self, trial_id: int, trial: FrozenTrial) -> None:
        # Swap the stored trial for an updated copy (copy-on-write commit).
        study_id, trial_number = self._trial_id_to_study_id_and_number[trial_id]
        self._studies[study_id].trials[trial_number] = trial

    def get_all_trials(self, study_id, deepcopy=True):
        # type: (int, bool) -> List[FrozenTrial]

        with self._lock:
            self._check_study_id(study_id)
            if deepcopy:
                return copy.deepcopy(self._studies[study_id].trials)
            else:
                # Shallow list copy: cheap, but elements are shared.
                return self._studies[study_id].trials[:]

    def get_n_trials(self, study_id, state=None):
        # type: (int, Optional[TrialState]) -> int

        with self._lock:
            self._check_study_id(study_id)
            if state is None:
                return len(self._studies[study_id].trials)

            return sum(
                trial.state == state for trial in self.get_all_trials(study_id, deepcopy=False)
            )

    def _check_study_id(self, study_id):
        # type: (int) -> None

        if study_id not in self._studies:
            raise KeyError("No study with study_id {} exists.".format(study_id))

    def _check_trial_id(self, trial_id: int) -> None:
        if trial_id not in self._trial_id_to_study_id_and_number:
            raise KeyError("No trial with trial_id {} exists.".format(trial_id))
class _StudyInfo:
    """Mutable in-memory record holding everything stored for one study."""

    def __init__(self, name: str) -> None:
        # Identity and optimization direction (direction starts as NOT_SET).
        self.name = name  # type: str
        self.direction = StudyDirection.NOT_SET
        # Trial bookkeeping: all trials, plus the id of the best trial so far
        # (None until a completed trial exists).
        self.trials = []  # type: List[FrozenTrial]
        self.best_trial_id = None  # type: Optional[int]
        # Per-study metadata maps.
        self.param_distribution = {}  # type: Dict[str, distributions.BaseDistribution]
        self.user_attrs = {}  # type: Dict[str, Any]
        self.system_attrs = {}  # type: Dict[str, Any]
| 14,933
| 33.09589
| 95
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/storages/__init__.py
|
from typing import Union # NOQA
# from optuna.storages.base import BaseStorage
# from optuna.storages.in_memory import InMemoryStorage
from .base import BaseStorage
from .in_memory import InMemoryStorage
def get_storage(storage):
    # type: (Union[None, str, BaseStorage]) -> BaseStorage
    """Return ``storage`` unchanged, or a fresh ``InMemoryStorage`` when it is ``None``."""
    if storage is not None:
        return storage
    return InMemoryStorage()
| 382
| 26.357143
| 58
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/trial/_fixed.py
|
import datetime
# from optuna import distributions
# from optuna.trial._base import BaseTrial
# from optuna.trial._util import _adjust_discrete_uniform_high
from .. import distributions
from ._base import BaseTrial
from ._util import _adjust_discrete_uniform_high
class FixedTrial(BaseTrial):
    """A trial class which suggests a fixed value for each parameter.
    This object has the same methods as :class:`~optuna.trial.Trial`, and it suggests pre-defined
    parameter values. The parameter values can be determined at the construction of the
    :class:`~optuna.trial.FixedTrial` object. In contrast to :class:`~optuna.trial.Trial`,
    :class:`~optuna.trial.FixedTrial` does not depend on :class:`~optuna.study.Study`, and it is
    useful for deploying optimization results.
    Example:
        Evaluate an objective function with parameter values given by a user.
        .. testcode::
            import optuna
            def objective(trial):
                x = trial.suggest_uniform('x', -100, 100)
                y = trial.suggest_categorical('y', [-1, 0, 1])
                return x ** 2 + y
            assert objective(optuna.trial.FixedTrial({'x': 1, 'y': 0})) == 1
    .. note::
        Please refer to :class:`~optuna.trial.Trial` for details of methods and properties.
    Args:
        params:
            A dictionary containing all parameters.
        number:
            A trial number. Defaults to ``0``.
    """
    def __init__(self, params, number=0):
        # type: (Dict[str, Any], int) -> None
        self._params = params
        # Parameters actually requested via suggest_* (a subset of _params).
        self._suggested_params = {}  # type: Dict[str, Any]
        # Distribution recorded for each suggested parameter name.
        self._distributions = {}  # type: Dict[str, BaseDistribution]
        self._user_attrs = {}  # type: Dict[str, Any]
        self._system_attrs = {}  # type: Dict[str, Any]
        self._datetime_start = datetime.datetime.now()
        self._number = number
    def suggest_float(self, name, low, high, *, log=False, step=None):
        # type: (str, float, float, bool, Optional[float]) -> float
        # Dispatch on the (step, log) combination, mirroring Trial.suggest_float;
        # `step` together with `log` is rejected there too.
        if step is not None:
            if log:
                raise NotImplementedError(
                    "The parameter `step` is not supported when `log` is True."
                )
            else:
                return self._suggest(
                    name, distributions.DiscreteUniformDistribution(low=low, high=high, q=step)
                )
        else:
            if log:
                return self._suggest(
                    name, distributions.LogUniformDistribution(low=low, high=high)
                )
            else:
                return self._suggest(name, distributions.UniformDistribution(low=low, high=high))
    def suggest_uniform(self, name, low, high):
        # type: (str, float, float) -> float
        """Return the fixed value for ``name``, validated against a uniform range."""
        return self._suggest(name, distributions.UniformDistribution(low=low, high=high))
    def suggest_loguniform(self, name, low, high):
        # type: (str, float, float) -> float
        """Return the fixed value for ``name``, validated against a log-uniform range."""
        return self._suggest(name, distributions.LogUniformDistribution(low=low, high=high))
    def suggest_discrete_uniform(self, name, low, high, q):
        # type: (str, float, float, float) -> float
        """Return the fixed value for ``name``, validated against a discretized range."""
        # `high` may be lowered so that (high - low) is divisible by q.
        high = _adjust_discrete_uniform_high(name, low, high, q)
        discrete = distributions.DiscreteUniformDistribution(low=low, high=high, q=q)
        return self._suggest(name, discrete)
    def suggest_int(self, name, low, high, step=1, log=False):
        # type: (str, int, int, int, bool) -> int
        """Return the fixed value for ``name`` as an ``int``."""
        if log:
            sample = self._suggest(
                name, distributions.IntLogUniformDistribution(low=low, high=high, step=step)
            )
        else:
            sample = self._suggest(
                name, distributions.IntUniformDistribution(low=low, high=high, step=step)
            )
        return int(sample)
    def suggest_categorical(self, name, choices):
        # type: (str, Sequence[CategoricalChoiceType]) -> CategoricalChoiceType
        """Return the fixed value for ``name``, validated against ``choices``."""
        choices = tuple(choices)
        return self._suggest(name, distributions.CategoricalDistribution(choices=choices))
    def _suggest(self, name, distribution):
        # type: (str, BaseDistribution) -> Any
        """Look up the pre-set value for ``name`` and validate it against ``distribution``.

        Raises ``ValueError`` when the parameter is missing or out of range.
        """
        if name not in self._params:
            raise ValueError(
                "The value of the parameter '{}' is not found. Please set it at "
                "the construction of the FixedTrial object.".format(name)
            )
        value = self._params[name]
        param_value_in_internal_repr = distribution.to_internal_repr(value)
        if not distribution._contains(param_value_in_internal_repr):
            raise ValueError(
                "The value {} of the parameter '{}' is out of "
                "the range of the distribution {}.".format(value, name, distribution)
            )
        # A repeated suggestion for the same name must use a compatible distribution.
        if name in self._distributions:
            distributions.check_distribution_compatibility(self._distributions[name], distribution)
        self._suggested_params[name] = value
        self._distributions[name] = distribution
        return value
    def report(self, value, step):
        # type: (float, int) -> None
        # Intermediate values are meaningless for a fixed trial; intentional no-op.
        pass
    def should_prune(self, step=None):
        # type: (Optional[int]) -> bool
        # A fixed trial is never pruned.
        return False
    def set_user_attr(self, key, value):
        # type: (str, Any) -> None
        self._user_attrs[key] = value
    def set_system_attr(self, key, value):
        # type: (str, Any) -> None
        self._system_attrs[key] = value
    @property
    def params(self):
        # type: () -> Dict[str, Any]
        # Only the parameters that were actually suggested, not all of _params.
        return self._suggested_params
    @property
    def distributions(self):
        # type: () -> Dict[str, BaseDistribution]
        return self._distributions
    @property
    def user_attrs(self):
        # type: () -> Dict[str, Any]
        return self._user_attrs
    @property
    def system_attrs(self):
        # type: () -> Dict[str, Any]
        return self._system_attrs
    @property
    def datetime_start(self):
        # type: () -> Optional[datetime.datetime]
        return self._datetime_start
    @property
    def number(self) -> int:
        return self._number
| 6,151
| 31.041667
| 99
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/trial/_frozen.py
|
import datetime
import warnings
# from optuna import distributions
# from optuna import logging
# from optuna.trial._state import TrialState
from .. import distributions
from .. import logging
from ._state import TrialState
# Module-level logger (optuna's logging wrapper), shared by this module.
_logger = logging.get_logger(__name__)
class FrozenTrial(object):
    """Status and results of a :class:`~optuna.trial.Trial`.
    Attributes:
        number:
            Unique and consecutive number of :class:`~optuna.trial.Trial` for each
            :class:`~optuna.study.Study`. Note that this field uses zero-based numbering.
        state:
            :class:`TrialState` of the :class:`~optuna.trial.Trial`.
        value:
            Objective value of the :class:`~optuna.trial.Trial`.
        datetime_start:
            Datetime where the :class:`~optuna.trial.Trial` started.
        datetime_complete:
            Datetime where the :class:`~optuna.trial.Trial` finished.
        params:
            Dictionary that contains suggested parameters.
        user_attrs:
            Dictionary that contains the attributes of the :class:`~optuna.trial.Trial` set with
            :func:`optuna.trial.Trial.set_user_attr`.
        intermediate_values:
            Intermediate objective values set with :func:`optuna.trial.Trial.report`.
    """
    def __init__(
        self,
        number,  # type: int
        state,  # type: TrialState
        value,  # type: Optional[float]
        datetime_start,  # type: Optional[datetime.datetime]
        datetime_complete,  # type: Optional[datetime.datetime]
        params,  # type: Dict[str, Any]
        distributions,  # type: Dict[str, BaseDistribution]
        user_attrs,  # type: Dict[str, Any]
        system_attrs,  # type: Dict[str, Any]
        intermediate_values,  # type: Dict[int, float]
        trial_id,  # type: int
    ):
        # type: (...) -> None
        self.number = number
        self.state = state
        self.value = value
        self.datetime_start = datetime_start
        self.datetime_complete = datetime_complete
        self.params = params
        self.user_attrs = user_attrs
        self.system_attrs = system_attrs
        self.intermediate_values = intermediate_values
        # Underscore-prefixed: exposed via the `distributions` property and
        # the deprecated `trial_id` property below.
        self._distributions = distributions
        self._trial_id = trial_id
    # Ordered list of fields required for `__repr__`, `__hash__` and dataframe creation.
    # TODO(hvy): Remove this list in Python 3.6 as the order of `self.__dict__` is preserved.
    _ordered_fields = [
        "number",
        "value",
        "datetime_start",
        "datetime_complete",
        "params",
        "_distributions",
        "user_attrs",
        "system_attrs",
        "intermediate_values",
        "_trial_id",
        "state",
    ]
    def __eq__(self, other):
        # type: (Any) -> bool
        # Value equality over the full attribute dict (includes _trial_id).
        if not isinstance(other, FrozenTrial):
            return NotImplemented
        return other.__dict__ == self.__dict__
    def __lt__(self, other):
        # type: (Any) -> bool
        # Ordering is by trial number only.
        if not isinstance(other, FrozenTrial):
            return NotImplemented
        return self.number < other.number
    def __le__(self, other):
        # type: (Any) -> bool
        if not isinstance(other, FrozenTrial):
            return NotImplemented
        return self.number <= other.number
    def __hash__(self):
        # type: () -> int
        # NOTE(review): the tuple includes dict-valued fields (params,
        # user_attrs, ...), so hash() raises TypeError whenever those hold
        # real (unhashable) dicts — confirm intended usage.
        return hash(tuple(getattr(self, field) for field in self._ordered_fields))
    def __repr__(self):
        # type: () -> str
        # Private field names are shown without their leading underscore.
        return "{cls}({kwargs})".format(
            cls=self.__class__.__name__,
            kwargs=", ".join(
                "{field}={value}".format(
                    field=field if not field.startswith("_") else field[1:],
                    value=repr(getattr(self, field)),
                )
                for field in self._ordered_fields
            ),
        )
    def _validate(self):
        # type: () -> None
        """Check internal consistency; raise ``ValueError`` on any violation."""
        if self.datetime_start is None:
            raise ValueError("`datetime_start` is supposed to be set.")
        if self.state.is_finished():
            if self.datetime_complete is None:
                raise ValueError("`datetime_complete` is supposed to be set for a finished trial.")
        else:
            if self.datetime_complete is not None:
                raise ValueError(
                    "`datetime_complete` is supposed to be None for an unfinished trial."
                )
        if self.state == TrialState.COMPLETE and self.value is None:
            raise ValueError("`value` is supposed to be set for a complete trial.")
        # Every suggested parameter must have a distribution and vice versa.
        if set(self.params.keys()) != set(self.distributions.keys()):
            raise ValueError(
                "Inconsistent parameters {} and distributions {}.".format(
                    set(self.params.keys()), set(self.distributions.keys())
                )
            )
        # Each parameter value must lie inside its recorded distribution.
        for param_name, param_value in self.params.items():
            distribution = self.distributions[param_name]
            param_value_in_internal_repr = distribution.to_internal_repr(param_value)
            if not distribution._contains(param_value_in_internal_repr):
                raise ValueError(
                    "The value {} of parameter '{}' isn't contained in the distribution "
                    "{}.".format(param_value, param_name, distribution)
                )
    @property
    def distributions(self):
        # type: () -> Dict[str, BaseDistribution]
        """Dictionary that contains the distributions of :attr:`params`."""
        return self._distributions
    @distributions.setter
    def distributions(self, value):
        # type: (Dict[str, BaseDistribution]) -> None
        self._distributions = value
    @property
    def trial_id(self):
        # type: () -> int
        """Return the trial ID.
        .. deprecated:: 0.19.0
            The direct use of this attribute is deprecated and it is recommended that you use
            :attr:`~optuna.trial.FrozenTrial.number` instead.
        Returns:
            The trial ID.
        """
        # Emits both a DeprecationWarning and a log warning on every access.
        warnings.warn(
            "The use of `FrozenTrial.trial_id` is deprecated. "
            "Please use `FrozenTrial.number` instead.",
            DeprecationWarning,
        )
        _logger.warning(
            "The use of `FrozenTrial.trial_id` is deprecated. "
            "Please use `FrozenTrial.number` instead."
        )
        return self._trial_id
    @property
    def last_step(self):
        # type: () -> Optional[int]
        # Highest step for which an intermediate value was reported, or None.
        if len(self.intermediate_values) == 0:
            return None
        else:
            return max(self.intermediate_values.keys())
    @property
    def duration(self):
        # type: () -> Optional[datetime.timedelta]
        """Return the elapsed time taken to complete the trial.
        Returns:
            The duration.
        """
        if self.datetime_start and self.datetime_complete:
            return self.datetime_complete - self.datetime_start
        else:
            # Unfinished (or never-started) trials have no duration.
            return None
| 7,018
| 31.050228
| 99
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/trial/_util.py
|
import decimal
# from optuna import logging
from .. import logging
# Module-level logger (optuna's logging wrapper), shared by this module.
_logger = logging.get_logger(__name__)
def _adjust_discrete_uniform_high(name, low, high, q):
# type: (str, float, float, float) -> float
d_high = decimal.Decimal(str(high))
d_low = decimal.Decimal(str(low))
d_q = decimal.Decimal(str(q))
d_r = d_high - d_low
if d_r % d_q != decimal.Decimal("0"):
high = float((d_r // d_q) * d_q + d_low)
_logger.warning(
"The range of parameter `{}` is not divisible by `q`, and is "
"replaced by [{}, {}].".format(name, low, high)
)
return high
| 631
| 23.307692
| 74
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/trial/_base.py
|
import abc
import datetime
# from optuna import distributions
# from optuna import logging
from .. import distributions
from .. import logging
# Module-level logger (optuna's logging wrapper), shared by this module.
_logger = logging.get_logger(__name__)
class BaseTrial(object, metaclass=abc.ABCMeta):
    """Base class for trials.
    Note that this class is not supposed to be directly accessed by library users.

    Concrete subclasses (``Trial``, ``FixedTrial``) implement every abstract
    suggest/report/attribute method declared below.
    """
    @abc.abstractmethod
    def suggest_float(self, name, low, high, *, log=False, step=None):
        # type: (str, float, float, bool, Optional[float])-> float
        # TODO(nzw0301) swap log's position for step's one to match suggest_int for consistency.
        raise NotImplementedError
    @abc.abstractmethod
    def suggest_uniform(self, name, low, high):
        # type: (str, float, float) -> float
        raise NotImplementedError
    @abc.abstractmethod
    def suggest_loguniform(self, name, low, high):
        # type: (str, float, float) -> float
        raise NotImplementedError
    @abc.abstractmethod
    def suggest_discrete_uniform(self, name, low, high, q):
        # type: (str, float, float, float) -> float
        raise NotImplementedError
    @abc.abstractmethod
    def suggest_int(self, name, low, high, step=1, log=False):
        # type: (str, int, int, int, bool) -> int
        raise NotImplementedError
    @abc.abstractmethod
    def suggest_categorical(self, name, choices):
        # type: (str, Sequence[CategoricalChoiceType]) -> CategoricalChoiceType
        raise NotImplementedError
    @abc.abstractmethod
    def report(self, value, step):
        # type: (float, int) -> None
        raise NotImplementedError
    @abc.abstractmethod
    def should_prune(self, step=None):
        # type: (Optional[int]) -> bool
        raise NotImplementedError
    @abc.abstractmethod
    def set_user_attr(self, key, value):
        # type: (str, Any) -> None
        raise NotImplementedError
    @abc.abstractmethod
    def set_system_attr(self, key, value):
        # type: (str, Any) -> None
        raise NotImplementedError
    @property
    @abc.abstractmethod
    def params(self):
        # type: () -> Dict[str, Any]
        raise NotImplementedError
    @property
    @abc.abstractmethod
    def distributions(self):
        # type: () -> Dict[str, BaseDistribution]
        raise NotImplementedError
    @property
    @abc.abstractmethod
    def user_attrs(self):
        # type: () -> Dict[str, Any]
        raise NotImplementedError
    @property
    @abc.abstractmethod
    def system_attrs(self):
        # type: () -> Dict[str, Any]
        raise NotImplementedError
    @property
    @abc.abstractmethod
    def datetime_start(self):
        # type: () -> Optional[datetime.datetime]
        raise NotImplementedError
    # NOTE(review): unlike the properties above, `number` is NOT marked
    # @abc.abstractmethod, so subclasses can be instantiated without
    # overriding it (and would raise only on access) — confirm intended.
    @property
    def number(self) -> int:
        raise NotImplementedError
| 2,836
| 22.840336
| 96
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/trial/__init__.py
|
# from optuna.trial._base import BaseTrial # NOQA
# from optuna.trial._fixed import FixedTrial # NOQA
# from optuna.trial._frozen import FrozenTrial # NOQA
# from optuna.trial._state import TrialState # NOQA
# from optuna.trial._trial import Trial # NOQA
from ._base import BaseTrial # NOQA
from ._fixed import FixedTrial # NOQA
from ._frozen import FrozenTrial # NOQA
from ._state import TrialState # NOQA
from ._trial import Trial # NOQA
| 449
| 44
| 54
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/trial/_state.py
|
import enum
class TrialState(enum.Enum):
    """State of a :class:`~optuna.trial.Trial`.

    Attributes:
        RUNNING:
            The :class:`~optuna.trial.Trial` is running.
        COMPLETE:
            The :class:`~optuna.trial.Trial` has been finished without any error.
        PRUNED:
            The :class:`~optuna.trial.Trial` has been pruned with
            :class:`~optuna.exceptions.TrialPruned`.
        FAIL:
            The :class:`~optuna.trial.Trial` has failed due to an uncaught error.
    """

    RUNNING = 0
    COMPLETE = 1
    PRUNED = 2
    FAIL = 3
    WAITING = 4

    def __repr__(self):
        # type: () -> str
        # Make repr() read like str(), e.g. "TrialState.COMPLETE".
        return str(self)

    def is_finished(self):
        # type: () -> bool
        """Return :obj:`True` unless the trial is still RUNNING or WAITING."""
        return self not in (TrialState.RUNNING, TrialState.WAITING)
| 805
| 22.705882
| 81
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/trial/_trial.py
|
import datetime
import warnings
import numpy as np
from operator import itemgetter
from itertools import chain, combinations, product
# from optuna import distributions
# from optuna.distributions import CategoricalDistribution
# from optuna.distributions import DiscreteUniformDistribution
# from optuna.distributions import IntLogUniformDistribution
# from optuna.distributions import IntUniformDistribution
# from optuna.distributions import LogUniformDistribution
# from optuna.distributions import UniformDistribution
# from optuna import logging
# from optuna import pruners
# from optuna.trial._base import BaseTrial
# from optuna.trial._util import _adjust_discrete_uniform_high
from .. import distributions
from ..distributions import CategoricalDistribution
from ..distributions import DiscreteUniformDistribution
from ..distributions import IntLogUniformDistribution
from ..distributions import IntUniformDistribution
from ..distributions import LogUniformDistribution
from ..distributions import UniformDistribution
from .. import logging
from .. import pruners
from ._base import BaseTrial
from ._util import _adjust_discrete_uniform_high
class Trial(BaseTrial):
"""A trial is a process of evaluating an objective function.
This object is passed to an objective function and provides interfaces to get parameter
suggestion, manage the trial's state, and set/get user-defined attributes of the trial.
Note that the direct use of this constructor is not recommended.
This object is seamlessly instantiated and passed to the objective function behind
the :func:`optuna.study.Study.optimize()` method; hence library users do not care about
instantiation of this object.
Args:
study:
A :class:`~optuna.study.Study` object.
trial_id:
A trial ID that is automatically generated.
"""
    def __init__(
        self,
        study,  # type: Study
        trial_id,  # type: int
    ):
        # type: (...) -> None
        """Bind this trial to its study and pre-compute sampler state.

        ``study`` supplies the storage, sampler and study id; ``trial_id`` is
        the storage-generated id of this trial.
        """
        self.study = study
        self._trial_id = trial_id
        # TODO(Yanase): Remove _study_id attribute, and use study._study_id instead.
        self._study_id = self.study._study_id
        # Convenience aliases onto the study's internals.
        self.storage = self.study._storage
        self.logger = logging.get_logger(__name__)
        # Must run last: it reads self.study / self.storage / self._trial_id.
        self._init_relative_params()
def _init_relative_params(self):
# type: () -> None
# get the current trial object
trial = self.storage.get_trial(self._trial_id)
# does something if hyperband pruner, else returns study unchanged
study = pruners._filter_study(self.study, trial)
# returns {} for TPE
self.relative_search_space = self.study.sampler.infer_relative_search_space(study, trial)
# returns {}
self.relative_params = self.study.sampler.sample_relative(
study, trial, self.relative_search_space
)
    def suggest_float(self, name, low, high, *, log=False, step=None):
        # type: (str, float, float, bool, Optional[float]) -> float
        """Suggest a value for the floating point parameter.
        Note that this is a wrapper method for :func:`~optuna.trial.Trial.suggest_uniform`,
        :func:`~optuna.trial.Trial.suggest_loguniform` and
        :func:`~optuna.trial.Trial.suggest_discrete_uniform`.
        .. versionadded:: 1.3.0
        .. seealso::
            Please see also :func:`~optuna.trial.Trial.suggest_uniform`,
            :func:`~optuna.trial.Trial.suggest_loguniform` and
            :func:`~optuna.trial.Trial.suggest_discrete_uniform`.
        Example:
            Suggest a momentum, learning rate and scaling factor of learning rate
            for neural network training.
            .. testsetup::
                import numpy as np
                import optuna
                from sklearn.model_selection import train_test_split
                from sklearn.neural_network import MLPClassifier
                np.random.seed(seed=0)
                X = np.random.randn(200).reshape(-1, 1)
                y = np.random.randint(0, 2, 200)
                X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
            .. testcode::
                def objective(trial):
                    momentum = trial.suggest_float('momentum', 0.0, 1.0)
                    learning_rate_init = trial.suggest_float('learning_rate_init',
                                                             1e-5, 1e-3, log=True)
                    power_t = trial.suggest_float('power_t', 0.2, 0.8, step=0.1)
                    clf = MLPClassifier(hidden_layer_sizes=(100, 50), momentum=momentum,
                                        learning_rate_init=learning_rate_init,
                                        solver='sgd', random_state=0, power_t=power_t)
                    clf.fit(X_train, y_train)
                    return clf.score(X_valid, y_valid)
                study = optuna.create_study(direction='maximize')
                study.optimize(objective, n_trials=3)
        Args:
            name:
                A parameter name.
            low:
                Lower endpoint of the range of suggested values. ``low`` is included in the range.
            high:
                Upper endpoint of the range of suggested values. ``high`` is excluded from the
                range.
            log:
                A flag to sample the value from the log domain or not.
                If ``log`` is true, the value is sampled from the range in the log domain.
                Otherwise, the value is sampled from the range in the linear domain.
                See also :func:`suggest_uniform` and :func:`suggest_loguniform`.
            step:
                A step of discretization.
        Returns:
            A suggested float value.
        """
        # Pure dispatch on the (step, log) combination; the combination of
        # both is explicitly unsupported.
        if step is not None:
            if log:
                raise NotImplementedError(
                    "The parameter `step` is not supported when `log` is True."
                )
            else:
                return self.suggest_discrete_uniform(name, low, high, step)
        else:
            if log:
                return self.suggest_loguniform(name, low, high)
            else:
                return self.suggest_uniform(name, low, high)
    def suggest_uniform(self, name, low, high):
        # type: (str, float, float) -> float
        """Suggest a value for the continuous parameter.
        The value is sampled from the range :math:`[\\mathsf{low}, \\mathsf{high})`
        in the linear domain. When :math:`\\mathsf{low} = \\mathsf{high}`, the value of
        :math:`\\mathsf{low}` will be returned.
        Example:
            Suggest a momentum for neural network training.
            .. testsetup::
                import numpy as np
                from sklearn.model_selection import train_test_split
                np.random.seed(seed=0)
                X = np.random.randn(200).reshape(-1, 1)
                y = np.random.randint(0, 2, 200)
                X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
            .. testcode::
                import optuna
                from sklearn.neural_network import MLPClassifier
                def objective(trial):
                    momentum = trial.suggest_uniform('momentum', 0.0, 1.0)
                    clf = MLPClassifier(hidden_layer_sizes=(100, 50), momentum=momentum,
                                        solver='sgd', random_state=0)
                    clf.fit(X_train, y_train)
                    return clf.score(X_valid, y_valid)
                study = optuna.create_study(direction='maximize')
                study.optimize(objective, n_trials=3)
        Args:
            name:
                A parameter name.
            low:
                Lower endpoint of the range of suggested values. ``low`` is included in the range.
            high:
                Upper endpoint of the range of suggested values. ``high`` is excluded from the
                range.
        Returns:
            A suggested float value.
        """
        distribution = UniformDistribution(low=low, high=high)
        self._check_distribution(name, distribution)
        if low == high:
            # Degenerate range: short-circuit without sampling.
            return self._set_new_param_or_get_existing(name, low, distribution)
        return self._suggest(name, distribution)
    def suggest_loguniform(self, name, low, high):
        # type: (str, float, float) -> float
        """Suggest a value for the continuous parameter.
        The value is sampled from the range :math:`[\\mathsf{low}, \\mathsf{high})`
        in the log domain. When :math:`\\mathsf{low} = \\mathsf{high}`, the value of
        :math:`\\mathsf{low}` will be returned.
        Example:
            Suggest penalty parameter ``C`` of `SVC <https://scikit-learn.org/stable/modules/
            generated/sklearn.svm.SVC.html>`_.
            .. testsetup::
                import numpy as np
                from sklearn.model_selection import train_test_split
                np.random.seed(seed=0)
                X = np.random.randn(50).reshape(-1, 1)
                y = np.random.randint(0, 2, 50)
                X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
            .. testcode::
                import optuna
                from sklearn.svm import SVC
                def objective(trial):
                    c = trial.suggest_loguniform('c', 1e-5, 1e2)
                    clf = SVC(C=c, gamma='scale', random_state=0)
                    clf.fit(X_train, y_train)
                    return clf.score(X_valid, y_valid)
                study = optuna.create_study(direction='maximize')
                study.optimize(objective, n_trials=3)
        Args:
            name:
                A parameter name.
            low:
                Lower endpoint of the range of suggested values. ``low`` is included in the range.
            high:
                Upper endpoint of the range of suggested values. ``high`` is excluded from the
                range.
        Returns:
            A suggested float value.
        """
        distribution = LogUniformDistribution(low=low, high=high)
        self._check_distribution(name, distribution)
        if low == high:
            # Degenerate range: short-circuit without sampling.
            return self._set_new_param_or_get_existing(name, low, distribution)
        return self._suggest(name, distribution)
    def suggest_discrete_uniform(self, name, low, high, q):
        # type: (str, float, float, float) -> float
        """Suggest a value for the discrete parameter.
        The value is sampled from the range :math:`[\\mathsf{low}, \\mathsf{high}]`,
        and the step of discretization is :math:`q`. More specifically,
        this method returns one of the values in the sequence
        :math:`\\mathsf{low}, \\mathsf{low} + q, \\mathsf{low} + 2 q, \\dots,
        \\mathsf{low} + k q \\le \\mathsf{high}`,
        where :math:`k` denotes an integer. Note that :math:`high` may be changed due to round-off
        errors if :math:`q` is not an integer. Please check warning messages to find the changed
        values.
        Example:
            Suggest a fraction of samples used for fitting the individual learners of
            `GradientBoostingClassifier <https://scikit-learn.org/stable/modules/generated/
            sklearn.ensemble.GradientBoostingClassifier.html>`_.
            .. testsetup::
                import numpy as np
                from sklearn.model_selection import train_test_split
                np.random.seed(seed=0)
                X = np.random.randn(50).reshape(-1, 1)
                y = np.random.randint(0, 2, 50)
                X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
            .. testcode::
                import optuna
                from sklearn.ensemble import GradientBoostingClassifier
                def objective(trial):
                    subsample = trial.suggest_discrete_uniform('subsample', 0.1, 1.0, 0.1)
                    clf = GradientBoostingClassifier(subsample=subsample, random_state=0)
                    clf.fit(X_train, y_train)
                    return clf.score(X_valid, y_valid)
                study = optuna.create_study(direction='maximize')
                study.optimize(objective, n_trials=3)
        Args:
            name:
                A parameter name.
            low:
                Lower endpoint of the range of suggested values. ``low`` is included in the range.
            high:
                Upper endpoint of the range of suggested values. ``high`` is included in the range.
            q:
                A step of discretization.
        Returns:
            A suggested float value.
        """
        # `high` may be lowered so that (high - low) is divisible by q (logged).
        high = _adjust_discrete_uniform_high(name, low, high, q)
        distribution = DiscreteUniformDistribution(low=low, high=high, q=q)
        self._check_distribution(name, distribution)
        if low == high:
            # Degenerate range: short-circuit without sampling.
            return self._set_new_param_or_get_existing(name, low, distribution)
        return self._suggest(name, distribution)
    def suggest_int(self, name, low, high, step=1, log=False):
        # type: (str, int, int, int, bool) -> int
        """Suggest a value for the integer parameter.
        The value is sampled from the integers in :math:`[\\mathsf{low}, \\mathsf{high}]`.
        Example:
            Suggest the number of trees in `RandomForestClassifier <https://scikit-learn.org/
            stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html>`_.
            .. testsetup::
                import numpy as np
                from sklearn.model_selection import train_test_split
                np.random.seed(seed=0)
                X = np.random.randn(50).reshape(-1, 1)
                y = np.random.randint(0, 2, 50)
                X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
            .. testcode::
                import optuna
                from sklearn.ensemble import RandomForestClassifier
                def objective(trial):
                    n_estimators = trial.suggest_int('n_estimators', 50, 400)
                    clf = RandomForestClassifier(n_estimators=n_estimators, random_state=0)
                    clf.fit(X_train, y_train)
                    return clf.score(X_valid, y_valid)
                study = optuna.create_study(direction='maximize')
                study.optimize(objective, n_trials=3)
        Args:
            name:
                A parameter name.
            low:
                Lower endpoint of the range of suggested values. ``low`` is included in the range.
            high:
                Upper endpoint of the range of suggested values. ``high`` is included in the range.
            log:
                A flag to sample the value from the log domain or not.
                If ``log`` is true, at first, the range of suggested values is divided into grid
                points of width ``step``. The range of suggested values is then converted to a log
                domain, from which a value is uniformly sampled. The uniformly sampled value is
                re-converted to the original domain and rounded to the nearest grid point that we
                just split, and the suggested value is determined.
                For example,
                if `low = 2`, `high = 8` and `step = 2`,
                then the range of suggested values is divided by ``step`` as `[2, 4, 6, 8]`
                and lower values tend to be more sampled than higher values.
        """
        # create the IntUniformDistribution
        distribution = IntUniformDistribution(
            low=low, high=high, step=step
        )  # type: Union[IntUniformDistribution, IntLogUniformDistribution]
        if log:
            # Snap `high` down to the largest grid point reachable with `step`.
            high = (
                distribution.high - distribution.low
            ) // distribution.step * distribution.step + distribution.low
            distribution = IntLogUniformDistribution(low=low, high=high, step=step)
        self._check_distribution(name, distribution)
        if low == high:
            return self._set_new_param_or_get_existing(name, low, distribution)
        # NOTE(review): unlike the other suggest_* methods (and the declared
        # `-> int` type comment), this path returns a 5-tuple from the
        # BOExplain-modified `_suggest`, while the `low == high` shortcut
        # above returns a bare value. Callers must handle both shapes —
        # confirm this asymmetry is intended.
        param_value, name, samples, scores, distribution = self._suggest(name, distribution)
        return int(param_value), name, samples, scores, distribution
    def suggest_categorical(self, name, choices):
        # type: (str, Sequence[CategoricalChoiceType]) -> CategoricalChoiceType
        """Suggest a value for the categorical parameter.
        The value is sampled from ``choices``.
        Example:
            Suggest a kernel function of `SVC <https://scikit-learn.org/stable/modules/generated/
            sklearn.svm.SVC.html>`_.
            .. testsetup::
                import numpy as np
                from sklearn.model_selection import train_test_split
                np.random.seed(seed=0)
                X = np.random.randn(50).reshape(-1, 1)
                y = np.random.randint(0, 2, 50)
                X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
            .. testcode::
                import optuna
                from sklearn.svm import SVC
                def objective(trial):
                    kernel = trial.suggest_categorical('kernel', ['linear', 'poly', 'rbf'])
                    clf = SVC(kernel=kernel, gamma='scale', random_state=0)
                    clf.fit(X_train, y_train)
                    return clf.score(X_valid, y_valid)
                study = optuna.create_study(direction='maximize')
                study.optimize(objective, n_trials=3)
        Args:
            name:
                A parameter name.
            choices:
                Parameter value candidates.
        .. seealso::
            :class:`~optuna.distributions.CategoricalDistribution`.
        Returns:
            A suggested value.
        """
        # categorical values
        # Freeze the candidate list so the distribution is hashable/stable.
        choices = tuple(choices)
        # There is no need to call self._check_distribution because
        # CategoricalDistribution does not support dynamic value space.
        return self._suggest(name, CategoricalDistribution(choices=choices))
    def report(self, value, step):
        # type: (float, int) -> None
        """Report an objective function value for a given step.
        The reported values are used by the pruners to determine whether this trial should be
        pruned.
        .. seealso::
            Please refer to :class:`~optuna.pruners.BasePruner`.
        .. note::
            The reported value is converted to ``float`` type by applying ``float()``
            function internally. Thus, it accepts all float-like types (e.g., ``numpy.float32``).
            If the conversion fails, a ``TypeError`` is raised.
        Example:
            Report intermediate scores of `SGDClassifier <https://scikit-learn.org/stable/modules/
            generated/sklearn.linear_model.SGDClassifier.html>`_ training.
            .. testsetup::
                import numpy as np
                from sklearn.model_selection import train_test_split
                np.random.seed(seed=0)
                X = np.random.randn(50).reshape(-1, 1)
                y = np.random.randint(0, 2, 50)
                X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
            .. testcode::
                import optuna
                from sklearn.linear_model import SGDClassifier
                def objective(trial):
                    clf = SGDClassifier(random_state=0)
                    for step in range(100):
                        clf.partial_fit(X_train, y_train, np.unique(y))
                        intermediate_value = clf.score(X_valid, y_valid)
                        trial.report(intermediate_value, step=step)
                        if trial.should_prune():
                            raise TrialPruned()
                    return clf.score(X_valid, y_valid)
                study = optuna.create_study(direction='maximize')
                study.optimize(objective, n_trials=3)
        Args:
            value:
                A value returned from the objective function.
            step:
                Step of the trial (e.g., Epoch of neural network training).
        """
        try:
            # For convenience, we allow users to report a value that can be cast to `float`.
            value = float(value)
        except (TypeError, ValueError):
            message = "The `value` argument is of type '{}' but supposed to be a float.".format(
                type(value).__name__
            )
            raise TypeError(message)
        if step < 0:
            raise ValueError("The `step` argument is {} but cannot be negative.".format(step))
        # Persist the intermediate value; duplicate steps are handled by storage.
        self.storage.set_trial_intermediate_value(self._trial_id, step, value)
def should_prune(self, step=None):
    # type: (Optional[int]) -> bool
    """Return whether this trial should be pruned.

    The decision is delegated to the pruning algorithm configured on the
    owning :class:`~optuna.study.Study` and is based on previously
    reported values. With no reported values the suggestion carries no
    information, and identical reports always yield identical answers.

    Args:
        step:
            Deprecated since 0.12.0; the most recent reported step is
            always used regardless of this argument.

    Returns:
        :obj:`True` if the configured pruner says the trial should be
        pruned, :obj:`False` otherwise.
    """
    if step is not None:
        warnings.warn(
            "The use of `step` argument is deprecated. "
            "The last reported step is used instead of "
            "the step given by the argument.",
            DeprecationWarning,
        )
    current = self.study._storage.get_trial(self._trial_id)
    return self.study.pruner.prune(self.study, current)
def set_user_attr(self, key, value):
    # type: (str, Any) -> None
    """Attach a user-defined attribute to the trial.

    Stored attributes can be read back through
    :attr:`optuna.trial.Trial.user_attrs`.

    Args:
        key:
            A key string of the attribute.
        value:
            A value of the attribute; must be JSON serializable.
    """
    self.storage.set_trial_user_attr(self._trial_id, key, value)
def set_system_attr(self, key, value):
    # type: (str, Any) -> None
    """Attach an Optuna-internal attribute to the trial.

    Optuna itself uses this channel for system messages such as trial
    failure reasons; user code should prefer
    :func:`~optuna.trial.Trial.set_user_attr`.

    Args:
        key:
            A key string of the attribute.
        value:
            A value of the attribute; must be JSON serializable.
    """
    self.storage.set_trial_system_attr(self._trial_id, key, value)
def _suggest(self, name, distribution):
    # type: (str, BaseDistribution) -> Any
    """Sample a value for ``name`` and record the sampler's candidates.

    Returns a 5-tuple ``(param_value, name, samples, scores, distribution)``.
    ``samples`` and ``scores`` are only produced by the independent-sampling
    branch; for fixed or relative parameters they are returned as ``None``.
    (Previously they were left unbound in those branches, so the ``return``
    raised ``NameError``.)
    """
    # Candidate values and their scores; only filled by independent sampling.
    samples, scores = None, None
    if self._is_fixed_param(name, distribution):
        param_value = self.system_attrs["fixed_params"][name]
    elif self._is_relative_param(name, distribution):
        param_value = self.relative_params[name]
    else:
        trial = self.storage.get_trial(self._trial_id)
        # For a plain (non-hyperband) study this returns the study unchanged.
        study = pruners._filter_study(self.study, trial)
        # Modified sampler API: also returns the candidate values and scores.
        param_value, samples, scores = self.study.sampler.sample_independent(
            study, trial, name, distribution
        )
        # Cache sampler output so predicate construction (setter/fixer) can reuse it.
        self.study.info["names"].append(name)
        self.study.info[f"{name}_smpls"] = samples
        self.study.info[f"{name}_scrs"] = scores
        self.study.info[f"{name}_dist"] = distribution
    # Parameters may be overwritten later, so always (re)store the value.
    param_value = self._set_new_param_or_get_existing(name, param_value, distribution)
    return param_value, name, samples, scores, distribution
def _set_new_param_or_get_existing(self, name, param_value, distribution):
    # type: (str, Any, BaseDistribution) -> Any
    """Persist ``param_value`` for ``name``, or return the already-stored value.

    The storage refuses to overwrite an existing parameter (e.g. when
    another worker set it first); in that case the stored value wins.
    """
    internal = distribution.to_internal_repr(param_value)
    if self.storage.set_trial_param(self._trial_id, name, internal, distribution):
        return param_value
    # Write lost the race / value already present: read back the winner.
    stored = self.storage.get_trial_param(self._trial_id, name)
    return distribution.to_external_repr(stored)
def _is_fixed_param(self, name, distribution):
    # type: (str, BaseDistribution) -> bool
    """Return True when ``name`` is pinned via the ``fixed_params`` system attr.

    Emits a warning (and returns False) if the pinned value falls outside
    ``distribution``.
    """
    attrs = self.system_attrs
    if "fixed_params" not in attrs:
        return False
    fixed = attrs["fixed_params"]
    if name not in fixed:
        return False
    value = fixed[name]
    contained = distribution._contains(distribution.to_internal_repr(value))
    if not contained:
        warnings.warn(
            "Fixed parameter '{}' with value {} is out of range "
            "for distribution {}.".format(name, value, distribution)
        )
    return contained
def _is_relative_param(self, name, distribution):
    # type: (str, BaseDistribution) -> bool
    """Return True when ``name`` was pre-sampled by ``sample_relative``.

    Raises:
        ValueError: if the parameter was sampled relatively but is missing
            from the relative search space (an internal inconsistency).
    """
    if name not in self.relative_params:
        return False
    if name not in self.relative_search_space:
        raise ValueError(
            "The parameter '{}' was sampled by `sample_relative` method "
            "but it is not contained in the relative search space.".format(name)
        )
    # The relative and requested distributions must be of compatible kinds.
    distributions.check_distribution_compatibility(
        self.relative_search_space[name], distribution
    )
    internal = distribution.to_internal_repr(self.relative_params[name])
    return distribution._contains(internal)
def _check_distribution(self, name, distribution):
    # type: (str, BaseDistribution) -> None
    """Warn when ``name`` was previously suggested with a different distribution.

    Optuna keeps the first distribution registered for a name; later,
    conflicting definitions are ignored, so the user is warned.
    """
    old_distribution = self.distributions.get(name, distribution)
    if old_distribution != distribution:
        warnings.warn(
            # Fixed grammar in the user-facing message ("more then" -> "more than").
            'Inconsistent parameter values for distribution with name "{}"! '
            "This might be a configuration mistake. "
            "Optuna allows to call the same distribution with the same "
            "name more than once in a trial. "
            "When the parameter values are inconsistent optuna only "
            "uses the values of the first call and ignores all following. "
            "Using these values: {}".format(name, old_distribution._asdict()),
            RuntimeWarning,
        )
def check(self, cstrs, act_cat_cols, nc):
    """Return True when this trial is degenerate or its predicate was seen before.

    A trial is "degenerate" when every one of the ``nc`` columns carries a
    parameter. Otherwise the active-column predicate is looked up in
    ``study.evaled``; unseen predicates are recorded and False is returned.
    """
    if len(self.params) == nc:
        return True
    key = tuple((f"c{idx}", cstrs[f"c{idx}"]) for idx in act_cat_cols)
    if key in self.study.evaled:
        return True
    self.study.evaled.add(key)
    return False
def setter(self):
    """Greedily choose the next categorical predicate from sampler scores.

    First 10 trials: draw a random, unseen, feasible predicate from
    ``study.cat_preds``.  Later trials: enumerate value combinations
    (``None`` meaning "leave this column unconstrained"), score each by
    the averaged product of the per-column sampler scores cached in
    ``study.info``, and take the highest-scoring feasible combination.
    Only 3- and 4-column search spaces are handled explicitly.

    Returns:
        Tuple ``(cstrs, act_cat_cols)``: the chosen name->value mapping
        and the indices of the constrained (active) columns.
    """
    prms = tuple([(name, self.params[name]) for name in self.study.info["names"]])
    if self.number < 10:
        # Warm-up: resample until the predicate is feasible and unseen.
        # if prms in self.study.evaled or prms not in self.study.cat_preds:
        while prms in self.study.evaled or prms not in self.study.cat_preds_set:
            prms = self.study.cat_preds[self.study.rnd.choice(len(self.study.cat_preds))]
        cstrs = {}
        self.storage.clear_params_and_dists(self.number)
        for nam, val in prms:
            self.storage.set_trial_param(
                self.number,
                nam,
                self.study.info[f"{nam}_dist"].to_internal_repr(val),
                self.study.info[f"{nam}_dist"],
            )
            cstrs[nam] = val
        # NOTE(review): assumes names look like "cN" with a single-digit
        # index at char 1; breaks beyond 10 columns -- confirm naming scheme.
        act_cat_cols = [int(prms[i][0][1]) for i in range(len(prms))]
        self.study.evaled.add(prms)
        return cstrs, act_cat_cols
    # cur_inds = {}
    # for name in self.study.info["names"]:
    #     cur_inds[f"{name}"] = 0
    self.storage.clear_params_and_dists(self.number)
    names = self.study.info["names"]
    # Candidate list of ((value-or-None per column), score) pairs.
    lst = []
    if len(names) == 3:
        for i, val1 in enumerate(self.study.info[f"{names[0]}_smpls"]):
            mult = self.study.info[f"{names[0]}_scrs"][i]
            lst.append(((val1, None, None), mult))
            for j, val2 in enumerate(self.study.info[f"{names[1]}_smpls"]):
                # Single-column candidates are emitted only on the first outer pass.
                if i == 0:
                    mult = self.study.info[f"{names[1]}_scrs"][j]
                    lst.append(((None, val2, None), mult))
                pred = ((names[0], val1), (names[1], val2))
                if pred not in self.study.evaled and pred in self.study.cat_preds_set:
                    # Two active columns: average of two score products.
                    mult = (
                        self.study.info[f"{names[0]}_scrs"][i]
                        * self.study.info[f"{names[1]}_scrs"][j]
                    ) / 2
                    lst.append(((val1, val2, None), mult))
                for k, val3 in enumerate(self.study.info[f"{names[2]}_smpls"]):
                    if i == 0 and j == 0:
                        mult = self.study.info[f"{names[2]}_scrs"][k]
                        lst.append(((None, None, val3), mult))
                    elif i == 0:
                        pred = ((names[1], val2), (names[2], val3))
                        if pred not in self.study.evaled and pred in self.study.cat_preds_set:
                            mult = (
                                self.study.info[f"{names[1]}_scrs"][j]
                                * self.study.info[f"{names[2]}_scrs"][k]
                            ) / 2
                            lst.append(((None, val2, val3), mult))
                    elif j == 0:
                        pred = ((names[0], val1), (names[2], val3))
                        if pred not in self.study.evaled and pred in self.study.cat_preds_set:
                            mult = (
                                self.study.info[f"{names[0]}_scrs"][i]
                                * self.study.info[f"{names[2]}_scrs"][k]
                            ) / 2
                            lst.append(((val1, None, val3), mult))
                    pred = ((names[0], val1), (names[1], val2), (names[2], val3))
                    if pred not in self.study.evaled and pred in self.study.cat_preds_set:
                        # Three active columns: average of three score products.
                        mult = (
                            self.study.info[f"{names[0]}_scrs"][i]
                            * self.study.info[f"{names[1]}_scrs"][j]
                            * self.study.info[f"{names[2]}_scrs"][k]
                        ) / 3
                        lst.append(((val1, val2, val3), mult))
    elif len(names) == 4:
        # Four columns: only fully-constrained combinations are scored.
        for i, val1 in enumerate(self.study.info[f"{names[0]}_smpls"]):
            for j, val2 in enumerate(self.study.info[f"{names[1]}_smpls"]):
                for k, val3 in enumerate(self.study.info[f"{names[2]}_smpls"]):
                    for l, val4 in enumerate(self.study.info[f"{names[3]}_smpls"]):
                        mult = (
                            self.study.info[f"{names[0]}_scrs"][i]
                            * self.study.info[f"{names[1]}_scrs"][j]
                            * self.study.info[f"{names[2]}_scrs"][k]
                            * self.study.info[f"{names[3]}_scrs"][l]
                        )
                        lst.append(((val1, val2, val3, val4), mult))
    # print(self.number, len(lst))
    # lst = sorted(lst, key=lambda x: x[1])
    # lst = lst[::-1]
    # print(lst[:30])
    # NOTE(review): this loop always returns on its first iteration, and
    # `max()` raises ValueError on an empty `lst` -- confirm intent.
    while True:
        if len(lst) == 0:
            print(self.number)
        vals = max(lst, key=itemgetter(1))[0]
        cstrs = {}
        for i, name in enumerate(names):
            cstrs[name] = vals[i]
        prms = tuple([(name, cstrs[name]) for name in names])
        # prms_check drops the unconstrained (None) columns.
        prms_check = tuple([(name, cstrs[name]) for name in names if cstrs[name] is not None])
        act_cat_cols = []
        for i, (nam, val) in enumerate(prms):
            if val is not None:
                self.storage.set_trial_param(
                    self.number,
                    nam,
                    self.study.info[f"{nam}_dist"].to_internal_repr(val),
                    self.study.info[f"{nam}_dist"],
                )
                act_cat_cols.append(i)
                cstrs[nam] = val
        self.study.evaled.add(prms_check)
        return cstrs, act_cat_cols
    # max_val, max_name = 0, self.study.info["names"][0]
    # for nam in self.study.info["names"]:
    #     if cur_inds[nam] == len(self.study.info[f"{nam}_smpls"]) - 1:
    #         continue
    #     cur_inds[nam] += 1
    #     new_high_score = 1
    #     for name in self.study.info["names"]:
    #         new_high_score *= self.study.info[f"{name}_scrs"][cur_inds[name]]
    #     if new_high_score > max_val:
    #         max_val = new_high_score
    #         max_name = nam
    #     cur_inds[nam] -= 1
    # print(cur_inds)
    # cur_inds[max_name] += 1
def setter2(self):
    """Variant of :meth:`setter` that sorts all candidates up front.

    Unlike :meth:`setter`, candidates are built without feasibility
    filtering, then sorted by score (descending) and popped one by one
    until an unseen, feasible predicate is found.  Note the membership
    checks here use ``study.cat_preds`` (a list) rather than
    ``cat_preds_set``.

    Returns:
        Tuple ``(cstrs, act_cat_cols)`` as in :meth:`setter`.
    """
    prms = tuple([(name, self.params[name]) for name in self.study.info["names"]])
    if self.number < 10:
        # Warm-up: resample until the predicate is feasible and unseen.
        # if prms in self.study.evaled or prms not in self.study.cat_preds:
        while prms in self.study.evaled or prms not in self.study.cat_preds:
            prms = self.study.cat_preds[self.study.rnd.choice(len(self.study.cat_preds))]
        cstrs = {}
        self.storage.clear_params_and_dists(self.number)
        for nam, val in prms:
            self.storage.set_trial_param(
                self.number,
                nam,
                self.study.info[f"{nam}_dist"].to_internal_repr(val),
                self.study.info[f"{nam}_dist"],
            )
            cstrs[nam] = val
        # NOTE(review): assumes "cN" names with single-digit index -- confirm.
        act_cat_cols = [int(prms[i][0][1]) for i in range(len(prms))]
        self.study.evaled.add(prms)
        return cstrs, act_cat_cols
    # cur_inds = {}
    # for name in self.study.info["names"]:
    #     cur_inds[f"{name}"] = 0
    self.storage.clear_params_and_dists(self.number)
    names = self.study.info["names"]
    # Candidate list of ((value-or-None per column), score) pairs.
    lst = []
    if len(names) == 3:
        for i, val1 in enumerate(self.study.info[f"{names[0]}_smpls"]):
            mult = self.study.info[f"{names[0]}_scrs"][i]
            lst.append(((val1, None, None), mult))
            for j, val2 in enumerate(self.study.info[f"{names[1]}_smpls"]):
                # Single-column candidates only on the first outer pass.
                if i == 0:
                    mult = self.study.info[f"{names[1]}_scrs"][j]
                    lst.append(((None, val2, None), mult))
                mult = (
                    self.study.info[f"{names[0]}_scrs"][i]
                    * self.study.info[f"{names[1]}_scrs"][j]
                ) / 2
                lst.append(((val1, val2, None), mult))
                for k, val3 in enumerate(self.study.info[f"{names[2]}_smpls"]):
                    if i == 0 and j == 0:
                        mult = self.study.info[f"{names[2]}_scrs"][k]
                        lst.append(((None, None, val3), mult))
                    elif i == 0:
                        mult = (
                            self.study.info[f"{names[1]}_scrs"][j]
                            * self.study.info[f"{names[2]}_scrs"][k]
                        ) / 2
                        lst.append(((None, val2, val3), mult))
                    elif j == 0:
                        mult = (
                            self.study.info[f"{names[0]}_scrs"][i]
                            * self.study.info[f"{names[2]}_scrs"][k]
                        ) / 2
                        lst.append(((val1, None, val3), mult))
                    mult = (
                        self.study.info[f"{names[0]}_scrs"][i]
                        * self.study.info[f"{names[1]}_scrs"][j]
                        * self.study.info[f"{names[2]}_scrs"][k]
                    ) / 3
                    lst.append(((val1, val2, val3), mult))
    elif len(names) == 4:
        # Four columns: only fully-constrained combinations are scored.
        for i, val1 in enumerate(self.study.info[f"{names[0]}_smpls"]):
            for j, val2 in enumerate(self.study.info[f"{names[1]}_smpls"]):
                for k, val3 in enumerate(self.study.info[f"{names[2]}_smpls"]):
                    for l, val4 in enumerate(self.study.info[f"{names[3]}_smpls"]):
                        mult = (
                            self.study.info[f"{names[0]}_scrs"][i]
                            * self.study.info[f"{names[1]}_scrs"][j]
                            * self.study.info[f"{names[2]}_scrs"][k]
                            * self.study.info[f"{names[3]}_scrs"][l]
                        )
                        lst.append(((val1, val2, val3, val4), mult))
    # print(self.number, len(lst))
    # Best-first order: sort ascending, then reverse.
    lst = sorted(lst, key=lambda x: x[1])
    lst = lst[::-1]
    # print(lst[:30])
    # NOTE(review): `lst.pop(0)` on an empty list raises IndexError -- the
    # `len(lst) == 0` branch only prints the trial number. Confirm intent.
    while True:
        if len(lst) == 0:
            print(self.number)
        vals = lst.pop(0)[0]
        cstrs = {}
        for i, name in enumerate(names):
            cstrs[name] = vals[i]
        prms = tuple([(name, cstrs[name]) for name in names])
        # prms_check drops the unconstrained (None) columns.
        prms_check = tuple([(name, cstrs[name]) for name in names if cstrs[name] is not None])
        act_cat_cols = []
        if prms_check not in self.study.evaled and prms_check in self.study.cat_preds:
            for i, (nam, val) in enumerate(prms):
                if val is not None:
                    self.storage.set_trial_param(
                        self.number,
                        nam,
                        self.study.info[f"{nam}_dist"].to_internal_repr(val),
                        self.study.info[f"{nam}_dist"],
                    )
                    act_cat_cols.append(i)
                    cstrs[nam] = val
            self.study.evaled.add(prms_check)
            return cstrs, act_cat_cols
    # max_val, max_name = 0, self.study.info["names"][0]
    # for nam in self.study.info["names"]:
    #     if cur_inds[nam] == len(self.study.info[f"{nam}_smpls"]) - 1:
    #         continue
    #     cur_inds[nam] += 1
    #     new_high_score = 1
    #     for name in self.study.info["names"]:
    #         new_high_score *= self.study.info[f"{name}_scrs"][cur_inds[name]]
    #     if new_high_score > max_val:
    #         max_val = new_high_score
    #         max_name = nam
    #     cur_inds[nam] -= 1
    # print(cur_inds)
    # cur_inds[max_name] += 1
def is_valid(self, df, pred, num_cols, cat_cols):
    """Return True if at least one row of ``df`` satisfies the predicate.

    Args:
        df: pandas DataFrame to validate against.
        pred: Mapping with ``{col}_min``/``{col}_len`` entries for numeric
            columns and plain ``col -> value`` entries for categorical ones.
        num_cols: Numeric column names constrained by ``pred``.
        cat_cols: Categorical column names constrained by ``pred``.
    """
    clauses = []
    for col in num_cols:
        lo = pred[f"{col}_min"]
        hi = pred[f"{col}_min"] + pred[f"{col}_len"]
        # Closed interval [min, min + len].
        clauses.append(f"{col} >= {lo} & {col} <= {hi}")
    for col in cat_cols:
        # Exact string match on the categorical value.
        clauses.append(f'{col} == "{pred[col]}"')
    return df.query(" & ".join(clauses)).shape[0] > 0
def update_params(self, prms):
    """Replace this trial's stored parameters with ``prms``.

    Args:
        prms: Mapping (or pair iterable exposing ``items()``) of parameter
            name to external value; each value is converted through the
            distribution cached in ``study.info``.
    """
    number = self.number
    self.storage.clear_params_and_dists(number)
    for name, value in prms.items():
        dist = self.study.info[f"{name}_dist"]
        self.storage.set_trial_param(number, name, dist.to_internal_repr(value), dist)
def fixer_combined(self, df, k, cat_preds, probs, cat_cols, num_cols):
    """Pick the next feasible predicate over categorical and numeric columns.

    Warm-up trials (< 10) rejection-sample: draw a categorical predicate
    from ``cat_preds`` (weighted by ``probs``) plus random numeric ranges
    until :meth:`is_valid` confirms at least one matching row in ``df``.
    Later trials enumerate the Cartesian product of the sampler candidates
    cached in ``study.info`` (plus one sampled categorical predicate),
    score each combination by the product of its scores, and return the
    best feasible one.  Terminates the process if nothing is feasible.

    Args:
        df: DataFrame the predicate is validated against.
        k: Unused here; kept for interface parity with :meth:`fixer`.
        cat_preds: Candidate categorical predicates (value tuples).
        probs: Selection probability for each entry of ``cat_preds``.
        cat_cols: Categorical column names.
        num_cols: Numeric column names.

    Returns:
        Mapping of parameter name to chosen value.
    """
    if self.number < 10:
        while True:
            ind = self.study.rnd.choice(range(len(cat_preds)), p=probs)
            prms = dict(zip(cat_cols, cat_preds[ind]))
            for col in num_cols:
                # Integer columns get integer ranges, float columns uniform ones.
                if np.issubdtype(df[col].dtype, np.signedinteger) or np.issubdtype(df[col].dtype, np.unsignedinteger):
                    prms[f"{col}_min"] = self.study.rnd.randint(self.study.info[f"{col}_min_dist"].low, self.study.info[f"{col}_min_dist"].high)
                    prms[f"{col}_len"] = self.study.rnd.randint(0, self.study.info[f"{col}_len_dist"].high - self.study.info[f"{col}_len_dist"].low)
                elif np.issubdtype(df[col].dtype, np.floating):
                    # Fixed: previously read `self.df[col]` although the frame is
                    # passed in as the `df` argument (the integer branch uses `df`).
                    prms[f"{col}_min"] = self.study.rnd.uniform(self.study.info[f"{col}_min_dist"].low, self.study.info[f"{col}_min_dist"].high)
                    prms[f"{col}_len"] = self.study.rnd.uniform(0, self.study.info[f"{col}_len_dist"].high - self.study.info[f"{col}_len_dist"].low)
            if self.is_valid(df, prms, num_cols, cat_cols):
                self.update_params({k: v for k, v in prms.items() if k in self.study.info["names"]})
                return prms
    # SRSWR or SRSWOR
    indicies = self.study.rnd.choice(range(len(cat_preds)), size=1, p=probs)
    self.study.info["cat_smpls"] = [cat_preds[i] for i in indicies]
    self.study.info["cat_scrs"] = [probs[i] for i in indicies]
    all_param_names = ["cat"] + self.study.info["names"]
    # All combinations of candidate predicate values.
    prds = list(product(*[self.study.info[f"{name}_smpls"] for name in all_param_names]))
    # Product of the per-column scores for each combination.
    scrs = [np.prod(scrs) for scrs in product(*[self.study.info[f"{name}_scrs"] for name in all_param_names])]
    prms_lst = list(zip(prds, scrs))
    prms_lst = sorted(prms_lst, key=lambda x: x[1], reverse=True)
    for prms, _ in prms_lst:
        # Flatten the categorical value tuple into the per-column values.
        prms = [y for x in prms for y in (x if isinstance(x, tuple) else (x,))]
        prms = dict(zip(cat_cols + self.study.info["names"], prms))
        if self.is_valid(df, prms, num_cols, cat_cols):
            self.update_params({k: v for k, v in prms.items() if k in self.study.info["names"]})
            return dict(prms)
    # No feasible combination left: abort the run.
    exit()
def fixer(self, df, k, algo, cat_cols, num_cols, cat_vals, probs, code_to_value):
    """Select the next feasible predicate according to ``algo``.

    Supported algorithms: "TPE_categorical" / "TPE_individual_contribution"
    (candidates come from ``study.cat_preds`` / ``study.info``) and the
    "weighted_sample_*" family (candidates drawn per column from
    ``cat_vals`` weighted by ``probs``).  Warm-up trials (< 10)
    rejection-sample randomly; later trials rank score products and take
    the best feasible, unseen combination.  Terminates the process if no
    combination is feasible.

    Note: the parameter ``k`` (sample size) is shadowed by the dict
    comprehensions' loop variable ``k`` below, after its last use.
    """
    prms = {}
    if self.number < 10:
        # Warm-up: rejection-sample a random feasible predicate.
        while True:
            if algo == "TPE_categorical" or algo == "TPE_individual_contribution":
                prms = dict(zip(cat_cols, self.study.cat_preds[self.study.rnd.choice(len(self.study.cat_preds))]))
            elif algo == "weighted_sample_positive" or algo == "weighted_sample_shift" or algo == "weighted_sample_halving":
                for col in cat_cols:
                    prms[col] = self.study.rnd.choice(cat_vals[col], replace=False, p=probs[col])
                if tuple(prms.values())[:len(cat_vals)] not in self.study.cat_preds_set:
                    continue
            for col in num_cols:
                prms[f"{col}_min"] = self.study.rnd.randint(self.study.info[f"{col}_min_dist"].low, self.study.info[f"{col}_min_dist"].high)
                prms[f"{col}_len"] = self.study.rnd.randint(0, self.study.info[f"{col}_len_dist"].high - self.study.info[f"{col}_len_dist"].low)
            if algo == "TPE_individual_contribution":
                # Map internal codes back to raw categorical values for validation.
                prms_final = {k: (code_to_value[k][v] if k in cat_cols else v) for k, v in prms.items()}
            else:
                prms_final = prms
            if self.is_valid(df, prms_final, num_cols, cat_cols):
                break
        self.update_params({k: v for k, v in prms.items() if k in self.study.info["names"]}, )
        return prms
    if algo == "weighted_sample_positive" or algo == "weighted_sample_shift" or algo == "weighted_sample_halving":
        # Draw up to k candidate values per categorical column (without replacement).
        for col in cat_cols:
            indicies = self.study.rnd.choice(len(cat_vals[col]), size=min(k, len(cat_vals[col])), replace=False, p=probs[col])
            self.study.info[f"{col}_smpls"] = [cat_vals[col][i] for i in indicies]
            self.study.info[f"{col}_scrs"] = [probs[col][i] for i in indicies]
        all_param_names = list(cat_vals.keys()) + self.study.info["names"]
    elif algo == "TPE_categorical" or algo == "TPE_individual_contribution":
        all_param_names = self.study.info["names"]
    # all combinations of predicates
    prds = list(product(*[self.study.info[f"{name}_smpls"] for name in all_param_names]))
    # all multiplied combinations of scores
    scrs = [np.prod(scrs) for scrs in product(*[self.study.info[f"{name}_scrs"] for name in all_param_names])]
    prms_lst = list(zip(prds, scrs))
    prms_lst = sorted(prms_lst, key=lambda x: x[1], reverse=True)
    # print(self.study.cat_preds_set)
    for prms, _ in prms_lst:
        # Skip combinations whose categorical prefix is not a known predicate.
        if algo == "weighted_sample_positive" or algo == "weighted_sample_shift" or algo == "weighted_sample_halving":
            if prms[:len(cat_vals.keys())] not in self.study.cat_preds_set:
                continue
        elif algo == "TPE_categorical" or algo == "TPE_individual_contribution":
            names = prms[:len(cat_cols)]
            if names not in self.study.cat_preds_set:
                continue
        prms = dict(zip(all_param_names, prms))
        if algo == "TPE_individual_contribution":
            prms_final = {k: (code_to_value[k][v] if k in cat_cols else v) for k, v in prms.items()}
        else:
            prms_final = prms
        if self.is_valid(df, prms_final, num_cols, cat_cols):
            self.update_params({k: v for k, v in prms.items() if k in self.study.info["names"]})
            return dict(prms)
    # No candidate was feasible: abort the process.
    exit()
    # NOTE(review): everything below is unreachable (dead code after exit());
    # it duplicates setter_fixed() but iterates `for prms in prms_lst:` without
    # unpacking the (values, score) pairs -- kept verbatim for reference.
    # initial parameters for this trial
    # prms = tuple(self.params.items())
    prms = tuple({k: v for k, v in self.params.items() if "_min" not in k and "_len" not in k})
    if self.number < 10:
        # get a valid predicate that hasn't been evaluated
        while prms in self.study.evaled or prms not in self.study.cat_preds_set:
            prms = self.study.cat_preds[self.study.rnd.choice(len(self.study.cat_preds))]
        self.update_params(prms)
        return dict(prms)
    names = [name for name in self.study.info["names"] if "_min" not in name and "_len" not in name]
    prms_lst = []
    name_combos = list(
        chain.from_iterable(
            combinations(names, r) for r in range(len(names), len(names) + 1)
        )
    )
    for combo in name_combos:
        for name in combo:
            self.study.info[f"{name}_smpls"] = list(zip([name] * len(self.study.info[f"{name}_smpls"]), self.study.info[f"{name}_smpls"]))
        # all combinations of predicates
        prds = list(product(*[self.study.info[f"{name}_smpls"] for name in combo]))
        # all multiplied combinations of scores
        scrs = [np.prod(scrs) for scrs in product(*[self.study.info[f"{name}_scrs"] for name in combo])]
        prms_lst += list(zip(prds, scrs))
    prms_lst = sorted(prms_lst, key=lambda x: x[1])
    for prms in prms_lst:
        if prms not in self.study.evaled and prms in self.study.cat_preds_set:
            self.update_params(prms)
            return dict(prms)
    print("Random Predicate")
    # executes if the list of EI candidates is empty
    while prms in self.study.evaled or prms not in self.study.cat_preds_set:
        prms = self.study.cat_preds[self.study.rnd.choice(len(self.study.cat_preds))]
    self.update_params(prms)
    return dict(prms)
def setter_fixed(self):
    """Pick an unseen, feasible predicate over the full set of columns.

    Warm-up trials (< 10) draw randomly from ``study.cat_preds``; later
    trials rank all full-length value combinations by the product of
    their sampler scores and take the first unseen feasible one, falling
    back to a random predicate when no candidate qualifies.
    """
    # initial parameters for this trial
    # prms = tuple(self.params.items())
    # NOTE(review): tuple(dict) keeps only the KEYS of the dict, and
    # update_params() expects a mapping with .items() -- confirm that
    # study.cat_preds entries are mappings/pair tuples as assumed here.
    prms = tuple({k: v for k, v in self.params.items() if "_min" not in k and "_len" not in k})
    if self.number < 10:
        # get a valid predicate that hasn't been evaluated
        while prms in self.study.evaled or prms not in self.study.cat_preds_set:
            prms = self.study.cat_preds[self.study.rnd.choice(len(self.study.cat_preds))]
        self.update_params(prms)
        return dict(prms)
    names = [name for name in self.study.info["names"] if "_min" not in name and "_len" not in name]
    prms_lst = []
    # Only full-length combinations (r == len(names)).
    name_combos = list(
        chain.from_iterable(
            combinations(names, r) for r in range(len(names), len(names) + 1)
        )
    )
    for combo in name_combos:
        # NOTE(review): this rewrites study.info[f"{name}_smpls"] in place,
        # pairing each sample with its column name; repeated calls would
        # re-zip already-zipped lists -- confirm intent.
        for name in combo:
            self.study.info[f"{name}_smpls"] = list(zip([name] * len(self.study.info[f"{name}_smpls"]), self.study.info[f"{name}_smpls"]))
        # all combinations of predicates
        prds = list(product(*[self.study.info[f"{name}_smpls"] for name in combo]))
        # all multiplied combinations of scores
        scrs = [np.prod(scrs) for scrs in product(*[self.study.info[f"{name}_scrs"] for name in combo])]
        prms_lst += list(zip(prds, scrs))
    prms_lst = sorted(prms_lst, key=lambda x: x[1])
    for prms, _ in prms_lst:
        if prms not in self.study.evaled and prms in self.study.cat_preds_set:
            self.update_params(prms)
            return dict(prms)
    print("Random Predicate")
    # executes if the list of EI candidates is empty
    while prms in self.study.evaled or prms not in self.study.cat_preds_set:
        prms = self.study.cat_preds[self.study.rnd.choice(len(self.study.cat_preds))]
    self.update_params(prms)
    return dict(prms)
def setter_old(self):
    """Legacy variant of :meth:`setter` (fully-constrained predicates only).

    Candidates are built only for combinations that constrain every
    column; the best-scoring one is selected via ``max``.  If no
    candidate exists, falls back to a random unseen predicate.
    Returns ``(cstrs, act_cat_cols)`` where ``act_cat_cols`` holds the
    column NAMES here (not indices as in :meth:`setter`).
    """
    prms = tuple([(name, self.params[name]) for name in self.study.info["names"]])
    if self.number < 10:
        # Warm-up: resample until the predicate is feasible and unseen.
        # if prms in self.study.evaled or prms not in self.study.cat_preds:
        while prms in self.study.evaled or prms not in self.study.cat_preds_set:
            prms = self.study.cat_preds[self.study.rnd.choice(len(self.study.cat_preds))]
        cstrs = {}
        self.storage.clear_params_and_dists(self.number)
        for nam, val in prms:
            self.storage.set_trial_param(
                self.number,
                nam,
                self.study.info[f"{nam}_dist"].to_internal_repr(val),
                self.study.info[f"{nam}_dist"],
            )
            cstrs[nam] = val
        act_cat_cols = [prms[i][0] for i in range(len(prms))]
        self.study.evaled.add(prms)
        return cstrs, act_cat_cols
    # cur_inds = {}
    # for name in self.study.info["names"]:
    #     cur_inds[f"{name}"] = 0
    self.storage.clear_params_and_dists(self.number)
    names = self.study.info["names"]
    lst = []
    if len(names) == 3:
        for i, val1 in enumerate(self.study.info[f"{names[0]}_smpls"]):
            for j, val2 in enumerate(self.study.info[f"{names[1]}_smpls"]):
                for k, val3 in enumerate(self.study.info[f"{names[2]}_smpls"]):
                    pred = ((names[0], val1), (names[1], val2), (names[2], val3))
                    # Only unseen, feasible full predicates are candidates.
                    if pred not in self.study.evaled and pred in self.study.cat_preds_set:
                        mult = (
                            self.study.info[f"{names[0]}_scrs"][i]
                            * self.study.info[f"{names[1]}_scrs"][j]
                            * self.study.info[f"{names[2]}_scrs"][k]
                        )
                        lst.append(((val1, val2, val3), mult))
    elif len(names) == 4:
        for i, val1 in enumerate(self.study.info[f"{names[0]}_smpls"]):
            for j, val2 in enumerate(self.study.info[f"{names[1]}_smpls"]):
                for k, val3 in enumerate(self.study.info[f"{names[2]}_smpls"]):
                    for l, val4 in enumerate(self.study.info[f"{names[3]}_smpls"]):
                        mult = (
                            self.study.info[f"{names[0]}_scrs"][i]
                            * self.study.info[f"{names[1]}_scrs"][j]
                            * self.study.info[f"{names[2]}_scrs"][k]
                            * self.study.info[f"{names[3]}_scrs"][l]
                        )
                        lst.append(((val1, val2, val3, val4), mult))
    # print(self.number, len(lst))
    # lst = sorted(lst, key=lambda x: x[1])
    # lst = lst[::-1]
    # print(len(lst))
    # print(lst[:10])
    # if self.number == 20:
    #     exit(0)
    # Take the best candidate, or break to the random fallback when empty.
    while True:
        if len(lst) == 0:
            print(self.number)
            break
        vals = max(lst, key=itemgetter(1))[0]
        cstrs = {}
        for i, name in enumerate(names):
            cstrs[name] = vals[i]
        prms = tuple([(name, cstrs[name]) for name in names])
        act_cat_cols = []
        for i, (nam, val) in enumerate(prms):
            self.storage.set_trial_param(
                self.number,
                nam,
                self.study.info[f"{nam}_dist"].to_internal_repr(val),
                self.study.info[f"{nam}_dist"],
            )
            act_cat_cols.append(nam)
            cstrs[nam] = val
        self.study.evaled.add(prms)
        return cstrs, act_cat_cols
    # Fallback: random unseen predicate (reached only via the break above).
    # if prms in self.study.evaled or prms not in self.study.cat_preds:
    while prms in self.study.evaled or prms not in self.study.cat_preds_set:
        prms = self.study.cat_preds[self.study.rnd.choice(len(self.study.cat_preds))]
    cstrs = {}
    self.storage.clear_params_and_dists(self.number)
    for nam, val in prms:
        self.storage.set_trial_param(
            self.number,
            nam,
            self.study.info[f"{nam}_dist"].to_internal_repr(val),
            self.study.info[f"{nam}_dist"],
        )
        cstrs[nam] = val
    act_cat_cols = [prms[i][0] for i in range(len(prms))]
    self.study.evaled.add(prms)
    return cstrs, act_cat_cols
def setter_old2(self):
    """Legacy variant of :meth:`setter2` (fully-constrained predicates only).

    Candidates are sorted by score (descending) and popped until an
    unseen, feasible predicate is found; when the list runs out, falls
    back to a random unseen predicate.

    Returns:
        Tuple ``(cstrs, act_cat_cols)``: chosen name->value mapping and
        the active column indices.
    """
    prms = tuple([(name, self.params[name]) for name in self.study.info["names"]])
    if self.number < 10:
        # Warm-up: resample until the predicate is feasible and unseen.
        # if prms in self.study.evaled or prms not in self.study.cat_preds:
        while prms in self.study.evaled or prms not in self.study.cat_preds_set:
            prms = self.study.cat_preds[self.study.rnd.choice(len(self.study.cat_preds))]
        cstrs = {}
        self.storage.clear_params_and_dists(self.number)
        for nam, val in prms:
            self.storage.set_trial_param(
                self.number,
                nam,
                self.study.info[f"{nam}_dist"].to_internal_repr(val),
                self.study.info[f"{nam}_dist"],
            )
            cstrs[nam] = val
        act_cat_cols = [int(prms[i][0][1]) for i in range(len(prms))]
        self.study.evaled.add(prms)
        return cstrs, act_cat_cols
    self.storage.clear_params_and_dists(self.number)
    names = self.study.info["names"]
    # Candidate list of ((value per column), score) pairs.
    lst = []
    if len(names) == 3:
        for i, val1 in enumerate(self.study.info[f"{names[0]}_smpls"]):
            for j, val2 in enumerate(self.study.info[f"{names[1]}_smpls"]):
                for k, val3 in enumerate(self.study.info[f"{names[2]}_smpls"]):
                    mult = (
                        self.study.info[f"{names[0]}_scrs"][i]
                        * self.study.info[f"{names[1]}_scrs"][j]
                        * self.study.info[f"{names[2]}_scrs"][k]
                    )
                    lst.append(((val1, val2, val3), mult))
    elif len(names) == 4:
        for i, val1 in enumerate(self.study.info[f"{names[0]}_smpls"]):
            for j, val2 in enumerate(self.study.info[f"{names[1]}_smpls"]):
                for k, val3 in enumerate(self.study.info[f"{names[2]}_smpls"]):
                    for l, val4 in enumerate(self.study.info[f"{names[3]}_smpls"]):
                        mult = (
                            self.study.info[f"{names[0]}_scrs"][i]
                            * self.study.info[f"{names[1]}_scrs"][j]
                            * self.study.info[f"{names[2]}_scrs"][k]
                            * self.study.info[f"{names[3]}_scrs"][l]
                        )
                        lst.append(((val1, val2, val3, val4), mult))
    # Best-first order: sort ascending, then reverse.
    lst = sorted(lst, key=lambda x: x[1])
    lst = lst[::-1]
    while True:
        if len(lst) == 0:
            print(self.number)
            break
        # Fixed: take only the value tuple; the original popped the whole
        # ((values), score) pair, so cstrs received the score as a value
        # (cf. the correct `lst.pop(0)[0]` in setter2).
        vals = lst.pop(0)[0]
        cstrs = {}
        for i, name in enumerate(names):
            cstrs[name] = vals[i]
        prms = tuple([(name, cstrs[name]) for name in names])
        act_cat_cols = []
        if prms not in self.study.evaled and prms in self.study.cat_preds_set:
            for i, (nam, val) in enumerate(prms):
                self.storage.set_trial_param(
                    self.number,
                    nam,
                    self.study.info[f"{nam}_dist"].to_internal_repr(val),
                    self.study.info[f"{nam}_dist"],
                )
                act_cat_cols.append(i)
                cstrs[nam] = val
            self.study.evaled.add(prms)
            return cstrs, act_cat_cols
    # Fallback: random unseen predicate (reached when the list runs out).
    # if prms in self.study.evaled or prms not in self.study.cat_preds:
    while prms in self.study.evaled or prms not in self.study.cat_preds_set:
        prms = self.study.cat_preds[self.study.rnd.choice(len(self.study.cat_preds))]
    cstrs = {}
    self.storage.clear_params_and_dists(self.number)
    for nam, val in prms:
        self.storage.set_trial_param(
            self.number,
            nam,
            self.study.info[f"{nam}_dist"].to_internal_repr(val),
            self.study.info[f"{nam}_dist"],
        )
        cstrs[nam] = val
    act_cat_cols = [int(prms[i][0][1]) for i in range(len(prms))]
    self.study.evaled.add(prms)
    return cstrs, act_cat_cols
def get_new_random_params(self, param_list, distr):
    """Randomly activate a subset of columns and draw a value for each.

    For every column a fair coin decides whether it is constrained
    ("Yes") or not ("No").  Constrained columns get a random value from
    either an integer range (``distr == "int"``, bounds taken from
    ``param_list[i]``) or a categorical choice list (``distr == "cat"``).
    Resamples until a non-empty, previously unseen constraint set is
    produced, then persists the choices into trial storage.

    Returns:
        Tuple ``(cstrs, act_cat_cols)``: name->value constraints and the
        indices of the activated columns.
    """
    while True:
        params = {}
        cstrs = {}
        act_cat_cols = []
        checker = tuple()
        for i in range(len(param_list)):
            # choose whether or not to select a value from this column
            v = self.study.rnd.choice(2)
            if v == 1:
                # choose a value randomly from this column
                params[f"c{i}"] = "Yes"
                act_cat_cols.append(i)
                if distr == "int":
                    params[f"c{i}_val"] = self.study.rnd.choice(list(range(param_list[i] + 1)))
                    params[f"c{i}_distr"] = IntUniformDistribution(low=0, high=param_list[i])
                elif distr == "cat":
                    params[f"c{i}_val"] = self.study.rnd.choice(param_list[i])
                    params[f"c{i}_distr"] = CategoricalDistribution(choices=param_list[i])
                cstrs[f"c{i}"] = params[f"c{i}_val"]
            else:
                params[f"c{i}"] = "No"
        # Reject the degenerate "no column constrained" draw.
        if len(cstrs) == 0:
            continue
        checker = tuple([(f"c{i}", cstrs[f"c{i}"]) for i in act_cat_cols])
        # Accept only constraint sets not evaluated before.
        if checker not in self.study.evaled:
            self.study.evaled.add(checker)
            break
    # Persist: the Yes/No switch (as internal repr 0/1) plus the drawn value.
    self.storage.clear_params_and_dists(self.number)
    col_distr = CategoricalDistribution(choices=("Yes", "No"))
    for i in range(len(param_list)):
        if params[f"c{i}"] == "Yes":
            self.storage.set_trial_param(self.number, f"c{i}", 0, col_distr)
            self.storage.set_trial_param(
                self.number,
                f"c{i}_val",
                params[f"c{i}_distr"].to_internal_repr(params[f"c{i}_val"]),
                params[f"c{i}_distr"],
            )
        elif params[f"c{i}"] == "No":
            self.storage.set_trial_param(self.number, f"c{i}", 1, col_distr)
    return cstrs, act_cat_cols
@property
def number(self):
    # type: () -> int
    """Return trial's number which is consecutive and unique in a study.

    Returns:
        A trial number.
    """
    # Delegates to the storage backend; the number is derived from the
    # internal trial ID.
    return self.storage.get_trial_number_from_id(self._trial_id)
@property
def trial_id(self):
    # type: () -> int
    """Return the internal trial ID (deprecated).

    Please use :attr:`~optuna.trial.Trial.number` instead.

    Returns:
        A trial ID.
    """
    # Single message reused for both channels, mirroring `study_id`'s style.
    message = "The use of `Trial.trial_id` is deprecated. Please use `Trial.number` instead."
    warnings.warn(message, DeprecationWarning)
    # DeprecationWarning is hidden by default in many setups, so also log it.
    self.logger.warning(message)
    return self._trial_id
@property
def params(self):
    # type: () -> Dict[str, Any]
    """Return parameters to be optimized.

    Returns:
        A dictionary containing all parameters.
    """
    # External (user-facing) representations, fetched from storage.
    return self.storage.get_trial_params(self._trial_id)
@property
def distributions(self):
    # type: () -> Dict[str, BaseDistribution]
    """Return distributions of parameters to be optimized.

    Returns:
        A dictionary containing all distributions.
    """
    # Reads the full frozen trial record and exposes only its distributions.
    return self.storage.get_trial(self._trial_id).distributions
@property
def user_attrs(self):
    # type: () -> Dict[str, Any]
    """Return user attributes.

    Returns:
        A dictionary containing all user attributes.
    """
    # User attributes are free-form key/value pairs set by library users.
    return self.storage.get_trial_user_attrs(self._trial_id)
@property
def system_attrs(self):
    # type: () -> Dict[str, Any]
    """Return system attributes.

    Returns:
        A dictionary containing all system attributes.
    """
    # System attributes are reserved for internal bookkeeping by the library.
    return self.storage.get_trial_system_attrs(self._trial_id)
@property
def datetime_start(self):
    # type: () -> Optional[datetime.datetime]
    """Return start datetime.

    Returns:
        Datetime where the :class:`~optuna.trial.Trial` started, or
        :obj:`None` if it has not been recorded.
    """
    # Reads the full frozen trial record and exposes only its start time.
    return self.storage.get_trial(self._trial_id).datetime_start
@property
def study_id(self):
    # type: () -> int
    """Return the study ID.

    .. deprecated:: 0.20.0
        The direct use of this attribute is deprecated and it is recommended that you use
        :attr:`~optuna.trial.Trial.study` instead.

    Returns:
        The study ID.
    """
    message = "The use of `Trial.study_id` is deprecated. Please use `Trial.study` instead."
    warnings.warn(message, DeprecationWarning)
    # DeprecationWarning is hidden by default in many setups, so also log it.
    self.logger.warning(message)
    return self.study._study_id
| 67,255
| 40.210784
| 176
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/pruners/base.py
|
import abc
class BasePruner(abc.ABC):
    """Abstract interface every pruner must implement.

    A pruner decides, from the intermediate values a trial has reported so
    far, whether that trial should be stopped early.
    """

    @abc.abstractmethod
    def prune(self, study, trial):
        # type: (Study, FrozenTrial) -> bool
        """Judge whether the trial should be pruned based on the reported values.

        This method is not meant to be invoked directly by library users;
        :func:`optuna.trial.Trial.report` and
        :func:`optuna.trial.Trial.should_prune` are the user-facing entry
        points for pruning inside an objective function.

        Args:
            study:
                Study object of the target study.
            trial:
                FrozenTrial object of the target trial.

        Returns:
            A boolean value representing whether the trial should be pruned.
        """
        raise NotImplementedError
| 846
| 30.37037
| 93
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/pruners/nop.py
|
# from optuna.pruners import BasePruner
from . import BasePruner
class NopPruner(BasePruner):
    """Pruner which never prunes trials.

    Example:

        .. testsetup::

            import numpy as np
            from sklearn.model_selection import train_test_split

            np.random.seed(seed=0)
            X = np.random.randn(200).reshape(-1, 1)
            y = np.where(X[:, 0] < 0.5, 0, 1)
            X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
            classes = np.unique(y)

        .. testcode::

            import optuna
            from sklearn.linear_model import SGDClassifier

            def objective(trial):
                alpha = trial.suggest_uniform('alpha', 0.0, 1.0)
                clf = SGDClassifier(alpha=alpha)
                n_train_iter = 100

                for step in range(n_train_iter):
                    clf.partial_fit(X_train, y_train, classes=classes)

                    intermediate_value = clf.score(X_valid, y_valid)
                    trial.report(intermediate_value, step)

                    if trial.should_prune():
                        assert False, "should_prune() should always return False with this pruner."
                        raise optuna.exceptions.TrialPruned()

                return clf.score(X_valid, y_valid)

            study = optuna.create_study(direction='maximize',
                                        pruner=optuna.pruners.NopPruner())
            study.optimize(objective, n_trials=20)
    """

    def prune(self, study, trial):
        # type: (Study, FrozenTrial) -> bool
        # Unconditionally keep every trial running; `should_prune()` is
        # therefore always False under this pruner.
        return False
| 1,632
| 30.403846
| 99
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/pruners/__init__.py
|
# import optuna
# from optuna.pruners.base import BasePruner
# from optuna.pruners.nop import NopPruner
from ... import optuna
from .base import BasePruner
from .nop import NopPruner
def _filter_study(
study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial"
) -> "optuna.study.Study":
return study
| 315
| 23.307692
| 66
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/samplers/base.py
|
import abc
class BaseSampler(abc.ABC):
    """Abstract base class for all samplers.

    Optuna combines two sampling strategies: *relative sampling*, which draws
    several parameters at once so the algorithm can exploit relationships
    between them (e.g. correlation), and *independent sampling*, which draws
    a single parameter in isolation.

    At the beginning of a trial,
    :meth:`~optuna.samplers.BaseSampler.infer_relative_search_space` decides
    which parameters form the relative search space and
    :meth:`~optuna.samplers.BaseSampler.sample_relative` samples them. While
    the objective function executes, every parameter outside that space is
    drawn via :meth:`~optuna.samplers.BaseSampler.sample_independent`.

    .. image:: ../../image/sampling-sequence.png

    |
    """

    @abc.abstractmethod
    def infer_relative_search_space(self, study, trial):
        # type: (Study, FrozenTrial) -> Dict[str, BaseDistribution]
        """Infer the search space used by relative sampling in the target trial.

        Called right before :func:`~optuna.samplers.BaseSampler.sample_relative`,
        whose input is the space returned here. Parameters not contained in
        the space are sampled with
        :func:`~optuna.samplers.BaseSampler.sample_independent`.

        Args:
            study:
                Target study object.
            trial:
                Target trial object.

        Returns:
            A dictionary mapping parameter names to their distributions.

        .. seealso::
            :func:`~optuna.samplers.intersection_search_space` is a reference
            implementation of this method.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def sample_relative(self, study, trial, search_space):
        # type: (Study, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]
        """Sample parameters in a given search space.

        Invoked once at the beginning of each trial, i.e. right before the
        objective function is evaluated. Suitable for algorithms that model
        relationships between parameters, such as Gaussian Process and
        CMA-ES.

        .. note::
            Failed trials are ignored by any built-in sampler when sampling
            new parameters; they are effectively treated as deleted.

        Args:
            study:
                Target study object.
            trial:
                Target trial object.
            search_space:
                The space returned by
                :func:`~optuna.samplers.BaseSampler.infer_relative_search_space`.

        Returns:
            A dictionary mapping parameter names to sampled values.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def sample_independent(self, study, trial, param_name, param_distribution):
        # type: (Study, FrozenTrial, str, BaseDistribution) -> Any
        """Sample a single parameter from the given distribution.

        Called only for parameters absent from the relative search space.
        Suitable for algorithms that treat parameters independently, such as
        random sampling and TPE.

        .. note::
            Failed trials are ignored by any built-in sampler when sampling
            new parameters; they are effectively treated as deleted.

        Args:
            study:
                Target study object.
            trial:
                Target trial object.
            param_name:
                Name of the sampled parameter.
            param_distribution:
                Distribution object specifying a prior and/or scale.

        Returns:
            A parameter value.
        """
        raise NotImplementedError

    def reseed_rng(self) -> None:
        """Reseed this sampler's random number generator.

        Called by :class:`~optuna.study.Study` when trials run in parallel
        (``n_jobs > 1``): replicated sampler instances would otherwise share
        a random state and suggest identical values, so each replica gets a
        fresh seed.
        """
        pass
| 5,487
| 39.651852
| 98
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/samplers/_search_space.py
|
from collections import OrderedDict
import copy
from typing import Dict
from typing import Optional
# import optuna
# from optuna.distributions import BaseDistribution
# from optuna.study import BaseStudy
from ... import optuna
from ..distributions import BaseDistribution
from ..study import BaseStudy
class IntersectionSearchSpace(object):
    """A class to calculate the intersection search space of a :class:`~optuna.study.BaseStudy`.

    Intersection search space contains the intersection of parameter distributions that have
    been suggested in the completed trials of the study so far.
    If there are multiple parameters that have the same name but different distributions,
    neither is included in the resulting search space
    (i.e., the parameters with dynamic value ranges are excluded).

    Note that an instance of this class is supposed to be used for only one study.
    If different studies are passed to
    :func:`~optuna.samplers.IntersectionSearchSpace.calculate`,
    a :obj:`ValueError` is raised.
    """

    def __init__(self) -> None:
        # Trial number up to which finished trials have already been folded
        # into `_search_space`; -1 means nothing has been processed yet.
        self._cursor = -1  # type: int
        # Running intersection of distributions; None until the first
        # completed trial is seen.
        self._search_space = None  # type: Optional[Dict[str, BaseDistribution]]
        # ID of the single study this instance is bound to.
        self._study_id = None  # type: Optional[int]

    def calculate(
        self, study: BaseStudy, ordered_dict: bool = False
    ) -> Dict[str, BaseDistribution]:
        """Returns the intersection search space of the :class:`~optuna.study.BaseStudy`.

        Args:
            study:
                A study with completed trials.
            ordered_dict:
                A boolean flag determining the return type.
                If :obj:`False`, the returned object will be a :obj:`dict`.
                If :obj:`True`, the returned object will be an :obj:`collections.OrderedDict`
                sorted by keys, i.e. parameter names.

        Returns:
            A dictionary containing the parameter names and parameter's distributions.
        """
        if self._study_id is None:
            # Bind this instance to the first study it sees.
            self._study_id = study._study_id
        else:
            # Note that the check below is meaningless when `InMemortyStorage` is used
            # because `InMemortyStorage.create_new_study` always returns the same study ID.
            if self._study_id != study._study_id:
                raise ValueError("`IntersectionSearchSpace` cannot handle multiple studies.")

        next_cursor = self._cursor
        # Walk trials newest-to-oldest, stopping once already-processed
        # numbers are reached, so each call does incremental work only.
        for trial in reversed(study.get_trials(deepcopy=False)):
            if self._cursor > trial.number:
                break

            if not trial.state.is_finished():
                # Unfinished trials must be revisited on the next call.
                next_cursor = trial.number

            if trial.state != optuna.trial.TrialState.COMPLETE:
                continue

            if self._search_space is None:
                # The first completed trial seeds the intersection.
                self._search_space = copy.copy(trial.distributions)
                continue

            # Drop parameters missing from this trial or whose distribution
            # differs (i.e. dynamic value ranges).
            delete_list = []
            for param_name, param_distribution in self._search_space.items():
                if param_name not in trial.distributions:
                    delete_list.append(param_name)
                elif trial.distributions[param_name] != param_distribution:
                    delete_list.append(param_name)

            for param_name in delete_list:
                del self._search_space[param_name]

        self._cursor = next_cursor
        search_space = self._search_space or {}

        if ordered_dict:
            search_space = OrderedDict(sorted(search_space.items(), key=lambda x: x[0]))

        # Deep-copy so callers cannot mutate the cached intersection.
        return copy.deepcopy(search_space)
def intersection_search_space(
    study: BaseStudy, ordered_dict: bool = False
) -> Dict[str, BaseDistribution]:
    """Return the intersection search space of the :class:`~optuna.study.BaseStudy`.

    The intersection search space contains only the parameter distributions
    that were suggested by every completed trial so far. When the same
    parameter name appears with different distributions across trials
    (a dynamic value range), that parameter is excluded.

    .. note::
        :class:`~optuna.samplers.IntersectionSearchSpace` provides the same
        functionality with much better performance over repeated calls;
        prefer it when execution time matters.

    Args:
        study:
            A study with completed trials.
        ordered_dict:
            If :obj:`True`, return an :obj:`collections.OrderedDict` sorted
            by parameter name; otherwise return a plain :obj:`dict`.

    Returns:
        A dictionary mapping parameter names to their distributions.
    """
    # One-shot convenience wrapper: a throwaway calculator is built per call.
    calculator = IntersectionSearchSpace()
    return calculator.calculate(study, ordered_dict=ordered_dict)
| 4,886
| 38.731707
| 99
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/samplers/random.py
|
import numpy
# from optuna import distributions
# from optuna.samplers.base import BaseSampler
from .. import distributions
from ..samplers.base import BaseSampler
class RandomSampler(BaseSampler):
    """Sampler using random sampling.

    This sampler is based on *independent sampling*.
    See also :class:`~optuna.samplers.BaseSampler` for more details of 'independent sampling'.

    Example:

        .. testcode::

            import optuna
            from optuna.samplers import RandomSampler

            def objective(trial):
                x = trial.suggest_uniform('x', -5, 5)
                return x**2

            study = optuna.create_study(sampler=RandomSampler())
            study.optimize(objective, n_trials=10)

    Args:
        seed: Seed for random number generator.
    """

    def __init__(self, seed=None):
        # type: (Optional[int]) -> None
        self._rng = numpy.random.RandomState(seed)

    def reseed_rng(self) -> None:
        # Replace the RNG with a freshly (OS-)seeded one so parallel copies
        # of this sampler do not produce identical suggestions.
        self._rng = numpy.random.RandomState()

    def infer_relative_search_space(self, study, trial):
        # type: (Study, FrozenTrial) -> Dict[str, BaseDistribution]
        # Random sampling never uses relative sampling.
        return {}

    def sample_relative(self, study, trial, search_space):
        # type: (Study, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]
        return {}

    def sample_independent(self, study, trial, param_name, param_distribution):
        # type: (Study, FrozenTrial, str, distributions.BaseDistribution) -> Any
        """Draw one value uniformly at random from ``param_distribution``."""
        if isinstance(param_distribution, distributions.UniformDistribution):
            return self._rng.uniform(param_distribution.low, param_distribution.high)
        elif isinstance(param_distribution, distributions.LogUniformDistribution):
            # Sample uniformly in log space, then map back.
            log_low = numpy.log(param_distribution.low)
            log_high = numpy.log(param_distribution.high)
            return float(numpy.exp(self._rng.uniform(log_low, log_high)))
        elif isinstance(param_distribution, distributions.DiscreteUniformDistribution):
            q = param_distribution.q
            r = param_distribution.high - param_distribution.low
            # [low, high] is shifted to [0, r] to align sampled values at regular intervals.
            # The extra 0.5*q on each side gives the endpoints equal rounding mass.
            low = 0 - 0.5 * q
            high = r + 0.5 * q
            s = self._rng.uniform(low, high)
            v = numpy.round(s / q) * q + param_distribution.low
            # v may slightly exceed range due to round-off errors.
            return float(min(max(v, param_distribution.low), param_distribution.high))
        elif isinstance(param_distribution, distributions.IntUniformDistribution):
            # [low, high] is shifted to [0, r] to align sampled values at regular intervals.
            r = (param_distribution.high - param_distribution.low) / param_distribution.step
            # numpy.random.randint includes low but excludes high.
            s = self._rng.randint(0, r + 1)
            v = s * param_distribution.step + param_distribution.low
            return int(v)
        elif isinstance(param_distribution, distributions.IntLogUniformDistribution):
            # Widen by 0.5 so integer endpoints get equal rounding mass,
            # then snap onto the step grid and clamp back into range.
            log_low = numpy.log(param_distribution.low - 0.5)
            log_high = numpy.log(param_distribution.high + 0.5)
            s = numpy.exp(self._rng.uniform(log_low, log_high))
            v = (
                numpy.round((s - param_distribution.low) / param_distribution.step)
                * param_distribution.step
                + param_distribution.low
            )
            return int(min(max(v, param_distribution.low), param_distribution.high))
        elif isinstance(param_distribution, distributions.CategoricalDistribution):
            choices = param_distribution.choices
            index = self._rng.randint(0, len(choices))
            return choices[index]
        else:
            raise NotImplementedError
| 3,847
| 40.376344
| 94
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/samplers/__init__.py
|
# from optuna.samplers._search_space import intersection_search_space # NOQA
# from optuna.samplers._search_space import IntersectionSearchSpace # NOQA
# from optuna.samplers.base import BaseSampler # NOQA
# from optuna.samplers.random import RandomSampler # NOQA
# from optuna.samplers.tpe import TPESampler # NOQA
from ._search_space import intersection_search_space # NOQA
from ._search_space import IntersectionSearchSpace # NOQA
from .base import BaseSampler # NOQA
from .random import RandomSampler # NOQA
from .tpe import TPESampler # NOQA
| 556
| 54.7
| 77
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/samplers/tpe/parzen_estimator.py
|
from typing import Callable
from typing import NamedTuple
from typing import Optional
import numpy
from numpy import ndarray
EPS = 1e-12
class _ParzenEstimatorParameters(
NamedTuple(
"_ParzenEstimatorParameters",
[
("consider_prior", bool),
("prior_weight", Optional[float]),
("consider_magic_clip", bool),
("consider_endpoints", bool),
("weights", Callable[[int], ndarray]),
],
)
):
pass
class _ParzenEstimator(object):
    """Weighted 1-D Gaussian mixture (kernel density estimate) over observed values."""

    def __init__(
        self,
        mus,  # type: ndarray
        low,  # type: float
        high,  # type: float
        parameters,  # type: _ParzenEstimatorParameters
    ):
        # type: (...) -> None
        # Precompute the mixture weights, component means and stds once.
        self.weights, self.mus, self.sigmas = _ParzenEstimator._calculate(
            mus,
            low,
            high,
            parameters.consider_prior,
            parameters.prior_weight,
            parameters.consider_magic_clip,
            parameters.consider_endpoints,
            parameters.weights,
        )

    @classmethod
    def _calculate(
        cls,
        mus,  # type: ndarray
        low,  # type: float
        high,  # type: float
        consider_prior,  # type: bool
        prior_weight,  # type: Optional[float]
        consider_magic_clip,  # type: bool
        consider_endpoints,  # type: bool
        weights_func,  # type: Callable[[int], ndarray]
    ):
        # type: (...) -> Tuple[ndarray, ndarray, ndarray]
        """Calculates the weights, mus and sigma for the Parzen estimator.

        Note: When the number of observations is zero, the Parzen estimator ignores the
        `consider_prior` flag and utilizes a prior. Validation of this approach is future work.
        """
        # initialize mus and sigmas for the KDE
        mus = numpy.asarray(mus)
        sigma = numpy.asarray([], dtype=float)
        prior_pos = 0

        # Parzen estimator construction requires at least one observation or a prior.
        if mus.size == 0:
            consider_prior = True

        if consider_prior:
            # prior mean is the midpoint of the domain
            prior_mu = 0.5 * (low + high)
            # prior std is the full range of values
            prior_sigma = 1.0 * (high - low)
            if mus.size == 0:
                # No observations: the mixture is the prior alone.
                # `low_sorted_mus_high` has one slot per component plus one
                # slot at each end for the domain bounds (filled below).
                low_sorted_mus_high = numpy.zeros(3)
                sorted_mus = low_sorted_mus_high[1:-1]
                sorted_mus[0] = prior_mu
                sigma = numpy.asarray([prior_sigma])
                prior_pos = 0
                order = []  # type: List[int]
            else:  # When mus.size is greater than 0. <- OPTUNA COMMENT
                # We decide the place of the prior. <- OPTUNA COMMENT
                # order = indices that would sort the mus
                order = numpy.argsort(mus).astype(int)
                # mus in increasing order
                ordered_mus = mus[order]
                # find the index where prior_mu should be inserted to maintain order
                prior_pos = numpy.searchsorted(ordered_mus, prior_mu)
                # We decide the mus. <- OPTUNA COMMENT
                # `sorted_mus` is a view into the middle of
                # `low_sorted_mus_high`, which is reused below for the
                # neighbour-distance computation.
                low_sorted_mus_high = numpy.zeros(len(mus) + 3)
                sorted_mus = low_sorted_mus_high[1:-1]
                # insert the prior at its sorted position among the mus
                sorted_mus[:prior_pos] = ordered_mus[:prior_pos]
                sorted_mus[prior_pos] = prior_mu
                sorted_mus[prior_pos + 1 :] = ordered_mus[prior_pos:]
        else:
            order = numpy.argsort(mus)
            # We decide the mus.
            low_sorted_mus_high = numpy.zeros(len(mus) + 2)
            sorted_mus = low_sorted_mus_high[1:-1]
            sorted_mus[:] = mus[order]

        # We decide the sigma.
        if mus.size > 0:
            low_sorted_mus_high[-1] = high
            low_sorted_mus_high[0] = low
            # The std of each Gaussian is the greater of the distances to its
            # left and right neighbour (the domain bounds act as neighbours
            # for the extreme components).
            sigma = numpy.maximum(
                low_sorted_mus_high[1:-1] - low_sorted_mus_high[0:-2],
                low_sorted_mus_high[2:] - low_sorted_mus_high[1:-1],
            )
            # If not considering endpoints, set the std of the min and max mus to
            # the distance to their only interior neighbour.
            # DEFAULT consider_endpoints=False
            if not consider_endpoints and low_sorted_mus_high.size > 2:
                sigma[0] = low_sorted_mus_high[2] - low_sorted_mus_high[1]
                sigma[-1] = low_sorted_mus_high[-2] - low_sorted_mus_high[-3]

        # We decide the weights. <- OPTUNA
        # weights_func yields a ramp of weights indexed by observation order.
        unsorted_weights = weights_func(mus.size)
        if consider_prior:
            # array of zeros in the shape of sorted_mus
            sorted_weights = numpy.zeros_like(sorted_mus)
            # reorder the weights to match the mu ordering, splicing the
            # prior weight in at its sorted position
            sorted_weights[:prior_pos] = unsorted_weights[order[:prior_pos]]
            sorted_weights[prior_pos] = prior_weight
            sorted_weights[prior_pos + 1 :] = unsorted_weights[order[prior_pos:]]
        else:
            sorted_weights = unsorted_weights[order]
        # normalize the weights so they sum to 1
        sorted_weights /= sorted_weights.sum()

        # We adjust the range of the 'sigma' according to the 'consider_magic_clip' flag. <-OTPUNA
        # The original TPE paper clips stds to remain in a feasible range.
        # largest permitted std: the full domain width
        maxsigma = 1.0 * (high - low)
        # smallest permitted std shrinks as more components are added
        if consider_magic_clip:
            minsigma = 1.0 * (high - low) / min(100.0, (1.0 + len(sorted_mus)))
        else:
            minsigma = EPS
        # clamp all stds into [minsigma, maxsigma]
        sigma = numpy.clip(sigma, minsigma, maxsigma)
        if consider_prior:
            # the prior std is exempt from clipping
            sigma[prior_pos] = prior_sigma

        return sorted_weights, sorted_mus, sigma
| 6,249
| 38.556962
| 125
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/samplers/tpe/sampler.py
|
import math
import numpy as np
import scipy.special
from scipy.stats import truncnorm
# from optuna import distributions
# from optuna.samplers import base
# from optuna.samplers import random
# from optuna.samplers.tpe.parzen_estimator import _ParzenEstimator
# from optuna.samplers.tpe.parzen_estimator import _ParzenEstimatorParameters
# from optuna.study import StudyDirection
# from optuna.trial import TrialState
from ... import distributions
from ...samplers import base
from ...samplers import random
from ...samplers.tpe.parzen_estimator import _ParzenEstimator
from ...samplers.tpe.parzen_estimator import _ParzenEstimatorParameters
from ...study import StudyDirection
from ...trial import TrialState
EPS = 1e-12
def default_gamma(x):
# type: (int) -> int
return min(int(np.ceil(0.1 * x)), 25)
def hyperopt_default_gamma(x):
    # type: (int) -> int
    """Hyperopt-style gamma: ceil(25% of sqrt(*x*)), capped at 25."""
    return min(math.ceil(0.25 * math.sqrt(x)), 25)
def default_weights(x):
    # type: (int) -> np.ndarray
    """Per-observation weights for the Parzen estimator.

    With 25 observations or fewer every observation weighs 1.0; beyond that,
    the oldest ``x - 25`` observations get a linear ramp from ``1/x`` up to
    1.0 while the newest 25 stay at full weight.
    """
    if x == 0:
        return np.asarray([])
    if x < 25:
        return np.ones(x)
    older = np.linspace(1.0 / x, 1.0, num=x - 25)
    newest = np.ones(25)
    return np.concatenate([older, newest], axis=0)
class TPESampler(base.BaseSampler):
"""Sampler using TPE (Tree-structured Parzen Estimator) algorithm.
This sampler is based on *independent sampling*.
See also :class:`~optuna.samplers.BaseSampler` for more details of 'independent sampling'.
On each trial, for each parameter, TPE fits one Gaussian Mixture Model (GMM) ``l(x)`` to
the set of parameter values associated with the best objective values, and another GMM
``g(x)`` to the remaining parameter values. It chooses the parameter value ``x`` that
maximizes the ratio ``l(x)/g(x)``.
For further information about TPE algorithm, please refer to the following papers:
- `Algorithms for Hyper-Parameter Optimization
<https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf>`_
- `Making a Science of Model Search: Hyperparameter Optimization in Hundreds of
Dimensions for Vision Architectures <http://proceedings.mlr.press/v28/bergstra13.pdf>`_
Example:
.. testcode::
import optuna
from optuna.samplers import TPESampler
def objective(trial):
x = trial.suggest_uniform('x', -10, 10)
return x**2
study = optuna.create_study(sampler=TPESampler())
study.optimize(objective, n_trials=10)
Args:
consider_prior:
Enhance the stability of Parzen estimator by imposing a Gaussian prior when
:obj:`True`. The prior is only effective if the sampling distribution is
either :class:`~optuna.distributions.UniformDistribution`,
:class:`~optuna.distributions.DiscreteUniformDistribution`,
:class:`~optuna.distributions.LogUniformDistribution`,
:class:`~optuna.distributions.IntUniformDistribution`,
or :class:`~optuna.distributions.IntLogUniformDistribution`.
prior_weight:
The weight of the prior. This argument is used in
:class:`~optuna.distributions.UniformDistribution`,
:class:`~optuna.distributions.DiscreteUniformDistribution`,
:class:`~optuna.distributions.LogUniformDistribution`,
:class:`~optuna.distributions.IntUniformDistribution`,
:class:`~optuna.distributions.IntLogUniformDistribution`, and
:class:`~optuna.distributions.CategoricalDistribution`.
consider_magic_clip:
Enable a heuristic to limit the smallest variances of Gaussians used in
the Parzen estimator.
consider_endpoints:
Take endpoints of domains into account when calculating variances of Gaussians
in Parzen estimator. See the original paper for details on the heuristics
to calculate the variances.
n_startup_trials:
The random sampling is used instead of the TPE algorithm until the given number
of trials finish in the same study.
n_ei_candidates:
Number of candidate samples used to calculate the expected improvement.
gamma:
A function that takes the number of finished trials and returns the number
of trials to form a density function for samples with low grains.
See the original paper for more details.
weights:
A function that takes the number of finished trials and returns a weight for them.
See `Making a Science of Model Search: Hyperparameter Optimization in Hundreds of
Dimensions for Vision Architectures <http://proceedings.mlr.press/v28/bergstra13.pdf>`_
for more details.
seed:
Seed for random number generator.
"""
def __init__(
    self,
    consider_prior=True,  # type: bool
    prior_weight=1.0,  # type: float
    consider_magic_clip=True,  # type: bool
    consider_endpoints=False,  # type: bool
    n_startup_trials=10,  # type: int
    n_ei_candidates=24,  # type: int
    gamma=default_gamma,  # type: Callable[[int], int]
    weights=default_weights,  # type: Callable[[int], np.ndarray]
    seed=None,  # type: Optional[int]
    k=5,
):
    # type: (...) -> None
    """Store TPE hyper-parameters and set up the fallback random sampler.

    Parameters mirror the class docstring. ``k`` is an extra knob added by
    this fork; its meaning is not visible here — presumably a top-k
    setting, confirm against callers.
    """
    # Bundle the KDE construction options once; reused for every estimate.
    self._parzen_estimator_parameters = _ParzenEstimatorParameters(
        consider_prior, prior_weight, consider_magic_clip, consider_endpoints, weights
    )
    self._prior_weight = prior_weight
    self._n_startup_trials = n_startup_trials
    self._n_ei_candidates = n_ei_candidates
    self._gamma = gamma
    self._weights = weights
    self._k = k

    self._rng = np.random.RandomState(seed)
    # Used verbatim for the first `n_startup_trials` trials.
    self._random_sampler = random.RandomSampler(seed=seed)
def reseed_rng(self) -> None:
    # Draw fresh, unseeded random states so parallel replicas of this
    # sampler do not produce identical suggestions.
    self._rng = np.random.RandomState()
    self._random_sampler.reseed_rng()
def infer_relative_search_space(self, study, trial):
    # type: (Study, FrozenTrial) -> Dict[str, BaseDistribution]
    """TPE samples every parameter independently; the relative space is empty."""
    return dict()
def sample_relative(self, study, trial, search_space):
    # type: (Study, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]
    """No relative sampling is performed by TPE; always returns no values."""
    return dict()
def sample_independent(self, study, trial, param_name, param_distribution):
    # type: (Study, FrozenTrial, str, BaseDistribution) -> Any
    """Sample one parameter with TPE (pure random during the startup phase).

    NOTE(review): unlike upstream Optuna, in this fork the `_sample_*`
    helpers appear to return ``(best_sample, candidate_samples, scores)``
    triples (see `_sample_discrete_uniform`), so the value returned here
    is a 3-tuple for most branches — confirm against callers.
    """
    # parameter values and scores of previous iterations, of the form
    # (param_value, (-step, value))
    values, scores = _get_observation_pairs(study, param_name, trial)

    n = len(values)

    # randomly sample at the start, before enough observations exist
    if n < self._n_startup_trials:
        return (
            self._random_sampler.sample_independent(
                study, trial, param_name, param_distribution
            ),
            None,
            None,
        )

    # split the hyperparameters into good=below and bad=above. The best 10% of values
    # or the 25 best values go in "below"
    below_param_values, above_param_values = self._split_observation_pairs(values, scores)

    # Dispatch on the distribution type.
    if isinstance(param_distribution, distributions.UniformDistribution):
        return self._sample_uniform(param_distribution, below_param_values, above_param_values)
    elif isinstance(param_distribution, distributions.LogUniformDistribution):
        return self._sample_loguniform(
            param_distribution, below_param_values, above_param_values
        )
    elif isinstance(param_distribution, distributions.DiscreteUniformDistribution):
        return self._sample_discrete_uniform(
            param_distribution, below_param_values, above_param_values
        )
    elif isinstance(param_distribution, distributions.IntUniformDistribution):
        return self._sample_int(param_distribution, below_param_values, above_param_values)
    elif isinstance(param_distribution, distributions.IntLogUniformDistribution):
        return self._sample_int_loguniform(
            param_distribution, below_param_values, above_param_values
        )
    elif isinstance(param_distribution, distributions.CategoricalDistribution):
        # Categorical sampling yields an index into `choices`.
        index, samples, scores = self._sample_categorical_index(
            param_distribution, below_param_values, above_param_values
        )
        return param_distribution.choices[index], samples, scores
    else:
        distribution_list = [
            distributions.UniformDistribution.__name__,
            distributions.LogUniformDistribution.__name__,
            distributions.DiscreteUniformDistribution.__name__,
            distributions.IntUniformDistribution.__name__,
            distributions.IntLogUniformDistribution.__name__,
            distributions.CategoricalDistribution.__name__,
        ]
        raise NotImplementedError(
            "The distribution {} is not implemented. "
            "The parameter distribution should be one of the {}".format(
                param_distribution, distribution_list
            )
        )
def _split_observation_pairs(
    self,
    config_vals,  # type: List[Optional[float]]
    loss_vals,  # type: List[Tuple[float, float]]
):
    # type: (...) -> Tuple[np.ndarray, np.ndarray]
    """Split observations into the best gamma(n) ("below") and the rest ("above")."""
    # parameters and objective function values to np arrays
    config_vals = np.asarray(config_vals)
    # structured dtype so argsort orders lexicographically by (step, score)
    loss_vals = np.asarray(loss_vals, dtype=[("step", float), ("score", float)])

    # number of good observations
    n_below = self._gamma(len(config_vals))
    # indices of values that would sort the losses in ascending order
    loss_ascending = np.argsort(loss_vals)
    # best parameter values; None entries (parameter absent in a trial) are dropped
    below = config_vals[np.sort(loss_ascending[:n_below])]
    below = np.asarray([v for v in below if v is not None], dtype=float)
    # worst parameter values
    above = config_vals[np.sort(loss_ascending[n_below:])]
    above = np.asarray([v for v in above if v is not None], dtype=float)
    return below, above
def _sample_uniform(self, distribution, below, above):
    # type: (distributions.UniformDistribution, np.ndarray, np.ndarray) -> float
    """Sample from a linear-scale uniform distribution via the numerical TPE path."""
    return self._sample_numerical(distribution.low, distribution.high, below, above)
def _sample_loguniform(self, distribution, below, above):
    # type: (distributions.LogUniformDistribution, np.ndarray, np.ndarray) -> float
    """Sample from a log-scale uniform distribution via the numerical TPE path."""
    return self._sample_numerical(
        distribution.low, distribution.high, below, above, is_log=True
    )
def _sample_discrete_uniform(self, distribution, below, above):
    # type:(distributions.DiscreteUniformDistribution, np.ndarray, np.ndarray) -> float
    """Sample from a discretized uniform distribution.

    Returns a ``(best_sample, samples, scores)`` triple as produced by this
    fork's `_sample_numerical`, with all values shifted back into the
    distribution's original range.
    """
    # step size (1 for integers)
    q = distribution.q
    # value range
    r = distribution.high - distribution.low
    # [low, high] is shifted to [0, r] to align sampled values at regular intervals <- OPTUNA COMMENT
    # Use ±0.5*q so rounding distributes evenly around the endpoints.
    low = 0 - 0.5 * q
    high = r + 0.5 * q

    # Shift below and above to [0, r] <- OPTUNA COMMENT
    # i.e. shift hyperparameter values to 0, 1, 2, ... when q=1.
    # NOTE(review): the in-place subtraction mutates the caller's arrays.
    above -= distribution.low
    below -= distribution.low

    # best sample plus the full candidate set and their scores
    best_sample, samples, scores = self._sample_numerical(low, high, below, above, q=q)
    # shift back into the original range and clamp against round-off
    best_sample += distribution.low
    best_sample = min(max(best_sample, distribution.low), distribution.high)
    samples = samples + distribution.low
    return best_sample, samples, scores
def _sample_int(self, distribution, below, above):
    # type: (distributions.IntUniformDistribution, np.ndarray, np.ndarray) -> int
    """TPE sampling for an integer-uniform hyperparameter.

    An integer-uniform distribution is a discrete-uniform distribution whose
    step ``q`` equals the integer step, so the work is delegated there and the
    results are cast to ``int``.
    """
    equivalent = distributions.DiscreteUniformDistribution(
        low=distribution.low, high=distribution.high, q=distribution.step
    )
    best, candidates, candidate_scores = self._sample_discrete_uniform(
        equivalent, below, above
    )
    return int(best), [int(c) for c in candidates], candidate_scores
def _sample_int_loguniform(self, distribution, below, above):
# type: (distributions.IntLogUniformDistribution, np.ndarray, np.ndarray) -> int
low = distribution.low - 0.5
high = distribution.high + 0.5
sample = self._sample_numerical(low, high, below, above, is_log=True)
best_sample = (
np.round((sample - distribution.low) / distribution.step) * distribution.step
+ distribution.low
)
return int(min(max(best_sample, distribution.low), distribution.high))
def _sample_numerical(
    self,
    low, # type: float
    high, # type: float
    below, # type: np.ndarray
    above, # type: np.ndarray
    q=None, # type: Optional[float]
    is_log=False, # type: bool
):
    # type: (...) -> float
    """Core TPE step for numerical hyperparameters.

    Fits a Parzen estimator (Gaussian mixture) over the good ("below")
    observations, draws candidate values from it, scores each candidate by
    its likelihood ratio against a second estimator fitted over the bad
    ("above") observations, and picks the best via ``_compare``.

    NOTE(review): despite the ``-> float`` type comment, this fork returns a
    ``(best, top_k_samples, top_k_scores)`` triple.
    """
    # log distribution: transform everything to log space; the result is
    # exponentiated back at the end
    if is_log:
        low = np.log(low)
        high = np.log(high)
        below = np.log(below)
        above = np.log(above)
    # number of ei candidates = 24
    size = (self._n_ei_candidates,)
    # get sigmas and sampling weights in sorted order for the good points
    parzen_estimator_below = _ParzenEstimator(
        mus=below, low=low, high=high, parameters=self._parzen_estimator_parameters
    )
    # get a sample of 24 good points
    samples_below = self._sample_from_gmm(
        parzen_estimator=parzen_estimator_below, low=low, high=high, q=q, size=size,
    )
    # log likelihoods of the sample points
    log_likelihoods_below = self._gmm_log_pdf(
        samples=samples_below,
        parzen_estimator=parzen_estimator_below,
        low=low,
        high=high,
        q=q,
    )
    # build a KDE on the bad=above points
    parzen_estimator_above = _ParzenEstimator(
        mus=above, low=low, high=high, parameters=self._parzen_estimator_parameters
    )
    # log likelihoods of the good sampled points occurring in the bad=above KDE
    log_likelihoods_above = self._gmm_log_pdf(
        samples=samples_below,
        parzen_estimator=parzen_estimator_above,
        low=low,
        high=high,
        q=q,
    )
    # rank candidates by the likelihood ratio l(x)/g(x); keep the top-k
    ret, samples, scores = TPESampler._compare(
        samples=samples_below, log_l=log_likelihoods_below, log_g=log_likelihoods_above, k=self._k
    )
    ret = float(ret[0])
    # undo the log transform on the winner only (candidate samples stay in
    # the transformed space here; callers shift/clip as needed)
    ret = math.exp(ret) if is_log else ret
    # ret = float(
    #     TPESampler._compare(
    #         samples=samples_below, log_l=log_likelihoods_below, log_g=log_likelihoods_above
    #     )[0]
    # )
    return ret, samples, scores
def _sample_categorical_index(self, distribution, below, above):
    # type: (distributions.CategoricalDistribution, np.ndarray, np.ndarray) -> int
    """TPE sampling for a categorical hyperparameter.

    Builds two weighted categorical distributions — one from the good
    ("below") trials and one from the bad ("above") trials — samples
    candidate category indices from the good one, and ranks them by the
    ratio of their probabilities under the two distributions.

    Returns ``(best_index, top_k_choice_values, top_k_scores)``. Note the
    best value is returned as an int index while the top-k candidates are
    mapped back to the actual choice values.
    """
    # parameter values
    choices = distribution.choices
    # convert the good=below and bad=above values to ints
    below = list(map(int, below))
    above = list(map(int, above))
    upper = len(choices)
    # number of ei candidates = 24
    size = (self._n_ei_candidates,)
    # Ramp of weights, weights are smaller for trials done earlier on
    weights_below = self._weights(len(below))
    # Weighted count of the number of occurrences of each good hyperparameter (using int IDs)
    counts_below = np.bincount(below, minlength=upper, weights=weights_below)
    # Add a prior = 1 to avoid zero probability of choosing a hyperparameter
    weighted_below = counts_below + self._prior_weight
    # normalize
    weighted_below /= weighted_below.sum()
    # sample the good categorical values
    samples_below = self._sample_from_categorical_dist(weighted_below, size)
    # log probability of each categorical value in the sample
    log_likelihoods_below = TPESampler._categorical_log_pdf(samples_below, weighted_below)
    ## Now same for bad points
    # Ramp of weights
    weights_above = self._weights(len(above))
    # Weighted count of the number of occurrences of each bad hyperparameter
    counts_above = np.bincount(above, minlength=upper, weights=weights_above)
    # Add a prior = 1 to avoid zero probability of choosing a hyperparameter
    weighted_above = counts_above + self._prior_weight
    # normalize
    weighted_above /= weighted_above.sum()
    # log likelihood of the GOOD sample points with their probabilities of being in the bad group
    log_likelihoods_above = TPESampler._categorical_log_pdf(samples_below, weighted_above)
    # rank the sampled indices by likelihood ratio; keep the top-k
    ret, samples, scores = TPESampler._compare(
        samples=samples_below, log_l=log_likelihoods_below, log_g=log_likelihoods_above, k=self._k
    )
    ret = int(ret[0])
    # map candidate indices back to the user-facing choice values
    samples = [distribution.choices[samples[index]] for index in range(len(samples))]
    return ret, samples, scores
    # return int(
    #     TPESampler._compare(
    #         samples=samples_below, log_l=log_likelihoods_below, log_g=log_likelihoods_above
    #     )[0]
    # )
def _sample_from_gmm(
    self,
    parzen_estimator, # type: _ParzenEstimator
    low, # type: float
    high, # type: float
    q=None, # type: Optional[float]
    size=(), # type: Tuple
):
    # type: (...) -> np.ndarray
    """Draw ``size`` samples from the Parzen estimator's Gaussian mixture.

    Each draw first picks a mixture component by its weight, then samples
    from that component's truncated normal restricted to [low, high].
    If ``q`` is given, samples are rounded onto the step grid.
    """
    # weights, mus, and stds sorted by increasing mus of the good points
    weights = parzen_estimator.weights
    mus = parzen_estimator.mus
    sigmas = parzen_estimator.sigmas
    weights, mus, sigmas = map(np.asarray, (weights, mus, sigmas))
    if low >= high:
        raise ValueError(
            "The 'low' should be lower than the 'high'. "
            "But (low, high) = ({}, {}).".format(low, high)
        )
    # weighted multinomial sample of 24 good points based on the WEIGHTS, not mean/std
    active = np.argmax(self._rng.multinomial(1, weights, size=size), axis=-1)
    # standardize the truncation bounds for each selected component
    trunc_low = (low - mus[active]) / sigmas[active]
    trunc_high = (high - mus[active]) / sigmas[active]
    while True:
        # sample from a truncated normal dist with means and stds of the active points
        samples = truncnorm.rvs(
            trunc_low,
            trunc_high,
            size=size,
            loc=mus[active],
            scale=sigmas[active],
            random_state=self._rng,
        )
        # truncnorm guarantees samples <= high; re-draw until strictly below.
        # NOTE(review): original "why not" comment — the strict inequality
        # looks intentional (values equal to `high` are rejected), but the
        # loop could in principle spin if mass concentrates at `high`.
        if (samples < high).all():  # why not
            break
    if q is None:
        return samples
    else:
        # round the samples onto the q-step grid (ints when q == 1)
        return np.round(samples / q) * q
def _gmm_log_pdf(
    self,
    samples, # type: np.ndarray
    parzen_estimator, # type: _ParzenEstimator
    low, # type: float
    high, # type: float
    q=None, # type: Optional[float]
):
    # type: (...) -> np.ndarray
    """Log-density of ``samples`` under the Parzen estimator's mixture.

    The density is renormalized by the probability mass the mixture places
    inside [low, high]. For discretized distributions (``q`` given), the
    density of a sample is the mixture mass of its +/- q/2 bucket, clipped
    to the [low, high] range.
    """
    # weights, mus, and stds sorted by increasing mus of the good points
    weights = parzen_estimator.weights
    mus = parzen_estimator.mus
    sigmas = parzen_estimator.sigmas
    samples, weights, mus, sigmas = map(np.asarray, (samples, weights, mus, sigmas))
    if samples.size == 0:
        return np.asarray([], dtype=float)
    # BUG FIX: the error messages below claimed "2-dimension" although the
    # checks enforce exactly one dimension.
    if weights.ndim != 1:
        raise ValueError(
            "The 'weights' should be 1-dimensional. "
            "But weights.shape = {}".format(weights.shape)
        )
    if mus.ndim != 1:
        raise ValueError(
            "The 'mus' should be 1-dimensional. " "But mus.shape = {}".format(mus.shape)
        )
    if sigmas.ndim != 1:
        raise ValueError(
            "The 'sigmas' should be 1-dimensional. " "But sigmas.shape = {}".format(sigmas.shape)
        )
    # probability of each point times the probability it is in the
    # accepted range [low, high], ie normalization constant. weights are normalized
    p_accept = np.sum(
        weights
        * (
            TPESampler._normal_cdf(high, mus, sigmas)
            - TPESampler._normal_cdf(low, mus, sigmas)
        )
    )
    if q is None:
        # continuous case: standard Gaussian-mixture log pdf via log-sum-exp
        distance = samples[..., None] - mus
        mahalanobis = (distance / np.maximum(sigmas, EPS)) ** 2
        Z = np.sqrt(2 * np.pi) * sigmas
        coefficient = weights / Z / p_accept
        return TPESampler._logsum_rows(-0.5 * mahalanobis + np.log(coefficient))
    else:
        cdf_func = TPESampler._normal_cdf
        # bounds on the normal distribution of each sample point. This is the probability space
        # that would have allowed for each sample value to be chosen
        upper_bound = np.minimum(samples + q / 2.0, high)
        lower_bound = np.maximum(samples - q / 2.0, low)
        # probability for each sampled point
        # weights[..., None] is weights reshaped from (len(weights),) to (len(weights),1), same for mus and sigmas
        # upper_bound[None] are the upper_bounds reshaped from (len(upper_bound),) to (1, len(upper_bounds))
        # For each sample point, we compute the probability of it occurring in each mixture component
        # (one per observed point), sum the mass, weighting each component by its mixture weight
        probabilities = np.sum(
            weights[..., None]
            * (
                cdf_func(upper_bound[None], mus[..., None], sigmas[..., None])
                - cdf_func(lower_bound[None], mus[..., None], sigmas[..., None])
            ),
            axis=0,
        )
        # normalize by the probability of accepting
        return np.log(probabilities + EPS) - np.log(p_accept + EPS)
def _sample_from_categorical_dist(self, probabilities, size): # weights=probabilities
# type: (np.ndarray, Tuple[int]) -> np.ndarray
# probabilities as np array
if probabilities.size == 1 and isinstance(probabilities[0], np.ndarray):
probabilities = probabilities[0]
probabilities = np.asarray(probabilities)
if size == (0,):
return np.asarray([], dtype=float)
assert len(size)
assert probabilities.ndim == 1
# n_draws = 24
n_draws = int(np.prod(size))
# draw samples from the multinomial distribution
sample = self._rng.multinomial(n=1, pvals=probabilities, size=n_draws)
assert sample.shape == size + (probabilities.size,)
# 24 categorical values selected from the multinomial sample
return_val = np.dot(sample, np.arange(probabilities.size))
return_val.shape = size
return return_val
@classmethod
def _categorical_log_pdf(
cls,
sample, # type: np.ndarray
p, # type: np.ndarray
):
# type: (...) -> np.ndarray
if sample.size:
# log probability of each sample
return np.log(np.asarray(p)[sample])
else:
return np.asarray([])
@classmethod
def _compare(cls, samples, log_l, log_g, k):
# type: (np.ndarray, np.ndarray, np.ndarray) -> np.ndarray
# good samples, good log likelihoods, bad log likelihoods
samples, log_l, log_g = map(np.asarray, (samples, log_l, log_g))
if samples.size:
# ratio of likelihoods = difference of log likelihoods
score = log_l - log_g
if samples.size != score.size:
raise ValueError(
"The size of the 'samples' and that of the 'score' "
"should be same. "
"But (samples.size, score.size) = ({}, {})".format(samples.size, score.size)
)
# this is the hyperparameter with the best expected score, can find top-k
best = np.argmax(score)
uniq_smpls, indices = np.unique(samples, return_index=True)
uniq_scores = np.exp(score[indices])
# topk=min(len(uniq_scores), 32)
k = min(k, len(uniq_smpls))
indicies = np.argpartition(uniq_scores, -k)[-k:]
uniq_smpls = uniq_smpls[indicies]
uniq_scores = uniq_scores[indicies]
# sorted_indices = np.argsort(uniq_scores)[::-1]
# uniq_smpls = uniq_smpls[sorted_indices]
# uniq_scores = uniq_scores[sorted_indices]
return np.asarray([samples[best]] * samples.size), uniq_smpls, uniq_scores #/ np.sum(uniq_scores)
else:
return np.asarray([])
@classmethod
def _logsum_rows(cls, x):
# type: (np.ndarray) -> np.ndarray
x = np.asarray(x)
m = x.max(axis=1)
return np.log(np.exp(x - m[:, None]).sum(axis=1)) + m
@classmethod
def _normal_cdf(cls, x, mu, sigma):
    # type: (float, np.ndarray, np.ndarray) -> np.ndarray
    """Gaussian CDF of ``x`` for each (mu, sigma) pair, elementwise."""
    mu = np.asarray(mu)
    sigma = np.asarray(sigma)
    # Standardize; EPS guards against division by a vanishing sigma.
    z = (x - mu) / np.maximum(np.sqrt(2) * sigma, EPS)
    return 0.5 * (1 + scipy.special.erf(z))
@classmethod
def _log_normal_cdf(cls, x, mu, sigma):
    # type: (float, np.ndarray, np.ndarray) -> np.ndarray
    """Log-normal CDF of ``x`` for each (mu, sigma) pair, elementwise."""
    mu = np.asarray(mu)
    sigma = np.asarray(sigma)
    # The log-normal CDF is undefined for negative arguments.
    if x < 0:
        raise ValueError("Negative argument is given to _lognormal_cdf. x: {}".format(x))
    # EPS keeps log() finite at x == 0 and avoids division by a tiny sigma.
    numer = np.log(np.maximum(x, EPS)) - mu
    denom = np.maximum(np.sqrt(2) * sigma, EPS)
    return 0.5 + 0.5 * scipy.special.erf(numer / denom)
@staticmethod
def hyperopt_parameters():
    # type: () -> Dict[str, Any]
    """Return the default parameters of hyperopt (v0.1.2).

    Passing the returned dictionary to :class:`~optuna.samplers.TPESampler`
    reproduces hyperopt's default behavior::

        import optuna
        from optuna.samplers import TPESampler

        def objective(trial):
            x = trial.suggest_uniform('x', -10, 10)
            return x**2

        sampler = TPESampler(**TPESampler.hyperopt_parameters())
        study = optuna.create_study(sampler=sampler)
        study.optimize(objective, n_trials=10)

    Returns:
        A dictionary containing the default parameters of hyperopt.
    """
    return dict(
        consider_prior=True,
        prior_weight=1.0,
        consider_magic_clip=True,
        consider_endpoints=False,
        n_startup_trials=20,
        n_ei_candidates=24,
        gamma=hyperopt_default_gamma,
        weights=default_weights,
    )
def _get_observation_pairs(study, param_name, trial):
    # type: (Study, str, FrozenTrial) -> Tuple[List[Optional[float]], List[Tuple[float, float]]]
    """Get observation pairs from the study.

    This function collects observation pairs from the complete or pruned trials of the study.
    The values for trials that don't contain the parameter named ``param_name`` are set to None.
    An observation pair fundamentally consists of a parameter value and an objective value.
    However, due to the pruning mechanism of Optuna, final objective values are not always
    available. Therefore, this function uses intermediate values in addition to the final
    ones, and reports the value with its step count as ``(-step, value)``.
    Consequently, the structure of the observation pair is as follows:
    ``(param_value, (-step, value))``.
    The second element of an observation pair is used to rank observations in
    ``_split_observation_pairs`` method (i.e., observations are sorted lexicographically by
    ``(-step, value)``).

    NOTE(review): the ``trial`` parameter is immediately shadowed by the loop
    variable below and is never read — confirm it can be dropped from callers.
    """
    # flip the sign for maximization so that "smaller is better" everywhere
    sign = 1
    if study.direction == StudyDirection.MAXIMIZE:
        sign = -1
    values = []
    scores = []
    for trial in study.get_trials(deepcopy=False):
        if trial.state is TrialState.COMPLETE and trial.value is not None:
            # finished trial: rank by final value (step -inf sorts first)
            score = (-float("inf"), sign * trial.value)
        elif trial.state is TrialState.PRUNED:
            if len(trial.intermediate_values) > 0:
                # pruned trial: use the latest intermediate value
                step, intermediate_value = max(trial.intermediate_values.items())
                if math.isnan(intermediate_value):
                    score = (-step, float("inf"))
                else:
                    score = (-step, sign * intermediate_value)
            else:
                # pruned before reporting anything: rank last
                score = (float("inf"), 0.0)
        else:
            continue
        # parameter value in internal representation, or None if absent
        param_value = None # type: Optional[float]
        if param_name in trial.params:
            distribution = trial.distributions[param_name]
            param_value = distribution.to_internal_repr(trial.params[param_name])
        values.append(param_value)
        scores.append(score)
    return values, scores
| 29,882
| 39.99177
| 124
|
py
|
BOExplain
|
BOExplain-main/boexplain/optuna/optuna/samplers/tpe/__init__.py
|
# from optuna.samplers.tpe.sampler import TPESampler # NOQA
from .sampler import TPESampler # NOQA
| 100
| 49.5
| 60
|
py
|
BOExplain
|
BOExplain-main/boexplain/files/stats.py
|
from typing import Any
import re
import random
import numpy as np
import pandas as pd
import altair as alt
alt.data_transformers.disable_max_rows()
from json import dumps
from numpyencoder import NumpyEncoder
class Experiment:
    """Configuration and result registry for one BOExplain experiment.

    Holds the search-space description, per-run seeds, and a registry of the
    result objects (``Stats``) produced by each algorithm run.
    """

    # NOTE(review): these are class-level attributes — the registry and the
    # counter are shared across all Experiment instances (and subclasses).
    experiments = dict()
    n_exp = 0

    def __init__(
        self,
        num_cols,
        cat_cols,
        direction,
        n_trials,
        runs,
        correct_pred,
        name,
        file,
        num_cols_range,
        cat_cols_n_uniq,
        dataset_length,
        runtime,
        increment,
        use_seeds_from_paper,
    ):
        self.num_cols = num_cols
        self.cat_cols = cat_cols
        self.direction = direction
        # +1 for minimization, -1 for maximization (used to flip objectives)
        self.dir_enc = 1 if direction == "minimize" else -1
        self.n_trials = n_trials
        self.runs = runs
        self.correct_pred = correct_pred
        self.name = name
        self.file = file
        self.num_cols_range = num_cols_range
        self.cat_cols_n_uniq = cat_cols_n_uniq
        self.dataset_length = dataset_length
        self.runtime = runtime
        self.increment = increment
        if use_seeds_from_paper:
            # fixed seeds used in the paper, for reproducibility
            # NOTE(review): only 10 seeds are listed — confirm `runs` <= 10
            # when this flag is set.
            self.seeds = [
                529840,
                664234,
                978546,
                283991,
                819362,
                348229,
                536289,
                480291,
                500927,
                386602,
            ]
        else:
            self.seeds = random.sample(range(1000000), runs)

    def set_experiment(self, results) -> None:
        """Register a finished run's Stats object (as a dict snapshot)."""
        self.experiments[self.n_exp] = results.__dict__.copy()
        self.n_exp += 1

    def output_file(self):
        """Write every registered experiment as one JSON line to ``self.file``."""
        fo = open(self.file, "w")
        for v in self.experiments.values():
            fo.write(f"{dumps(v, cls=NumpyEncoder)}\n")
        fo.close()

    def visualize_results(self):
        """Build an Altair chart of mean objective value (with stdev band) per iteration."""
        df = pd.DataFrame({}, columns=["Algorithm", "Iteration", "Value"])
        for i in range(len(self.experiments)):
            # one row per (algorithm, iteration list, value list); exploded below
            df_new = pd.DataFrame.from_dict(
                {
                    "Algorithm": self.experiments[i]["cat_enc"],
                    "Iteration": list(range(self.experiments[i]["n_trials"])),
                    "Value": self.experiments[i]["opt_res"],
                },
                orient="index",
            ).T
            # NOTE(review): DataFrame.append was removed in pandas 2.0 —
            # this requires pandas < 2 (pd.concat is the replacement).
            df = df.append(df_new)
        # flatten the per-run value arrays into long form
        df = df.explode("Value")
        df = df.set_index(["Algorithm"]).apply(pd.Series.explode).reset_index()
        # build a human-readable description of the search space for the title
        num_cols = f"{len(self.experiments[0]['num_cols'])} numerical columns: "
        for i, col in enumerate(self.experiments[0]["num_cols"]):
            num_cols += f"{col} (range {self.experiments[0]['num_cols_range'][i][0]} to {self.experiments[0]['num_cols_range'][i][1]}), "
        cat_cols = f"{len(self.experiments[0]['cat_cols'])} categorical columns: "
        for i, col in enumerate(self.experiments[0]["cat_cols"]):
            cat_cols += f"{col} ({self.experiments[0]['cat_cols_n_uniq'][i]} unique values), "
        out_str = f"Experiment: {self.experiments[0]['name']}. Completed {self.experiments[0]['n_trials']} iterations for {self.experiments[0]['runs']} runs. Search space includes "
        if len(self.experiments[0]["num_cols"]) > 0:
            out_str += num_cols
            if len(self.experiments[0]["cat_cols"]) > 0:
                out_str += "and "
        if len(self.experiments[0]["cat_cols"]) > 0:
            out_str += cat_cols
        out_str = f"{out_str[:-2]}."
        # wrap the title into lines of at most 80 characters
        out_lst = [line.strip() for line in re.findall(r".{1,80}(?:\s+|$)", out_str)]
        line = (
            alt.Chart(df)
            .mark_line()
            .encode(
                x="Iteration",
                y=alt.Y("mean(Value)", scale=alt.Scale(zero=False)),
                color="Algorithm",
            )
            .properties(title=out_lst) # {"text": out_lst, "subtitle": ""}
        )
        band = (
            alt.Chart(df)
            .mark_errorband(extent="stdev")
            .encode(
                x="Iteration",
                y=alt.Y("Value", title="Mean Objective Function Value"),
                color="Algorithm",
            )
        )
        chart = band + line
        chart = chart.configure_title(
            anchor="start",
        )
        return chart
class Stats(Experiment):
    """Per-algorithm result accumulator for one experiment.

    Pre-allocates arrays indexed by run (and, where relevant, by trial or by
    elapsed-time bucket), which the optimization driver fills in through the
    setter methods below.
    """

    def __init__(self, experiment, cat_enc) -> None:
        # NOTE(review): this aliases (not copies) the Experiment's __dict__,
        # so attributes set here also appear on `experiment` — confirm this
        # sharing is intentional before restructuring.
        self.__dict__ = experiment.__dict__
        # identifier of the categorical-encoding algorithm for this run set
        self.cat_enc = cat_enc
        self.run_times = np.zeros(self.runs)
        self.n_duplicates = np.zeros(self.runs)
        self.n_zero_tup_preds = np.zeros(self.runs)
        self.preds = dict()
        # objective value per (run, trial); initialized to +/-1e9 sentinel
        # depending on the optimization direction
        self.opt_res = np.full((self.runs, self.n_trials), self.dir_enc * 1e9)
        self.run_time_of_opt_res = np.zeros((self.runs, self.n_trials))
        self.iter_completed = np.zeros(self.runs)
        self.min_iter_completed = self.n_trials
        self.n_tuples_removed_from_data = np.zeros(self.runs)
        self.best_obj_values = np.full(self.runs, self.dir_enc * 1e9)
        self.add_on = np.zeros(self.runs)
        if self.correct_pred:
            # per-(run, trial) quality metrics versus the ground-truth predicate
            self.precision = np.zeros((self.runs, self.n_trials))
            self.recall = np.zeros((self.runs, self.n_trials))
            self.f_score = np.zeros((self.runs, self.n_trials))
            self.jaccard = np.zeros((self.runs, self.n_trials))
            self.final_precision = np.zeros(self.runs)
            self.final_recall = np.zeros(self.runs)
            self.final_f_score = np.zeros(self.runs)
            self.final_jaccard = np.zeros(self.runs)
        self.encoding_time = 0
        self.example_best_predicate = None
        # per-(run, time-bucket) logs; one bucket per `increment` seconds
        self.time_array = np.zeros((self.runs, self.runtime // self.increment))
        self.precision_time_array = np.zeros((self.runs, self.runtime // self.increment))
        self.recall_time_array = np.zeros((self.runs, self.runtime // self.increment))
        self.f_score_time_array = np.zeros((self.runs, self.runtime // self.increment))
        self.jaccard_time_array = np.zeros((self.runs, self.runtime // self.increment))

    def get_run_opt_res_array(self) -> np.ndarray:
        """Fresh per-trial result array pre-filled with the direction sentinel."""
        return np.full(self.n_trials, self.dir_enc * 1e9)

    def get_run_time_array(self) -> np.ndarray:
        """Fresh per-time-bucket array for a single run."""
        return np.zeros(self.runtime // self.increment)

    def get_run_time_of_opt_res_array(self) -> np.ndarray:
        """Fresh per-trial elapsed-time array for a single run."""
        return np.zeros(self.n_trials)

    def set_run_encoding_time(self, run_encoding_time):
        self.encoding_time = run_encoding_time

    def set_run_opt_res(self, run_opt_res: np.ndarray, run: int) -> None:
        self.opt_res[run] = run_opt_res

    def set_run_time_array(self, run_time_array: np.ndarray, run: int) -> None:
        self.time_array[run] = run_time_array

    def set_precision_time_array(self, precision_time_array: np.ndarray, run: int) -> None:
        self.precision_time_array[run] = precision_time_array

    def set_recall_time_array(self, recall_time_array: np.ndarray, run: int) -> None:
        self.recall_time_array[run] = recall_time_array

    def set_f_score_time_array(self, f_score_time_array: np.ndarray, run: int) -> None:
        self.f_score_time_array[run] = f_score_time_array

    def set_jaccard_time_array(self, jaccard_time_array: np.ndarray, run: int) -> None:
        self.jaccard_time_array[run] = jaccard_time_array

    def set_run_time_of_opt_res(self, run_time_opt_res: np.ndarray, run: int) -> None:
        self.run_time_of_opt_res[run] = run_time_opt_res

    def set_run_time(self, run_time: float, run: int) -> None:
        self.run_times[run] = run_time

    def set_add_on(self, add_on: float, run: int) -> None:
        self.add_on[run] = add_on

    def set_run_n_duplicates(self, run_n_dups: float, run: int) -> None:
        self.n_duplicates[run] = run_n_dups

    def set_run_n_zero_tup_preds(self, run_n_zero_tup_preds: float, run: int) -> None:
        self.n_zero_tup_preds[run] = run_n_zero_tup_preds

    def set_run_preds(self, best_pred: dict[Any], run: int) -> None:
        self.preds[run] = best_pred

    def set_run_iter_completed(self, n_iter: int, run) -> None:
        self.iter_completed[run] = n_iter

    def set_run_best_objective_value(self, obj_value: int, run) -> None:
        self.best_obj_values[run] = obj_value

    def set_example_best_predicate(self, best_pred: dict[Any], run) -> None:
        """Remember the predicate of the run that achieved the best objective so far."""
        if self.direction == "minimize":
            if self.best_obj_values[run] == self.best_obj_values.min():
                self.example_best_predicate = best_pred
        else:
            if self.best_obj_values[run] == self.best_obj_values.max():
                self.example_best_predicate = best_pred

    def set_min_iter_completed(self, n_iter: int) -> None:
        """Track the minimum number of iterations completed by any run."""
        if n_iter < self.min_iter_completed:
            self.min_iter_completed = n_iter

    def set_run_n_tuples_removed_from_data(self, num_removed: int, run: int):
        self.n_tuples_removed_from_data[run] = num_removed

    def set_final_precision(self, precision: float, run: int) -> None:
        self.final_precision[run] = precision

    def set_final_recall(self, recall: float, run: int) -> None:
        self.final_recall[run] = recall

    def set_final_f_score(self, f_score: float, run: int) -> None:
        self.final_f_score[run] = f_score

    def set_final_jaccard(self, jaccard: float, run: int) -> None:
        self.final_jaccard[run] = jaccard

    def set_precision(self, precision: np.ndarray, run: int) -> None:
        self.precision[run] = precision

    def set_recall(self, recall: np.ndarray, run: int) -> None:
        self.recall[run] = recall

    def set_f_score(self, f_score: np.ndarray, run: int) -> None:
        self.f_score[run] = f_score

    def set_jaccard(self, jaccard: np.ndarray, run: int) -> None:
        self.jaccard[run] = jaccard

    def output_temp_file(self) -> None:
        """Dump the current state to temp.json (one JSON line)."""
        fo = open("temp.json", "w")
        fo.write(f"{dumps(self.__dict__, cls=NumpyEncoder)}\n")
        fo.close()

    def standard_output(self) -> None:
        """Print a summary of the collected run statistics."""
        print("BEST SCORE", self.best_obj_values)
        print("AVERAGE NUMBER OF TUPLES REMOVED", self.n_tuples_removed_from_data.mean())
        print("AVERAGE TIME", self.run_times.mean())
        print("AVERAGE DUPLICATE COUNT", self.n_duplicates.mean())
        print("AVERAGE ZERO TUPLE", self.n_zero_tup_preds.mean(), "\n")
| 10,326
| 31.784127
| 181
|
py
|
BOExplain
|
BOExplain-main/boexplain/files/__init__.py
|
from .search import fmin, fmax
__all__ = ["fmin", "fmax"]
| 58
| 18.666667
| 30
|
py
|
BOExplain
|
BOExplain-main/boexplain/files/search.py
|
import time
from pandas.api.types import is_numeric_dtype
from .cat_xform import individual_contribution
from .tpe_wrapper import TpeBo
from .stats import Experiment, Stats
# Maps the public categorical-algorithm names accepted by fmin/fmax to the
# internal algorithm identifiers understood by TpeBo.
CAT_ALG_MAP = {
    "individual_contribution": "individual_contribution_warm_start_top1",
    "categorical": "categorical",
    "categorical_warm_start": "categorical_warm_start",
}
def fmin(
    data,
    f,
    num_cols=[],
    cat_cols=[],
    columns=[],
    cat_alg=["individual_contribution"],
    n_trials=2000,
    runtime=10000,
    runs=1,
    k=5,
    random=False,
    correct_pred=None,
    increment=5,
    name="experiment_name",
    file=None,
    return_viz=False,
    use_seeds_from_paper=False,
    **kwargs,
):
    """
    Use BOExplain to minimize the objective function.

    Parameters
    ----------
    data
        pandas DataFrame of source, training, or inference data
        from which to derive an explanation.
    f
        Objective function to be minimized.
    num_cols
        Numerical columns over which to derive an explanation.
    cat_cols
        Categorical columns over which to derive an explanation.
    columns
        Columns over which to derive an explanation.
    cat_alg
        Algorithms to handle categorical parameters. Can be
        * 'individual_contribution'
        * 'categorical'
        * 'categorical_warm_start'
        See the paper for details.
    n_trials
        Maximum number of trials to perform during a run.
    runtime
        Maximum allowed time for a run in seconds.
    runs
        Number of runs to perform.
    k
        Number of TPE candidates to consider. (deprecated)
    random
        If True, perform a run using random search to
        find the constraint parameters.
    correct_pred
        If provided, will compute f-score, precision, recall,
        and jaccard similarity of the found predicates and
        the correct predicate
    increment
        How frequently (in seconds) to log results when finding the best
        result in each increment.
    name
        The name of an experiment.
    file
        File name to output statistics from the run.
    return_viz
        If True, return an Altair visualization of the objective function
        with iteration on the x-axis.
    use_seeds_from_paper
        If True, use the seeds that were used in the paper. For reproducibility.

    Returns
    -------
    The input DataFrame filtered to contain all tuples that do not
    satisfy the explanation
    """
    # BUG FIX: the search routine appends columns to these lists. Copy them
    # here so the shared mutable defaults (and any caller-supplied lists)
    # are never mutated across calls.
    num_cols = list(num_cols)
    cat_cols = list(cat_cols)
    return _drop_tuples_satisfying_optimal_predicate(
        data,
        f,
        num_cols,
        cat_cols,
        columns,
        cat_alg,
        n_trials,
        runtime,
        runs,
        k,
        random,
        correct_pred,
        increment,
        name,
        file,
        return_viz,
        use_seeds_from_paper,
        direction="minimize",
        **kwargs,
    )
def fmax(
    data,
    f,
    num_cols=[],
    cat_cols=[],
    columns=[],
    cat_alg=["individual_contribution"],
    n_trials=2000,
    runtime=10000,
    runs=1,
    k=5,
    random=False,
    correct_pred=None,
    increment=5,
    name="experiment_name",
    file=None,
    return_viz=False,
    use_seeds_from_paper=False,
    **kwargs,
):
    """
    Use BOExplain to maximize the objective function.

    Parameters
    ----------
    data
        pandas DataFrame of source, training, or inference data
        from which to derive an explanation.
    f
        Objective function to be maximized.
    num_cols
        Numerical columns over which to derive an explanation.
    cat_cols
        Categorical columns over which to derive an explanation.
    columns
        Columns over which to derive an explanation.
    cat_alg
        Algorithms to handle categorical parameters. Can be
        * 'individual_contribution'
        * 'categorical'
        * 'categorical_warm_start'
        See the paper for details.
    n_trials
        Maximum number of trials to perform during a run.
    runtime
        Maximum allowed time for a run in seconds.
    runs
        Number of runs to perform.
    k
        Number of TPE candidates to consider. (deprecated)
    random
        If True, perform a run using random search to
        find the constraint parameters.
    correct_pred
        If provided, will compute f-score, precision, recall,
        and jaccard similarity of the found predicates and
        the correct predicate
    increment
        How frequently (in seconds) to log results when finding the best
        result in each increment.
    name
        The name of an experiment.
    file
        File name to output statistics from the run.
    return_viz
        If True, return an Altair visualization of the objective function
        with iteration on the x-axis.
    use_seeds_from_paper
        If True, use the seeds that were used in the paper. For reproducibility.

    Returns
    -------
    The input DataFrame filtered to contain all tuples that do not
    satisfy the explanation
    """
    # BUG FIX: the search routine appends columns to these lists. Copy them
    # here so the shared mutable defaults (and any caller-supplied lists)
    # are never mutated across calls.
    num_cols = list(num_cols)
    cat_cols = list(cat_cols)
    return _drop_tuples_satisfying_optimal_predicate(
        data,
        f,
        num_cols,
        cat_cols,
        columns,
        cat_alg,
        n_trials,
        runtime,
        runs,
        k,
        random,
        correct_pred,
        increment,
        name,
        file,
        return_viz,
        use_seeds_from_paper,
        direction="maximize",
        **kwargs,
    )
def _drop_tuples_satisfying_optimal_predicate(
    data,
    f,
    num_cols=[],
    cat_cols=[],
    columns=[],
    cat_alg=["individual_contribution"],
    n_trials=2000,
    runtime=10000,
    runs=1,
    k=5,
    random=False,
    correct_pred=None,
    increment=5,
    name="experiment_name",
    file=None,
    return_viz=False,
    use_seeds_from_paper=False,
    direction="minimize",
    **kwargs,
):
    """Run the BOExplain search and drop the tuples matching the best predicate.

    Shared implementation behind ``fmin``/``fmax``; ``direction`` selects the
    optimization sense. See ``fmin`` for parameter documentation. Returns the
    filtered DataFrame, or ``(DataFrame, altair chart)`` when ``return_viz``.
    """
    assert direction == "minimize" or direction == "maximize"
    # BUG FIX: copy the list arguments before appending to them. The defaults
    # are shared mutable lists, so appending directly leaked columns between
    # successive calls (and mutated caller-supplied lists).
    num_cols = list(num_cols)
    cat_cols = list(cat_cols)
    # split the generic `columns` argument by dtype
    for col in columns:
        if is_numeric_dtype(data[col]):
            num_cols.append(col)
        else:
            cat_cols.append(col)
    # cast categorical columns as string type
    # NOTE(review): this writes back into the caller's DataFrame — confirm
    # the in-place cast is intended.
    if cat_cols:
        data[cat_cols] = data[cat_cols].astype(str)
    # per-column value ranges / number of unique values
    num_cols_range = [(data[col].min(), data[col].max()) for col in num_cols]
    cat_cols_n_uniq = [data[col].nunique() for col in cat_cols]
    # dataset length
    dataset_length = len(data)
    experiment = Experiment(
        num_cols,
        cat_cols,
        direction,
        n_trials,
        runs,
        correct_pred,
        name,
        file,
        num_cols_range,
        cat_cols_n_uniq,
        dataset_length,
        runtime,
        increment,
        use_seeds_from_paper,
    )
    # map public algorithm names to internal identifiers
    cat_alg = [CAT_ALG_MAP[alg] for alg in cat_alg]
    # BUG FIX: if no algorithm runs and random search is off, `df_rem` was
    # unbound and the final return raised NameError; default to the input.
    df_rem = data
    for alg in cat_alg:
        stats = Stats(experiment, alg)
        cat_val_to_indiv_cont = {}
        if cat_cols and alg in {
            "individual_contribution_warm_start_topk",
            "categorical_warm_start",
            "individual_contribution_warm_start_top1",
        }:
            start = time.time()
            # encode categorical values by their individual contribution to
            # the objective, and record how long the encoding took
            cat_val_to_indiv_cont = individual_contribution(
                data,
                objective=f,
                cat_cols=cat_cols,
                **kwargs,
            )
            stats.set_run_encoding_time(time.time() - start)
        # initialize a TpeBo object
        tpebo = TpeBo(
            df=data,
            objective=f,
            num_cols=num_cols,
            cat_cols=cat_cols,
            direction=direction,
            k=k,
            cat_alg=alg,
            cat_val_to_indiv_cont=cat_val_to_indiv_cont,
            correct_pred=correct_pred,
        )
        # run the bayesian optimization
        df_rem = tpebo.run(stats, **kwargs)
        experiment.set_experiment(stats)
    if random:
        # optional random-search baseline
        stats = Stats(experiment, None)
        tpebo = TpeBo(
            df=data,
            objective=f,
            num_cols=num_cols,
            cat_cols=cat_cols,
            direction=direction,
            k=k,
            cat_alg="random",
            cat_val_to_indiv_cont={},
            correct_pred=correct_pred,
        )
        df_rem = tpebo.random(stats, **kwargs)
        experiment.set_experiment(stats)
    if file is not None:
        experiment.output_file()
    if return_viz:
        # build the chart only when it is actually returned (it was
        # previously computed unconditionally and discarded)
        viz = experiment.visualize_results()
        return df_rem, viz
    return df_rem
| 8,685
| 24.698225
| 84
|
py
|
BOExplain
|
BOExplain-main/boexplain/files/cat_xform.py
|
import pandas as pd
import numpy as np
def individual_contribution(df, objective, cat_cols, **kwargs):
    """Compute each categorical value's individual contribution to the objective.

    For every value of every column in ``cat_cols``, drop the tuples matching
    the single-clause predicate ``col == val`` and evaluate ``objective`` on
    the remaining data. Returns ``{col: {val: objective_without_val}}``.
    """
    contributions = {}
    for col in cat_cols:
        per_value = {}
        for val in df[col].unique():
            # objective value after removing tuples satisfying `col == val`
            per_value[val] = objective(df[df[col] != val], **kwargs)
        contributions[col] = per_value
    return contributions
| 669
| 36.222222
| 94
|
py
|
BOExplain
|
BOExplain-main/boexplain/files/tpe_wrapper.py
|
import time
from itertools import product
import numpy as np
from ..optuna.optuna.samplers import tpe
from ..optuna.optuna.study import create_study
class TpeBo(object):
def __init__(
    self,
    df,
    objective,
    num_cols,
    cat_cols,
    direction,
    k,
    cat_alg,
    cat_val_to_indiv_cont,
    correct_pred,
):
    """Set up a TPE-based search over the predicate space of ``df``.

    Precomputes column domains, categorical encodings (when warm-started by
    individual contribution), the set of realizable categorical predicates,
    and the warm-start predicate list.
    """
    self.df = df
    self.objective = objective
    self.num_cols = num_cols
    self.cat_cols = cat_cols
    # +1 for minimization, -1 for maximization
    self.dir_enc = 1 if direction == "minimize" else -1
    self.direction = direction
    self.k = k
    self.cat_alg = cat_alg
    self.correct_pred = correct_pred
    self.cat_val_to_indiv_cont = cat_val_to_indiv_cont
    self.n_warm_up = 10
    # domains or ranges of the categorical and numerical columns.
    # str(min) == "<built-in function min>", so str(f)[19:22] slices out
    # "min"/"max", producing keys like "age_min" / "age_max".
    self.dom = {f"{c}_{str(f)[19:22]}": f(df[c]) for c in num_cols for f in {min, max}}
    # substring check: matches both "..._top1" and "..._topk" variants
    if "individual_contribution_warm_start" in cat_alg and cat_cols:
        # categorical columns become integer codes in [0, n_unique - 1]
        self.dom.update({f"{c}_min": 0 for c in cat_cols})
        self.dom.update({f"{c}_max": len(cat_val_to_indiv_cont[c]) - 1 for c in cat_cols})
    else:
        self.dom.update({c: list(df[c].dropna().unique()) for c in cat_cols})
    if "individual_contribution_warm_start" in cat_alg and cat_cols:
        # bidirectional maps between integer codes and categorical values,
        # where codes are ordered by ascending individual contribution
        self.code_to_cat_val, self.cat_val_to_code = {}, {}
        for col in cat_cols:
            ordered = dict(sorted(cat_val_to_indiv_cont[col].items(), key=lambda x: x[1]))
            self.code_to_cat_val[col] = dict(zip(range(len(ordered)), list(ordered.keys())))
            self.cat_val_to_code[col] = dict(zip(list(ordered.keys()), range(len(ordered))))
    if cat_cols:
        # every categorical value combination that actually occurs in the data
        preds = self.df.groupby(list(cat_cols)).size().index
        if len(self.cat_cols) == 1:
            # normalize single-column groupby keys to 1-tuples
            preds = [(pred,) for pred in preds]
        self.cat_preds = dict(zip(range(len(preds)), preds))
        self.cat_preds_set = set(preds)
        # for random
        self.uniq_cat_vals = {
            c: dict(zip(range(df[c].nunique()), df[c].unique())) for c in cat_cols
        }
    if (
        cat_alg
        in {
            "individual_contribution_warm_start_topk",
            "categorical_warm_start",
            "individual_contribution_warm_start_top1",
        }
        and cat_cols
    ):
        # score each occurring combination by the sum of its values'
        # individual contributions, and keep the n_warm_up best as
        # warm-start predicates (reverse order when maximizing)
        grp_cont = [
            (
                grp,
                sum([cat_val_to_indiv_cont[col][val] for col, val in zip(cat_cols, grp)]),
            )
            for grp in self.cat_preds_set
        ]
        grp_cont = sorted(grp_cont, key=lambda x: x[1], reverse=(direction == "maximize"))
        self.good_preds = [grp for (grp, _) in grp_cont[: self.n_warm_up]]
    if self.correct_pred:
        # materialize the ground-truth tuple set for quality metrics
        # (tuples_satisfying_pred is defined elsewhere in this class)
        self.df_true = self.tuples_satisfying_pred(correct_pred)
def candidates_to_pred(self, cnds):
while self.run_stats["iter_cnt"] < self.n_warm_up:
pred = {}
if self.cat_cols:
if self.cat_alg in {
"individual_contribution_warm_start_topk",
"categorical_warm_start",
"individual_contribution_warm_start_top1",
}:
pred.update(zip(self.cat_cols, self.good_preds[self.run_stats["iter_cnt"]]))
else:
pred.update(
zip(self.cat_cols, self.cat_preds[np.random.randint(len(self.cat_preds))])
)
if not self.num_cols: # don't do a data scan with only categorical columns
return pred
for col in self.num_cols:
if np.issubdtype(self.df[col].dtype, np.signedinteger) or np.issubdtype(
self.df[col].dtype, np.unsignedinteger
):
pred[f"{col}_min"] = np.random.randint(
cnds[f"{col}_min_dist"].low, cnds[f"{col}_min_dist"].high
)
pred[f"{col}_len"] = np.random.randint(
0, cnds[f"{col}_len_dist"].high - cnds[f"{col}_len_dist"].low
)
elif np.issubdtype(self.df[col].dtype, np.floating):
pred[f"{col}_min"] = np.random.uniform(
cnds[f"{col}_min_dist"].low, cnds[f"{col}_min_dist"].high
)
pred[f"{col}_len"] = np.random.uniform(
0, cnds[f"{col}_len_dist"].high - cnds[f"{col}_len_dist"].low
)
if self.is_valid(pred):
return pred
# all combinations of predicates
preds = list(product(*[cnds[f"{name}_smpls"] for name in cnds["names"]]))
# normalize
for n in cnds["names"]:
cnds[f"{n}_scrs"] = [scr / sum(cnds[f"{n}_scrs"]) for scr in cnds[f"{n}_scrs"]]
# all multiplied combinations of scores
scrs = [np.prod(scrs) for scrs in product(*[cnds[f"{n}_scrs"] for n in cnds["names"]])]
# sorted list of (pred, score)
pred_lst = sorted(list(zip(preds, scrs)), key=lambda x: x[1], reverse=True)
for pred, _ in pred_lst:
if self.cat_cols:
if self.cat_alg == "individual_contribution_warm_start_topk":
pred = (
tuple(
[
self.code_to_cat_val[col][val]
for col, val in zip(self.cat_cols, pred[: len(self.cat_cols)])
]
)
+ pred[len(self.cat_cols) :]
)
if pred[: len(self.cat_cols)] not in self.cat_preds_set:
continue
pred = dict(zip(cnds["names"], pred))
if self.is_valid(pred):
return pred
print("Random predicate")
while True:
pred = {}
if self.cat_cols:
pred.update(
zip(self.cat_cols, self.cat_preds[np.random.randint(len(self.cat_cols))])
)
if not self.num_cols:
return pred
for col in self.num_cols:
if np.issubdtype(self.df[col].dtype, np.signedinteger) or np.issubdtype(
self.df[col].dtype, np.unsignedinteger
):
pred[f"{col}_min"] = np.random.randint(
cnds[f"{col}_min_dist"].low, cnds[f"{col}_min_dist"].high
)
pred[f"{col}_len"] = np.random.randint(
0, cnds[f"{col}_len_dist"].high - cnds[f"{col}_len_dist"].low
)
elif np.issubdtype(self.df[col].dtype, np.floating):
pred[f"{col}_min"] = np.random.uniform(
cnds[f"{col}_min_dist"].low, cnds[f"{col}_min_dist"].high
)
pred[f"{col}_len"] = np.random.uniform(
0, cnds[f"{col}_len_dist"].high - cnds[f"{col}_len_dist"].low
)
if self.is_valid(pred):
return pred
    def objective_wrapper(self, trial, **kwargs):
        """Run one optimization trial: sample a predicate, evaluate the
        objective on the data with matching tuples removed, and update run
        statistics.

        Returns the objective value for the study (a large sentinel in the
        optimization direction when the predicate matches no tuples).
        """
        # Snapshot the best-so-far values into the wall-clock time arrays each
        # time another time increment elapses (metric-computation time in
        # ``add_on`` is excluded from the clock).
        if time.time() - self.add_on - self.start >= (self.step + 1) * self.incr:
            self.time_array[self.step] = self.opt_res[self.run_stats["iter_cnt"] - 1]
            if self.correct_pred:
                self.precision_time_array[self.step] = self.run_stats["precision"][
                    self.run_stats["iter_cnt"] - 1
                ]
                self.recall_time_array[self.step] = self.run_stats["recall"][
                    self.run_stats["iter_cnt"] - 1
                ]
                self.f_score_time_array[self.step] = self.run_stats["f_score"][
                    self.run_stats["iter_cnt"] - 1
                ]
                self.jaccard_time_array[self.step] = self.run_stats["jaccard"][
                    self.run_stats["iter_cnt"] - 1
                ]
            self.step += 1
        cnds = {"names": []}  # candidate predicate constraint values dict
        dom = self.dom  # parameter domains
        pred = {}
        # Sample the categorical part of the predicate.
        for c in self.cat_cols:
            if self.cat_alg in {"categorical", "categorical_warm_start", "categorical_topk"}:
                param_cnds = trial.suggest_categorical(c, dom[c])
            elif "individual_contribution_warm_start" in self.cat_alg:
                # categories are encoded as integer codes ordered by contribution
                param_cnds = trial.suggest_int(c, dom[f"{c}_min"], dom[f"{c}_max"])
            # param_cnds is (value, name, samples, scores, distribution) from
            # the custom sampler — presumably; verify against the tpe module.
            cnds.update(zip([f"{c}_{x}" for x in ("smpls", "scrs", "dist")], param_cnds[2:5]))
            cnds["names"].append(param_cnds[1])
            pred[c] = param_cnds[0]
        # Sample the numerical part: one interval [min, min + len] per column.
        for c in self.num_cols:
            if np.issubdtype(self.df[c].dtype, np.signedinteger) or np.issubdtype(
                self.df[c].dtype, np.unsignedinteger
            ):
                min_cnds = trial.suggest_int(f"{c}_min", dom[f"{c}_min"], dom[f"{c}_max"])
                len_cnds = trial.suggest_int(f"{c}_len", 0, dom[f"{c}_max"] - dom[f"{c}_min"])
            elif np.issubdtype(self.df[c].dtype, np.floating):
                min_cnds = trial.suggest_uniform(f"{c}_min", dom[f"{c}_min"], dom[f"{c}_max"])
                len_cnds = trial.suggest_uniform(f"{c}_len", 0, dom[f"{c}_max"] - dom[f"{c}_min"])
            cnds.update(zip([f"{c}_min_{x}" for x in ("smpls", "scrs", "dist")], min_cnds[2:5]))
            cnds.update(zip([f"{c}_len_{x}" for x in ("smpls", "scrs", "dist")], len_cnds[2:5]))
            cnds["names"].extend([min_cnds[1], len_cnds[1]])
            pred.update({f"{c}_min": min_cnds[0], f"{c}_len": len_cnds[0]})
        # For top-k / warm-start variants, re-rank the sampler's candidates and
        # pick the best valid predicate instead of the raw suggestion.
        if self.cat_cols and (
            self.cat_alg == "categorical_topk"
            or self.cat_alg == "individual_contribution_warm_start_topk"
            or self.cat_alg == "individual_contribution_warm_start_top1"
            and self.run_stats["iter_cnt"] < self.n_warm_up
            or self.cat_alg == "categorical_warm_start"
            and self.run_stats["iter_cnt"] < self.n_warm_up
        ):
            # begin_metric_comps = time.time()
            pred = self.candidates_to_pred(cnds)
            # self.add_on += time.time() - begin_metric_comps
        if self.cat_cols and (
            self.cat_alg == "individual_contribution_warm_start_topk"
            or self.cat_alg == "individual_contribution_warm_start_top1"
            and self.run_stats["iter_cnt"] < self.n_warm_up
        ):
            # report categorical values back to the sampler as integer codes
            trial.update_params(
                {
                    k: (self.cat_val_to_code[k][v] if k in self.cat_cols else v)
                    for k, v in pred.items()
                }
            )
        else:
            trial.update_params(pred)
        if (
            self.cat_cols
            and self.cat_alg == "individual_contribution_warm_start_top1"
            and self.run_stats["iter_cnt"] >= self.n_warm_up
        ):
            # decode integer codes back into raw category values
            pred = {
                k: (self.code_to_cat_val[k][v] if k in self.cat_cols else v)
                for k, v in pred.items()
            }
        # keep track of repeated values
        if (dpl_check := tuple(pred.items())) not in self.run_stats["dpls"]:
            self.run_stats["dpls"].add(dpl_check)
        else:
            self.run_stats["dpl_cnt"] += 1
        # print(pred)
        df_rem_pred = self.drop_tuples_satisfying_pred(pred)
        # if no tuples satisfy the predicate, return. This should never execute
        if len(df_rem_pred) == len(self.df):
            self.run_stats["zero_tup_cnt"] += 1
            # carry the previous best values forward for this iteration
            self.opt_res[self.run_stats["iter_cnt"]] = self.opt_res[self.run_stats["iter_cnt"] - 1]
            if self.correct_pred:
                self.run_stats["precision"][self.run_stats["iter_cnt"]] = self.run_stats[
                    "precision"
                ][self.run_stats["iter_cnt"] - 1]
                self.run_stats["recall"][self.run_stats["iter_cnt"]] = self.run_stats["recall"][
                    self.run_stats["iter_cnt"] - 1
                ]
                self.run_stats["f_score"][self.run_stats["iter_cnt"]] = self.run_stats["f_score"][
                    self.run_stats["iter_cnt"] - 1
                ]
                self.run_stats["jaccard"][self.run_stats["iter_cnt"]] = self.run_stats["jaccard"][
                    self.run_stats["iter_cnt"] - 1
                ]
            self.run_stats["iter_cnt"] += 1
            return self.dir_enc * 1e9  # large penalty in the optimization direction
        result = self.objective(df_rem_pred, **kwargs)
        # record results
        if (
            self.dir_enc == 1
            and result < self.opt_res.min()
            or self.dir_enc == -1
            and result > self.opt_res.max()
        ):
            self.opt_res[self.run_stats["iter_cnt"]] = result
            self.run_stats["best_pred"] = pred
        else:
            self.opt_res[self.run_stats["iter_cnt"]] = self.opt_res[self.run_stats["iter_cnt"] - 1]
        if self.correct_pred:
            # time spent computing quality metrics is excluded from the run clock
            begin_metric_comps = time.time()
            df_only_pred = self.tuples_satisfying_pred(pred)
            if (prec := self.precision(df_only_pred)) >= self.run_stats["precision"].max():
                self.run_stats["precision"][self.run_stats["iter_cnt"]] = prec
            else:
                self.run_stats["precision"][self.run_stats["iter_cnt"]] = self.run_stats[
                    "precision"
                ][self.run_stats["iter_cnt"] - 1]
            if (recall := self.recall(df_only_pred)) >= self.run_stats["recall"].max():
                self.run_stats["recall"][self.run_stats["iter_cnt"]] = recall
            else:
                self.run_stats["recall"][self.run_stats["iter_cnt"]] = self.run_stats["recall"][
                    self.run_stats["iter_cnt"] - 1
                ]
            if (f_score := self.f_score(df_only_pred)) >= self.run_stats["f_score"].max():
                self.run_stats["f_score"][self.run_stats["iter_cnt"]] = f_score
            else:
                self.run_stats["f_score"][self.run_stats["iter_cnt"]] = self.run_stats["f_score"][
                    self.run_stats["iter_cnt"] - 1
                ]
            if (jaccard := self.jaccard(df_only_pred)) >= self.run_stats["jaccard"].max():
                self.run_stats["jaccard"][self.run_stats["iter_cnt"]] = jaccard
            else:
                self.run_stats["jaccard"][self.run_stats["iter_cnt"]] = self.run_stats["jaccard"][
                    self.run_stats["iter_cnt"] - 1
                ]
            self.add_on += time.time() - begin_metric_comps
            self.study.add_on = self.add_on
        self.run_stats["iter_cnt"] += 1
        return result
    def run(self, stats, **kwargs):
        """Run the sampler-driven optimization ``stats.runs`` times, recording
        per-run statistics into ``stats``.

        Returns the dataframe with the tuples matching the overall best
        predicate removed.
        """
        for i in range(stats.runs):
            self.new_run_stats(stats.n_trials)
            self.opt_res = stats.get_run_opt_res_array()
            np.random.seed(stats.seeds[i])
            self.incr = stats.increment
            self.time_array = stats.get_run_time_array()
            self.precision_time_array = stats.get_run_time_array()
            self.recall_time_array = stats.get_run_time_array()
            self.f_score_time_array = stats.get_run_time_array()
            self.jaccard_time_array = stats.get_run_time_array()
            # time steps already consumed by encoding are marked unavailable
            self.step = int(stats.encoding_time // self.incr)
            self.time_array[: self.step] = np.nan
            self.precision_time_array[: self.step] = np.nan
            self.recall_time_array[: self.step] = np.nan
            self.f_score_time_array[: self.step] = np.nan
            self.jaccard_time_array[: self.step] = np.nan
            self.add_on = 0
            # minimize the objective over the space
            self.study = create_study(
                sampler=tpe.TPESampler(seed=stats.seeds[i], k=self.k),
                direction=self.direction,
                seed=stats.seeds[i],
            )
            # shift the clock so encoding time counts against the budget
            self.start = time.time() - stats.encoding_time
            self.study.optimize(
                self.objective_wrapper,
                n_trials=stats.n_trials,
                timeout=stats.runtime - stats.encoding_time,
                **kwargs,
            )
            stats.set_run_time(time.time() - self.start, i)
            # record the final best values in the last slot of each time array
            self.time_array[len(self.time_array) - 1] = self.opt_res[
                self.run_stats["iter_cnt"] - 1
            ]
            self.precision_time_array[len(self.precision_time_array) - 1] = self.run_stats[
                "precision"
            ][self.run_stats["iter_cnt"] - 1]
            self.recall_time_array[len(self.recall_time_array) - 1] = self.run_stats["recall"][
                self.run_stats["iter_cnt"] - 1
            ]
            self.f_score_time_array[len(self.f_score_time_array) - 1] = self.run_stats["f_score"][
                self.run_stats["iter_cnt"] - 1
            ]
            self.jaccard_time_array[len(self.jaccard_time_array) - 1] = self.run_stats["jaccard"][
                self.run_stats["iter_cnt"] - 1
            ]
            # pretty-print the best predicate found in this run
            output = []
            for col in self.cat_cols:
                output.append(f"{col} = \"{self.run_stats['best_pred'][col]}\"")
            for col in self.num_cols:
                minv, leng = (
                    self.run_stats["best_pred"][f"{col}_min"],
                    self.run_stats["best_pred"][f"{col}_len"],
                )
                maxv = min(minv + leng, self.df[col].max())
                output.append(f"{minv} <= {col} <= {maxv}")
            print("Predicate:", " and ".join(output))
            best_params = self.run_stats["best_pred"]
            stats.set_run_n_duplicates(self.run_stats["dpl_cnt"], i)
            stats.set_run_n_zero_tup_preds(self.run_stats["zero_tup_cnt"], i)
            stats.set_run_opt_res(self.opt_res, i)
            stats.set_run_preds(best_params, i)
            stats.set_run_iter_completed(self.run_stats["iter_cnt"], i)
            stats.set_min_iter_completed(self.run_stats["iter_cnt"])
            stats.set_run_best_objective_value(self.study.best_value, i)
            stats.set_example_best_predicate(best_params, i)
            df_rem_pred = self.drop_tuples_satisfying_pred(best_params)
            stats.set_run_n_tuples_removed_from_data(len(self.df) - len(df_rem_pred), i)
            if self.correct_pred:
                # quality metrics are only available when ground truth is known
                df_only_pred = self.tuples_satisfying_pred(best_params)
                stats.set_precision(self.run_stats["precision"], i)
                stats.set_recall(self.run_stats["recall"], i)
                stats.set_f_score(self.run_stats["f_score"], i)
                stats.set_jaccard(self.run_stats["jaccard"], i)
                stats.set_final_precision(self.precision(df_only_pred), i)
                stats.set_final_recall(self.recall(df_only_pred), i)
                stats.set_final_f_score(self.f_score(df_only_pred), i)
                stats.set_final_jaccard(self.jaccard(df_only_pred), i)
                stats.set_precision_time_array(self.precision_time_array, i)
                stats.set_recall_time_array(self.recall_time_array, i)
                stats.set_f_score_time_array(self.f_score_time_array, i)
                stats.set_jaccard_time_array(self.jaccard_time_array, i)
            stats.set_run_time_array(self.time_array, i)
            stats.set_add_on(self.add_on, i)
            # stats.output_temp_file()
            # stats.standard_output()
        return self.drop_tuples_satisfying_pred(stats.example_best_predicate)
    def random(self, stats, **kwargs):
        """Random-search baseline: draw uniformly random predicates and record
        the same per-run statistics as ``run``.

        Returns the dataframe with the tuples matching the overall best
        predicate removed.
        """
        for i in range(stats.runs):
            self.new_run_stats(stats.n_trials)
            opt_res = stats.get_run_opt_res_array()
            np.random.seed(stats.seeds[i])
            best_pred, best_value = None, None
            # time for averaging
            start = time.time()
            add_on = 0  # time for computing the metrics
            incr, step = stats.increment, 0
            time_array = stats.get_run_time_array()
            precision_time_array = stats.get_run_time_array()
            recall_time_array = stats.get_run_time_array()
            f_score_time_array = stats.get_run_time_array()
            jaccard_time_array = stats.get_run_time_array()
            for _ in range(stats.n_trials):
                # snapshot best-so-far values at each wall-clock increment
                if time.time() - add_on - start >= (step + 1) * incr:
                    time_array[step] = opt_res[self.run_stats["iter_cnt"] - 1]
                    if self.correct_pred:
                        precision_time_array[step] = self.run_stats["precision"][
                            self.run_stats["iter_cnt"] - 1
                        ]
                        recall_time_array[step] = self.run_stats["recall"][
                            self.run_stats["iter_cnt"] - 1
                        ]
                        f_score_time_array[step] = self.run_stats["f_score"][
                            self.run_stats["iter_cnt"] - 1
                        ]
                        jaccard_time_array[step] = self.run_stats["jaccard"][
                            self.run_stats["iter_cnt"] - 1
                        ]
                    step += 1
                    if step >= len(time_array):
                        break
                # draw a uniformly random predicate
                pred = {}
                for col in self.cat_cols:
                    pred[col] = self.uniq_cat_vals[col][
                        np.random.randint(len(self.uniq_cat_vals[col]))
                    ]
                for col in self.num_cols:
                    if np.issubdtype(self.df[col].dtype, np.signedinteger) or np.issubdtype(
                        self.df[col].dtype, np.unsignedinteger
                    ):
                        pred[f"{col}_min"] = np.random.randint(
                            self.dom[f"{col}_min"], self.dom[f"{col}_max"]
                        )
                        pred[f"{col}_len"] = np.random.randint(
                            0, self.dom[f"{col}_max"] - self.dom[f"{col}_min"]
                        )
                    elif np.issubdtype(self.df[col].dtype, np.floating):
                        pred[f"{col}_min"] = np.random.uniform(
                            self.dom[f"{col}_min"], self.dom[f"{col}_max"]
                        )
                        pred[f"{col}_len"] = np.random.uniform(
                            0, self.dom[f"{col}_max"] - self.dom[f"{col}_min"]
                        )
                # keep track of repeated predicates
                if (dpl_check := tuple(pred.items())) not in self.run_stats["dpls"]:
                    self.run_stats["dpls"].add(dpl_check)
                else:
                    self.run_stats["dpl_cnt"] += 1
                df_rem_pred = self.drop_tuples_satisfying_pred(pred)
                # predicate matches no tuples: carry previous values forward
                if len(df_rem_pred) == len(self.df):
                    self.run_stats["zero_tup_cnt"] += 1
                    opt_res[self.run_stats["iter_cnt"]] = opt_res[self.run_stats["iter_cnt"] - 1]
                    if self.correct_pred:
                        self.run_stats["precision"][self.run_stats["iter_cnt"]] = self.run_stats[
                            "precision"
                        ][self.run_stats["iter_cnt"] - 1]
                        self.run_stats["recall"][self.run_stats["iter_cnt"]] = self.run_stats[
                            "recall"
                        ][self.run_stats["iter_cnt"] - 1]
                        self.run_stats["f_score"][self.run_stats["iter_cnt"]] = self.run_stats[
                            "f_score"
                        ][self.run_stats["iter_cnt"] - 1]
                        self.run_stats["jaccard"][self.run_stats["iter_cnt"]] = self.run_stats[
                            "jaccard"
                        ][self.run_stats["iter_cnt"] - 1]
                    self.run_stats["iter_cnt"] += 1
                    continue
                result = self.objective(df_rem_pred, **kwargs)
                # record results
                if (
                    self.dir_enc == 1
                    and result < opt_res[self.run_stats["iter_cnt"] - 1]
                    or self.dir_enc == -1
                    and result > opt_res[self.run_stats["iter_cnt"] - 1]
                ):
                    opt_res[self.run_stats["iter_cnt"]] = result
                    best_pred, best_value = pred, result
                else:
                    opt_res[self.run_stats["iter_cnt"]] = opt_res[self.run_stats["iter_cnt"] - 1]
                if self.correct_pred:
                    # metric computation time is excluded from the run clock
                    begin_metrics_comps = time.time()
                    df_only_pred = self.tuples_satisfying_pred(pred)
                    if (prec := self.precision(df_only_pred)) >= self.run_stats["precision"].max():
                        self.run_stats["precision"][self.run_stats["iter_cnt"]] = prec
                    else:
                        self.run_stats["precision"][self.run_stats["iter_cnt"]] = self.run_stats[
                            "precision"
                        ][self.run_stats["iter_cnt"] - 1]
                    if (recall := self.recall(df_only_pred)) >= self.run_stats["recall"].max():
                        self.run_stats["recall"][self.run_stats["iter_cnt"]] = recall
                    else:
                        self.run_stats["recall"][self.run_stats["iter_cnt"]] = self.run_stats[
                            "recall"
                        ][self.run_stats["iter_cnt"] - 1]
                    if (f_score := self.f_score(df_only_pred)) >= self.run_stats["f_score"].max():
                        self.run_stats["f_score"][self.run_stats["iter_cnt"]] = f_score
                    else:
                        self.run_stats["f_score"][self.run_stats["iter_cnt"]] = self.run_stats[
                            "f_score"
                        ][self.run_stats["iter_cnt"] - 1]
                    if (jaccard := self.jaccard(df_only_pred)) >= self.run_stats["jaccard"].max():
                        self.run_stats["jaccard"][self.run_stats["iter_cnt"]] = jaccard
                    else:
                        self.run_stats["jaccard"][self.run_stats["iter_cnt"]] = self.run_stats[
                            "jaccard"
                        ][self.run_stats["iter_cnt"] - 1]
                    add_on += time.time() - begin_metrics_comps
                self.run_stats["iter_cnt"] += 1
            # record the final best values in the last slot of each time array
            time_array[len(time_array) - 1] = opt_res[self.run_stats["iter_cnt"] - 1]
            precision_time_array[len(precision_time_array) - 1] = self.run_stats["precision"][
                self.run_stats["iter_cnt"] - 1
            ]
            recall_time_array[len(recall_time_array) - 1] = self.run_stats["recall"][
                self.run_stats["iter_cnt"] - 1
            ]
            f_score_time_array[len(f_score_time_array) - 1] = self.run_stats["f_score"][
                self.run_stats["iter_cnt"] - 1
            ]
            jaccard_time_array[len(jaccard_time_array) - 1] = self.run_stats["jaccard"][
                self.run_stats["iter_cnt"] - 1
            ]
            stats.set_run_time(time.time() - start, i)
            stats.set_run_n_duplicates(self.run_stats["dpl_cnt"], i)
            stats.set_run_n_zero_tup_preds(self.run_stats["zero_tup_cnt"], i)
            stats.set_run_opt_res(opt_res, i)
            stats.set_run_preds(best_pred, i)
            stats.set_run_iter_completed(self.run_stats["iter_cnt"], i)
            stats.set_min_iter_completed(self.run_stats["iter_cnt"])
            stats.set_run_best_objective_value(best_value, i)
            stats.set_example_best_predicate(best_pred, i)
            df_rem_pred = self.drop_tuples_satisfying_pred(best_pred)
            stats.set_run_n_tuples_removed_from_data(len(self.df) - len(df_rem_pred), i)
            if self.correct_pred:
                df_only_pred = self.tuples_satisfying_pred(best_pred)
                stats.set_precision(self.run_stats["precision"], i)
                stats.set_recall(self.run_stats["recall"], i)
                stats.set_f_score(self.run_stats["f_score"], i)
                stats.set_jaccard(self.run_stats["jaccard"], i)
                stats.set_final_precision(self.precision(df_only_pred), i)
                stats.set_final_recall(self.recall(df_only_pred), i)
                stats.set_final_f_score(self.f_score(df_only_pred), i)
                stats.set_final_jaccard(self.jaccard(df_only_pred), i)
                stats.set_precision_time_array(precision_time_array, i)
                stats.set_recall_time_array(recall_time_array, i)
                stats.set_f_score_time_array(f_score_time_array, i)
                stats.set_jaccard_time_array(jaccard_time_array, i)
            stats.set_run_time_array(time_array, i)
            stats.set_add_on(add_on, i)
            # stats.output_temp_file()
            # stats.standard_output()
        return self.drop_tuples_satisfying_pred(stats.example_best_predicate)
def drop_tuples_satisfying_pred(self, pred):
# list of constrained dataframes for doing boolean logic over
cstrs = [self.df[col] < pred[f"{col}_min"] for col in self.num_cols]
cstrs += [self.df[col] > pred[f"{col}_min"] + pred[f"{col}_len"] for col in self.num_cols]
cstrs += [self.df[col] != pred[col] for col in self.cat_cols]
# this dataframe has removed all tuples that satisfy the predicate
return self.df[np.logical_or.reduce(cstrs)]
def tuples_satisfying_pred(self, pred):
# list of constrained dataframes for doing boolean logic over
cstrs = [self.df[col] >= pred[f"{col}_min"] for col in self.num_cols]
cstrs += [self.df[col] <= pred[f"{col}_min"] + pred[f"{col}_len"] for col in self.num_cols]
cstrs += [self.df[col] == pred[col] for col in self.cat_cols]
# this dataframe has only all tuples that satisfy the predicate
return self.df[np.logical_and.reduce(cstrs)]
def is_valid(self, pred):
return 0 < len(self.tuples_satisfying_pred(pred)) < len(self.df)
# def drop_tuples_satisfying_pred(self, pred):
# # numerical constraints
# num = [
# f"`{col}` < {pred[f'{col}_min']} | `{col}` > {pred[f'{col}_min'] + pred[f'{col}_len']}"
# for col in self.num_cols
# ]
# # categorical constraints
# cat = [f'`{col}` != "{pred[col]}"' for col in self.cat_cols]
# # return tuples that satisfy the predicate
# return self.df.query(" | ".join(num + cat))
# def tuples_satisfying_pred(self, pred):
# # numerical constraints
# num = [
# f"`{col}` >= {pred[f'{col}_min']} & `{col}` <= {pred[f'{col}_min'] + pred[f'{col}_len']}"
# for col in self.num_cols
# ]
# # categorical constraints
# cat = [f'`{col}` == "{pred[col]}"' for col in self.cat_cols]
# # return tuples that satisfy the predicate
# return self.df.query(" & ".join(num + cat))
def precision(self, df_only_pred):
# precision
return len(set(df_only_pred.index).intersection(set(self.df_true.index))) / len(
df_only_pred.index
)
def recall(self, df_only_pred):
# recall
return len(set(df_only_pred.index).intersection(set(self.df_true.index))) / len(
self.df_true.index
)
def f_score(self, df_only_pred):
# precision
prec = len(set(df_only_pred.index).intersection(set(self.df_true.index))) / len(
df_only_pred.index
)
# recall
rec = len(set(df_only_pred.index).intersection(set(self.df_true.index))) / len(
self.df_true.index
)
return 2 * prec * rec / (prec + rec + 1e-9)
def jaccard(self, df_only_pred):
return len(set(df_only_pred.index).intersection(set(self.df_true.index))) / len(
set(df_only_pred.index).union(set(self.df_true.index))
)
def new_run_stats(self, n_trials):
self.run_stats = {
"iter_cnt": 0,
"dpls": set(),
"dpl_cnt": 0,
"zero_tup_cnt": 0,
"precision": np.zeros(n_trials),
"recall": np.zeros(n_trials),
"f_score": np.zeros(n_trials),
"jaccard": np.zeros(n_trials),
}
| 32,427
| 43.728276
| 103
|
py
|
SPMC_VideoSR
|
SPMC_VideoSR-master/main_videosr_deploy_2x3f.py
|
import os
import time
import glob
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import control_flow_ops
import scipy.misc
import random
import subprocess
from datetime import datetime
from math import ceil
# from modules import BasicConvLSTMCell
# from modules.model_easyflow import *
from modules.videosr_ops_lite import *
# Pin this process to the GPU with the most free memory (parsed from nvidia-smi output).
os.environ["CUDA_VISIBLE_DEVICES"]=str(np.argmax( [int(x.split()[2]) for x in subprocess.Popen("nvidia-smi -q -d Memory | grep -A4 GPU | grep Free", shell=True, stdout=subprocess.PIPE).stdout.readlines()]))
# Test-sequence selection: the last assignment wins ('city' here).
DATA_TEST='./data/test/calendar'
DATA_TEST='./data/test/city'
# DATA_TEST='./data/test/hitachi_isee5_001'
DATA_TRAIN='./data/train/'
# Canvas size used by check_img_size; matches the 240x320 frozen graph
# ('spmc_240_320_2x3f.pb') loaded in VIDEOSR.test.
img_height = 240
img_width = 320
def check_img_size(input_img, out_height=None, out_width=None):
    """Place ``input_img`` into the top-left corner of a zero-filled canvas.

    Pads every frame up to a fixed canvas size so all inputs fed to the
    frozen graph share identical spatial dimensions.

    Args:
        input_img: H x W x C image array, assumed no larger than the canvas
            (a larger input raises a broadcast error, as before).
        out_height: canvas height; defaults to the module-level ``img_height``.
        out_width: canvas width; defaults to the module-level ``img_width``.

    Returns:
        Array of shape (out_height, out_width, C) with the original pixels in
        the top-left corner and zeros elsewhere.
    """
    if out_height is None:
        out_height = img_height
    if out_width is None:
        out_width = img_width
    input_shape = input_img.shape
    output_shape = list(input_shape)
    output_shape[0] = out_height
    output_shape[1] = out_width
    output_img = np.zeros(tuple(output_shape))
    output_img[:input_shape[0], :input_shape[1], :] = input_img
    return output_img
class VIDEOSR(object):
    """Runs a frozen SPMC video super-resolution graph (2x upscale, 3 frames).

    NOTE(review): this module is Python 2 / TensorFlow 1.x code
    (print statements, xrange, tf.placeholder, tf.GraphDef).
    """
    def __init__(self):
        # Defaults; test() overrides the scale factor with its own argument.
        self.num_frames = 3
        self.scale_factor = 4
    def test(self, dataPath=None, scale_factor=2, num_frames=3):
        """Super-resolve every frame under DATA_TEST and write PNGs to disk.

        The ``dataPath`` argument is ignored: the path is taken from the
        module-level DATA_TEST constant.
        """
        import scipy.misc
        dataPath = DATA_TEST
        inList = sorted(glob.glob(os.path.join(dataPath, 'input{}/*.png').format(scale_factor)))
        #inp = [check_img_size(scipy.misc.imread(i).astype(np.float32)) / 255.0 for i in inList]
        print 'Testing path: {}'.format(dataPath)
        print '# of testing frames: {}'.format(len(inList))
        DATA_TEST_OUT = DATA_TEST+'_SR_{}'.format(datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
        os.mkdir(DATA_TEST_OUT)
        cnt = 0
        self.scale_factor = scale_factor
        reuse = False
        for idx0 in xrange(len(inList)):
            cnt += 1
            # half window size (integer division under Python 2)
            T = num_frames / 2
            # build the temporal window around idx0, replicating edge frames
            # at the sequence boundaries; each frame is padded to the fixed
            # canvas and scaled to [0, 1]
            imgs = [check_img_size(scipy.misc.imread(inList[0]).astype(np.float32)) / 255.0 for i in xrange(idx0 - T, 0)]
            imgs.extend([check_img_size(scipy.misc.imread(inList[i]).astype(np.float32)) / 255.0 for i in xrange(max(0, idx0 - T), idx0)])
            imgs.extend([check_img_size(scipy.misc.imread(inList[i]).astype(np.float32)) / 255.0 for i in xrange(idx0, min(len(inList), idx0 + T + 1))])
            imgs.extend([check_img_size(scipy.misc.imread(inList[-1]).astype(np.float32)) / 255.0 for i in xrange(idx0 + T, len(inList) - 1, -1)])
            #imgs = [inp[0] for i in xrange(idx0 - T, 0)]
            #imgs.extend([inp[i] for i in xrange(max(0, idx0 - T), idx0)])
            #imgs.extend([inp[i] for i in xrange(idx0, min(len(inList), idx0 + T + 1))])
            #imgs.extend([inp[-1] for i in xrange(idx0 + T, len(inList) - 1, -1)])
            dims = imgs[0].shape
            if len(dims) == 2:
                imgs = [np.expand_dims(i, -1) for i in imgs]
            h, w, c = imgs[0].shape
            out_h = h * scale_factor
            out_w = w * scale_factor
            # pad spatial dims up to a multiple of 4 with edge replication
            padh = int(ceil(h / 4.0) * 4.0 - h)
            padw = int(ceil(w / 4.0) * 4.0 - w)
            imgs = [np.pad(i, [[0, padh], [0, padw], [0, 0]], 'edge') for i in imgs]
            imgs = np.expand_dims(np.stack(imgs, axis=0), 0)
            if idx0 == 0:
                # build the graph once, on the first frame
                frames_lr = tf.placeholder(dtype=tf.float32, shape=imgs.shape)
                frames_ref_ycbcr = rgb2ycbcr(frames_lr[:, T:T + 1, :, :, :])
                frames_ref_ycbcr = tf.tile(frames_ref_ycbcr, [1, num_frames, 1, 1, 1])
                with open('spmc_240_320_2x3f.pb', 'rb') as f:
                    graph_def = tf.GraphDef()
                    graph_def.ParseFromString(f.read())
                    output = tf.import_graph_def(graph_def, input_map={'Placeholder:0': frames_lr}, return_elements=['output:0'])
                    output = output[0]
                    print(output.get_shape())
                if len(dims) == 3:
                    # the graph outputs Y only: upscale Cb/Cr separately and
                    # convert the stacked channels back to RGB
                    output_rgb = ycbcr2rgb(tf.concat([output, resize_images(frames_ref_ycbcr,
                                                                            [(h + padh) * scale_factor,
                                                                             (w + padw) * scale_factor],
                                                                            method=2)[:, :, :, :, 1:3]], -1))
                else:
                    output_rgb = output
                # crop away the padding introduced above
                output = output[:, :, :out_h, :out_w, :]
                output_rgb = output_rgb[:, :, :out_h, :out_w, :]
            if cnt == 1:
                sess = tf.Session()
                reuse = True
            case_path = dataPath.split('/')[-1]
            print 'Testing - ', case_path, len(imgs)
            [imgs_hr, imgs_hr_rgb] = sess.run([output, output_rgb], feed_dict={frames_lr: imgs})
            scipy.misc.imsave(os.path.join(DATA_TEST_OUT, 'y_%03d.png'%(idx0)),
                              im2uint8(imgs_hr[0, -1, :, :, 0]))
            if len(dims) == 3:
                scipy.misc.imsave(os.path.join(DATA_TEST_OUT, 'rgb_%03d.png'%(idx0)),
                                  im2uint8(imgs_hr_rgb[0, -1, :, :, :]))
        print 'SR results path: {}'.format(DATA_TEST_OUT)
def main(_):
    """Entry point handed to tf.app.run: build a VIDEOSR model and test it."""
    VIDEOSR().test()
if __name__ == '__main__':
    tf.app.run()  # TF1 app runner: parses flags and dispatches to main(_)
| 5,347
| 41.444444
| 206
|
py
|
SPMC_VideoSR
|
SPMC_VideoSR-master/main_videosr_deploy_4x3f.py
|
import os
import time
import glob
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import control_flow_ops
import scipy.misc
import random
import subprocess
from datetime import datetime
from math import ceil
# from modules import BasicConvLSTMCell
# from modules.model_easyflow import *
from modules.videosr_ops_lite import *
# Pin this process to the GPU with the most free memory (parsed from nvidia-smi output).
os.environ["CUDA_VISIBLE_DEVICES"]=str(np.argmax( [int(x.split()[2]) for x in subprocess.Popen("nvidia-smi -q -d Memory | grep -A4 GPU | grep Free", shell=True, stdout=subprocess.PIPE).stdout.readlines()]))
# Test-sequence path; alternate sequence kept commented out.
DATA_TEST='./data/test/calendar'
# DATA_TEST='./data/test/hitachi_isee5_001'
DATA_TRAIN='./data/train/'
class VIDEOSR(object):
    """Runs a frozen SPMC video super-resolution graph (4x upscale, 3 frames).

    NOTE(review): this module is Python 2 / TensorFlow 1.x code
    (print statements, xrange, tf.placeholder, tf.GraphDef).
    """
    def __init__(self):
        # Defaults; test() overrides the scale factor with its own argument.
        self.num_frames = 3
        self.scale_factor = 4
    def test(self, dataPath=None, scale_factor=4, num_frames=3):
        """Super-resolve every frame under DATA_TEST and write PNGs to disk.

        The ``dataPath`` argument is ignored: the path is taken from the
        module-level DATA_TEST constant.
        """
        import scipy.misc
        dataPath = DATA_TEST
        inList = sorted(glob.glob(os.path.join(dataPath, 'input{}/*.png').format(scale_factor)))
        inp = [scipy.misc.imread(i).astype(np.float32) / 255.0 for i in inList]
        # inp = [scipy.misc.imresize(i, [120, 160]) / 255.0 for i in inp]
        # crop to the fixed 120x160 input size of the frozen graph
        inp = [i[:120, :160, :] for i in inp]
        print 'Testing path: {}'.format(dataPath)
        print '# of testing frames: {}'.format(len(inList))
        DATA_TEST_OUT = DATA_TEST+'_SR_{}'.format(datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
        os.mkdir(DATA_TEST_OUT)
        cnt = 0
        self.scale_factor = scale_factor
        reuse = False
        for idx0 in xrange(len(inList)):
            cnt += 1
            # half window size (integer division under Python 2)
            T = num_frames / 2
            # build the temporal window around idx0, replicating edge frames
            imgs = [inp[0] for i in xrange(idx0 - T, 0)]
            imgs.extend([inp[i] for i in xrange(max(0, idx0 - T), idx0)])
            imgs.extend([inp[i] for i in xrange(idx0, min(len(inList), idx0 + T + 1))])
            imgs.extend([inp[-1] for i in xrange(idx0 + T, len(inList) - 1, -1)])
            dims = imgs[0].shape
            if len(dims) == 2:
                imgs = [np.expand_dims(i, -1) for i in imgs]
            h, w, c = imgs[0].shape
            out_h = h * scale_factor
            out_w = w * scale_factor
            # pad spatial dims up to a multiple of 4 with edge replication
            padh = int(ceil(h / 4.0) * 4.0 - h)
            padw = int(ceil(w / 4.0) * 4.0 - w)
            imgs = [np.pad(i, [[0, padh], [0, padw], [0, 0]], 'edge') for i in imgs]
            imgs = np.expand_dims(np.stack(imgs, axis=0), 0)
            if idx0 == 0:
                # build the graph once, on the first frame
                frames_lr = tf.placeholder(dtype=tf.float32, shape=imgs.shape)
                frames_ref_ycbcr = rgb2ycbcr(frames_lr[:, T:T + 1, :, :, :])
                frames_ref_ycbcr = tf.tile(frames_ref_ycbcr, [1, num_frames, 1, 1, 1])
                with open('spmc_120_160_4x3f.pb', 'rb') as f:
                    graph_def = tf.GraphDef()
                    graph_def.ParseFromString(f.read())
                    output = tf.import_graph_def(graph_def, input_map={'Placeholder:0': frames_lr}, return_elements=['output:0'])
                    output = output[0]
                    print(output.get_shape())
                if len(dims) == 3:
                    # the graph outputs Y only: upscale Cb/Cr separately and
                    # convert the stacked channels back to RGB
                    output_rgb = ycbcr2rgb(tf.concat([output, resize_images(frames_ref_ycbcr,
                                                                            [(h + padh) * scale_factor,
                                                                             (w + padw) * scale_factor],
                                                                            method=2)[:, :, :, :, 1:3]], -1))
                else:
                    output_rgb = output
                # crop away the padding introduced above
                output = output[:, :, :out_h, :out_w, :]
                output_rgb = output_rgb[:, :, :out_h, :out_w, :]
            if cnt == 1:
                sess = tf.Session()
                reuse = True
            case_path = dataPath.split('/')[-1]
            print 'Testing - ', case_path, len(imgs)
            [imgs_hr, imgs_hr_rgb] = sess.run([output, output_rgb], feed_dict={frames_lr: imgs})
            scipy.misc.imsave(os.path.join(DATA_TEST_OUT, 'y_%03d.png'%(idx0)),
                              im2uint8(imgs_hr[0, -1, :, :, 0]))
            if len(dims) == 3:
                scipy.misc.imsave(os.path.join(DATA_TEST_OUT, 'rgb_%03d.png'%(idx0)),
                                  im2uint8(imgs_hr_rgb[0, -1, :, :, :]))
        print 'SR results path: {}'.format(DATA_TEST_OUT)
def main(_):
    """Entry point handed to tf.app.run: build a VIDEOSR model and test it."""
    VIDEOSR().test()
if __name__ == '__main__':
    tf.app.run()  # TF1 app runner: parses flags and dispatches to main(_)
| 4,510
| 39.276786
| 206
|
py
|
SPMC_VideoSR
|
SPMC_VideoSR-master/modules/utils.py
|
import tensorflow as tf
def weight_from_caffe(caffenet):
    """Return an initializer that copies conv weights from a Caffe net.

    The returned function resolves the layer by the last component of the
    current TF variable scope name and transposes Caffe's weight layout to
    TensorFlow's (perm=[2, 3, 1, 0]). Python 2 (print statement).
    """
    def func(shape, dtype):
        sc = tf.get_variable_scope()
        name = sc.name.split('/')[-1]
        print 'init: ', name, shape, caffenet.params[name][0].data.shape
        return tf.transpose(caffenet.params[name][0].data, perm=[2 ,3 ,1 ,0])
    return func
def bias_from_caffe(caffenet):
    """Return an initializer that copies a layer's bias vector from a Caffe net.

    The layer is resolved from the last component of the current TF variable
    scope name when the initializer is invoked.
    """
    def func(shape, dtype):
        layer_name = tf.get_variable_scope().name.split('/')[-1]
        return caffenet.params[layer_name][1].data
    return func
| 527
| 25.4
| 77
|
py
|
SPMC_VideoSR
|
SPMC_VideoSR-master/modules/videosr_ops_lite.py
|
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
slim = tf.contrib.slim
def im2uint8(x):
    """Clip values to [0, 1] and convert to uint8 in [0, 255].

    Works on both tf.Tensor inputs (graph ops) and numpy-like inputs.
    """
    if x.__class__ == tf.Tensor:
        return tf.cast(tf.clip_by_value(x, 0.0, 1.0) * 255.0, tf.uint8)
    scaled = np.clip(x, 0.0, 1.0) * 255.0
    return scaled.astype(np.uint8)
def get_shape(x):
    """Return the dynamic shape of ``x`` as a list of scalar tensors.

    Attaches a runtime assertion that every dimension is non-negative.
    Python 2 (xrange).
    """
    shape = tf.shape(x)
    check = tf.Assert(tf.reduce_all(shape >= 0), ["EASYFLOW: Need value.shape >= 0, got ", shape])
    shape = control_flow_ops.with_dependencies([check], shape)
    return [shape[i] for i in xrange(shape.shape.as_list()[0])]
def zero_upsampling(x, scale_factor):
    """Spatially upsample by ``scale_factor``, inserting zeros between pixels.

    Each input pixel keeps its value and the remaining scale_factor**2 - 1
    positions of its output cell are zero. Supports 4-D (n, h, w, c) and
    5-D (n, t, h, w, c) tensors; any other rank raises NameError (``y`` is
    never assigned).
    """
    dims = x.get_shape().as_list()
    if len(dims) == 5:
        n, t, h, w, c = dims
        # stack each pixel with its zero fillers along channels, then move the
        # two factor axes next to the spatial axes and fold them in
        y = tf.concat([x] + [tf.zeros_like(x)] * (scale_factor ** 2 - 1), -1)
        y = tf.reshape(y, [n, t, h, w, scale_factor, scale_factor, c])
        y = tf.transpose(y, [0, 1, 2, 4, 3, 5, 6])
        y = tf.reshape(y, [n, t, h * scale_factor, w * scale_factor, c])
    elif len(dims) == 4:
        n, h, w, c = dims
        y = tf.concat([x] + [tf.zeros_like(x)] * (scale_factor ** 2 - 1), -1)
        y = tf.reshape(y, [n, h, w, scale_factor, scale_factor, c])
        y = tf.transpose(y, [0, 1, 3, 2, 4, 5])
        y = tf.reshape(y, [n, h * scale_factor, w * scale_factor, c])
    return y
def leaky_relu(x, alpha=0.1):
    """Leaky ReLU: identity for positive inputs, slope ``alpha`` otherwise."""
    leaked = alpha * x
    return tf.maximum(x, leaked)
def prelu(x):
    """Parametric ReLU with a learnable per-channel slope, initialized to 0."""
    alphas = tf.get_variable('alpha', x.get_shape()[-1],
                             initializer=tf.constant_initializer(0.0),
                             dtype=tf.float32)
    positive = tf.nn.relu(x)
    # (x - |x|) / 2 equals x for negative entries and 0 otherwise
    negative = alphas * (x - tf.abs(x)) * 0.5
    return positive + negative
def display_tf_variables(train_vars):
    """Print the names of the given training variables (Python 2 print)."""
    print 'Training Variables: '
    for var in train_vars:
        print '\t', var.name
def resize_images(images, size, method=2, align_corners=False):
    """Resize 4-D or 5-D image tensors to ``size``.

    5-D (n, t, h, w, c) inputs are flattened over the time axis before the
    resize and reshaped back afterwards.
    """
    rank = len(images.get_shape())
    if rank == 5:
        n, t, h, w, c = images.get_shape().as_list()
        images = tf.reshape(images, [n * t, h, w, c])
    images = tf.image.resize_images(images, size, method, align_corners)
    if rank == 5:
        images = tf.reshape(images, [n, t, size[0], size[1], c])
    return images
def rgb2y(inputs):
    """Extract the BT.601 luma (Y) channel from an RGB tensor in [0, 1].

    Single-channel input is returned unchanged; otherwise the input must have
    exactly 3 channels. Supports 4-D and 5-D tensors.
    """
    with tf.name_scope('rgb2y'):
        n_channels = inputs.get_shape()[-1].value
        if n_channels == 1:
            return inputs
        assert n_channels == 3, 'Error: rgb2y input should be RGB or grayscale!'
        dims = len(inputs.get_shape())
        if dims == 4:
            scale = tf.reshape([65.481, 128.553, 24.966], [1, 1, 1, 3]) / 255.0
        elif dims == 5:
            scale = tf.reshape([65.481, 128.553, 24.966], [1, 1, 1, 1, 3]) / 255.0
        luma = tf.reduce_sum(inputs * scale, reduction_indices=dims - 1, keep_dims=True)
        return luma + 16 / 255.0
def rgb2ycbcr(inputs):
    """Convert RGB in [0, 1] to YCbCr, still scaled to [0, 1].

    Single-channel inputs pass through untouched. Works on 4-D or 5-D
    tensors with the channel axis last.
    """
    with tf.name_scope('rgb2ycbcr'):
        if inputs.get_shape()[-1].value == 1:
            return inputs
        assert inputs.get_shape()[-1].value == 3, 'Error: rgb2ycbcr input should be RGB or grayscale!'
        ndims = len(inputs.get_shape())
        # Rows of the 8-bit RGB -> YCbCr transform matrix plus its offsets.
        origT = [[65.481, 128.553, 24.966],
                 [-37.797, -74.203, 112],
                 [112, -93.786, -18.214]]
        origOffset = [16.0, 128.0, 128.0]
        if ndims == 4:
            rows = [tf.reshape(origT[i], [1, 1, 1, 3]) / 255.0 for i in range(3)]
        elif ndims == 5:
            rows = [tf.reshape(origT[i], [1, 1, 1, 1, 3]) / 255.0 for i in range(3)]
        channels = [
            tf.reduce_sum(inputs * rows[i], reduction_indices=-1, keep_dims=True)
            + origOffset[i] / 255.0
            for i in range(3)
        ]
        return tf.concat(channels, -1)
def ycbcr2rgb(inputs):
    """Invert rgb2ycbcr: map YCbCr in [0, 1] back to RGB in [0, 1].

    Single-channel inputs are returned unchanged. Accepts 4-D [n, h, w, 3]
    or 5-D [n, t, h, w, 3] tensors.
    """
    with tf.name_scope('ycbcr2rgb'):
        if inputs.get_shape()[-1].value == 1:
            return inputs
        # Bug fix: the assertion message previously said 'rgb2ycbcr'.
        assert inputs.get_shape()[-1].value == 3, 'Error: ycbcr2rgb input should be YCbCr or grayscale!'
        ndims = len(inputs.get_shape())
        # Tinv is the precomputed inverse of the rgb2ycbcr transform matrix.
        Tinv = [[0.00456621, 0., 0.00625893],
                [0.00456621, -0.00153632, -0.00318811],
                [0.00456621, 0.00791071, 0.]]
        origOffset = [16.0, 128.0, 128.0]
        if ndims == 4:
            origT = [tf.reshape(Tinv[i], [1, 1, 1, 3]) * 255.0 for i in range(3)]
            origOffset = tf.reshape(origOffset, [1, 1, 1, 3]) / 255.0
        elif ndims == 5:
            origT = [tf.reshape(Tinv[i], [1, 1, 1, 1, 3]) * 255.0 for i in range(3)]
            origOffset = tf.reshape(origOffset, [1, 1, 1, 1, 3]) / 255.0
        output = []
        for i in range(3):
            output.append(tf.reduce_sum((inputs - origOffset) * origT[i],
                                        reduction_indices=-1, keep_dims=True))
        return tf.concat(output, -1)
def rgb2gray(inputs):
    """Convert RGB in [0, 1] to single-channel grayscale (0.299/0.587/0.114 weights).

    Single-channel inputs pass through unchanged. Accepts 4-D [n, h, w, 3]
    or 5-D [n, t, h, w, 3] tensors; the channel axis stays last with size 1.
    """
    with tf.name_scope('rgb2gray'):
        if inputs.get_shape()[-1].value == 1:
            return inputs
        # Bug fix: the assertion message previously said 'rgb2y'.
        assert inputs.get_shape()[-1].value == 3, 'Error: rgb2gray input should be RGB or grayscale!'
        dims = len(inputs.get_shape())
        if dims == 4:
            scale = tf.reshape([0.299, 0.587, 0.114], [1, 1, 1, 3])
        elif dims == 5:
            scale = tf.reshape([0.299, 0.587, 0.114], [1, 1, 1, 1, 3])
        output = tf.reduce_sum(inputs * scale, reduction_indices=dims - 1, keep_dims=True)
        return output
| 5,349
| 37.768116
| 121
|
py
|
SPMC_VideoSR
|
SPMC_VideoSR-master/modules/__init__.py
|
# -*- coding: utf-8 -*-
# Public submodules re-exported by `from modules import *`.
__all__=['BasicConvLSTMCell','flowTools','model_easyflow','model_flownet','SSIM_Index','utils','videos_ops']
| 135
| 26.2
| 108
|
py
|
CLOSURE
|
CLOSURE-master/setup.py
|
# Minimal setuptools configuration for the nmn-iwp package (the `vr`
# package and its models submodule).
from setuptools import setup
setup(
    name="nmn-iwp",
    version="0.1",
    keywords="",
    packages=["vr", "vr.models"]
)
| 128
| 13.333333
| 32
|
py
|
CLOSURE
|
CLOSURE-master/vr/plotting.py
|
import os
import json
from matplotlib import pyplot
import pandas
import scipy.stats as stats
import logging
logger = logging.getLogger(__name__)
def load_log(root, file_, data_train, data_val, args, parts):
    """Parse one `<slurmid>.pt.json` training log and append its rows.

    Training-loss entries are appended to `data_train` and per-part
    validation accuracies to `data_val` (both are dict-of-lists tables,
    suitable for pandas.DataFrame). The run's hyper-parameters are stored in
    `args[root][slurmid]`. Validation series shorter than `val_accs_ts` are
    front-padded with None (with a warning); longer ones raise ValueError.

    Bug fix: the log file is now opened with a context manager instead of
    `json.load(open(path))`, which leaked the file handle.
    """
    slurmid = file_[:-8]  # strip the trailing '.pt.json'
    with open(os.path.join(root, file_)) as fp:
        log = json.load(fp)
    args[root][slurmid] = log['args']

    entropies = log.get('entropy', [])
    prog_accs = log.get('prog_acc', [])
    for i, t in enumerate(log['train_losses_ts']):
        row = {
            'root': root,
            'slurmid': slurmid,
            'step': t,
            'train_loss': log['train_losses'][i],
            'ts': log['train_losses_ts'][i],
            # Older logs may lack these series; default missing entries to 0.
            'entropy': entropies[i] if i < len(entropies) else 0,
            'prog_acc': prog_accs[i] if i < len(prog_accs) else 0,
        }
        for column, value in row.items():
            data_train[column].append(value)

    num_val_steps = len(log['val_accs_ts'])
    part_keys = ['{}_accs'.format(part) for part in parts]
    for key in part_keys:
        if key not in log:
            continue
        if num_val_steps > len(log[key]):
            logger.warning('numbers of timesteps and values dont match for {}, {}, {}; attempt to fix'.format(
                root, file_, key))
            log[key] = [None] * (num_val_steps - len(log[key])) + log[key]
        if num_val_steps < len(log[key]):
            raise ValueError()

    for i, t in enumerate(log['val_accs_ts']):
        data_val['root'].append(root)
        data_val['slurmid'].append(slurmid)
        data_val['step'].append(t)
        for key in part_keys:
            # Column name drops the trailing 's': 'val_accs' -> 'val_acc'.
            data_val[key[:-1]].append(log[key][i] if key in log else None)
def load_logs(root, data_train, data_val, args, parts=['train', 'val']):
    """Recursively load every '*pt.json' log under `root` via load_log.

    Results are accumulated in-place into data_train / data_val / args.
    """
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            if name.endswith('pt.json'):
                load_log(dirpath, name, data_train, data_val, args, parts)
def plot_average(df, train_quantity='train_acc', val_quantity='val_acc', window=1, plot_interval=False):
    """Plot seed-averaged train/val curves, one pair of lines per `root`.

    For each experiment root, runs are truncated to the progress of the
    slowest seed, averaged per step, smoothed with a rolling `window`, and
    plotted (train dotted, validation solid in the same color). When
    `plot_interval` is set, a 95% Student-t confidence band is drawn around
    the validation mean. Also prints, per root, how many seeds finished with
    final smoothed accuracy above 0.99.
    """
    for root, df_root in df.groupby('root'):
        # Truncate every seed to the least-advanced seed so that per-step
        # means always average the same number of runs.
        min_progress = min([df_slurmid['step'].max() for _, df_slurmid in df_root.groupby('slurmid')])
        df_root = df_root[df_root['step'] <= min_progress]
        df_agg = df_root.groupby(['step']).agg(['mean', 'std'])
        # Plot train
        train_values = df_agg[train_quantity]['mean']
        train_values = train_values.rolling(window).mean()
        train_lines = pyplot.plot(df_agg.index,
                                  train_values,
                                  label=root + ' train',
                                  linestyle='dotted')
        # Plot validation
        n_seeds = len(df_root['slurmid'].unique())
        if val_quantity:
            val_values = df_agg[val_quantity]['mean']
            val_std = df_agg[val_quantity]['std']
            val_values = val_values.rolling(window).mean()
            val_std = val_std.rolling(window).mean()
            # Half-width of the 95% confidence interval for the mean.
            width = val_std * stats.t.ppf(0.975, n_seeds - 1) / (n_seeds ** 0.5)
            pyplot.plot(df_agg.index,
                        val_values,
                        label=root + " val",
                        color=train_lines[0].get_color())
            if plot_interval:
                pyplot.fill_between(df_agg.index,
                                    val_values - width, val_values + width,
                                    color=train_lines[0].get_color(),
                                    alpha=0.5)
        # Count number of successes
        n_train_successes = 0
        n_val_successes = 0
        for slurmid, df_slurmid in df_root.groupby('slurmid'):
            slurmid_values = df_slurmid[train_quantity].rolling(window).mean()
            if slurmid_values.iloc[-1] > 0.99:
                n_train_successes += 1
            if val_quantity:
                slurmid_values = df_slurmid[val_quantity].rolling(window).mean()
                if slurmid_values.iloc[-1] > 0.99:
                    n_val_successes += 1
        success_report = "{} out of {}".format(n_train_successes, n_seeds)
        # Print
        to_print = ["{} ({} steps)".format(root, str(min_progress)),
                    success_report, "({:.1f})".format(100 * train_values.iloc[-1])]
        if val_quantity:
            to_print.append("{} out of {}".format(n_val_successes, n_seeds))
            to_print.append("({:.1f}+-{:.1f})".format(100 * val_values.iloc[-1], 100 * width.iloc[-1]))
        print(*to_print)
    pyplot.legend()
    quantities = ([train_quantity] if train_quantity else []) + ([val_quantity] if val_quantity else [])
    pyplot.title(", ".join(quantities))
def plot_all_runs(df, train_quantity='train_acc', val_quantity='val_acc', color=None, window=1, verbose=True):
    """Plot each individual run (one line per (root, slurmid) pair).

    Train curves are dotted, validation curves solid in the matching color;
    both are smoothed with a rolling `window`. When `verbose`, prints the
    last step and final quantities for each run.
    """
    kwargs = {}
    if color:
        kwargs['color'] = color
    legend = []
    for (root, slurmid), df_run in df.groupby(['root', 'slurmid']):
        path = root + ' ' + slurmid
        train_lines = pyplot.plot(df_run['step'],
                                  df_run[train_quantity].rolling(window).mean(),
                                  label=path + ' train',
                                  linestyle='dotted',
                                  **kwargs)
        legend.append(slurmid)
        if val_quantity:
            # Reuse the train line's color so train/val pairs match visually.
            pyplot.plot(df_run['step'],
                        df_run[val_quantity].rolling(window).mean(),
                        label=path + ' val',
                        color=train_lines[0].get_color())
            legend.append(slurmid)
        to_print = [path, df_run['step'].iloc[-1], df_run[train_quantity].iloc[-1]]
        if val_quantity:
            to_print.append(df_run[val_quantity].iloc[-1].mean())
        if verbose:
            print(*to_print)
    pyplot.legend(legend)
    quantities = ([train_quantity] if train_quantity else []) + ([val_quantity] if val_quantity else [])
    pyplot.title(", ".join(quantities))
| 5,907
| 42.441176
| 114
|
py
|
CLOSURE
|
CLOSURE-master/vr/programs.py
|
#!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
"""
Utilities for working with and converting between the various data structures
used to represent programs.
"""
class ProgramConverter(object):
    """Converts programs between list, tree, prefix, and postfix forms.

    A program "list" is a topologically-ordered list of function dicts whose
    'inputs' are integer indices into the list (root last); a "tree" nests
    the input nodes directly; "prefix"/"postfix" are flat traversals whose
    structure is recovered from each token's arity.
    """
    def __init__(self, vocab=None):
        """
        `vocab` is necessary only for prefix_to_list, cause in this case
        we need to know the arity of the tokens.
        """
        self._vocab = vocab

    def is_chain(self, program_list):
        """Return True iff the program is a single chain: every node has at
        most one input and all nodes are reachable from the root."""
        visited = [False for fn in program_list]
        cur_idx = len(program_list) - 1
        while True:
            visited[cur_idx] = True
            inputs = program_list[cur_idx]['inputs']
            if len(inputs) == 0:
                break
            elif len(inputs) == 1:
                cur_idx = inputs[0]
            elif len(inputs) > 1:
                return False
        return all(visited)

    def list_to_tree(self, program_list):
        """Nest the index-based list form into a tree rooted at the last entry."""
        def build_subtree(cur):
            return {
                'function': cur['function'],
                'value_inputs': [x for x in cur['value_inputs']],
                'inputs': [build_subtree(program_list[i]) for i in cur['inputs']],
            }
        return build_subtree(program_list[-1])

    def tree_to_prefix(self, program_tree):
        """Flatten a tree into pre-order (a function precedes its inputs)."""
        output = []
        def helper(cur):
            output.append({
                'function': cur['function'],
                'value_inputs': [x for x in cur['value_inputs']],
            })
            for node in cur['inputs']:
                helper(node)
        helper(program_tree)
        return output

    def list_to_prefix(self, program_list):
        return self.tree_to_prefix(self.list_to_tree(program_list))

    def tree_to_postfix(self, program_tree):
        """Flatten a tree into post-order (inputs precede their function)."""
        output = []
        def helper(cur):
            for node in cur['inputs']:
                helper(node)
            output.append({
                'function': cur['function'],
                'value_inputs': [x for x in cur['value_inputs']],
            })
        helper(program_tree)
        return output

    def tree_to_list(self, program_tree):
        """Serialize a tree back into the index-based list form (root last)."""
        # First count nodes
        def count_nodes(cur):
            return 1 + sum(count_nodes(x) for x in cur['inputs'])
        num_nodes = count_nodes(program_tree)
        output = [None] * num_nodes
        def helper(cur, idx):
            output[idx] = {
                'function': cur['function'],
                'value_inputs': [x for x in cur['value_inputs']],
                'inputs': [],
            }
            next_idx = idx - 1
            for node in reversed(cur['inputs']):
                output[idx]['inputs'].insert(0, next_idx)
                next_idx = helper(node, next_idx)
            return next_idx
        helper(program_tree, num_nodes - 1)
        return output

    def prefix_to_tree(self, program_prefix):
        """Rebuild a tree from pre-order using token arities from the vocab."""
        program_prefix = [x for x in program_prefix]
        def helper():
            cur = program_prefix.pop(0)
            return {
                'function': cur['function'],
                'value_inputs': [x for x in cur['value_inputs']],
                'inputs': [helper() for _ in range(self.get_num_inputs(cur))],
            }
        return helper()

    def prefix_to_list(self, program_prefix):
        return self.tree_to_list(self.prefix_to_tree(program_prefix))

    def list_to_postfix(self, program_list):
        return self.tree_to_postfix(self.list_to_tree(program_list))

    def postfix_to_tree(self, program_postfix):
        """Rebuild a tree from post-order using token arities from the vocab."""
        program_postfix = [x for x in program_postfix]
        def helper():
            cur = program_postfix.pop()
            return {
                'function': cur['function'],
                'value_inputs': [x for x in cur['value_inputs']],
                # Bug fix: was `range(self, self.get_num_inputs(cur))`, which
                # raised TypeError whenever this method was called.
                'inputs': [helper() for _ in range(self.get_num_inputs(cur))][::-1],
            }
        return helper()

    def postfix_to_list(self, program_postfix):
        # Bug fix: was `self.tree_to_list(self, ...)`, passing `self` twice.
        return self.tree_to_list(self.postfix_to_tree(program_postfix))

    def get_num_inputs(self, f):
        """Look up the arity of function token `f` in the vocab."""
        f = function_to_str(f)
        # This is a litle hacky; it would be better to look up from metadata.json
        return self._vocab['program_token_arity'][f]

def function_to_str(f):
    """Render a function dict as 'name' or 'name[v1,v2]'."""
    value_str = ''
    if f['value_inputs']:
        value_str = '[%s]' % ','.join(f['value_inputs'])
    return '%s%s' % (f['function'], value_str)

def str_to_function(s):
    """Parse 'name' or 'name[v1,v2]' back into a function dict."""
    if '[' not in s:
        return {
            'function': s,
            'value_inputs': [],
        }
    name, value_str = s.replace(']', '').split('[')
    return {
        'function': name,
        'value_inputs': value_str.split(','),
    }

def list_to_str(program_list):
    """Render a whole program as space-separated function strings."""
    return ' '.join(function_to_str(f) for f in program_list)
| 5,119
| 31.201258
| 138
|
py
|
CLOSURE
|
CLOSURE-master/vr/utils.py
|
#!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import inspect
import json
import torch
from vr.models import (ModuleNet,
Seq2Seq,
Seq2SeqAtt,
LstmModel,
CnnLstmModel,
CnnLstmSaModel,
FiLMedNet,
FiLMGen,
MAC)
from vr.ns_vqa.parser import Seq2seqParser
from vr.ns_vqa.clevr_executor import ClevrExecutor
def invert_dict(d):
    """Return a new dict mapping each value of `d` back to its key."""
    return dict((value, key) for key, value in d.items())
def load_vocab(path):
    """Load a vocab JSON file and add reverse (idx -> token) lookup tables.

    Also sanity-checks that the special tokens <NULL>/<START>/<END> occupy
    indices 0/1/2 in both the question and program vocabularies.
    """
    with open(path, 'r') as f:
        vocab = json.load(f)
    for field in ('question', 'answer'):
        vocab[field + '_idx_to_token'] = invert_dict(vocab[field + '_token_to_idx'])
    vocab['program_idx_to_token'] = invert_dict(vocab['program_token_to_idx'])
    # Sanity check: make sure <NULL>, <START>, and <END> are consistent
    for table in ('question_token_to_idx', 'program_token_to_idx'):
        for token, idx in (('<NULL>', 0), ('<START>', 1), ('<END>', 2)):
            assert vocab[table][token] == idx
    return vocab
def load_cpu(path):
    """
    Loads a torch checkpoint, remapping tensors saved on 'cuda:0' to the CPU.
    """
    remap = {'cuda:0': 'cpu'}
    return torch.load(path, map_location=remap)
def load_program_generator(path):
    """Restore a program generator from a checkpoint file.

    The class to instantiate is chosen from the checkpoint's recorded
    model_type. Returns (model, kwargs); `model` is None for model types
    that have no separate program generator.
    """
    checkpoint = load_cpu(path)
    model_type = checkpoint['args']['model_type']
    kwargs = checkpoint['program_generator_kwargs']
    state = checkpoint['program_generator_state']
    model = None
    if model_type in ['FiLM', 'MAC', 'RelNet', 'Control-EE']:
        model = FiLMGen(**kwargs)
    elif model_type in ('PG+EE', 'PG'):
        if checkpoint['args']['ns_vqa']:
            model = Seq2seqParser(checkpoint['vocab'])
        else:
            model = Seq2SeqAtt(**kwargs)
    if model is not None:
        model.load_state_dict(state)
    return model, kwargs
def load_execution_engine(path, verbose=True):
    """Restore an execution engine from a checkpoint file.

    Checkpoints flagged `symbolic_ee` get a ClevrExecutor built from the
    vocab; otherwise the network class is chosen from the checkpoint's
    model_type and its saved state is loaded. Returns (model, kwargs).

    Fix: removed an unused local (`cur_state = model.state_dict()`).
    """
    checkpoint = load_cpu(path)
    if checkpoint['args'].get('symbolic_ee'):
        vocab = load_vocab(checkpoint['args']['vocab_json'])
        ee = ClevrExecutor(vocab)
        return ee, {}
    model_type = checkpoint['args']['model_type']
    kwargs = checkpoint['execution_engine_kwargs']
    state = checkpoint['execution_engine_state']
    kwargs['verbose'] = verbose
    if model_type == 'FiLM':
        model = FiLMedNet(**kwargs)
    elif model_type in ['PG+EE', 'EE', 'Control-EE']:
        # Patch kwargs that older checkpoints may be missing or carry extra.
        kwargs.pop('sharing_patterns', None)
        kwargs.setdefault('module_pool', 'mean')
        kwargs.setdefault('module_use_gammas', 'linear')
        model = ModuleNet(**kwargs)
    elif model_type == 'MAC':
        kwargs.setdefault('write_unit', 'original')
        kwargs.setdefault('read_connect', 'last')
        kwargs.setdefault('read_unit', 'original')
        kwargs.setdefault('noisy_controls', False)
        kwargs.pop('sharing_params_patterns', None)
        model = MAC(**kwargs)
    elif model_type == 'RelNet':
        # NOTE(review): RelationNet, SHNMN and SimpleModuleNet are not
        # imported at the top of this file; these branches raise NameError
        # if ever taken -- confirm against vr.models.
        model = RelationNet(**kwargs)
    elif model_type == 'SHNMN':
        model = SHNMN(**kwargs)
    elif model_type == 'SimpleNMN':
        model = SimpleModuleNet(**kwargs)
    else:
        raise ValueError()
    model.load_state_dict(state)
    return model, kwargs
def load_baseline(path):
    """Restore one of the LSTM/CNN baseline models from a checkpoint file."""
    baseline_classes = {
        'LSTM': LstmModel,
        'CNN+LSTM': CnnLstmModel,
        'CNN+LSTM+SA': CnnLstmSaModel,
    }
    checkpoint = load_cpu(path)
    kwargs = checkpoint['baseline_kwargs']
    model = baseline_classes[checkpoint['baseline_type']](**kwargs)
    model.load_state_dict(checkpoint['baseline_state'])
    return model, kwargs
def get_updated_args(kwargs, object_class):
    """
    Returns kwargs with renamed args or arg values and deleted, deprecated,
    unused args. Useful for loading older, trained models.
    If using this function is necessary, use immediately before initializing
    the object.
    """
    # Update arg values. NOTE(review): `arg_value_updates` is expected to be
    # defined at module level elsewhere -- it is not visible in this chunk.
    for arg in arg_value_updates:
        if arg in kwargs and kwargs[arg] in arg_value_updates[arg]:
            kwargs[arg] = arg_value_updates[arg][kwargs[arg]]
    # Delete deprecated, unused args. Bug fix: inspect.getargspec was removed
    # in Python 3.11; getfullargspec is the drop-in replacement and element
    # [0] is still the list of positional argument names.
    valid_args = inspect.getfullargspec(object_class.__init__)[0]
    return {valid_arg: kwargs[valid_arg] for valid_arg in valid_args if valid_arg in kwargs}
class EMA():
    """Exponential moving average tracker for named tensors.

    Values live under a "<category>-<name>" key. `register` stores the
    initial value; calling the instance updates the stored average to
    mu * x + (1 - mu) * previous and returns it.
    """
    def __init__(self, mu):
        self.mu = mu
        self.shadow = {}

    def register(self, cat, name, val):
        self.shadow[cat + '-' + name] = val.clone()

    def __call__(self, cat, name, x):
        key = cat + '-' + name
        assert key in self.shadow
        updated = self.mu * x + (1.0 - self.mu) * self.shadow[key]
        self.shadow[key] = updated.clone()
        return updated
def load_tbd_net(checkpoint, vocab):
    """ Convenience function to load a TbD-Net model from a checkpoint file.
    Parameters
    ----------
    checkpoint : Union[pathlib.Path, str]
        The path to the checkpoint.
    vocab : Dict[str, Dict[any, any]]
        The vocabulary file associated with the TbD-Net. For an extended description, see above.
    Returns
    -------
    torch.nn.Module
        The TbD-Net model.
    Notes
    -----
    This pushes the TbD-Net model to the GPU if a GPU is available.
    """
    # NOTE(review): TbDNet is not imported anywhere in this file -- calling
    # this helper as-is would raise NameError; confirm where TbDNet comes from.
    tbd_net = TbDNet(vocab)
    # Remap tensors saved on cuda:0 so the checkpoint also loads on CPU-only hosts.
    tbd_net.load_state_dict(torch.load(str(checkpoint), map_location={'cuda:0': 'cpu'}))
    if torch.cuda.is_available():
        tbd_net.cuda()
    return tbd_net
| 6,077
| 32.58011
| 138
|
py
|
CLOSURE
|
CLOSURE-master/vr/data.py
|
#!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import numpy as np
import PIL.Image
import h5py
import io
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataloader import default_collate
import random, math
import vr.programs
import json
from vr.programs import ProgramConverter
def load_scenes(scenes_json):
    """Read a CLEVR scenes JSON file into a list of per-scene object tables.

    Each scene becomes a list of dicts with id, position, color, material,
    shape and size. When an object carries '3d_coords', its position is the
    projection onto the scene's right/front directions plus the raw height;
    otherwise the stored 'position' is used as-is.
    """
    with open(scenes_json) as f:
        scenes_dict = json.load(f)['scenes']
    scenes = []
    for scene in scenes_dict:
        table = []
        for obj_index, obj in enumerate(scene['objects']):
            if '3d_coords' in obj:
                position = [np.dot(obj['3d_coords'], scene['directions']['right']),
                            np.dot(obj['3d_coords'], scene['directions']['front']),
                            obj['3d_coords'][2]]
            else:
                position = obj['position']
            table.append({
                'id': '%d-%d' % (scene['image_index'], obj_index),
                'position': position,
                'color': obj['color'],
                'material': obj['material'],
                'shape': obj['shape'],
                'size': obj['size'],
            })
        scenes.append(table)
    return scenes
def _dataset_to_tensor(dset, mask=None, dtype=None):
arr = np.asarray(dset, dtype=np.int64 if dtype is None else dtype)
if mask is not None:
arr = arr[mask]
tensor = torch.LongTensor(arr)
return tensor
def _gen_subsample_mask(num, percent=1.0):
chosen_num = math.floor(num * percent)
mask = np.full((num,), False)
selected_ids = np.asarray(random.sample(range(num), chosen_num), dtype='int32')
mask[selected_ids] = True
return mask
class ClevrDataset(Dataset):
    """CLEVR question/feature dataset backed by HDF5 files.

    Question data is read fully into memory as LongTensors; image features
    are read lazily from `feature_h5_path` (the file is opened on first
    __getitem__, so each DataLoader worker gets its own handle). At most one
    of `percent_of_data`, `question_families`, `oversample` may be used to
    build a row-selection mask over the questions.
    """
    def __init__(self, question_h5, feature_h5_path, scene_path, vocab,
                 mode='prefix', load_features=False,
                 max_samples=None, question_families=None, percent_of_data=1.0,
                 oversample=None, oversample_shift=None):
        print('CLEVR DATASET')
        mode_choices = ['prefix', 'postfix']
        if mode not in mode_choices:
            raise ValueError('Invalid mode "%s"' % mode)
        self.vocab = vocab
        self.program_converter = ProgramConverter(vocab)
        self.feature_h5_path = feature_h5_path
        self.feature_h5 = None          # opened lazily in __getitem__
        self.all_features = None
        self.load_features = load_features
        self.mode = mode
        self.max_samples = max_samples
        # Compute the mask
        # The three masking options are mutually exclusive.
        num_mask_options_chosen = (
            int(percent_of_data < 1.0) + int(question_families is not None)
            + int(oversample is not None))
        if num_mask_options_chosen > 1:
            raise ValueError()
        mask = None
        if oversample is not None:
            # Repeat questions whose family id is >= oversample_shift
            # `oversample` times; families below the shift appear once.
            all_families = question_h5['question_families'][()]
            regular_indices = (all_families < oversample_shift).nonzero()[0]
            oversampled_indices = (all_families >= oversample_shift).nonzero()[0]
            mask = np.hstack([regular_indices] + [oversampled_indices] * oversample)
        if question_families is not None:
            # Use only the specified families
            all_families = np.asarray(question_h5['question_families'])
            N = all_families.shape[0]
            print(question_families)
            target_families = np.asarray(question_families)[:, None]
            mask = (all_families == target_families).any(axis=0)
        if percent_of_data < 1.0:
            num_example = np.asarray(question_h5['image_idxs']).shape[0]
            mask = _gen_subsample_mask(num_example, percent_of_data)
        self.mask = mask
        # Data from the question file is small, so read it all into memory
        print('Reading question data into memory')
        self.all_types = None
        if 'types' in question_h5:
            self.all_types = _dataset_to_tensor(question_h5['types'], mask)
        self.all_question_families = None
        if 'question_families' in question_h5:
            self.all_question_families = _dataset_to_tensor(question_h5['question_families'], mask)
        self.all_questions = _dataset_to_tensor(question_h5['questions'], mask)
        self.all_image_idxs = _dataset_to_tensor(question_h5['image_idxs'], mask)
        self.all_programs = None
        if 'programs' in question_h5:
            self.all_programs = _dataset_to_tensor(question_h5['programs'], mask)
        self.all_answers = None
        if 'answers' in question_h5:
            self.all_answers = _dataset_to_tensor(question_h5['answers'], mask)
        if scene_path:
            self.all_scenes = load_scenes(scene_path)
        else:
            self.all_scenes = None

    def __getitem__(self, index):
        """Return (question, index, feats, scene, answer, program_seq).

        When question types are available, the first element is the pair
        [question, q_type] instead. Missing answers/programs/scenes are None,
        and feats is a dummy FloatTensor([0]) when no feature file was given.
        """
        # Open the feature or load them if requested
        if self.feature_h5_path and not self.feature_h5:
            self.feature_h5 = h5py.File(self.feature_h5_path, 'r')
            if self.load_features:
                self.features = self.feature_h5['features'][()]
        if self.all_question_families is not None:
            # NOTE(review): question_family is computed but never used below.
            question_family = self.all_question_families[index]
        q_type = None if self.all_types is None else self.all_types[index]
        question = self.all_questions[index]
        image_idx = self.all_image_idxs[index]
        answer = None
        if self.all_answers is not None:
            answer = self.all_answers[index]
        program_seq = None
        if self.all_programs is not None:
            program_seq = self.all_programs[index]
        if self.all_scenes:
            scene = self.all_scenes[image_idx]
        else:
            scene = None
        if self.feature_h5_path:
            if self.load_features:
                feats = self.features[image_idx]
            else:
                feats = self.feature_h5['features'][image_idx]
            # 1-D entries appear to be encoded image bytes: decode with PIL,
            # move channels first and scale to [0, 1] -- TODO confirm format.
            if feats.ndim == 1:
                feats = np.array(PIL.Image.open(io.BytesIO(feats))).transpose(2, 0, 1) / 255.0
        else:
            feats = [0]
        feats = torch.FloatTensor(np.asarray(feats, dtype=np.float32))
        if q_type is None:
            return (question, index, feats, scene, answer, program_seq)
        return ([question, q_type], index, feats, scene, answer, program_seq)

    def __len__(self):
        # max_samples caps the visible dataset size without touching the data.
        if self.max_samples is None:
            return self.all_questions.size(0)
        else:
            return min(self.max_samples, self.all_questions.size(0))
class ClevrDataLoader(DataLoader):
    """DataLoader that builds its ClevrDataset from keyword arguments.

    Required kwargs: question_h5, feature_h5, vocab, scene_path (scene_path
    is popped without a default, so it must be present, though it may be
    falsy). Dataset-specific options (mode, load_features, percent_of_data,
    oversample, oversample_shift, question_families, max_samples) are popped
    here; everything else is forwarded to torch's DataLoader.
    """
    def __init__(self, **kwargs):
        if 'question_h5' not in kwargs:
            raise ValueError('Must give question_h5')
        if 'feature_h5' not in kwargs:
            raise ValueError('Must give feature_h5')
        if 'vocab' not in kwargs:
            raise ValueError('Must give vocab')
        scene_path = kwargs.pop('scene_path')
        print('Reading scenes from ', scene_path)
        feature_h5_path = kwargs.pop('feature_h5')
        print('Reading features from ', feature_h5_path)
        question_h5_path = kwargs.pop('question_h5')
        print('Reading questions from ', question_h5_path)
        vocab = kwargs.pop('vocab')
        mode = kwargs.pop('mode', 'prefix')
        load_features = kwargs.pop('load_features', False)
        percent_of_data = kwargs.pop('percent_of_data', 1.)
        oversample = kwargs.pop('oversample', None)
        oversample_shift = kwargs.pop('oversample_shift', None)
        question_families = kwargs.pop('question_families', None)
        max_samples = kwargs.pop('max_samples', None)
        # The question file is only needed while building the dataset; the
        # feature file is opened lazily inside ClevrDataset.__getitem__.
        with h5py.File(question_h5_path, 'r') as question_h5:
            self.dataset = ClevrDataset(
                question_h5, feature_h5_path, scene_path, vocab, mode,
                load_features=load_features,
                max_samples=max_samples,
                question_families=question_families,
                percent_of_data=percent_of_data,
                oversample=oversample,
                oversample_shift=oversample_shift)
        kwargs['collate_fn'] = clevr_collate
        super(ClevrDataLoader, self).__init__(self.dataset, **kwargs)

    def close(self):
        # Nothing to release: file handles are managed by the dataset/h5py.
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
def clevr_collate(batch):
    """Collate ClevrDataset samples into a batched list.

    Questions and indices are always collated into tensors. Features are
    collated only when present for every sample; answers and programs only
    when the first sample has them (None otherwise). Scenes are passed
    through untouched as a tuple.
    """
    questions, indices, feats, scenes, answers, programs = zip(*batch)
    question_batch = default_collate(questions)
    index_batch = default_collate(indices)
    feat_batch = default_collate(feats) if all(f is not None for f in feats) else feats
    answer_batch = default_collate(answers) if answers[0] is not None else answers
    program_seq_batch = default_collate(programs) if programs[0] is not None else programs
    return [question_batch, index_batch, feat_batch, scenes, answer_batch, program_seq_batch]
| 9,208
| 37.85654
| 138
|
py
|
CLOSURE
|
CLOSURE-master/vr/__init__.py
|
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
| 329
| 54
| 138
|
py
|
CLOSURE
|
CLOSURE-master/vr/preprocess.py
|
#!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
"""
Utilities for preprocessing sequence data.
Special tokens that are in all dictionaries:
<NULL>: Extra parts of the sequence that we should ignore
<START>: Goes at the start of a sequence
<END>: Goes at the end of a sequence, before <NULL> tokens
<UNK>: Out-of-vocabulary words
"""
# Fixed indices for the special tokens described in the module docstring;
# every vocabulary built here reserves these four slots.
SPECIAL_TOKENS = {
  '<NULL>': 0,
  '<START>': 1,
  '<END>': 2,
  '<UNK>': 3,
}
def tokenize(s, delim=' ',
             add_start_token=True, add_end_token=True,
             punct_to_keep=None, punct_to_remove=None):
    """Split string `s` on `delim` into a list of string tokens.

    Punctuation listed in `punct_to_keep` is separated into its own token;
    punctuation in `punct_to_remove` is stripped. <START>/<END> markers are
    prepended/appended unless disabled.
    """
    for p in (punct_to_keep or []):
        s = s.replace(p, '%s%s' % (delim, p))
    for p in (punct_to_remove or []):
        s = s.replace(p, '')
    tokens = s.split(delim)
    if add_start_token:
        tokens = ['<START>'] + tokens
    if add_end_token:
        tokens = tokens + ['<END>']
    return tokens
def build_vocab(sequences, min_token_count=1, delim=' ',
                punct_to_keep=None, punct_to_remove=None):
    """Build a token -> index vocabulary from an iterable of strings.

    Tokens appearing fewer than `min_token_count` times are dropped. The
    special tokens from SPECIAL_TOKENS always occupy their fixed indices;
    the remaining tokens get consecutive indices in sorted order, which
    keeps the vocabulary deterministic.

    Fix: removed a dead local (`tokenize_kwargs` was built but never used).
    """
    token_to_count = {}
    for seq in sequences:
        # Count raw tokens only: start/end markers are deliberately excluded.
        for token in tokenize(seq, delim=delim, punct_to_keep=punct_to_keep,
                              punct_to_remove=punct_to_remove,
                              add_start_token=False, add_end_token=False):
            token_to_count[token] = token_to_count.get(token, 0) + 1

    token_to_idx = dict(SPECIAL_TOKENS)
    for token, count in sorted(token_to_count.items()):
        if count >= min_token_count:
            token_to_idx[token] = len(token_to_idx)
    return token_to_idx
def encode(seq_tokens, token_to_idx, allow_unk=False):
    """Map a token list to an index list via `token_to_idx`.

    Unknown tokens are replaced by <UNK> when `allow_unk` is set; otherwise
    a KeyError is raised.
    """
    seq_idx = []
    for token in seq_tokens:
        if token not in token_to_idx:
            if not allow_unk:
                raise KeyError('Token "%s" not in vocab' % token)
            token = '<UNK>'
        seq_idx.append(token_to_idx[token])
    return seq_idx
def decode(seq_idx, idx_to_token, delim=None, stop_at_end=True):
    """Map an index sequence back to tokens, optionally joined with `delim`.

    When `stop_at_end` is set, decoding stops right after the first <END>
    token (which is kept in the output).
    """
    tokens = []
    for idx in seq_idx:
        token = idx_to_token[idx]
        tokens.append(token)
        if stop_at_end and token == '<END>':
            break
    return tokens if delim is None else delim.join(tokens)
| 3,040
| 29.108911
| 138
|
py
|
CLOSURE
|
CLOSURE-master/vr/models/seq2seq_att.py
|
#!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import math
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import (pack_padded_sequence,
pad_packed_sequence)
class Attn(nn.Module):
    """Additive attention over bidirectional encoder states.

    Scores every encoder position against a single decoder state, masks
    padded positions out, and returns softmax weights of shape (B, 1, T).
    """
    def __init__(self, hidden_size):
        super(Attn, self).__init__()
        self.hidden_size = hidden_size
        # Decoder state (H) + bidirectional encoder state (2H) -> H.
        self.attn = nn.Linear(self.hidden_size * 3, hidden_size)
        self.v = nn.Parameter(torch.rand(hidden_size))
        stdv = 1. / math.sqrt(self.v.size(0))
        self.v.data.normal_(mean=0, std=stdv)

    def forward(self, output, encoder_outputs, encoder_mask):
        num_steps = encoder_outputs.size(1)
        # Broadcast the single decoder state across every encoder step.
        queries = output.repeat(num_steps, 1, 1).transpose(0, 1)
        energies = self.score(queries, encoder_outputs)  # B*1*T
        # Push masked (padding) positions far below any real energy.
        energies = energies - 1000 * (encoder_mask[:, None, :] == 0).float()
        return F.softmax(energies, dim=2)

    def score(self, hidden, encoder_outputs):
        combined = torch.cat([hidden, encoder_outputs], 2)         # [B*T*3H]
        energy = torch.tanh(self.attn(combined)).transpose(2, 1)   # [B*H*T]
        v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1)  # [B*1*H]
        return torch.bmm(v, energy)                                # [B*1*T]
class Seq2SeqAtt(nn.Module):
def __init__(self,
null_token=0,
start_token=1,
end_token=2,
encoder_vocab_size=100,
decoder_vocab_size=100,
wordvec_dim=300,
hidden_dim=256,
rnn_num_layers=2,
rnn_dropout=0,
autoregressive=True,
):
super().__init__()
self.encoder_embed = nn.Embedding(encoder_vocab_size, wordvec_dim)
self.encoder_rnn = nn.LSTM(wordvec_dim, hidden_dim, rnn_num_layers,
dropout=rnn_dropout, bidirectional=True, batch_first=True)
self.decoder_embed = nn.Embedding(decoder_vocab_size, wordvec_dim)
self.decoder_rnn = nn.LSTM(wordvec_dim + 2 * hidden_dim, hidden_dim, rnn_num_layers,
dropout=rnn_dropout, batch_first=True)
self.decoder_linear = nn.Linear(3 * hidden_dim, decoder_vocab_size)
self.decoder_attn = Attn(hidden_dim)
self.rnn_num_layers = rnn_num_layers
self.NULL = null_token
self.START = start_token
self.END = end_token
self.multinomial_outputs = None
self.autoregressive = autoregressive
self.save_activations = False
def expand_encoder_vocab(self, token_to_idx, word2vec=None, std=0.01):
expand_embedding_vocab(self.encoder_embed, token_to_idx,
word2vec=word2vec, std=std)
def get_dims(self, x=None, y=None):
V_in = self.encoder_embed.num_embeddings
V_out = self.decoder_embed.num_embeddings
D = self.encoder_embed.embedding_dim
H = self.encoder_rnn.hidden_size
L = self.encoder_rnn.num_layers
N = x.size(0) if x is not None else None
N = y.size(0) if N is None and y is not None else N
T_in = x.size(1) if x is not None else None
T_out = y.size(1) if y is not None else None
return V_in, V_out, D, H, L, N, T_in, T_out
def encoder(self, x):
x, x_lengths, inverse_index = sort_for_rnn(x, null=self.NULL)
embed = self.encoder_embed(x)
packed = pack_padded_sequence(embed, x_lengths, batch_first=True)
out_packed, hidden = self.encoder_rnn(packed)
out, _ = pad_packed_sequence(out_packed, batch_first=True)
out = out[inverse_index]
hidden = [h[:,inverse_index] for h in hidden]
return out, hidden
def decoder(self, word_inputs, prev_hidden, encoder_outputs, encoder_mask):
hn, cn, an = prev_hidden
# 1 - rnn transition
word_embedded = self.decoder_embed(word_inputs)
if not self.autoregressive:
word_embedded = torch.zeros_like(word_embedded)
rnn_input = torch.cat((word_embedded, an), 1)[:, None, :]
output, (hnext, cnext) = self.decoder_rnn(rnn_input, (hn, cn))
output = output[:, 0, :]
# 2 - perform attention
attn_weights = self.decoder_attn(output, encoder_outputs, encoder_mask)
anext = attn_weights.bmm(encoder_outputs)[:, 0, :]
if self.save_activations:
self._attn_weights.append(attn_weights)
# 3 - compute output logits
logits = self.decoder_linear(torch.cat([output, anext], 1))
return logits, (hnext, cnext, anext)
def compute_loss(self, output_logprobs, y):
"""
Compute loss. We assume that the first element of the output sequence y is
a start token, and that each element of y is left-aligned and right-padded
with self.NULL out to T_out. We want the output_logprobs to predict the
sequence y, shifted by one timestep so that y[0] is fed to the network and
then y[1] is predicted. We also don't want to compute loss for padded
timesteps.
Inputs:
- output_logprobs: Variable of shape (N, T_out, V_out)
- y: LongTensor Variable of shape (N, T_out)
"""
V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(y=y)
output_logprobs = output_logprobs[:, :-1].contiguous()
y = y[:, 1:].contiguous()
losses = F.cross_entropy(output_logprobs.view(-1, V_out), y.view(-1), reduction='none')
losses = losses.view(N, T_out - 1)
losses *= (y != self.NULL).float()
return losses.sum(1)
def log_likelihood(self, x, y):
    """Teacher-forced per-sequence loss of outputs y given inputs x.

    Runs the decoder for T_out steps feeding the ground-truth tokens
    (teacher forcing) and scores the collected logits with compute_loss.
    """
    V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(y=y)
    encoder_outputs, _ = self.encoder(x)
    encoder_mask = x != self.NULL
    decoder_inputs = y
    # Initial decoder state: zero hidden/cell states plus a zero attention
    # context (2H wide — presumably matching a bidirectional encoder).
    decoder_hidden = (torch.zeros(L, N, H).to(x.device),
                      torch.zeros(L, N, H).to(x.device),
                      torch.zeros(N, 2 * H).to(x.device))  # attention state
    decoder_outputs = []
    for t in range(T_out):
        decoder_out, decoder_hidden = self.decoder(
            decoder_inputs[:, t], decoder_hidden,
            encoder_outputs, encoder_mask)
        decoder_outputs.append(decoder_out)
    # (N, T_out, V_out) stack of per-step logits.
    decoder_outputs = torch.stack(decoder_outputs, dim=1)
    loss = self.compute_loss(decoder_outputs, y)
    return loss
def forward(self, x, max_length=30, temperature=1.0, argmax=False):
    """Sample (or greedily decode) an output sequence for each input.

    Inputs:
    - x: (N, T_in) LongTensor of input tokens
    - max_length: maximum number of output timesteps
    - temperature: logit scaling before the softmax (affects sampling)
    - argmax: take the argmax instead of sampling
    Returns:
    - y: (N, max_length) generated tokens, NULL after END is emitted
    - y_logprobs: (N, max_length) log-probabilities of the chosen tokens
    """
    self._attn_weights = []
    V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(x=x)
    T_out = max_length
    encoded, _ = self.encoder(x)
    encoder_mask = x != self.NULL
    h, c, a = (torch.zeros(L, N, H).to(x.device),  # hidden state
               torch.zeros(L, N, H).to(x.device),  # cell state
               torch.zeros(N, 2 * H).to(x.device))  # attention state
    # buffers (on CPU currently)
    cur_input = Variable(x.data.new(N).fill_(self.START))
    y = torch.LongTensor(N, T_out).fill_(self.NULL)
    y[:, 0] = cur_input
    y_logprobs = torch.zeros((N, T_out))
    done = torch.ByteTensor(N).fill_(0)
    for t in range(1, T_out):
        # generate output
        logprobs, (h, c, a) = self.decoder(cur_input, (h, c, a), encoded, encoder_mask)
        logprobs = logprobs / temperature
        logprobs = F.log_softmax(logprobs, dim=1)
        if argmax:
            _, cur_output = logprobs.max(1)
        else:
            cur_output = torch.exp(logprobs).multinomial(1)[:, 0]
        # save output
        cur_output_data = cur_output.data.cpu()
        not_done = logical_not(done)
        # Only still-active sequences receive new tokens...
        y[not_done, t] = cur_output_data[not_done]
        # ...but logprobs are recorded for every row, finished ones included.
        y_logprobs[:, t] = logprobs[torch.arange(N), cur_output]
        done = logical_or(done, (cur_output_data == self.END).byte())
        cur_input = cur_output
        # stop if fully done
        if done.sum() == N:
            break
    return y.to(x.device), y_logprobs.to(x.device)
def logical_or(x, y):
    """Element-wise OR for {0,1} tensors via a saturating add."""
    combined = x + y
    return combined.clamp_(0, 1)
def logical_not(x):
    """Element-wise NOT: True exactly where x equals zero."""
    zero_mask = (x == 0)
    return zero_mask
def sort_for_rnn(x, null=0):
    """Sort a padded batch by descending length for pack_padded_sequence.

    Returns (sorted_x, length_list, inverse_index) where inverse_index
    restores the original batch order.
    """
    lengths = (x != null).sum(dim=1).long()
    sorted_lengths, perm = torch.sort(lengths, dim=0, descending=True)
    length_list = sorted_lengths.data.tolist()  # remove for pytorch 0.4+
    # Build the permutation that undoes `perm`.
    inverse = torch.LongTensor(perm.shape).fill_(0).to(x.device)
    for pos, src in enumerate(perm):
        inverse[src.data] = pos
    return x[perm], length_list, inverse
| 8,941
| 38.566372
| 138
|
py
|
CLOSURE
|
CLOSURE-master/vr/models/baselines.py
|
#!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from vr.models.layers import init_modules, ResidualBlock
class StackedAttention(nn.Module):
    """One hop of stacked attention over a spatial feature map.

    Projects image features v and query u into a shared hidden space,
    computes a spatial attention distribution, and returns the query
    refined by the attended visual context.
    """

    def __init__(self, input_dim, hidden_dim):
        super(StackedAttention, self).__init__()
        # 1x1 convs act as per-location linear projections.
        self.Wv = nn.Conv2d(input_dim, hidden_dim, kernel_size=1, padding=0)
        self.Wu = nn.Linear(input_dim, hidden_dim)
        self.Wp = nn.Conv2d(hidden_dim, 1, kernel_size=1, padding=0)
        self.hidden_dim = hidden_dim
        self.attention_maps = None  # last attention map, kept for inspection
        init_modules(self.modules(), init='normal')

    def forward(self, v, u):
        """
        Input:
        - v: N x D x H x W image feature map
        - u: N x D query vector
        Returns:
        - next_u: N x D refined query (u + attended visual context)
        """
        N, K = v.size(0), self.hidden_dim
        D, H, W = v.size(1), v.size(2), v.size(3)
        v_proj = self.Wv(v)  # N x K x H x W
        u_proj = self.Wu(u)  # N x K
        u_proj_expand = u_proj.view(N, K, 1, 1).expand(N, K, H, W)
        # Fix: F.tanh and dim-less F.softmax are deprecated (and removed in
        # recent PyTorch); use torch.tanh and an explicit softmax dim. The
        # explicit dim=1 matches the old implicit behavior on an (N, H*W)
        # input, so results are unchanged.
        h = torch.tanh(v_proj + u_proj_expand)
        p = F.softmax(self.Wp(h).view(N, H * W), dim=1).view(N, 1, H, W)
        self.attention_maps = p.data.clone()
        # Attention-weighted sum over spatial locations.
        v_tilde = (p.expand_as(v) * v).sum(3).sum(2).view(N, D)
        next_u = u + v_tilde
        return next_u
class LstmEncoder(nn.Module):
    """Encodes a token sequence with an LSTM and returns the hidden state
    at the last non-NULL position of each sequence."""

    def __init__(self, token_to_idx, wordvec_dim=300,
                 rnn_dim=256, rnn_num_layers=2, rnn_dropout=0):
        super(LstmEncoder, self).__init__()
        self.token_to_idx = token_to_idx
        self.NULL = token_to_idx['<NULL>']
        self.START = token_to_idx['<START>']
        self.END = token_to_idx['<END>']
        self.embed = nn.Embedding(len(token_to_idx), wordvec_dim)
        self.rnn = nn.LSTM(wordvec_dim, rnn_dim, rnn_num_layers,
                           dropout=rnn_dropout, batch_first=True)

    def expand_vocab(self, token_to_idx, word2vec=None, std=0.01):
        # NOTE(review): expand_embedding_vocab is not imported in this module;
        # calling this as-is would raise NameError — confirm intended source.
        expand_embedding_vocab(self.embed, token_to_idx,
                               word2vec=word2vec, std=std)

    def forward(self, x):
        """x: (N, T) LongTensor, right-padded with NULL. Returns (N, H)."""
        N, T = x.size()
        # Default to the final timestep if no NULL padding is found.
        idx = torch.LongTensor(N).fill_(T - 1)
        # Find the last non-null element in each sequence
        # (Python-level O(N*T) scan over CPU data).
        x_cpu = x.data.cpu()
        for i in range(N):
            for t in range(T - 1):
                if x_cpu[i, t] != self.NULL and x_cpu[i, t + 1] == self.NULL:
                    idx[i] = t
                    break
        idx = idx.type_as(x.data).long()
        idx = Variable(idx, requires_grad=False)
        hs, _ = self.rnn(self.embed(x))
        # Gather the hidden state at each sequence's last valid timestep.
        idx = idx.view(N, 1, 1).expand(N, 1, hs.size(2))
        H = hs.size(2)
        return hs.gather(1, idx).view(N, H)
def build_cnn(feat_dim=(1024, 14, 14),
              res_block_dim=128,
              num_res_blocks=0,
              proj_dim=512,
              pooling='maxpool2'):
    """Build a small CNN head over precomputed features.

    Returns (nn.Sequential, (C, H, W)) where the tuple is the output shape
    per example after the optional residual stack, 1x1 projection, and
    2x2 max-pooling.
    """
    C, H, W = feat_dim
    modules = []
    if num_res_blocks > 0:
        modules += [nn.Conv2d(C, res_block_dim, kernel_size=3, padding=1),
                    nn.ReLU(inplace=True)]
        C = res_block_dim
        modules += [ResidualBlock(C) for _ in range(num_res_blocks)]
    if proj_dim > 0:
        modules += [nn.Conv2d(C, proj_dim, kernel_size=1, padding=0),
                    nn.ReLU(inplace=True)]
        C = proj_dim
    if pooling == 'maxpool2':
        modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
        H, W = H // 2, W // 2
    return nn.Sequential(*modules), (C, H, W)
def build_mlp(input_dim, hidden_dims, output_dim,
              use_batchnorm=False, dropout=0):
    """Build an MLP: optional dropout/batchnorm on the input, then
    [Linear -> (BN) -> (Dropout) -> ReLU] per hidden dim, then a final
    Linear to output_dim. Returns an nn.Sequential."""
    modules = []
    current = input_dim
    if dropout > 0:
        modules.append(nn.Dropout(p=dropout))
    if use_batchnorm:
        modules.append(nn.BatchNorm1d(input_dim))
    for hidden in hidden_dims:
        modules.append(nn.Linear(current, hidden))
        if use_batchnorm:
            modules.append(nn.BatchNorm1d(hidden))
        if dropout > 0:
            modules.append(nn.Dropout(p=dropout))
        modules.append(nn.ReLU(inplace=True))
        current = hidden
    modules.append(nn.Linear(current, output_dim))
    return nn.Sequential(*modules)
class LstmModel(nn.Module):
    """Question-only baseline: LSTM question encoder + MLP classifier.

    The image features passed to forward() are accepted for interface
    parity with the other baselines but are ignored.
    """

    def __init__(self, vocab,
                 rnn_wordvec_dim=300, rnn_dim=256, rnn_num_layers=2, rnn_dropout=0,
                 fc_use_batchnorm=False, fc_dropout=0, fc_dims=(1024,)):
        super(LstmModel, self).__init__()
        self.rnn = LstmEncoder(
            token_to_idx=vocab['question_token_to_idx'],
            wordvec_dim=rnn_wordvec_dim,
            rnn_dim=rnn_dim,
            rnn_num_layers=rnn_num_layers,
            rnn_dropout=rnn_dropout)
        self.classifier = build_mlp(
            input_dim=rnn_dim,
            hidden_dims=fc_dims,
            output_dim=len(vocab['answer_token_to_idx']),
            use_batchnorm=fc_use_batchnorm,
            dropout=fc_dropout)

    def forward(self, questions, feats):
        # feats is unused by design (question-only baseline).
        question_features = self.rnn(questions)
        return self.classifier(question_features)
class CnnLstmModel(nn.Module):
    """CNN+LSTM baseline: concatenates the encoded question with flattened
    CNN image features and classifies the joint vector with an MLP."""

    def __init__(self, vocab,
                 rnn_wordvec_dim=300, rnn_dim=256, rnn_num_layers=2, rnn_dropout=0,
                 cnn_feat_dim=(1024,14,14),
                 cnn_res_block_dim=128, cnn_num_res_blocks=0,
                 cnn_proj_dim=512, cnn_pooling='maxpool2',
                 fc_dims=(1024,), fc_use_batchnorm=False, fc_dropout=0):
        super(CnnLstmModel, self).__init__()
        self.rnn = LstmEncoder(
            token_to_idx=vocab['question_token_to_idx'],
            wordvec_dim=rnn_wordvec_dim,
            rnn_dim=rnn_dim,
            rnn_num_layers=rnn_num_layers,
            rnn_dropout=rnn_dropout)
        self.cnn, (C, H, W) = build_cnn(
            feat_dim=cnn_feat_dim,
            res_block_dim=cnn_res_block_dim,
            num_res_blocks=cnn_num_res_blocks,
            proj_dim=cnn_proj_dim,
            pooling=cnn_pooling)
        self.classifier = build_mlp(
            input_dim=C * H * W + rnn_dim,
            hidden_dims=fc_dims,
            output_dim=len(vocab['answer_token_to_idx']),
            use_batchnorm=fc_use_batchnorm,
            dropout=fc_dropout)

    def forward(self, questions, feats):
        N = questions.size(0)
        assert N == feats.size(0)
        q_encoding = self.rnn(questions)
        img_encoding = self.cnn(feats).view(N, -1)
        joint = torch.cat([q_encoding, img_encoding], 1)
        return self.classifier(joint)
class CnnLstmSaModel(nn.Module):
    """CNN + LSTM + Stacked Attention (SAN-style) VQA baseline.

    Projects image features to the question-embedding dimension, refines
    the question vector through a chain of StackedAttention hops, then
    classifies the result with an MLP.
    """

    def __init__(self, vocab,
                 rnn_wordvec_dim=300, rnn_dim=256, rnn_num_layers=2, rnn_dropout=0,
                 cnn_feat_dim=(1024,14,14),
                 stacked_attn_dim=512, num_stacked_attn=2,
                 fc_use_batchnorm=False, fc_dropout=0, fc_dims=(1024,)):
        super(CnnLstmSaModel, self).__init__()
        rnn_kwargs = {
            'token_to_idx': vocab['question_token_to_idx'],
            'wordvec_dim': rnn_wordvec_dim,
            'rnn_dim': rnn_dim,
            'rnn_num_layers': rnn_num_layers,
            'rnn_dropout': rnn_dropout,
        }
        self.rnn = LstmEncoder(**rnn_kwargs)
        C, H, W = cnn_feat_dim
        self.image_proj = nn.Conv2d(C, rnn_dim, kernel_size=1, padding=0)
        # A plain list plus explicit add_module (rather than nn.ModuleList)
        # keeps the historical 'stacked-attn-%d' parameter names, so
        # existing checkpoints still load.
        self.stacked_attns = []
        for i in range(num_stacked_attn):
            sa = StackedAttention(rnn_dim, stacked_attn_dim)
            self.stacked_attns.append(sa)
            self.add_module('stacked-attn-%d' % i, sa)
        classifier_args = {
            'input_dim': rnn_dim,
            'hidden_dims': fc_dims,
            'output_dim': len(vocab['answer_token_to_idx']),
            'use_batchnorm': fc_use_batchnorm,
            'dropout': fc_dropout,
        }
        self.classifier = build_mlp(**classifier_args)
        init_modules(self.modules(), init='normal')

    def forward(self, questions, feats):
        u = self.rnn(questions)  # question encoding, (N, rnn_dim)
        v = self.image_proj(feats)  # image features projected to rnn_dim
        for sa in self.stacked_attns:
            u = sa(v, u)  # each hop refines the query with attended context
        scores = self.classifier(u)
        return scores
| 8,657
| 34.052632
| 138
|
py
|
CLOSURE
|
CLOSURE-master/vr/models/filmed_net.py
|
#!/usr/bin/env python3
import math
import pprint
from termcolor import colored
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.models
from vr.models.layers import init_modules, GlobalAveragePool, Flatten
from vr.models.layers import build_classifier, build_stem
import vr.programs
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class FiLM(nn.Module):
    """
    A Feature-wise Linear Modulation Layer from
    'FiLM: Visual Reasoning with a General Conditioning Layer'.

    `use_gammas` selects how raw gamma coefficients are transformed:
    'identity' (as-is), 'sigmoid', 'tanh' (as 1 + 2*tanh), or 'nope'
    (gammas ignored, scale fixed to 1).
    """

    def __init__(self, use_gammas='identity'):
        super().__init__()
        self.use_gammas = use_gammas

    def forward(self, x, gammas, betas):
        # Broadcast per-channel coefficients over the spatial dims.
        scale = gammas.unsqueeze(2).unsqueeze(3).expand_as(x)
        mode = self.use_gammas
        if mode == 'sigmoid':
            scale = torch.sigmoid(scale)
        elif mode == 'tanh':
            scale = 1 + 2 * torch.tanh(scale)
        elif mode == 'nope':
            scale = 1
        elif mode != 'identity':
            raise ValueError()
        shift = betas.unsqueeze(2).unsqueeze(3).expand_as(x)
        return (scale * x) + shift
class FiLMedNet(nn.Module):
    """FiLM-conditioned CNN for visual reasoning.

    A stem CNN embeds precomputed image features; `num_modules` FiLMed
    residual blocks are modulated by per-module (gamma, beta) parameters
    taken from `film`; a classifier maps the final feature map to answer
    logits. With condition_method == 'concat', the FiLM parameters are
    instead broadcast and concatenated as extra channels.
    """

    def __init__(self, vocab, feature_dim=(1024, 14, 14),
                 stem_num_layers=2,
                 stem_batchnorm=False,
                 stem_kernel_size=3,
                 stem_subsample_layers=None,
                 stem_stride=1,
                 stem_padding=None,
                 stem_dim=64,
                 num_modules=4,
                 module_num_layers=1,
                 module_dim=128,
                 module_residual=True,
                 module_intermediate_batchnorm=False,
                 module_batchnorm=False,
                 module_batchnorm_affine=False,
                 module_dropout=0,
                 module_input_proj=1,
                 module_kernel_size=3,
                 classifier_proj_dim=512,
                 classifier_downsample='maxpool2',
                 classifier_fc_layers=(1024,),
                 classifier_batchnorm=False,
                 classifier_dropout=0,
                 condition_method='bn-film',
                 condition_pattern=[],
                 use_gamma=True,
                 use_beta=True,
                 use_coords=1,
                 debug_every=float('inf'),
                 print_verbose_every=float('inf'),
                 verbose=True,
                 ):
        super(FiLMedNet, self).__init__()
        num_answers = len(vocab['answer_idx_to_token'])
        self.stem_times = []
        self.module_times = []
        self.classifier_times = []
        self.timing = False
        self.num_modules = num_modules
        self.module_num_layers = module_num_layers
        self.module_batchnorm = module_batchnorm
        self.module_dim = module_dim
        self.condition_method = condition_method
        self.use_gamma = use_gamma
        self.use_beta = use_beta
        self.use_coords_freq = use_coords
        self.debug_every = debug_every
        self.print_verbose_every = print_verbose_every
        # Initialize helper variables
        self.stem_use_coords = (stem_stride == 1) and (self.use_coords_freq > 0)
        self.condition_pattern = condition_pattern
        if len(condition_pattern) == 0:
            # Default: condition every layer of every module (unless 'concat').
            self.condition_pattern = []
            for i in range(self.module_num_layers * self.num_modules):
                self.condition_pattern.append(self.condition_method != 'concat')
        else:
            self.condition_pattern = [i > 0 for i in self.condition_pattern]
        self.extra_channel_freq = self.use_coords_freq
        self.block = FiLMedResBlock
        self.num_cond_maps = 2 * self.module_dim if self.condition_method == 'concat' else 0
        self.fwd_count = 0
        self.num_extra_channels = 2 if self.use_coords_freq > 0 else 0
        if self.debug_every <= -1:
            self.print_verbose_every = 1
        # Initialize stem
        stem_feature_dim = feature_dim[0] + self.stem_use_coords * self.num_extra_channels
        self.stem = build_stem(
            stem_feature_dim, stem_dim, module_dim,
            num_layers=stem_num_layers, with_batchnorm=stem_batchnorm,
            kernel_size=stem_kernel_size, stride=stem_stride, padding=stem_padding,
            subsample_layers=stem_subsample_layers)
        # Probe the stem with a dummy tensor to discover the module grid size.
        tmp = self.stem(Variable(torch.zeros([1, feature_dim[0], feature_dim[1], feature_dim[2]])))
        module_H = tmp.size(2)
        module_W = tmp.size(3)
        self.stem_coords = coord_map((feature_dim[1], feature_dim[2]))
        self.coords = coord_map((module_H, module_W))
        # Fallbacks used when gamma/beta conditioning is disabled.
        self.default_weight = torch.ones(1, 1, self.module_dim).to(device)
        self.default_bias = torch.zeros(1, 1, self.module_dim).to(device)
        # Initialize FiLMed network body
        self.function_modules = {}
        self.vocab = vocab
        for fn_num in range(self.num_modules):
            with_cond = self.condition_pattern[self.module_num_layers * fn_num:
                                               self.module_num_layers * (fn_num + 1)]
            mod = self.block(module_dim, with_residual=module_residual,
                             with_intermediate_batchnorm=module_intermediate_batchnorm, with_batchnorm=module_batchnorm,
                             with_cond=with_cond,
                             dropout=module_dropout,
                             num_extra_channels=self.num_extra_channels,
                             extra_channel_freq=self.extra_channel_freq,
                             with_input_proj=module_input_proj,
                             num_cond_maps=self.num_cond_maps,
                             kernel_size=module_kernel_size,
                             batchnorm_affine=module_batchnorm_affine,
                             num_layers=self.module_num_layers,
                             condition_method=condition_method,
                             debug_every=self.debug_every)
            self.add_module(str(fn_num), mod)
            self.function_modules[fn_num] = mod
        # Initialize output classifier
        self.classifier = build_classifier(module_dim + self.num_extra_channels, module_H, module_W,
                                           num_answers, classifier_fc_layers, classifier_proj_dim,
                                           classifier_downsample, with_batchnorm=classifier_batchnorm,
                                           dropout=classifier_dropout)
        init_modules(self.modules())

    def forward(self, x, film, save_activations=False):
        """Run stem -> FiLMed blocks -> classifier and return answer logits.

        Inputs:
        - x: (N, C, H, W) image feature tensor
        - film: per-module conditioning parameters; the first
          2*module_dim entries along the last dim are used
        - save_activations: stash intermediate tensors on self
        """
        # Initialize forward pass and externally viewable activations
        self.fwd_count += 1
        if save_activations:
            self.feats = None
            self.module_outputs = []
            self.cf_input = None
        if self.debug_every <= -2:
            # NOTE(review): pdb is referenced here but never imported in this
            # module — the debug path would raise NameError. Confirm intent.
            pdb.set_trace()
        # Prepare FiLM layers
        gammas = None
        betas = None
        if self.condition_method == 'concat':
            # Use parameters usually used to condition via FiLM instead to
            # condition via concatenation.
            cond_params = film[:, :, :2 * self.module_dim]
            cond_maps = cond_params.unsqueeze(3).unsqueeze(4).expand(cond_params.size() + x.size()[-2:])
        else:
            gammas, betas = torch.split(film[:, :, :2 * self.module_dim], self.module_dim, dim=-1)
            if not self.use_gamma:
                gammas = self.default_weight.expand_as(gammas)
            if not self.use_beta:
                betas = self.default_bias.expand_as(betas)
        # Propagate up image features CNN
        stem_batch_coords = None
        # Bug fix: this was previously misspelled `batch_coods`, which left
        # batch_coords undefined (NameError in the module loop below)
        # whenever use_coords_freq == 0.
        batch_coords = None
        if self.use_coords_freq > 0:
            stem_batch_coords = self.stem_coords.unsqueeze(0).expand(
                torch.Size((x.size(0), *self.stem_coords.size())))
            batch_coords = self.coords.unsqueeze(0).expand(
                torch.Size((x.size(0), *self.coords.size())))
        if self.stem_use_coords:
            x = torch.cat([x, stem_batch_coords], 1)
        feats = self.stem(x)
        if save_activations:
            self.feats = feats
        N, _, H, W = feats.size()
        # Propagate up the network from low-to-high numbered blocks
        module_inputs = torch.zeros(feats.size()).unsqueeze(1).expand(
            N, self.num_modules, self.module_dim, H, W).to(device)
        module_inputs[:, 0] = feats
        for fn_num in range(self.num_modules):
            if self.condition_method == 'concat':
                layer_output = self.function_modules[fn_num](module_inputs[:, fn_num],
                                                             extra_channels=batch_coords, cond_maps=cond_maps[:, fn_num])
            else:
                layer_output = self.function_modules[fn_num](module_inputs[:, fn_num],
                                                             gammas[:, fn_num, :], betas[:, fn_num, :], batch_coords)
            # Store for future computation
            if save_activations:
                self.module_outputs.append(layer_output)
            if fn_num == (self.num_modules - 1):
                final_module_output = layer_output
            else:
                # Out-of-place update keeps autograd history intact.
                module_inputs_updated = module_inputs.clone()
                module_inputs_updated[:, fn_num + 1] = module_inputs_updated[:, fn_num + 1] + layer_output
                module_inputs = module_inputs_updated
        if self.debug_every <= -2:
            pdb.set_trace()
        # Run the final classifier over the resultant, post-modulated features.
        if self.use_coords_freq > 0:
            final_module_output = torch.cat([final_module_output, batch_coords], 1)
        if save_activations:
            self.cf_input = final_module_output
        out = self.classifier(final_module_output)
        if ((self.fwd_count % self.debug_every) == 0) or (self.debug_every <= -1):
            pdb.set_trace()
        return out
class FiLMedResBlock(nn.Module):
    """Residual conv block whose activations can be FiLM-modulated at a
    configurable point in the computation.

    condition_method selects where FiLM is applied: 'block-input-film',
    'conv-film', 'bn-film', 'relu-film', 'block-output-film'; 'concat'
    conditioning is handled by the caller via cond_maps instead.
    """

    def __init__(self, in_dim, out_dim=None, with_residual=True, with_intermediate_batchnorm=False, with_batchnorm=True,
                 with_cond=[False], dropout=0, num_extra_channels=0, extra_channel_freq=1,
                 with_input_proj=0, num_cond_maps=0, kernel_size=3, batchnorm_affine=False,
                 num_layers=1, condition_method='bn-film', debug_every=float('inf')):
        if out_dim is None:
            out_dim = in_dim
        super(FiLMedResBlock, self).__init__()
        self.with_residual = with_residual
        self.with_intermediate_batchnorm = with_intermediate_batchnorm
        self.with_batchnorm = with_batchnorm
        self.with_cond = with_cond
        self.dropout = dropout
        self.extra_channel_freq = 0 if num_extra_channels == 0 else extra_channel_freq
        self.with_input_proj = with_input_proj  # Kernel size of input projection
        self.num_cond_maps = num_cond_maps
        self.kernel_size = kernel_size
        self.batchnorm_affine = batchnorm_affine
        self.num_layers = num_layers
        self.condition_method = condition_method
        self.debug_every = debug_every
        # Only odd kernels (symmetric 'same' padding) and a single layer
        # are implemented.
        if self.kernel_size % 2 == 0:
            raise(NotImplementedError)
        if self.num_layers >= 2:
            raise(NotImplementedError)
        if self.condition_method == 'block-input-film' and self.with_cond[0]:
            self.film = FiLM()
        if self.with_input_proj:
            # with_input_proj doubles as the projection's kernel size.
            self.input_proj = nn.Conv2d(in_dim + (num_extra_channels if self.extra_channel_freq >= 1 else 0),
                                        in_dim, kernel_size=self.with_input_proj, padding=self.with_input_proj // 2)
        self.conv1 = nn.Conv2d(in_dim + self.num_cond_maps +
                               (num_extra_channels if self.extra_channel_freq >= 2 else 0),
                               out_dim, kernel_size=self.kernel_size,
                               padding=self.kernel_size // 2)
        if self.condition_method == 'conv-film' and self.with_cond[0]:
            self.film = FiLM()
        if self.with_intermediate_batchnorm:
            # Affine BN params are dropped when FiLM conditions this block.
            self.bn0 = nn.BatchNorm2d(in_dim, affine=((not self.with_cond[0]) or self.batchnorm_affine))
        if self.with_batchnorm:
            self.bn1 = nn.BatchNorm2d(out_dim, affine=((not self.with_cond[0]) or self.batchnorm_affine))
        if self.condition_method == 'bn-film' and self.with_cond[0]:
            self.film = FiLM()
        if dropout > 0:
            self.drop = nn.Dropout2d(p=self.dropout)
        if ((self.condition_method == 'relu-film' or self.condition_method == 'block-output-film')
             and self.with_cond[0]):
            self.film = FiLM()
        init_modules(self.modules())

    def forward(self, x, gammas=None, betas=None, extra_channels=None, cond_maps=None):
        if self.debug_every <= -2:
            # NOTE(review): pdb is referenced but not imported in this module.
            pdb.set_trace()
        if self.condition_method == 'block-input-film' and self.with_cond[0]:
            x = self.film(x, gammas, betas)
        # ResBlock input projection
        if self.with_input_proj:
            if extra_channels is not None and self.extra_channel_freq >= 1:
                x = torch.cat([x, extra_channels], 1)
            x = self.input_proj(x)
            if self.with_intermediate_batchnorm:
                x = self.bn0(x)
            x = F.relu(x)
        out = x
        # ResBlock body
        if cond_maps is not None:
            out = torch.cat([out, cond_maps], 1)
        if extra_channels is not None and self.extra_channel_freq >= 2:
            out = torch.cat([out, extra_channels], 1)
        out = self.conv1(out)
        if self.condition_method == 'conv-film' and self.with_cond[0]:
            out = self.film(out, gammas, betas)
        if self.with_batchnorm:
            out = self.bn1(out)
        if self.condition_method == 'bn-film' and self.with_cond[0]:
            out = self.film(out, gammas, betas)
        if self.dropout > 0:
            out = self.drop(out)
        out = F.relu(out)
        if self.condition_method == 'relu-film' and self.with_cond[0]:
            out = self.film(out, gammas, betas)
        # ResBlock remainder
        if self.with_residual:
            out = x + out
        if self.condition_method == 'block-output-film' and self.with_cond[0]:
            out = self.film(out, gammas, betas)
        return out
class ConcatFiLMedResBlock(nn.Module):
    """FiLMed residual block over the concatenation of several inputs.

    Fuses num_input equal-depth feature maps down to in_dim channels with
    a 1x1 conv, then delegates to a FiLMedResBlock.
    """

    def __init__(self, num_input, in_dim, out_dim=None, with_residual=True, with_intermediate_batchnorm=False, with_batchnorm=True,
                 with_cond=[False], dropout=0, num_extra_channels=0, extra_channel_freq=1,
                 with_input_proj=0, num_cond_maps=0, kernel_size=3, batchnorm_affine=False,
                 num_layers=1, condition_method='bn-film', debug_every=float('inf')):
        super(ConcatFiLMedResBlock, self).__init__()
        self.proj = nn.Conv2d(num_input * in_dim, in_dim, kernel_size=1, padding=0)
        self.tfilmedResBlock = FiLMedResBlock(
            in_dim=in_dim, out_dim=out_dim, with_residual=with_residual,
            with_intermediate_batchnorm=with_intermediate_batchnorm,
            with_batchnorm=with_batchnorm, with_cond=with_cond, dropout=dropout,
            num_extra_channels=num_extra_channels,
            extra_channel_freq=extra_channel_freq,
            with_input_proj=with_input_proj, num_cond_maps=num_cond_maps,
            kernel_size=kernel_size, batchnorm_affine=batchnorm_affine,
            num_layers=num_layers, condition_method=condition_method,
            debug_every=debug_every)

    def forward(self, x, gammas=None, betas=None, extra_channels=None, cond_maps=None):
        fused = F.relu(self.proj(torch.cat(x, 1)))  # concatenate along depth
        return self.tfilmedResBlock(fused, gammas=gammas, betas=betas,
                                    extra_channels=extra_channels, cond_maps=cond_maps)
class SharedFiLMedModule(nn.Module):
    """Takes 3 inputs:
    - the word
    - the left input
    - the right input

    A FiLMed conv block whose modulation coefficients are computed from
    the concatenation (word embedding, left input, right input); sharing
    one instance across words yields per-word behavior via the embedding.
    """

    def __init__(self, dim, kernel_size=3,
                 use_gammas='identity', num_layers=1, with_residual=True,
                 pool='mean', post_linear=False, learn_embeddings=True):
        super().__init__()
        if kernel_size % 2 == 0 or post_linear:
            raise NotImplementedError()
        if learn_embeddings:
            # assumes word ids are < 100 — TODO confirm vocabulary size
            self.embed = nn.Embedding(100, dim)
        else:
            self.embed = None
        self.film = FiLM(use_gammas)
        # Two convs (and two FiLM-coefficient MLPs) per layer.
        for i in range(2 * num_layers):
            conv = nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=kernel_size // 2)
            self.add_module('conv' + str(i), conv)
            film_computer = nn.Sequential(
                nn.Linear(3 * dim, 3 * dim),
                nn.ReLU(),
                nn.Linear(3 * dim, 2 * dim))
            self.add_module('film_computer' + str(i), film_computer)
        self.num_layers = num_layers
        self.with_residual = with_residual
        self.dim = dim
        self.pool = pool

    def forward(self, x, word, left_inp=None, right_inp=None):
        if left_inp is None:
            # assuming that interface dimensionality if equal to that
            # of stem
            left_inp = torch.zeros_like(x)[:, :, 0, 0]
        if right_inp is None:
            right_inp = torch.zeros_like(left_inp)
        # If embeddings are not learned, `word` is used as the embedding.
        embedding = self.embed(word) if self.embed else word
        inp = torch.cat([embedding, left_inp, right_inp], 1)
        for i in range(self.num_layers):
            film0_coeffs = getattr(self, 'film_computer{}'.format(2 * i))(inp)
            film1_coeffs = getattr(self, 'film_computer{}'.format(2 * i + 1))(inp)
            conv1 = getattr(self, 'conv{}'.format(2 * i))
            conv2 = getattr(self, 'conv{}'.format(2 * i + 1))
            # a bit ugly now cause we apply film to the input of conv1
            out0 = self.film(x, film0_coeffs[:, :self.dim], film0_coeffs[:, self.dim:])
            out1 = F.relu(conv1(out0))
            out1 = self.film(out1, film1_coeffs[:, :self.dim], film1_coeffs[:, self.dim:])
            out2 = F.relu((x if self.with_residual else 0) + conv2(out1))
            x = out2
        # Spatial pooling produces the module's vector output.
        if self.pool == 'mean':
            res = x.mean(3).mean(2)
        elif self.pool == 'max':
            res = x.max(3)[0].max(2)[0]
        else:
            raise ValueError()
        return res
class FiLMModule(nn.Module):
    """Binds a fixed word id to a SharedFiLMedModule, yielding a
    per-word module with the plain (x, left, right) interface."""

    def __init__(self, shared_film_module, word):
        super().__init__()
        self.shared_film_module = shared_film_module
        self.word = torch.LongTensor([word]).to(device)

    def forward(self, x, left_inp=None, right_inp=None):
        result = self.shared_film_module(x, self.word, left_inp, right_inp)
        return result
def coord_map(shape, start=-1, end=1):
    """
    Given a 2d shape tuple (m, n), returns a (2, m, n) tensor holding x
    and y coordinate maps, each ranging from `start` to `end` along its
    respective axis.
    """
    m, n = shape
    x_coord_row = torch.linspace(start, end, steps=n).to(device)
    y_coord_row = torch.linspace(start, end, steps=m).to(device)
    # Broadcast the 1-d ramps into full m x n maps with a leading channel dim.
    x_coords = x_coord_row.unsqueeze(0).expand(torch.Size((m, n))).unsqueeze(0)
    y_coords = y_coord_row.unsqueeze(1).expand(torch.Size((m, n))).unsqueeze(0)
    return Variable(torch.cat([x_coords, y_coords], 0))
| 19,181
| 42.202703
| 133
|
py
|
CLOSURE
|
CLOSURE-master/vr/models/seq2seq.py
|
#!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Seq2Seq(nn.Module):
    """LSTM encoder-decoder (no attention) for sequence transduction.

    The decoder is conditioned on a fixed-size encoding of the input by
    concatenating it to the embedded target token at every timestep.
    """

    def __init__(self,
                 encoder_vocab_size=100,
                 decoder_vocab_size=100,
                 wordvec_dim=300,
                 hidden_dim=256,
                 rnn_num_layers=2,
                 rnn_dropout=0,
                 null_token=0,
                 start_token=1,
                 end_token=2,
                 encoder_embed=None
                 ):
        super(Seq2Seq, self).__init__()
        self.encoder_embed = nn.Embedding(encoder_vocab_size, wordvec_dim)
        self.encoder_rnn = nn.LSTM(wordvec_dim, hidden_dim, rnn_num_layers,
                                   dropout=rnn_dropout, batch_first=True)
        self.decoder_embed = nn.Embedding(decoder_vocab_size, wordvec_dim)
        self.decoder_rnn = nn.LSTM(wordvec_dim + hidden_dim, hidden_dim, rnn_num_layers,
                                   dropout=rnn_dropout, batch_first=True)
        self.decoder_rnn_new = nn.LSTM(hidden_dim, hidden_dim, rnn_num_layers,
                                       dropout=rnn_dropout, batch_first=True)
        self.decoder_linear = nn.Linear(hidden_dim, decoder_vocab_size)
        self.NULL = null_token
        self.START = start_token
        self.END = end_token
        self.multinomial_outputs = None

    def expand_encoder_vocab(self, token_to_idx, word2vec=None, std=0.01):
        # NOTE(review): expand_embedding_vocab is not imported in this module;
        # calling this would raise NameError — confirm the intended helper.
        expand_embedding_vocab(self.encoder_embed, token_to_idx,
                               word2vec=word2vec, std=std)

    def get_dims(self, x=None, y=None):
        """Return the common dimension bundle; entries derived from x/y
        are None when the corresponding tensor is not supplied."""
        V_in = self.encoder_embed.num_embeddings
        V_out = self.decoder_embed.num_embeddings
        D = self.encoder_embed.embedding_dim
        H = self.encoder_rnn.hidden_size
        L = self.encoder_rnn.num_layers
        N = x.size(0) if x is not None else None
        N = y.size(0) if N is None and y is not None else N
        T_in = x.size(1) if x is not None else None
        T_out = y.size(1) if y is not None else None
        return V_in, V_out, D, H, L, N, T_in, T_out

    def before_rnn(self, x, replace=0):
        # TODO: Use PackedSequence instead of manually plucking out the last
        # non-NULL entry of each sequence; it is cleaner and more efficient.
        N, T = x.size()
        idx = torch.LongTensor(N).fill_(T - 1)
        # Find the last non-null element in each sequence. Is there a clean
        # way to do this?
        x_cpu = x.cpu()
        for i in range(N):
            for t in range(T - 1):
                if x_cpu.data[i, t] != self.NULL and x_cpu.data[i, t + 1] == self.NULL:
                    idx[i] = t
                    break
        idx = idx.type_as(x.data)
        # Replace NULL padding so the embedding lookup is well-defined.
        x[x.data == self.NULL] = replace
        return x, Variable(idx)

    def encoder(self, x):
        """Encode x, returning the hidden state at each sequence's last
        valid timestep, shape (N, H)."""
        V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(x=x)
        x, idx = self.before_rnn(x)
        embed = self.encoder_embed(x)
        h0 = Variable(torch.zeros(L, N, H).type_as(embed.data))
        c0 = Variable(torch.zeros(L, N, H).type_as(embed.data))
        out, _ = self.encoder_rnn(embed, (h0, c0))
        # Pull out the hidden state for the last non-null value in each input
        idx = idx.view(N, 1, 1).expand(N, 1, H)
        return out.gather(1, idx).view(N, H)

    def decoder(self, encoded, y, h0=None, c0=None):
        """Teacher-forced decoding: feeds [encoded; embed(y_t)] per step.

        Returns (output_logprobs, h_T, c_T) with output_logprobs of shape
        (N, T_out, V_out). Despite the name, these are raw linear-layer
        scores, not normalized log-probabilities.
        """
        V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(y=y)
        if T_out > 1:
            y, _ = self.before_rnn(y)
        y_embed = self.decoder_embed(y)
        encoded_repeat = encoded.view(N, 1, H).expand(N, T_out, H)
        rnn_input = torch.cat([encoded_repeat, y_embed], 2)
        if h0 is None:
            h0 = Variable(torch.zeros(L, N, H).type_as(encoded.data))
        if c0 is None:
            c0 = Variable(torch.zeros(L, N, H).type_as(encoded.data))
        rnn_output, (ht, ct) = self.decoder_rnn(rnn_input, (h0, c0))
        rnn_output_2d = rnn_output.contiguous().view(N * T_out, H)
        output_logprobs = self.decoder_linear(rnn_output_2d).view(N, T_out, V_out)
        return output_logprobs, ht, ct

    def compute_loss(self, output_logprobs, y):
        """
        Compute loss. We assume that the first element of the output sequence y is
        a start token, and that each element of y is left-aligned and right-padded
        with self.NULL out to T_out. We want the output_logprobs to predict the
        sequence y, shifted by one timestep so that y[0] is fed to the network and
        then y[1] is predicted. We also don't want to compute loss for padded
        timesteps.
        Inputs:
        - output_logprobs: Variable of shape (N, T_out, V_out)
        - y: LongTensor Variable of shape (N, T_out)
        """
        self.multinomial_outputs = None
        V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(y=y)
        mask = y.data != self.NULL
        # Targets: y shifted left by one (the start token is never a target).
        y_mask = Variable(torch.Tensor(N, T_out).fill_(0).type_as(mask))
        y_mask[:, 1:] = mask[:, 1:]
        y_masked = y[y_mask]
        # Predictions: logits at the step *before* each target position.
        out_mask = Variable(torch.Tensor(N, T_out).fill_(0).type_as(mask))
        out_mask[:, :-1] = mask[:, 1:]
        out_mask = out_mask.view(N, T_out, 1).expand(N, T_out, V_out)
        out_masked = output_logprobs[out_mask].view(-1, V_out)
        loss = F.cross_entropy(out_masked, y_masked)
        return loss

    def forward(self, x, y):
        """Teacher-forced training loss for target sequences y given x."""
        encoded = self.encoder(x)
        V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(x=x)
        T_out = 15
        encoded_repeat = encoded.view(N, 1, H).expand(N, T_out, H)
        h0 = Variable(torch.zeros(L, N, H).type_as(encoded.data))
        c0 = Variable(torch.zeros(L, N, H).type_as(encoded.data))
        # NOTE(review): the decoder_rnn_new pass below computes rnn_output
        # but the result is never used — looks like dead computation;
        # confirm whether it is intentional.
        rnn_output, (ht, ct) = self.decoder_rnn_new(encoded_repeat, (h0, c0))
        output_logprobs, _, _ = self.decoder(encoded, y)
        loss = self.compute_loss(output_logprobs, y)
        return loss

    def reinforce_sample(self, x, max_length=30, temperature=1.0, argmax=False):
        """Sample (or argmax-decode) output sequences token by token."""
        N, T = x.size(0), max_length
        encoded = self.encoder(x)
        y = torch.LongTensor(N, T).fill_(self.NULL)
        y_logprobs = torch.Tensor(N, T).fill_(0.)
        done = torch.ByteTensor(N).fill_(0)
        cur_input = Variable(x.data.new(N, 1).fill_(self.START))
        h, c = None, None
        for t in range(T):
            # generate output
            logprobs, h, c = self.decoder(encoded, cur_input, h0=h, c0=c)
            logprobs = logprobs[:, 0, :]
            logprobs = logprobs / temperature
            logprobs = F.log_softmax(logprobs, dim=1)
            if argmax:
                _, cur_output = logprobs.max(1, keepdim=True)
            else:
                cur_output = torch.exp(logprobs).multinomial(1)  # Now N x 1
            # save output
            cur_output_data = cur_output.data.cpu()
            not_done = logical_not(done)
            y[not_done, t] = cur_output_data[not_done, 0]
            # NOTE(review): mixing a boolean mask with a full-length index
            # tensor here; the shapes only line up when no sequence is done
            # yet. The gather-then-mask pattern used in baselines.py looks
            # like the intended semantics — confirm.
            y_logprobs[not_done, t] = logprobs[not_done, cur_output[:, 0]]
            done = logical_or(done, cur_output_data[:, 0] == self.END)
            cur_input = cur_output
            # stop if fully done
            if done.sum() == N:
                break
        return y.type_as(x.data), y_logprobs.to(x.device)
def logical_and(x, y):
    """Element-wise AND for {0,1} tensors via multiplication."""
    product = x * y
    return product
def logical_or(x, y):
    """Element-wise OR for {0,1} tensors via a saturating add."""
    total = x + y
    return total.clamp_(0, 1)
def logical_not(x):
    """Element-wise NOT: True exactly where x equals zero."""
    inverted = (x == 0)
    return inverted
| 7,657
| 38.885417
| 138
|
py
|
CLOSURE
|
CLOSURE-master/vr/models/layers.py
|
#!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import kaiming_normal_, kaiming_uniform_
class SequentialSaveActivations(nn.Sequential):
    """nn.Sequential that records every intermediate activation.

    After a forward pass, self.outputs[0] is the input and
    self.outputs[i] is the result after the i-th child module.
    """

    def forward(self, input_):
        self.outputs = [input_]
        for child in self:
            input_ = child(input_)
            self.outputs.append(input_)
        return input_
class SimpleVisualBlock(nn.Module):
    """Single conv + ReLU block with 'same' spatial padding.

    Only odd kernel sizes are supported (symmetric padding).
    """

    def __init__(self, in_dim, out_dim=None, kernel_size=3):
        if out_dim is None:
            out_dim = in_dim
        super(SimpleVisualBlock, self).__init__()
        if kernel_size % 2 == 0:
            raise NotImplementedError()
        self.conv = nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size,
                              padding=kernel_size // 2)

    def forward(self, x):
        activated = F.relu(self.conv(x))
        return activated
class ResidualBlock(nn.Module):
    """Conv-conv residual block with optional batchnorm, an optional 1x1
    shortcut projection when in/out dims differ, an optional shared
    trailing block, and an optional channel-doubling post linear layer.
    """

    def __init__(self, in_dim, out_dim=None, kernel_size=3, with_residual=True, with_batchnorm=True,
                 shared_block=None, post_linear=False):
        if out_dim is None:
            out_dim = in_dim
        super(ResidualBlock, self).__init__()
        # Only odd kernels (symmetric 'same' padding) are supported.
        if kernel_size % 2 == 0:
            raise NotImplementedError()
        self.conv1 = nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size, padding=kernel_size // 2)
        self.conv2 = nn.Conv2d(out_dim, out_dim, kernel_size=kernel_size, padding=kernel_size // 2)
        self.shared_block = shared_block
        self.with_batchnorm = with_batchnorm
        if with_batchnorm:
            self.bn1 = nn.BatchNorm2d(out_dim)
            self.bn2 = nn.BatchNorm2d(out_dim)
        self.with_residual = with_residual
        # Identity shortcut when shapes match (or residual is disabled);
        # otherwise a 1x1 projection conv.
        if in_dim == out_dim or not with_residual:
            self.proj = None
        else:
            self.proj = nn.Conv2d(in_dim, out_dim, kernel_size=1)
        if post_linear:
            self.post_linear = nn.Conv2d(out_dim, 2 * out_dim, kernel_size=1)
            # UGLY HACK!!!
            # Initializes the first out_dim output channels to the identity
            # so the post-linear layer initially passes its input through.
            self.post_linear.weight.data[:out_dim, :, 0, 0] = torch.eye(out_dim)
        else:
            self.post_linear = None

    def forward(self, x):
        if self.with_batchnorm:
            out = F.relu(self.bn1(self.conv1(x)))
            out = self.bn2(self.conv2(out))
        else:
            out = self.conv2(F.relu(self.conv1(x)))
        res = x if self.proj is None else self.proj(x)
        if self.with_residual:
            out = F.relu(res + out)
        else:
            out = F.relu(out)
        if self.shared_block:
            out = self.shared_block(out)
        if self.post_linear:
            out = self.post_linear(out)
        return out
class SimpleConcatBlock(nn.Module):
    """Fuses three same-shaped feature maps: concat -> 1x1 projection -> ResidualBlock."""

    def __init__(self, dim, kernel_size, shared_block=None):
        super().__init__()
        # Bring the 3*dim concatenation back down to dim channels.
        self.proj = nn.Conv2d(3 * dim, dim, kernel_size=1, padding=0)
        self.impl = ResidualBlock(
            dim, dim, kernel_size=kernel_size,
            with_residual=True, with_batchnorm=False, shared_block=shared_block)

    def forward(self, feats, x, y):
        stacked = torch.cat([feats, x, y], 1)  # concatenate along channels
        projected = F.relu(self.proj(stacked))
        return self.impl(projected)
class ConcatBlock(nn.Module):
    """Fuses two same-shaped feature maps: concat -> 1x1 projection -> ResidualBlock."""

    def __init__(self, dim, kernel_size, with_residual=True, with_batchnorm=True,
                 shared_block=None, post_linear=False):
        super().__init__()
        # Bring the 2*dim concatenation back down to dim channels.
        self.proj = nn.Conv2d(2 * dim, dim, kernel_size=1, padding=0)
        self.vis_block = ResidualBlock(
            dim, kernel_size=kernel_size,
            with_residual=with_residual, with_batchnorm=with_batchnorm,
            shared_block=shared_block, post_linear=post_linear)

    def forward(self, x, y):
        stacked = torch.cat([x, y], 1)  # concatenate along channels
        projected = F.relu(self.proj(stacked))
        return self.vis_block(projected)
class GlobalAveragePool(nn.Module):
    """Averages each channel over all spatial positions: (N, C, H, W) -> (N, C)."""

    def forward(self, x):
        N, C = x.size(0), x.size(1)
        # mean(2) over the flattened spatial axis already removes that axis
        # (keepdim defaults to False), so the old trailing `.squeeze(2)` raised
        # a dimension-out-of-range error on modern PyTorch; it was only a no-op
        # on very old versions whose reductions kept the reduced dim.
        return x.view(N, C, -1).mean(2)
class Flatten(nn.Module):
    """Collapses every non-batch dimension: (N, ...) -> (N, prod(...))."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
def build_stem(feature_dim,
               stem_dim,
               module_dim,
               num_layers=2,
               with_batchnorm=True,
               kernel_size=[3],
               stride=[1],
               padding=None,
               subsample_layers=None,
               acceptEvenKernel=False):
    """Builds the convolutional 'stem' mapping raw image features to module features.

    feature_dim: number of input channels.
    stem_dim: channel width of the intermediate layers.
    module_dim: channel width of the final layer.
    kernel_size / stride / padding: per-layer settings; a length-1 list is
        broadcast to all num_layers layers. (The list defaults are never
        mutated in place, only rebound, so they are safe to share.)
    subsample_layers: indices of layers followed by a 2x2 max-pool.
    acceptEvenKernel: allow even kernel sizes; otherwise they are rejected
        because the default kernel_size // 2 'same' padding assumes odd kernels.

    Returns a SequentialSaveActivations so intermediate activations are kept.
    """
    layers = []
    prev_dim = feature_dim
    # Broadcast singleton per-layer settings to all layers.
    if len(kernel_size) == 1:
        kernel_size = num_layers * kernel_size
    if len(stride) == 1:
        stride = num_layers * stride
    if padding is None:  # idiomatic `is None` instead of `== None`
        padding = num_layers * [None]
    if len(padding) == 1:
        padding = num_layers * padding
    if subsample_layers is None:
        subsample_layers = []
    for i, cur_kernel_size, cur_stride, cur_padding in zip(range(num_layers), kernel_size, stride, padding):
        # The last layer projects to module_dim; earlier layers use stem_dim.
        curr_out = module_dim if (i == (num_layers-1) ) else stem_dim
        if cur_padding is None:  # Calculate default 'same' padding when None provided
            if cur_kernel_size % 2 == 0 and not acceptEvenKernel:
                raise NotImplementedError()
            cur_padding = cur_kernel_size // 2
        # Bias is redundant when a BatchNorm immediately follows.
        layers.append(nn.Conv2d(prev_dim, curr_out,
                                kernel_size=cur_kernel_size, stride=cur_stride, padding=cur_padding,
                                bias=not with_batchnorm))
        if with_batchnorm:
            layers.append(nn.BatchNorm2d(curr_out))
        layers.append(nn.ReLU(inplace=True))
        if i in subsample_layers:
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        prev_dim = curr_out
    return SequentialSaveActivations(*layers)
class HybridPool(nn.Module):
    """Pools the full `width` window with both max and average pooling and
    stacks the two results along channels, doubling the channel count."""

    def __init__(self, width):
        super().__init__()
        self.maxpool = nn.MaxPool2d(kernel_size=width, stride=width, padding=0)
        self.avgpool = nn.AvgPool2d(kernel_size=width, stride=width, padding=0)

    def forward(self, x):
        pooled = [self.maxpool(x), self.avgpool(x)]
        return torch.cat(pooled, 1)
def build_classifier(module_C, module_H, module_W, num_answers,
                     fc_dims=[], proj_dim=None, downsample=None,
                     with_batchnorm=True, dropout=[]):
    """Builds the answer classifier: optional 1x1 projection, optional spatial
    downsampling, then a stack of FC layers ending in a num_answers-way output.

    module_C/module_H/module_W: shape of the incoming feature map.
    fc_dims: hidden FC layer sizes.
    proj_dim: if > 0, first project channels from module_C to proj_dim.
    downsample: None, a string containing 'maxpool'/'avgpool' (pool size is the
        last character, or 'full' for the whole map), 'hybrid', or 'aggressive'.
    dropout: per-FC-layer dropout probabilities (a single float is broadcast).
    """
    layers = []
    prev_dim = module_C * module_H * module_W  # flattened feature size so far
    cur_dim = module_C
    if proj_dim is not None and proj_dim > 0:
        layers.append(nn.Conv2d(module_C, proj_dim, kernel_size=1, bias=not with_batchnorm))
        if with_batchnorm:
            layers.append(nn.BatchNorm2d(proj_dim))
        layers.append(nn.ReLU(inplace=True))
        prev_dim = proj_dim * module_H * module_W
        cur_dim = proj_dim
    if downsample is not None:
        if 'maxpool' in downsample or 'avgpool' in downsample:
            pool = nn.MaxPool2d if 'maxpool' in downsample else nn.AvgPool2d
            if 'full' in downsample:
                # Pool the whole (square) feature map down to 1x1.
                assert module_H == module_W
                pool_size = module_H
            else:
                # Pool size is encoded as the last character, e.g. 'maxpool2'.
                pool_size = int(downsample[-1])
            # Note: Potentially sub-optimal padding for non-perfectly aligned pooling
            padding = (0 if ((module_H % pool_size == 0) and (module_W % pool_size == 0)) else 1)
            layers.append(pool(kernel_size=pool_size, stride=pool_size, padding=padding))
            prev_dim = cur_dim * math.ceil(module_H / pool_size) * math.ceil(module_W / pool_size)
        if downsample == 'hybrid':
            assert module_H == module_W
            pool = HybridPool(module_H)
            layers.append(pool)
            prev_dim = cur_dim * 2
        if downsample == 'aggressive':
            # NOTE(review): this branch raises immediately, so the four lines
            # below it are unreachable dead code (presumably disabled on purpose).
            raise ValueError()
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            layers.append(nn.AvgPool2d(kernel_size=module_H // 2, stride=module_W // 2))
            prev_dim = proj_dim
            fc_dims = [] # No FC layers here
    layers.append(Flatten())
    # Broadcast a scalar dropout to every FC layer; empty means no dropout.
    if isinstance(dropout, float):
        dropout = [dropout] * len(fc_dims)
    elif not dropout:
        dropout = [0] * len(fc_dims)
    for next_dim, next_dropout in zip(fc_dims, dropout):
        layers.append(nn.Linear(prev_dim, next_dim, bias=not with_batchnorm))
        if with_batchnorm:
            layers.append(nn.BatchNorm1d(next_dim))
        layers.append(nn.ReLU(inplace=True))
        if next_dropout > 0:
            layers.append(nn.Dropout(p=next_dropout))
        prev_dim = next_dim
    layers.append(nn.Linear(prev_dim, num_answers))
    return nn.Sequential(*layers)
def init_modules(modules, init='uniform'):
    """Kaiming-initialize the weights of every Conv2d/Linear in `modules`.

    `init` selects the variant ('normal' or 'uniform', case-insensitive); any
    other value is a silent no-op, matching the historical behaviour.
    """
    initializers = {'normal': kaiming_normal_, 'uniform': kaiming_uniform_}
    init_params = initializers.get(init.lower())
    if init_params is None:
        return
    for module in modules:
        if isinstance(module, (nn.Conv2d, nn.Linear)):
            init_params(module.weight)
| 9,120
| 36.228571
| 138
|
py
|
CLOSURE
|
CLOSURE-master/vr/models/maced_net.py
|
#!/usr/bin/env python3
import numpy as np
import math
import pprint
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.models
import math
from torch.nn.init import kaiming_normal, kaiming_uniform, xavier_uniform, xavier_normal, constant
from vr.models.layers import build_classifier, build_stem
import vr.programs
from vr.models.filmed_net import coord_map, SharedFiLMedModule
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class MACControl(nn.Module):
    """Standalone control-vector generator for a MAC network.

    Projects the question encoding into module space and unrolls the control
    unit for num_modules steps, returning the per-step control vectors and
    their attention scores over question tokens.
    """
    def __init__(self, num_modules, rnn_dim, module_dim):
        super().__init__()
        self.num_modules = num_modules
        self.inputUnits = []
        # One InputUnit per reasoning step, registered as a named submodule.
        for i in range(self.num_modules):
            mod = InputUnit(module_dim)
            self.add_module('InputUnit' + str(i+1), mod)
            self.inputUnits.append(mod)
        self.controlUnit = ControlUnit(module_dim)
        self.init_question_transformer = nn.Linear(rnn_dim, module_dim)
        self.init_question_non_linear = nn.Tanh()
    def forward(self, question):
        # question is a triple: per-token states, pooled representation, token mask.
        q_states, q_rep, q_mask = question
        q_states = self.init_question_transformer(q_states)
        q_rep = self.init_question_transformer(q_rep)
        # The pre-tanh projected question doubles as the initial control vector.
        init_control = q_rep
        q_rep = self.init_question_non_linear(q_rep)
        # TODO: get rid of recurrency
        controls = [init_control]
        control_scores = [torch.zeros_like(q_states[:, :, 0])]
        for fn_num in range(self.num_modules):
            inputUnit = self.inputUnits[fn_num]
            q_rep_i = inputUnit(q_rep)
            control_i, control_scores_i = self.controlUnit(
                controls[fn_num], q_rep_i, q_states, q_mask)
            controls.append(control_i)
            control_scores.append(control_scores_i)
        controls = torch.cat([c.unsqueeze(1) for c in controls], 1) # N x M x D
        control_scores = torch.cat([c.unsqueeze(1) for c in control_scores], 1) # N x M x T
        return controls, control_scores
class MAC(nn.Module):
    """Implementation of the Compositional Attention Networks from: https://openreview.net/pdf?id=S1Euwz-Rb

    Pipeline: image stem -> (control unit unrolled over num_modules steps)
    -> (read/write units unrolled over the same steps) -> output classifier.
    NOTE(review): hard_code_control, pretrained_control, noisy_controls and
    verbose are accepted but never referenced in this class body — confirm
    whether they are consumed elsewhere or are dead parameters.
    """
    def __init__(self, vocab, feature_dim,
                 stem_num_layers,
                 stem_batchnorm,
                 stem_kernel_size,
                 stem_subsample_layers,
                 stem_stride,
                 stem_padding,
                 stem_dim,
                 num_modules,
                 module_dim,
                 question_embedding_dropout,
                 stem_dropout,
                 memory_dropout,
                 read_dropout,
                 nonlinearity,
                 use_prior_control_in_control_unit,
                 use_self_attention,
                 use_memory_gate,
                 question2output,
                 classifier_batchnorm,
                 classifier_fc_layers,
                 classifier_dropout,
                 use_coords,
                 write_unit,
                 read_connect,
                 read_unit,
                 noisy_controls,
                 debug_every=float('inf'),
                 print_verbose_every=float('inf'),
                 hard_code_control=False,
                 pretrained_control=None,
                 verbose=True,
                 ):
        super().__init__()
        num_answers = len(vocab['answer_idx_to_token'])
        self.stem_times = []
        self.module_times = []
        self.classifier_times = []
        self.timing = False
        self.num_modules = num_modules
        self.question_embedding_dropout = question_embedding_dropout
        self.memory_dropout = memory_dropout
        self.read_dropout = read_dropout
        self.module_dim = module_dim
        self.read_connect = read_connect
        self.question2output = question2output
        self.use_self_attention = use_self_attention == 1
        self.use_memory_gate = use_memory_gate == 1
        self.use_coords_freq = use_coords
        self.debug_every = debug_every
        self.print_verbose_every = print_verbose_every
        # Initialize helper variables
        self.stem_use_coords = self.use_coords_freq
        self.extra_channel_freq = self.use_coords_freq
        self.fwd_count = 0
        # Two extra channels (x, y coordinate maps) when coords are enabled.
        self.num_extra_channels = 2 if self.use_coords_freq > 0 else 0
        if self.debug_every <= -1:
            self.print_verbose_every = 1
        # Initialize stem
        stem_feature_dim = feature_dim[0] + self.stem_use_coords * self.num_extra_channels
        self.stem = build_stem(stem_feature_dim, stem_dim, module_dim,
                               num_layers=stem_num_layers, with_batchnorm=stem_batchnorm,
                               kernel_size=stem_kernel_size, stride=stem_stride, padding=stem_padding,
                               subsample_layers=stem_subsample_layers, acceptEvenKernel=True)
        #Define units
        # One InputUnit per reasoning step, registered as a named submodule.
        self.inputUnits = []
        for i in range(self.num_modules):
            mod = InputUnit(module_dim)
            self.add_module('InputUnit' + str(i+1), mod)
            self.inputUnits.append(mod)
        self.controlUnit = ControlUnit(module_dim, use_prior_control_in_control_unit=use_prior_control_in_control_unit)
        if read_unit == 'original':
            self.readUnit = ReadUnit(module_dim, nonlinearity, self.read_dropout)
        elif read_unit == 'film':
            self.readUnit = SharedFiLMedModule(module_dim, learn_embeddings=False)
        else:
            raise ValueError()
        if write_unit == 'original':
            mod = WriteUnit(module_dim,
                            use_self_attention=self.use_self_attention,
                            use_memory_gate=self.use_memory_gate)
        elif write_unit == 'gru':
            mod = GRUWriteUnit(module_dim)
        elif write_unit == 'lastread':
            mod = LastReadWriteUnit()
        elif write_unit == 'noop':
            mod = NoOpWriteUnit()
        else:
            raise ValueError(mod)
        self.add_module('WriteUnit', mod)
        self.writeUnit = mod
        #parameters for initial memory and control vectors
        self.init_memory = nn.Parameter(torch.randn(module_dim).to(device))
        #first transformation of question embeddings
        self.init_question_transformer = nn.Linear(self.module_dim, self.module_dim)
        self.init_question_non_linear = nn.Tanh()
        self.vocab = vocab
        self.question_embedding_dropout_module = nn.Dropout(p=self.question_embedding_dropout)
        # Initialize output classifier
        self.classifier = OutputUnit(
            module_dim, classifier_fc_layers, num_answers,
            with_batchnorm=classifier_batchnorm, dropout=classifier_dropout,
            nonlinearity=nonlinearity, question2output=question2output)
        init_modules(self.modules())
    def forward(self, x, ques, isTest=False, save_activations=False):
        # Initialize forward pass and externally viewable activations
        self.fwd_count += 1
        if save_activations:
            self.cf_input = None
        # ques is a triple: per-token states, pooled representation, token mask.
        q_context, q_rep, q_mask = ques
        original_q_rep = q_rep
        q_rep = self.question_embedding_dropout_module(q_rep)
        # The dropped-out (pre-tanh) question doubles as the initial control.
        init_control = q_rep
        q_rep = self.init_question_non_linear(self.init_question_transformer(q_rep))
        stem_batch_coords = None
        if self.use_coords_freq > 0:
            # Append x/y coordinate channels to the raw image features.
            stem_coords = coord_map((x.size(2), x.size(3)))
            stem_batch_coords = stem_coords.unsqueeze(0).expand(
                torch.Size((x.size(0), *stem_coords.size())))
        if self.stem_use_coords:
            x = torch.cat([x, stem_batch_coords], 1)
        feats = self.stem(x)
        if save_activations:
            self.feats = feats
            self.read_scores = []
        N, _, H, W = feats.size()
        # Slot 0 holds the learned initial memory; slot i+1 the step-i update.
        memory_storage = torch.zeros(N, 1+self.num_modules, self.module_dim).to(device)
        memory_storage[:,0,:] = self.init_memory.expand(N, self.module_dim)
        if self.memory_dropout > 0. and not isTest:
            # One dropout mask per sample, reused across all reasoning steps.
            dropout_mask_memory = torch.Tensor(N, self.module_dim).fill_(
                self.memory_dropout).bernoulli_().to(device)
        else:
            dropout_mask_memory = None
        # compute controls
        controls = [init_control]
        control_scores = [torch.zeros_like(q_context[:, :, 0])]
        for fn_num in range(self.num_modules):
            inputUnit = getattr(self, 'InputUnit{}'.format(fn_num + 1))
            #compute question representation specific to this cell
            q_rep_i = inputUnit(q_rep) # N x d
            #compute control at the current step
            control_i, control_scores_i = self.controlUnit(
                controls[fn_num], q_rep_i, q_context, q_mask)
            controls.append(control_i)
            control_scores.append(control_scores_i)
        controls = torch.cat([c.unsqueeze(1) for c in controls], 1) # N x M x D
        control_scores = torch.cat([c.unsqueeze(1) for c in control_scores], 1) # N x M x T
        # run that reasoning
        for fn_num in range(self.num_modules):
            inputUnit = getattr(self, 'InputUnit{}'.format(fn_num + 1))
            #compute read at the current step
            read_input = memory_storage[:,fn_num,:]
            if isinstance(self.readUnit, ReadUnit):
                read_i, read_scores_i = self.readUnit(
                    read_input, controls[:,(fn_num+1),:], feats,
                    memory_dropout=self.memory_dropout, dropout_mask_memory=dropout_mask_memory,
                    isTest=isTest)
            else:
                # FiLM-style read unit takes no memory and returns no scores.
                read_i = self.readUnit(feats, controls[:,(fn_num+1),:])
                read_scores_i = torch.Tensor([0])
            #compute write memeory at the current step
            memory_i = self.writeUnit(memory_storage, controls, read_i, fn_num+1)
            if fn_num == (self.num_modules - 1):
                final_module_output = memory_i
            else:
                # Clone before the in-place slot update so autograd stays happy.
                memory_updated = memory_storage.clone()
                memory_updated[:,(fn_num+1),:] = memory_updated[:,(fn_num+1),:] + memory_i
                memory_storage = memory_updated
            if save_activations:
                self.read_scores.append(read_scores_i)
        if save_activations:
            self.cf_input = final_module_output
            self.controls = controls
            self.control_scores = control_scores
            self.memory_storage = memory_storage
            self.read_scores = torch.cat([rs.unsqueeze(1) for rs in self.read_scores], 1)
        # output time
        out = self.classifier(final_module_output, original_q_rep, isTest=isTest)
        return out
class OutputUnit(nn.Module):
    """MAC answer classifier: an MLP over the final memory, optionally
    concatenated with a linear transform of the question representation.

    Layers are stored via add_module/getattr under 'MAC_LinearFC{i}' and
    'MAC_BatchNormFC{i}' names so batchnorm can be absent (None) per layer.
    """
    def __init__(self, module_dim, hidden_units, num_outputs,
                 nonlinearity, with_batchnorm, dropout, question2output):
        super().__init__()
        self.dropout = dropout
        self.question2output = question2output
        if question2output:
            self.question_transformer = nn.Linear(module_dim, module_dim)
        input_dim = 2*module_dim if question2output else module_dim
        hidden_units = [input_dim] + [h for h in hidden_units] + [num_outputs]
        self.n_layers = len(hidden_units) - 1
        for i, (nin, nout) in enumerate(zip(hidden_units, hidden_units[1:])):
            mod = nn.Linear(nin, nout)
            self.add_module('MAC_LinearFC' + str(i), mod)
            # BatchNorm is applied to the layer *input* (nin), see forward().
            mod = nn.BatchNorm1d(nin) if with_batchnorm else None
            if mod is not None:
                self.add_module('MAC_BatchNormFC' + str(i), mod)
            else:
                # Plain attribute so getattr() in forward still resolves.
                setattr(self, 'MAC_BatchNormFC' + str(i), None)
        self.non_linear = nn.ReLU()
        self.dropout_module = nn.Dropout(p=self.dropout)
        init_modules(self.modules())
    def forward(self, final_memory, original_q_rep, isTest=False):
        if self.question2output:
            transformed_question = self.question_transformer(original_q_rep)
            features = torch.cat([final_memory, transformed_question], 1)
        else:
            features = final_memory
        # Per layer: (batchnorm) -> dropout -> linear -> ReLU (except last layer).
        for i in range(self.n_layers):
            batchnorm = getattr(self, 'MAC_BatchNormFC' + str(i))
            if batchnorm is not None:
                features = batchnorm(features)
            features = self.dropout_module(features)
            linear = getattr(self, 'MAC_LinearFC' + str(i))
            features = linear(features)
            if i + 1 < self.n_layers:
                features = self.non_linear(features)
        return features
class NoOpWriteUnit(nn.Module):
    """Write unit that ignores the read result and writes an all-zero update."""

    def forward(self, memories, controls, current_read, idx):
        zeros = torch.zeros_like(current_read)
        return zeros
class LastReadWriteUnit(nn.Module):
    """Write unit that passes the latest read vector straight through as the
    new memory, ignoring the memory history and controls."""

    def forward(self, memories, controls, current_read, idx):
        return current_read
class GRUWriteUnit(nn.Module):
    """Write unit that updates memory with a GRU cell: the read vector is the
    input, the previous step's memory is the hidden state."""

    def __init__(self, common_dim):
        super().__init__()
        self.gru = nn.GRUCell(common_dim, common_dim)

    def forward(self, memories, controls, current_read, idx):
        prev_memory = memories[:, idx - 1, :]
        return self.gru(current_read, prev_memory)
class WriteUnit(nn.Module):
    """MAC write unit (equations w1-w3 of the MAC paper).

    Combines the current read with the previous memory; optionally adds
    self-attention over earlier memories (weighted by control similarity) and
    a memory gate that interpolates between old and new memory.
    """
    def __init__(self, common_dim, use_self_attention=False, use_memory_gate=False):
        super(WriteUnit, self).__init__()
        self.common_dim = common_dim
        self.use_self_attention = use_self_attention
        self.use_memory_gate = use_memory_gate
        self.control_memory_transfomer = nn.Linear(2 * common_dim, common_dim) #Eq (w1)
        if use_self_attention:
            self.current_control_transformer = nn.Linear(common_dim, common_dim)
            self.control_transformer = nn.Linear(common_dim, 1) #Eq (w2.1)
            self.acc_memory_transformer = nn.Linear(common_dim, common_dim, bias=False)
            self.pre_memory_transformer = nn.Linear(common_dim, common_dim) #Eq (w2.3)
        if use_memory_gate:
            self.gated_control_transformer = nn.Linear(common_dim, 1) #Eq (w3.1)
            self.non_linear = nn.Sigmoid()
        init_modules(self.modules())
    def forward(self, memories, controls, current_read, idx):
        #memories (N x num_cell x d), controls (N x num_cell x d), current_read (N x d), idx (int starting from 1)
        prior_memory = memories[:,idx-1,:]
        #Eq (w1)
        res_memory = self.control_memory_transfomer( torch.cat([current_read, prior_memory], 1) ) #N x d
        if self.use_self_attention:
            current_control = controls[:,idx,:] # N x d
            current_control = self.current_control_transformer(current_control) # N x d in code
            if idx > 1:
                #Eq (w2.1): softmax over similarities to earlier controls
                previous_controls = controls[:,1:idx,:] # N x (idx-1) x d
                cscores = previous_controls * current_control.unsqueeze(1) # N x (idx-1) x d
                cscores = self.control_transformer(cscores).squeeze(2) # N x (idx -1)
                # Hand-rolled numerically-stable softmax (subtract row max).
                cscores = torch.exp(cscores - cscores.max(1, keepdim=True)[0]) # N x (idx -1)
                cscores = cscores / cscores.sum(1, keepdim=True) # N x (idx -1)
                #Eq (w2.2): attention-weighted sum of earlier memories
                previous_memories = memories[:,1:idx,:] #N x (idx-1) x d
                acc_memory = (previous_memories * cscores.unsqueeze(2)).sum(1) # N x d
                #Eq (w2.3)
                res_memory = self.acc_memory_transformer(acc_memory) + self.pre_memory_transformer(res_memory)
            else:
                #Eq (w2.3) as there is no m_i^{sa} in this case
                res_memory = self.pre_memory_transformer(res_memory)
        if self.use_memory_gate:
            #Eq (w3.1)
            gated_control = self.gated_control_transformer(controls[:,idx,:]) #N x 1
            #Eq (w3.2)
            gated_control = self.non_linear(gated_control) #-1)
            # Gate interpolates between the old memory and the new candidate.
            res_memory = memories[:,idx-1,:] * gated_control + res_memory * (1. - gated_control)
        return res_memory
class ReadUnit(nn.Module):
    """MAC read unit (equations r1-r3 of the MAC paper).

    Attends over image positions using the interaction of previous memory,
    image features and the current control, and returns the attention-weighted
    image representation plus the NxHxW attention map.
    """
    def __init__(self, common_dim, nonlinearity, read_dropout=0.):
        super().__init__()
        self.common_dim = common_dim
        self.read_dropout = read_dropout
        #Eq (r1)
        self.pre_memory_transformer = nn.Linear(common_dim, common_dim)
        self.image_element_transformer = nn.Linear(common_dim, common_dim)
        #Eq (r2)
        self.intermediate_transformer = nn.Linear(2 * common_dim, common_dim)
        #self.intermediate_transformer_2 = nn.Linear(common_dim, common_dim)
        #Eq (r3.1)
        self.read_attention_transformer = nn.Linear(common_dim, 1)
        # Nonlinearity is looked up by name from torch.nn, e.g. 'ELU' or 'ReLU'.
        self.non_linear = getattr(nn, nonlinearity)()
        self.read_dropout_module = nn.Dropout(p=self.read_dropout)
        init_modules(self.modules())
    def forward(self, pre_memory, current_control, image,
                memory_dropout=0., dropout_mask_memory=None, isTest=False):
        #pre_memory(Nxd), current_control(Nxd), image(NxdxHxW)
        image = image.transpose(1,2).transpose(2,3) #NXHxWxd
        trans_image = image
        if not isTest and memory_dropout > 0.:
            # Shared (per-sample) memory dropout mask, rescaled to keep the
            # expectation unchanged.
            assert dropout_mask_memory is not None
            pre_memory = (pre_memory / (1. - memory_dropout)) * dropout_mask_memory
        pre_memory = self.read_dropout_module(pre_memory)
        trans_image = self.read_dropout_module(trans_image)
        #Eq (r1)
        trans_pre_memory = self.pre_memory_transformer(pre_memory) #Nxd
        trans_image = self.image_element_transformer(trans_image) #NxHxWxd image
        trans_pre_memory = trans_pre_memory.unsqueeze(1).unsqueeze(2).expand(trans_image.size()) #NxHxWxd
        intermediate = trans_pre_memory * trans_image #NxHxWxd
        #Eq (r2)
        #trans_intermediate = self.intermediate_transformer(torch.cat([intermediate, image], 3)) #NxHxWxd
        trans_intermediate = self.intermediate_transformer(torch.cat([intermediate, trans_image], 3)) #NxHxWxd
        trans_intermediate = self.non_linear(trans_intermediate)
        #trans_intermediate = self.intermediate_transformer_2(trans_intermediate)
        #Eq (r3.1)
        trans_current_control = current_control.unsqueeze(1).unsqueeze(2).expand(trans_intermediate.size()) #NxHxWxd
        intermediate_score = trans_current_control * trans_intermediate
        intermediate_score = self.non_linear(intermediate_score)
        intermediate_score = self.read_dropout_module(intermediate_score)
        scores = self.read_attention_transformer(intermediate_score).squeeze(3) #NxHxWx1 -> NxHxW
        #Eq (r3.2): softmax (hand-rolled, numerically stabilized by max-subtraction)
        rscores = scores.view(scores.shape[0], -1) #N x (H*W)
        rscores = torch.exp(rscores - rscores.max(1, keepdim=True)[0])
        rscores = rscores / rscores.sum(1, keepdim=True)
        scores = rscores.view(scores.shape) #NxHxW
        #Eq (r3.3): attention-weighted sum over spatial positions
        readrep = image * scores.unsqueeze(3)
        readrep = readrep.view(readrep.shape[0], -1, readrep.shape[-1]) #N x (H*W) x d
        readrep = readrep.sum(1) #N x d
        return readrep, scores
class ControlUnit(nn.Module):
    """MAC control unit (equations c1-c2 of the MAC paper).

    Attends over question tokens with a masked softmax and returns the
    attention-weighted context vector plus the NxL attention scores.
    """
    def __init__(self, common_dim, use_prior_control_in_control_unit=False):
        super().__init__()
        self.common_dim = common_dim
        self.use_prior_control_in_control_unit = use_prior_control_in_control_unit
        if use_prior_control_in_control_unit:
            self.control_question_transformer = nn.Linear(2 * common_dim, common_dim) #Eq (c1)
        self.score_transformer = nn.Linear(common_dim, 1) # Eq (c2.1)
        init_modules(self.modules())
    def forward(self, pre_control, question, context, mask):
        #pre_control (Nxd), question (Nxd), context(NxLxd), mask(NxL)
        #Eq (c1): optionally mix the previous control into the query
        if self.use_prior_control_in_control_unit:
            control_question = self.control_question_transformer(torch.cat([pre_control, question], 1)) # N x d
        else:
            control_question = question # N x d
        #Eq (c2.1)
        scores = self.score_transformer(context * control_question.unsqueeze(1)).squeeze(2) #NxLxd -> NxLx1 -> NxL
        #Eq (c2.2) : hand-rolled masked softmax (max-subtracted for stability)
        scores = torch.exp(scores - scores.max(1, keepdim=True)[0]) * mask #mask help to eliminate null tokens
        scores = scores / scores.sum(1, keepdim=True) #NxL
        #Eq (c2.3)
        control = (context * scores.unsqueeze(2)).sum(1) #Nxd
        return control, scores
class InputUnit(nn.Module):
    """Per-step linear projection of the question representation (Section 2.1
    of the MAC paper): each reasoning step gets its own view of the question."""

    def __init__(self, common_dim):
        super().__init__()
        self.common_dim = common_dim
        self.question_transformer = nn.Linear(common_dim, common_dim)
        init_modules(self.modules())

    def forward(self, question):
        projected = self.question_transformer(question)
        return projected
def sincos_coord_map(shape, p_h=64., p_w=64.):
    """Builds a 2 x m x n sin/cos positional-coordinate map.

    Channel 0 encodes the row index with sin, channel 1 the column index with
    cos; each uses a frequency derived from the other coordinate rounded down
    to an even value, in the spirit of transformer positional encodings.
    """
    rows, cols = shape
    x_coords = torch.zeros(rows, cols)
    y_coords = torch.zeros(rows, cols)
    for r in range(rows):
        for c in range(cols):
            # Round each coordinate down to an even value for the frequency term.
            r_even = r - (r % 2)
            c_even = c - (c % 2)
            x_coords[r, c] = math.sin(1.0 * r / (10000. ** (1.0 * c_even / p_h)))
            y_coords[r, c] = math.cos(1.0 * c / (10000. ** (1.0 * r_even / p_w)))
    x_coords = torch.Tensor(x_coords).to(device).unsqueeze(0)
    y_coords = torch.Tensor(y_coords).to(device).unsqueeze(0)
    return Variable(torch.cat([x_coords, y_coords], 0))
def init_modules(modules, init='uniform'):
    """Xavier-initialize weights (and zero biases) of Conv2d/Linear modules.

    init: 'normal' or 'uniform' (case-insensitive) selects the Xavier variant;
    any other value leaves the modules untouched.
    """
    # Use the underscore-suffixed initializers: the plain-named aliases
    # (xavier_uniform, xavier_normal, constant) imported at the top of this
    # file were deprecated and then removed from torch.nn.init.
    from torch.nn.init import xavier_normal_, xavier_uniform_, constant_
    if init.lower() == 'normal':
        init_params = xavier_normal_
    elif init.lower() == 'uniform':
        init_params = xavier_uniform_
    else:
        return
    for m in modules:
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            init_params(m.weight)
            if m.bias is not None:
                constant_(m.bias, 0.)
| 21,616
| 37.809695
| 119
|
py
|
CLOSURE
|
CLOSURE-master/vr/models/convlstm.py
|
#!/usr/bin/env python3
import torch
import torch.nn as nn
from torch.autograd import Variable
from vr.models.layers import (build_classifier,
build_stem,
init_modules)
class ConvLSTM(nn.Module):
    """CNN + question-RNN baseline: stem features are flattened, concatenated
    with the question's hidden state, and classified by an MLP."""
    def __init__(self,
                 vocab,
                 feature_dim=[3, 64, 64],
                 stem_dim=128,
                 module_dim=128,
                 stem_num_layers=2,
                 stem_batchnorm=True,
                 stem_kernel_size=3,
                 stem_stride=1,
                 stem_padding=None,
                 stem_feature_dim=24,
                 stem_subsample_layers=None,
                 classifier_fc_layers=(1024,),
                 classifier_batchnorm=False,
                 classifier_dropout=0,
                 rnn_hidden_dim=128,
                 **kwargs):
        super().__init__()
        # initialize stem
        self.stem = build_stem(feature_dim[0],
                               stem_dim,
                               module_dim,
                               num_layers=stem_num_layers,
                               with_batchnorm=stem_batchnorm,
                               kernel_size=stem_kernel_size,
                               stride=stem_stride,
                               padding=stem_padding,
                               subsample_layers=stem_subsample_layers)
        # Dummy forward pass to infer the stem's output shape.
        tmp = self.stem(Variable(torch.zeros([1] + feature_dim)))
        _, F, H, W = tmp.size()  # F here is a channel count, not torch.nn.functional
        # initialize classifier
        # TODO(mnoukhov): fix this for >1 layer RNN
        question_dim = rnn_hidden_dim
        image_dim = F*H*W
        num_answers = len(vocab['answer_idx_to_token'])
        self.classifier = build_classifier(image_dim + question_dim,
                                           1,
                                           1,
                                           num_answers,
                                           classifier_fc_layers,
                                           None,
                                           None,
                                           classifier_batchnorm,
                                           classifier_dropout)
        init_modules(self.modules())
    def forward(self, image, question):
        # convert image to features
        img_feats = self.stem(image) # N x F x H x W
        img_feats = img_feats.view(img_feats.size(0), -1) # N x F*H*W
        # get hidden state from question
        _, q_feats, _ = question # N x Q
        # concatenate feats
        feats = torch.cat([img_feats, q_feats], dim=1) # N x F*H*W+Q
        # pass through classifier
        out = self.classifier(feats)
        return out
| 2,782
| 35.142857
| 73
|
py
|
CLOSURE
|
CLOSURE-master/vr/models/__init__.py
|
#!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
from vr.models.module_net import ModuleNet
from vr.models.filmed_net import FiLMedNet
from vr.models.seq2seq import Seq2Seq
from vr.models.seq2seq_att import Seq2SeqAtt
from vr.models.film_gen import FiLMGen
from vr.models.maced_net import MAC
from vr.models.baselines import LstmModel, CnnLstmModel, CnnLstmSaModel
from vr.models.convlstm import ConvLSTM
| 710
| 40.823529
| 138
|
py
|
CLOSURE
|
CLOSURE-master/vr/models/film_gen.py
|
#!/usr/bin/env python3
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from vr.models.layers import init_modules
from torch.nn.init import uniform_, xavier_uniform_, constant_
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class FiLMGen(nn.Module):
    def __init__(self,
                 null_token=0,
                 start_token=1,
                 end_token=2,
                 encoder_embed=None,
                 encoder_vocab_size=100,
                 decoder_vocab_size=100,
                 wordvec_dim=200,
                 hidden_dim=512,
                 rnn_num_layers=1,
                 rnn_dropout=0,
                 output_batchnorm=False,
                 bidirectional=False,
                 encoder_type='gru',
                 decoder_type='linear',
                 gamma_option='linear',
                 gamma_baseline=1,
                 num_modules=4,
                 module_num_layers=1,
                 module_dim=128,
                 parameter_efficient=False,
                 debug_every=float('inf'),
                 taking_context=False,
                 variational_embedding_dropout=0.,
                 embedding_uniform_boundary=0.,
                 use_attention=False,
                 ):
        """Question encoder + FiLM-parameter generator.

        Encodes the question with an embedding + RNN and decodes per-module
        FiLM conditioning; can optionally keep the full token context
        (taking_context) and attend over it (use_attention).
        """
        super(FiLMGen, self).__init__()
        self.use_attention = use_attention
        self.taking_context = taking_context
        if self.use_attention:
            #if we want to use attention, the full context should be computed
            self.taking_context = True
        if self.taking_context:
            #if we want to use the full context, it makes sense to use bidirectional modeling.
            bidirectional = True
        self.encoder_type = encoder_type
        self.decoder_type = decoder_type
        self.output_batchnorm = output_batchnorm
        self.bidirectional = bidirectional
        self.num_dir = 2 if self.bidirectional else 1
        self.gamma_option = gamma_option
        self.gamma_baseline = gamma_baseline
        self.num_modules = num_modules
        self.module_num_layers = module_num_layers
        self.module_dim = module_dim
        self.debug_every = debug_every
        self.NULL = null_token
        self.START = start_token
        self.END = end_token
        self.variational_embedding_dropout = variational_embedding_dropout
        if self.bidirectional: # and not self.taking_context:
            if decoder_type != 'linear':
                raise(NotImplementedError)
            # Halve per-direction hidden size so the concatenated
            # (bidirectional) state keeps the requested total width.
            hidden_dim = (int) (hidden_dim / self.num_dir)
        # NOTE(review): F.sigmoid / F.tanh are deprecated in modern PyTorch in
        # favour of torch.sigmoid / torch.tanh — confirm target torch version.
        self.func_list = {
            'linear': None,
            'sigmoid': F.sigmoid,
            'tanh': F.tanh,
            'exp': torch.exp,
        }
        self.cond_feat_size = 2 * self.module_dim * self.module_num_layers # FiLM params per ResBlock
        if not parameter_efficient: # parameter_efficient=False only used to load older trained models
            self.cond_feat_size = 4 * self.module_dim + 2 * self.num_modules
        self.encoder_embed = nn.Embedding(encoder_vocab_size, wordvec_dim)
        self.encoder_rnn = init_rnn(self.encoder_type, wordvec_dim, hidden_dim, rnn_num_layers,
                                    dropout=rnn_dropout, bidirectional=self.bidirectional)
        self.decoder_rnn = init_rnn(self.decoder_type, hidden_dim, hidden_dim, rnn_num_layers,
                                    dropout=rnn_dropout, bidirectional=self.bidirectional)
        if self.taking_context:
            self.decoder_linear = None #nn.Linear(2 * hidden_dim, hidden_dim)
            # Xavier weights / zero biases for the encoder RNN parameters.
            for n, p in self.encoder_rnn.named_parameters():
                if n.startswith('weight'): xavier_uniform_(p)
                elif n.startswith('bias'): constant_(p, 0.)
        else:
            self.decoder_linear = nn.Linear(hidden_dim * self.num_dir, self.num_modules * self.cond_feat_size)
        if self.use_attention:
            # Florian Strub used Tanh here, but let's use identity to make this model
            # closer to the baseline film version
            #Need to change this if we want a different mechanism to compute attention weights
            attention_dim = self.module_dim
            self.context2key = nn.Linear(hidden_dim * self.num_dir, self.module_dim)
            # to transform control vector to film coefficients
            self.last_vector2key = []
            self.decoders_att = []
            for i in range(num_modules):
                mod = nn.Linear(hidden_dim * self.num_dir, attention_dim)
                self.add_module("last_vector2key{}".format(i), mod)
                self.last_vector2key.append(mod)
                mod = nn.Linear(hidden_dim * self.num_dir, 2*self.module_dim)
                self.add_module("decoders_att{}".format(i), mod)
                self.decoders_att.append(mod)
        if self.output_batchnorm:
            self.output_bn = nn.BatchNorm1d(self.cond_feat_size, affine=True)
        init_modules(self.modules())
        if embedding_uniform_boundary > 0.:
            uniform_(self.encoder_embed.weight, -1.*embedding_uniform_boundary, embedding_uniform_boundary)
        # The attention scores will be saved here if the attention is used.
        self.scores = None
    def expand_encoder_vocab(self, token_to_idx, word2vec=None, std=0.01):
        """Grows the encoder embedding table to cover the tokens in token_to_idx.

        NOTE(review): `expand_embedding_vocab` is not imported in this module's
        visible imports, so calling this would raise NameError — confirm where
        the helper is expected to come from.
        """
        expand_embedding_vocab(self.encoder_embed, token_to_idx,
                               word2vec=word2vec, std=std)
def get_dims(self, x=None):
V_in = self.encoder_embed.num_embeddings
V_out = self.cond_feat_size
D = self.encoder_embed.embedding_dim
H = self.encoder_rnn.hidden_size
H_full = self.encoder_rnn.hidden_size * self.num_dir
L = self.encoder_rnn.num_layers * self.num_dir
N = x.size(0) if x is not None else None
T_in = x.size(1) if x is not None else None
T_out = self.num_modules
return V_in, V_out, D, H, H_full, L, N, T_in, T_out
    def before_rnn(self, x, replace=0):
        """Preprocesses a padded token batch (N x T) before the RNN.

        Returns (x, idx, mask): idx[i] is the position of the last non-null
        token of sequence i (defaulting to T-1), mask is an N x T float mask of
        non-null tokens, and null tokens in x are replaced by `replace`.
        """
        N, T = x.size()
        idx = torch.LongTensor(N).fill_(T - 1)
        #mask to specify non-null tokens
        mask = torch.FloatTensor(N, T).zero_()
        # Find the last non-null element in each sequence.
        x_cpu = x.cpu()
        for i in range(N):
            for t in range(T - 1):
                if x_cpu.data[i, t] != self.NULL and x_cpu.data[i, t + 1] == self.NULL:
                    idx[i] = t
                    break
        for i in range(N):
            for t in range(T):
                if x_cpu.data[i, t] not in [self.NULL]:
                    mask[i, t] = 1.
        idx = idx.type_as(x.data)
        # NOTE: mutates the caller's tensor in place.
        x[x.data == self.NULL] = replace
        return x, idx, mask.to(device)
def encoder(self, x, isTest=False):
    """Encode a batch of tokenized questions with the encoder RNN.

    Two modes, selected by self.taking_context:
      * taking_context: sequences are length-sorted, packed, encoded, then
        unsorted; returns (None, per-step outputs, final hidden, mask).
      * otherwise: returns (hidden state at each sequence's last non-null
        token, None, None, mask).
    `isTest` disables variational embedding dropout.
    """
    V_in, V_out, D, H, H_full, L, N, T_in, T_out = self.get_dims(x=x)
    x, idx, mask = self.before_rnn(x)  # Tokenized word sequences (questions), end index
    if self.taking_context:
        # Lengths = last-token index + 1; sort descending for pack_padded_sequence.
        lengths = torch.LongTensor(idx.shape).fill_(1) + idx.data.cpu()
        lengths = lengths.to(device)
        seq_lengths, perm_idx = lengths.sort(0, descending=True)
        # iperm_idx inverts the sort permutation so outputs can be restored.
        iperm_idx = torch.LongTensor(perm_idx.shape).fill_(0).to(device)
        for i, v in enumerate(perm_idx):
            iperm_idx[v.data] = i
        x = x[perm_idx]
    embed = self.encoder_embed(x)
    h0 = Variable(torch.zeros(L, N, H).type_as(embed.data))
    if self.encoder_type == 'lstm':
        c0 = Variable(torch.zeros(L, N, H).type_as(embed.data))
    if self.variational_embedding_dropout > 0. and not isTest:
        # Variational dropout: one Bernoulli mask per (sample, embedding dim),
        # shared across time steps; rescaled to keep expectations unchanged.
        varDrop = torch.Tensor(N, D).fill_(self.variational_embedding_dropout).bernoulli_().to(device)
        embed = (embed / (1. - self.variational_embedding_dropout)) * varDrop.unsqueeze(1)
    if self.taking_context:
        embed = pack_padded_sequence(embed, seq_lengths.data.cpu().numpy(), batch_first=True)
    if self.encoder_type == 'lstm':
        out, (hn, _) = self.encoder_rnn(embed, (h0, c0))
    elif self.encoder_type == 'gru':
        out, hn = self.encoder_rnn(embed, h0)
    # Flatten (layers*dirs, N, H) -> (N, layers*dirs*H) for the final state.
    hn = hn.transpose(1,0).contiguous()
    hn = hn.view(hn.shape[0], -1)
    # Pull out the hidden state for the last non-null value in each input
    if self.taking_context:
        idx_out = None
        out, _ = pad_packed_sequence(out, batch_first=True)
        out = out[iperm_idx]
        # Packing truncates T to the longest real sequence; pad back to T_in.
        if out.shape[1] < T_in:
            out = F.pad(out, (0, 0, 0, T_in - out.shape[1], 0, 0))
        #mask = mask[:, :(out.shape[1]-T_in)] #The packing truncate the original length so we need to change mask to fit it
        hn = hn[iperm_idx]
    else:
        idx = idx.view(N, 1, 1).expand(N, 1, H_full)
        idx_out = out.gather(1, idx).view(N, H_full)
        out = None
        hn = None
    return idx_out, out, hn, mask
def decoder(self, encoded, dims, h0=None, c0=None):
    """Decode the encoded question into per-module conditioning parameters.

    Args:
        encoded: (N, H) summary vector from the encoder.
        dims: tuple from get_dims(); only V_out, H, L, N, T_out are used.
        h0, c0: optional initial RNN states; zero-initialized when None.

    Returns (output of shape (N, T_out, V_out), (ht, ct)); the hidden states
    are (None, None) for the 'linear' decoder and ct is None for 'gru'.
    """
    V_in, V_out, D, H, H_full, L, N, T_in, T_out = dims
    if self.decoder_type == 'linear':
        # (N x H) x (H x T_out*V_out) -> (N x T_out*V_out) -> N x T_out x V_out
        return self.decoder_linear(encoded).view(N, T_out, V_out), (None, None)
    # Feed the same encoded vector at every decode step.
    encoded_repeat = encoded.view(N, 1, H).expand(N, T_out, H)
    # BUG FIX: `if not h0:` is ambiguous for tensors (raises for any
    # multi-element initial state passed by a caller); compare against None.
    if h0 is None:
        h0 = Variable(torch.zeros(L, N, H).type_as(encoded.data))
    if self.decoder_type == 'lstm':
        if c0 is None:
            c0 = Variable(torch.zeros(L, N, H).type_as(encoded.data))
        rnn_output, (ht, ct) = self.decoder_rnn(encoded_repeat, (h0, c0))
    elif self.decoder_type == 'gru':
        ct = None
        rnn_output, ht = self.decoder_rnn(encoded_repeat, h0)
    # Project every step's hidden state to the output size in one matmul.
    rnn_output_2d = rnn_output.contiguous().view(N * T_out, H)
    linear_output = self.decoder_linear(rnn_output_2d)
    if self.output_batchnorm:
        linear_output = self.output_bn(linear_output)
    output_shaped = linear_output.view(N, T_out, V_out)
    return output_shaped, (ht, ct)
def attention_decoder(self, context, last_vector, mask):
    """Produce per-module FiLM coefficients via dot-product attention.

    For each module, attends over the encoder context (N x L x d) with a
    query derived from the final encoder state (N x d), then maps the
    attention summary to 2*module_dim coefficients. Attention weights are
    stored in self.scores (num_modules x N x L). Returns N x num_modules x 2d
    (N x 1 x 2d for a single module, None if there are no modules).
    """
    keys = self.context2key(context)  # N x L x attention_dim
    per_module_coeffs = []
    per_module_scores = []
    for module_idx in range(self.num_modules):
        # Vanilla dot-product attention in the shared key space.
        query = self.last_vector2key[module_idx](last_vector)
        logits = (keys * query.unsqueeze(1)).sum(2)  # N x L
        # Numerically stable masked softmax; padded positions get zero weight.
        weights = torch.exp(logits - logits.max(1, keepdim=True)[0]) * mask
        weights = weights / weights.sum(1, keepdim=True)  # N x L
        per_module_scores.append(weights)
        # Attention-weighted summary of the context -> FiLM coefficients.
        summary = (context * weights.unsqueeze(2)).sum(1)  # N x d
        per_module_coeffs.append(self.decoders_att[module_idx](summary).unsqueeze(1))
    self.scores = torch.cat([s.unsqueeze(0) for s in per_module_scores], 0)
    if not per_module_coeffs:
        return None
    if len(per_module_coeffs) == 1:
        return per_module_coeffs[0]
    return torch.cat(per_module_coeffs, 1)  # N x num_modules x 2d
def forward(self, x, isTest=False):
    """Encode questions `x` and emit FiLM parameters (or raw context).

    When taking_context is on and attention is off, the raw encoder outputs
    (whole_context, last_vector, mask) are returned untouched; otherwise the
    decoded coefficients are passed through modify_output with the configured
    gamma transform and baseline shift.
    """
    if self.debug_every <= -2:
        pdb.set_trace()
    encoded, whole_context, last_vector, mask = self.encoder(x, isTest=isTest)
    # Context-only mode: hand the encoder outputs straight to the caller.
    if self.taking_context and not self.use_attention:
        return (whole_context, last_vector, mask)
    # NOTE: attention assumes taking_context so whole_context is populated.
    if self.use_attention:
        raw_film = self.attention_decoder(whole_context, last_vector, mask)
    else:
        raw_film, _ = self.decoder(encoded, self.get_dims(x=x))
    return self.modify_output(raw_film, gamma_option=self.gamma_option,
                              gamma_shift=self.gamma_baseline)
def modify_output(self, out, gamma_option='linear', gamma_scale=1, gamma_shift=0,
                  beta_option='linear', beta_scale=1, beta_shift=0):
    """Apply elementwise transforms, scales, and shifts to FiLM coefficients.

    `out` is N x T x (module_num_layers * 2 * module_dim); within each layer's
    chunk the first module_dim channels are gammas and the next module_dim are
    betas. Options index self.func_list; 'linear' maps to None (identity), in
    which case no function is applied. Modifies `out` in place and returns it.
    """
    gamma_func = self.func_list[gamma_option]
    beta_func = self.func_list[beta_option]
    # Per-layer gamma/beta channel slices.
    gs = []
    bs = []
    for i in range(self.module_num_layers):
        gs.append(slice(i * (2 * self.module_dim), i * (2 * self.module_dim) + self.module_dim))
        bs.append(slice(i * (2 * self.module_dim) + self.module_dim, (i + 1) * (2 * self.module_dim)))
    if gamma_func is not None:
        for i in range(self.module_num_layers):
            out[:, :, gs[i]] = gamma_func(out[:, :, gs[i]])
    if gamma_scale != 1:
        for i in range(self.module_num_layers):
            out[:, :, gs[i]] = out[:, :, gs[i]] * gamma_scale
    if gamma_shift != 0:
        for i in range(self.module_num_layers):
            out[:, :, gs[i]] = out[:, :, gs[i]] + gamma_shift
    if beta_func is not None:
        for i in range(self.module_num_layers):
            out[:, :, bs[i]] = beta_func(out[:, :, bs[i]])
        # BUG FIX: removed `out[:,:,b2] = beta_func(out[:,:,b2])` -- `b2` was
        # never defined, so any non-identity beta_option raised NameError.
    if beta_scale != 1:
        for i in range(self.module_num_layers):
            out[:, :, bs[i]] = out[:, :, bs[i]] * beta_scale
    if beta_shift != 0:
        for i in range(self.module_num_layers):
            out[:, :, bs[i]] = out[:, :, bs[i]] + beta_shift
    return out
def init_rnn(rnn_type, hidden_dim1, hidden_dim2, rnn_num_layers,
             dropout=0, bidirectional=False):
    """Build a batch-first recurrent core of the requested type.

    Args:
        rnn_type: 'gru', 'lstm', or 'linear' (no RNN; returns None).
        hidden_dim1: input feature size.
        hidden_dim2: hidden state size.
        rnn_num_layers: number of stacked layers.
        dropout: inter-layer dropout probability.
        bidirectional: whether the RNN runs in both directions.

    Raises:
        NotImplementedError: for any other rnn_type.
    """
    if rnn_type == 'gru':
        return nn.GRU(hidden_dim1, hidden_dim2, rnn_num_layers, dropout=dropout,
                      batch_first=True, bidirectional=bidirectional)
    elif rnn_type == 'lstm':
        return nn.LSTM(hidden_dim1, hidden_dim2, rnn_num_layers, dropout=dropout,
                       batch_first=True, bidirectional=bidirectional)
    elif rnn_type == 'linear':
        return None
    # BUG FIX: carry the explanation in the exception instead of printing to
    # stdout and raising a bare exception class.
    raise NotImplementedError('RNN type ' + str(rnn_type) + ' not yet implemented.')
| 13,957
| 39.34104
| 131
|
py
|
CLOSURE
|
CLOSURE-master/vr/models/module_net.py
|
#!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.models
from vr.models.layers import (
init_modules, ResidualBlock, GlobalAveragePool, Flatten,
build_classifier, build_stem, ConcatBlock, SimpleConcatBlock)
import vr.programs
from torch.nn.init import kaiming_normal, kaiming_uniform, xavier_uniform, xavier_normal, constant
from torch.autograd import Function
from vr.models.filmed_net import FiLM, FiLMedResBlock, ConcatFiLMedResBlock, coord_map, SharedFiLMedModule, FiLMModule
from vr.models.maced_net import MACControl
class ModuleNet(nn.Module):
    """Neural Module Network executor.

    Runs a CNN stem over image features, executes a (prefix-encoded) program
    by composing per-function neural modules, and classifies the final module
    output into an answer. With use_film == 1 all modules share one FiLMed
    core and whole batches are executed in lock-step (_forward_batch);
    otherwise programs are executed one sample at a time
    (_forward_modules_ints).
    """

    def __init__(self, vocab, feature_dim,
                 use_film,
                 use_simple_block,
                 stem_num_layers,
                 stem_batchnorm,
                 stem_subsample_layers,
                 stem_kernel_size,
                 stem_stride,
                 stem_padding,
                 stem_dim,
                 module_dim,
                 module_pool,
                 module_use_gammas,
                 module_kernel_size,
                 module_input_proj,
                 module_residual=True,
                 module_batchnorm=False,
                 module_num_layers=1,
                 mod_id_loss=False,
                 kl_loss=False,
                 learn_control=False,
                 rnn_dim=None,
                 classifier_proj_dim=512,
                 classifier_downsample='maxpool2',
                 classifier_fc_layers=(1024,),
                 classifier_batchnorm=False,
                 classifier_dropout=0,
                 discriminator_proj_dim=None,
                 discriminator_downsample=None,
                 discriminator_fc_layers=None,
                 discriminator_dropout=None,
                 verbose=True,
                 type_anonymizer=False):
        super(ModuleNet, self).__init__()
        # The module-identity discriminator defaults to the classifier layout.
        if discriminator_proj_dim is None:
            discriminator_proj_dim = classifier_proj_dim
        if discriminator_downsample is None:
            discriminator_downsample = classifier_downsample
        if discriminator_fc_layers is None:
            discriminator_fc_layers = classifier_fc_layers
        if discriminator_dropout is None:
            discriminator_dropout = classifier_dropout
        self.module_dim = module_dim
        self.use_film = use_film
        self.use_simple_block = use_simple_block
        self.mod_id_loss = mod_id_loss
        self.kl_loss = kl_loss
        self.learn_control = learn_control
        # NOTE(review): stem_stride and module_input_proj are accepted but not
        # used below -- confirm whether build_stem should receive them.
        self.stem = build_stem(feature_dim[0], stem_dim, module_dim,
                               num_layers=stem_num_layers,
                               subsample_layers=stem_subsample_layers,
                               kernel_size=stem_kernel_size,
                               padding=stem_padding,
                               with_batchnorm=stem_batchnorm)
        # Probe the stem once to learn the spatial size of module features.
        tmp = self.stem(Variable(torch.zeros([1, feature_dim[0], feature_dim[1], feature_dim[2]])))
        module_H = tmp.size(2)
        module_W = tmp.size(3)
        self.coords = coord_map((module_H, module_W))
        if verbose:
            print('Here is my stem:')
            print(self.stem)
        classifier_kwargs = dict(module_C=module_dim, module_H=module_H, module_W=module_W,
                                 num_answers=len(vocab['answer_idx_to_token']),
                                 fc_dims=classifier_fc_layers,
                                 proj_dim=classifier_proj_dim,
                                 downsample=classifier_downsample,
                                 with_batchnorm=classifier_batchnorm,
                                 dropout=classifier_dropout)
        discriminator_kwargs = dict(module_C=module_dim, module_H=module_H, module_W=module_W,
                                    num_answers=len(vocab['program_idx_to_token']),
                                    fc_dims=discriminator_fc_layers,
                                    proj_dim=discriminator_proj_dim,
                                    downsample=discriminator_downsample,
                                    with_batchnorm=False,
                                    dropout=discriminator_dropout)
        if self.use_film:
            # FiLMed modules pool to a single spatial cell before classification.
            classifier_kwargs['module_H'] = 1
            classifier_kwargs['module_W'] = 1
            discriminator_kwargs['module_H'] = 1
            discriminator_kwargs['module_W'] = 1
        self.classifier = build_classifier(**classifier_kwargs)
        if self.mod_id_loss:
            # Auxiliary head that predicts which module produced an output.
            self.module_identifier = build_classifier(**discriminator_kwargs)
        if verbose:
            print('Here is my classifier:')
            print(self.classifier)
        self.function_modules = {}
        self.function_modules_num_inputs = {}
        self.vocab = vocab
        shared_block = None
        if type_anonymizer:
            shared_block = ResidualBlock(module_dim,
                                         kernel_size=module_kernel_size,
                                         with_residual=module_residual,
                                         with_batchnorm=module_batchnorm)
        elif use_film == 1:
            assert module_W == module_H
            shared_block = SharedFiLMedModule(module_dim,
                                              kernel_size=module_kernel_size,
                                              num_layers=module_num_layers,
                                              with_residual=module_residual,
                                              pool=module_pool,
                                              use_gammas=module_use_gammas,
                                              post_linear=kl_loss,
                                              learn_embeddings=not learn_control)
        if shared_block:
            self.shared_block = shared_block
            self.add_module('shared', shared_block)
        # Instantiate one module per program token (modules may wrap the
        # shared core block).
        for fn_str, fn_idx in vocab['program_token_to_idx'].items():
            num_inputs = vocab['program_token_arity'][fn_str]
            self.function_modules_num_inputs[fn_str] = num_inputs

            def create_module():
                if num_inputs > 2:
                    raise Exception('Not implemented!')
                if use_film == 1:
                    return FiLMModule(shared_block, fn_idx)
                if use_film == 2:
                    # NOTE(review): passes module_W positionally where the
                    # use_film == 1 call does not -- confirm that
                    # SharedFiLMedModule's signature supports both forms.
                    separate_core_block = SharedFiLMedModule(module_dim, module_W,
                                                             kernel_size=module_kernel_size,
                                                             with_residual=module_residual)
                    return FiLMModule(separate_core_block, fn_idx)
                if use_simple_block:
                    # brutally simple concatenation block
                    # with 2 layers, no residual connection
                    return SimpleConcatBlock(
                        module_dim,
                        kernel_size=module_kernel_size)
                if num_inputs in [0, 1]:
                    return ResidualBlock(
                        module_dim,
                        kernel_size=module_kernel_size,
                        with_residual=module_residual,
                        with_batchnorm=module_batchnorm,
                        shared_block=shared_block,
                        post_linear=kl_loss)
                else:
                    return ConcatBlock(
                        module_dim,
                        kernel_size=module_kernel_size,
                        with_residual=module_residual,
                        with_batchnorm=module_batchnorm,
                        shared_block=shared_block,
                        post_linear=kl_loss)

            mod = create_module()
            if mod is not None:
                self.add_module(fn_str, mod)
                self.function_modules[fn_str] = mod
        self.save_module_outputs = False
        self.noise_enabled = True
        if learn_control:
            self.controller = MACControl(30, rnn_dim, module_dim)

    def _forward_modules_ints_helper(self, feats, program, i, j, module_outputs):
        """Recursively execute sample i's prefix program starting at token j.

        Returns (module output, next token index). Out-of-range positions and
        <NULL> tokens are treated as 'scene' so malformed programs still yield
        an output; <START> is skipped. Outputs are recorded in module_outputs
        keyed by (i, original j).
        """
        used_fn_j = True
        orig_j = j
        if j < program.size(1):
            fn_idx = program.data[i, j]
            fn_str = self.vocab['program_idx_to_token'][fn_idx.item()]
        else:
            used_fn_j = False
            fn_str = 'scene'
        if fn_str == '<NULL>':
            used_fn_j = False
            fn_str = 'scene'
        elif fn_str == '<START>':
            used_fn_j = False
            return self._forward_modules_ints_helper(feats, program, i, j + 1, module_outputs)
        if used_fn_j:
            self.used_fns[i, j] = 1
        j += 1
        num_inputs = self.function_modules_num_inputs[fn_str]
        if fn_str == 'scene':
            num_inputs = 1
        module = self.function_modules[fn_str]
        if fn_str == 'scene':
            # 'scene' consumes the raw stem features for this sample.
            module_inputs = [feats[i:i+1]]
        else:
            module_inputs = []
            while len(module_inputs) < num_inputs:
                cur_input, j = self._forward_modules_ints_helper(feats, program, i, j, module_outputs)
                module_inputs.append(cur_input)
        if self.use_film:
            module_inputs = [feats[i:i+1]] + module_inputs
        if self.use_simple_block:
            # simple block must have 3 inputs
            if len(module_inputs) < 2:
                module_inputs.append(torch.zeros_like(module_inputs[0]))
            module_inputs = [feats[i:i+1]] + module_inputs
        module_output = module(*module_inputs)
        if self.kl_loss:
            # Reparameterized sample: the module emits [mu, logvar]; logvar is
            # shifted down by 5 and clipped above at 1 for stability.
            mu = module_output[:, :self.module_dim]
            logvar = module_output[:, self.module_dim:] - 5
            logvar = torch.min(logvar, torch.ones_like(logvar))
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(logvar) if self.noise_enabled else 0
            module_output = mu + std * eps
            # NOTE(review): assumes self._mus / self._logvars are reset by the
            # training loop before each forward -- they are not created here.
            self._mus.append(mu)
            self._logvars.append(logvar)
        # a module is uniquely identified by an (i, orig_j)
        if used_fn_j:
            module_outputs[(i, orig_j)] = module_output
        return module_output, j

    def _forward_modules_ints(self, feats, program):
        """
        feats: FloatTensor of shape (N, C, H, W) giving features for each image
        program: LongTensor of shape (N, L) giving a prefix-encoded program for
        each image.
        """
        N = feats.size(0)
        final_module_outputs = []
        # Track which program positions actually fired a module.
        self.used_fns = torch.Tensor(program.size()).fill_(0)
        module_outputs = {}
        for i in range(N):
            cur_output, _ = self._forward_modules_ints_helper(feats, program, i, 0, module_outputs)
            final_module_outputs.append(cur_output)
        final_module_outputs = torch.cat(final_module_outputs, 0)
        self.used_fns = self.used_fns.type_as(program.data).float()
        return final_module_outputs, module_outputs

    def _forward_batch(self, feats, program, question, save_activations=False):
        """Execute all programs in the batch in lock-step (use_film == 1 path).

        Programs are scanned right-to-left with an explicit stack per sample;
        `memory` accumulates one batch of module outputs per step. Returns
        (final outputs, bool tensor marking well-formed programs).
        """
        cur = None
        batch_size = program.shape[0]
        max_program_len = program.shape[1]
        stacks = [[] for j in range(batch_size)]
        program_wellformed = torch.ones(batch_size, dtype=torch.bool)
        # Placeholder input for modules with fewer than 2 real arguments.
        zero_inp = torch.zeros_like(feats)[:, :, 0, 0]
        memory = zero_inp[None, :]
        if question is not None:
            # Learned control: one control vector per program position.
            controls, control_scores = self.controller(question)
            assert max_program_len <= controls.shape[1]
            lengths = (program > 0).sum(1)
            new_controls = []
            for j, leng in zip(range(batch_size), lengths):
                #shift controls so that the last control goes to the first module
                new_controls.append(
                    torch.cat([controls[j, -leng:],
                               torch.zeros((max_program_len - leng, controls.shape[2]),
                                           device=controls.device)],
                              0))
            controls = torch.cat([c[None, :] for c in new_controls], 0)
        # skip <START> at the position 0
        for i in reversed(range(1, max_program_len)):
            fn_names = [self.vocab['program_idx_to_token'][program[j, i].item()]
                        for j in range(batch_size)]
            mask = torch.ones_like(program[:, 0])
            for j in range(batch_size):
                if fn_names[j] in ['<END>', '<NULL>']:
                    mask[j] = 0
            num_inputs = [self.function_modules_num_inputs[fn_name] if mask[j] else 0
                          for j, fn_name in enumerate(fn_names)]
            # prepare inputs: pop argument positions off each sample's stack;
            # max_program_len is a sentinel that maps to the zero input below.
            input_indices = [[max_program_len, max_program_len] for j in range(batch_size)]
            for j in range(batch_size):
                for k in range(num_inputs[j]):
                    if stacks[j]:
                        input_indices[j][k] = stacks[j].pop()
                    else:
                        program_wellformed[j] = False
            inputs = []
            for k in range(2):
                indices = [input_indices[j][k] - i - 1 for j in range(batch_size)]
                inputs.append(memory[indices, range(batch_size)])
            # run the batched compute
            # BUG FIX: `if question` raises "Boolean value of Tensor is
            # ambiguous" for a real (multi-element) question tensor; test
            # against None as the branch above already does.
            control_i = controls[:, i] if question is not None else program[:, i]
            cur = self.shared_block(feats, control_i, inputs[0], inputs[1])
            memory = torch.cat([cur[None, :], memory])
            # push the new results onto the stack
            for j in range(batch_size):
                if mask[j]:
                    stacks[j].append(i)
        # A well-formed program leaves exactly one value on each stack.
        for j in range(batch_size):
            if len(stacks[j]) != 1:
                program_wellformed[j] = False
        if save_activations and self.learn_control:
            self.control_scores = control_scores
        return cur, program_wellformed

    def forward(self, x, program, save_activations=False, question=None):
        """Run stem -> program execution -> classifier.

        Returns (answer scores, program_wellformed or None, None).
        """
        N = x.size(0)
        assert N == len(program)
        feats = self.stem(x)
        program_wellformed = None
        if self.use_film == 1:
            final_module_outputs, program_wellformed = self._forward_batch(
                feats, program, question=question if self.learn_control else None,
                save_activations=save_activations)
        else:
            final_module_outputs, _ = self._forward_modules_ints(feats, program)
        scores = self.classifier(final_module_outputs)
        return scores, program_wellformed, None
| 15,132
| 40.803867
| 138
|
py
|
CLOSURE
|
CLOSURE-master/vr/ns_vqa/parser.py
|
import torch
import torch.nn as nn
from torch.autograd import Variable
from . import create_seq2seq_net, TrainOptions
class Seq2seqParser(nn.Module):
    """Model interface for seq2seq parser.

    Wraps the question->program seq2seq network with option handling,
    length-sorting for packed sequences, and REINFORCE training hooks.
    """
    def __init__(self, vocab):
        super().__init__()
        # Options come from TrainOptions defaults (BaseOptions.parse() is
        # called with an empty argv, so real CLI arguments are not consulted).
        self.opt = TrainOptions().parse()
        self.vocab = vocab
        self.net_params = self._get_net_params(self.opt, self.vocab)
        self.seq2seq = create_seq2seq_net(**self.net_params)
        self.variable_lengths = self.net_params['variable_lengths']
        self.end_id = self.net_params['end_id']
        #self.gpu_ids = opt.gpu_ids
        self.criterion = nn.NLLLoss()
    def set_input(self, x, y=None):
        """Store question batch x (and optional program batch y) on self,
        length-sorted when variable_lengths is enabled."""
        input_lengths, idx_sorted = None, None
        if self.variable_lengths:
            # Sort descending by length so sequences can be packed downstream.
            x, y, input_lengths, idx_sorted = self._sort_batch(x, y)
        #self.x = self._to_var(x)
        #if y is not None:
        #self.y = self._to_var(y)
        #else:
        #self.y = None
        self.x = x
        self.y = y
        self.input_lengths = input_lengths
        self.idx_sorted = idx_sorted
    def log_likelihood(self, x, y):
        """Return the NLL loss of target programs y given questions x."""
        self.set_input(x, y)
        assert self.y is not None, 'Must set y value'
        output_logprob = self.seq2seq(self.x, self.y, self.input_lengths)
        # Shift by one: position t predicts token t+1, so drop the last
        # prediction and the initial <START> target.
        loss = self.criterion(output_logprob[:,:-1,:].contiguous().view(-1, output_logprob.size(2)),
                              self.y[:,1:].contiguous().view(-1))
        return loss
    def forward(self, x, argmax=False):
        """Sample (or argmax-decode) programs for questions x.

        Returns (sequences restored to input order, per-step log-probs).
        """
        self.set_input(x)
        rl_seq, logprobs = self.seq2seq.reinforce_forward(self.x, self.input_lengths, argmax=argmax)
        rl_seq = self._restore_order(rl_seq.data.cpu())
        logprobs = self._restore_order(logprobs)
        self.reward = None # Need to recompute reward from environment each time a new sequence is sampled
        return rl_seq.to(x.device), logprobs
    def reinforce_backward(self, entropy_factor=0.0):
        """Backpropagate the REINFORCE loss; self.reward must be set first."""
        assert self.reward is not None, 'Must run forward sampling and set reward before REINFORCE'
        self.seq2seq.reinforce_backward(self.reward, entropy_factor)
    def parse(self):
        """Greedy-decode programs for the most recently set input batch."""
        output_sequence = self.seq2seq.sample_output(self.x, self.input_lengths)
        output_sequence = self._restore_order(output_sequence.data.cpu())
        return output_sequence
    def _get_net_params(self, opt, vocab):
        # Translate parsed options + vocab sizes into create_seq2seq_net kwargs.
        net_params = {
            'input_vocab_size': len(vocab['question_token_to_idx']),
            'output_vocab_size': len(vocab['program_token_to_idx']),
            'hidden_size': opt.hidden_size,
            'word_vec_dim': opt.word_vec_dim,
            'n_layers': opt.n_layers,
            'bidirectional': opt.bidirectional,
            'variable_lengths': opt.variable_lengths,
            'use_attention': opt.use_attention,
            'encoder_max_len': opt.encoder_max_len,
            'decoder_max_len': opt.decoder_max_len,
            'start_id': opt.start_id,
            'end_id': opt.end_id,
            'word2vec_path': opt.word2vec_path,
            'fix_embedding': opt.fix_embedding,
        }
        return net_params
    def _sort_batch(self, x, y):
        """Sort the batch by descending sequence length.

        Length = index of the first end_id token + 1.
        Returns (x_sorted, y_sorted or None, lengths as numpy, sort indices).
        """
        _, lengths = torch.eq(x, self.end_id).max(1)
        lengths += 1
        lengths_sorted, idx_sorted = lengths.sort(0, descending=True)
        x_sorted = x[idx_sorted]
        y_sorted = None
        if y is not None:
            y_sorted = y[idx_sorted]
        lengths_list = lengths_sorted.cpu().numpy()
        return x_sorted, y_sorted, lengths_list, idx_sorted
    def _restore_order(self, x):
        """Undo the length-sort permutation applied by _sort_batch."""
        if self.idx_sorted is not None:
            # Build the inverse permutation via scatter.
            inv_idxs = self.idx_sorted.clone()
            inv_idxs.scatter_(0, self.idx_sorted,
                              torch.arange(x.size(0)).to(inv_idxs.device).long())
            return x[inv_idxs]
        return x
    def _to_var(self, x):
        # NOTE(review): self.gpu_ids is never assigned (see the commented-out
        # line in __init__), so calling this would raise AttributeError.
        # Appears to be dead code kept from an earlier version.
        if len(self.gpu_ids) > 0 and torch.cuda.is_available():
            x = x.cuda()
        return Variable(x)
    def _to_numpy(self, x):
        # Detach to CPU and convert to a float numpy array.
        return x.data.cpu().numpy().astype(float)
| 4,059
| 37.301887
| 106
|
py
|
CLOSURE
|
CLOSURE-master/vr/ns_vqa/base_rnn.py
|
import torch.nn as nn
class BaseRNN(nn.Module):
    """Common configuration holder for encoder/decoder RNNs.

    Stores the shared hyper-parameters, resolves the requested cell type to
    the corresponding torch.nn class (kept in self.rnn_cell for subclasses to
    instantiate), and builds the shared input-dropout layer. Subclasses must
    implement forward().
    """

    def __init__(self, vocab_size, max_len, hidden_size, input_dropout_p,
                 dropout_p, n_layers, rnn_cell):
        super(BaseRNN, self).__init__()
        self.vocab_size = vocab_size
        self.max_len = max_len
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.input_dropout_p = input_dropout_p
        self.dropout_p = dropout_p
        # Map the cell name to its class; reject anything unrecognized.
        supported_cells = {'lstm': nn.LSTM, 'gru': nn.GRU}
        self.rnn_cell = supported_cells.get(rnn_cell)
        if self.rnn_cell is None:
            raise ValueError('Unsupported RNN Cell: %s' % rnn_cell)
        self.input_dropout = nn.Dropout(p=input_dropout_p)

    def forward(self, *args, **kwargs):
        raise NotImplementedError()
| 829
| 28.642857
| 74
|
py
|
CLOSURE
|
CLOSURE-master/vr/ns_vqa/train_options.py
|
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
    """Train option class.

    Extends BaseOptions with dataset limits, seq2seq model hyper-parameters,
    and training-loop settings. Boolean-ish flags (bidirectional,
    variable_lengths, use_attention, ...) are declared with type=int and
    toggled as 0/1 throughout the codebase.
    """
    def __init__(self):
        super(TrainOptions, self).__init__()
        # Data
        self.parser.add_argument('--max_train_samples', default=None, type=int, help='max number of training samples')
        self.parser.add_argument('--max_val_samples', default=10000, type=int, help='max number of val samples')
        # Model
        self.parser.add_argument('--load_checkpoint_path', default=None, type=str, help='checkpoint path')
        self.parser.add_argument('--encoder_max_len', default=50, type=int, help='max length of input sequence')
        self.parser.add_argument('--decoder_max_len', default=27, type=int, help='max length of output sequence')
        self.parser.add_argument('--hidden_size', default=256, type=int, help='hidden layer dimension')
        self.parser.add_argument('--word_vec_dim', default=300, type=int, help='dimension of word embedding vector')
        self.parser.add_argument('--input_dropout_p', default=0., type=float, help='dropout probability for input sequence')
        self.parser.add_argument('--dropout_p', default=0., type=float, help='dropout probability for output sequence')
        self.parser.add_argument('--n_layers', default=2, type=int, help='number of hidden layers')
        self.parser.add_argument('--rnn_cell', default='lstm', type=str, help='encoder rnn cell type, options: lstm, gru')
        # NOTE(review): default=True with type=int -- works because the default
        # bypasses the type converter, but 0/1 would be more consistent.
        self.parser.add_argument('--bidirectional', default=True, type=int, help='bidirectional encoder')
        self.parser.add_argument('--variable_lengths', default=1, type=int, help='variable input length')
        self.parser.add_argument('--use_attention', default=1, type=int, help='use attention in decoder')
        self.parser.add_argument('--use_input_embedding', default=0, type=int, help='use pretrained word embedding for input sentences')
        self.parser.add_argument('--fix_input_embedding', default=0, type=int, help='fix word embedding for input sentences')
        self.parser.add_argument('--start_id', default=1, type=int, help='id for start token')
        self.parser.add_argument('--end_id', default=2, type=int, help='id for end token')
        self.parser.add_argument('--null_id', default=0, type=int, help='id for null token')
        self.parser.add_argument('--word2vec_path', default=None, type=str, help='pretrained embedding path')
        self.parser.add_argument('--fix_embedding', default=0, type=int, help='fix pretrained embedding')
        # Training
        self.parser.add_argument('--reinforce', default=0, type=int, help='train reinforce')
        self.parser.add_argument('--batch_size', default=64, type=int, help='batch size')
        self.parser.add_argument('--learning_rate', default=7e-4, type=float, help='learning rate')
        self.parser.add_argument('--entropy_factor', default=0.0, type=float, help='entropy weight in reinforce loss')
        self.parser.add_argument('--num_iters', default=20000, type=int, help='total number of iterations')
        self.parser.add_argument('--reward_decay', default=0.9, type=float, help='decay weight for reward moving average')
        self.parser.add_argument('--display_every', default=20, type=int, help='display every')
        self.parser.add_argument('--checkpoint_every', default=1000, type=int, help='validate and save checkpoint every')
        self.parser.add_argument('--visualize_training', default=0, type=int, help='visualize training with tensorboard')
        # Used by BaseOptions.parse() to pick the options dump filename.
        self.is_train = True
| 3,556
| 79.840909
| 136
|
py
|
CLOSURE
|
CLOSURE-master/vr/ns_vqa/base_options.py
|
import os
import argparse
import numpy as np
import torch
class BaseOptions():
    """Base option class.

    Declares shared command-line options and implements parse(), which also
    performs side effects: GPU device selection, manual seeding, printing the
    resolved options, and writing them to <run_dir>/{train,test}_opt.txt.
    Subclasses must define self.is_train before parse() is called.
    """
    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument('--run_dir', default='_scratch/test_run', type=str, help='experiment directory')
        self.parser.add_argument('--dataset', default='clevr', type=str, help='select dataset, options: clevr, clevr-humans')
        # Dataloader
        self.parser.add_argument('--shuffle', default=1, type=int, help='shuffle dataset')
        self.parser.add_argument('--num_workers', default=1, type=int, help='number of workers for loading data')
        # Run
        self.parser.add_argument('--manual_seed', default=None, type=int, help='manual seed')
        self.parser.add_argument('--gpu_ids', default='0', type=str, help='ids of gpu to be used')
        self.parser.add_argument('--visualize', default=0, type=int, help='visualize experiment')
        # Dataset catalog
        # - CLEVR
        self.parser.add_argument('--clevr_train_scene_path', default='../data/raw/CLEVR_v1.0/scenes/CLEVR_train_scenes.json',
                                 type=str, help='path to clevr train scenes')
        self.parser.add_argument('--clevr_val_scene_path', default='../data/raw/CLEVR_v1.0/scenes/CLEVR_val_scenes.json',
                                 type=str, help='path to clevr val scenes')
        self.parser.add_argument('--clevr_train_question_path', default='../data/reason/clevr_h5/clevr_train_questions.h5',
                                 type=str, help='path to clevr train questions')
        self.parser.add_argument('--clevr_val_question_path', default='../data/reason/clevr_h5/clevr_val_questions.h5',
                                 type=str, help='path to clevr val questions')
        self.parser.add_argument('--clevr_vocab_path', default='../data/reason/clevr_h5/clevr_vocab.json',
                                 type=str, help='path to clevr vocab')
    def parse(self):
        # Instantiate option
        # NOTE(review): parse_args([]) ignores real CLI arguments -- every run
        # uses the declared defaults. Presumably intentional (options embedded
        # in a larger program); confirm before relying on CLI overrides.
        self.opt = self.parser.parse_args([])
        # Parse gpu id list
        str_gpu_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_gpu_ids:
            if str_id.isdigit() and int(str_id) >= 0:
                self.opt.gpu_ids.append(int(str_id))
        if len(self.opt.gpu_ids) > 0 and torch.cuda.is_available():
            torch.cuda.set_device(self.opt.gpu_ids[0])
        else:
            print('| using cpu')
            self.opt.gpu_ids = []
        # Set manual seed
        if self.opt.manual_seed is not None:
            torch.manual_seed(self.opt.manual_seed)
            if len(self.opt.gpu_ids) > 0 and torch.cuda.is_available():
                torch.cuda.manual_seed(self.opt.manual_seed)
        # Print and save options
        args = vars(self.opt)
        print('| options')
        for k, v in args.items():
            print('%s: %s' % (str(k), str(v)))
        if not os.path.isdir(self.opt.run_dir):
            os.makedirs(self.opt.run_dir)
        # self.is_train is set by the subclass (e.g. TrainOptions).
        if self.is_train:
            file_path = os.path.join(self.opt.run_dir, 'train_opt.txt')
        else:
            file_path = os.path.join(self.opt.run_dir, 'test_opt.txt')
        with open(file_path, 'wt') as fout:
            fout.write('| options\n')
            for k, v in args.items():
                fout.write('%s: %s\n' % (str(k), str(v)))
        return self.opt
| 3,439
| 45.486486
| 125
|
py
|
CLOSURE
|
CLOSURE-master/vr/ns_vqa/seq2seq.py
|
import torch
import torch.nn as nn
class Seq2seq(nn.Module):
    """Seq2seq model module: encoder + decoder wrapper with REINFORCE hooks."""

    def __init__(self, encoder, decoder):
        super(Seq2seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, x, y, input_lengths=None):
        """Teacher-forced pass; returns decoder output log-probabilities."""
        encoder_outputs, encoder_hidden = self.encoder(x, input_lengths)
        decoder_outputs, decoder_hidden = self.decoder(y, encoder_outputs, encoder_hidden)
        return decoder_outputs

    def reinforce_forward(self, x, input_lengths=None, argmax=False):
        """Sample (or argmax-decode when argmax=True) an output sequence.

        Returns (symbols, logprobs) stacked to shape (N, T). The raw per-step
        lists are kept on self for the subsequent reinforce_backward call.
        """
        encoder_outputs, encoder_hidden = self.encoder(x, input_lengths)
        output_symbols, output_logprobs = self.decoder.forward_sample(
            encoder_outputs, encoder_hidden, reinforce_sample=not argmax)
        # BUG FIX: reinforce_backward asserts on self.output_symbols /
        # self.output_logprobs, but they were never stored here, so calling
        # reinforce_backward after reinforce_forward raised AttributeError.
        self.output_symbols = output_symbols
        self.output_logprobs = output_logprobs
        return (torch.stack(output_symbols).transpose(0, 1),
                torch.stack(output_logprobs).transpose(0, 1))

    def reinforce_backward(self, reward, entropy_factor=0.0):
        """Backpropagate the REINFORCE objective for the last sampled batch.

        Loss per step is -logprob(sampled symbol) * reward, optionally plus an
        entropy regularizer weighted by entropy_factor.
        """
        assert self.output_logprobs is not None and self.output_symbols is not None, 'must call reinforce_forward first'
        losses = []
        grad_output = []
        for i, symbol in enumerate(self.output_symbols):
            if len(self.output_symbols[0].shape) == 1:
                # Pick each sample's own logprob for its sampled symbol.
                loss = - torch.diag(torch.index_select(self.output_logprobs[i], 1, symbol)).sum()*reward \
                       + entropy_factor*(self.output_logprobs[i]*torch.exp(self.output_logprobs[i])).sum()
            else:
                loss = - self.output_logprobs[i]*reward
            losses.append(loss.sum())
            grad_output.append(None)
        torch.autograd.backward(losses, grad_output, retain_graph=True)
| 1,700
| 42.615385
| 131
|
py
|
CLOSURE
|
CLOSURE-master/vr/ns_vqa/utils.py
|
import os
import json
import numpy as np
import torch
def mkdirs(paths):
    """Create the given directory, or each directory in a list, if missing."""
    path_list = paths if isinstance(paths, list) else [paths]
    for path in path_list:
        if not os.path.exists(path):
            os.makedirs(path)
def invert_dict(d):
    """Return a mapping from the values of `d` back to their keys."""
    return dict((value, key) for key, value in d.items())
def load_vocab(path):
    """Load a vocab JSON file and add inverse (idx -> token) lookup tables.

    Also sanity-checks that the special tokens <NULL>/<START>/<END> sit at
    indices 0/1/2 in both the question and program vocabularies.
    """
    with open(path, 'r') as f:
        vocab = json.load(f)
    for name in ('question', 'program', 'answer'):
        forward = vocab['%s_token_to_idx' % name]
        vocab['%s_idx_to_token' % name] = {idx: tok for tok, idx in forward.items()}
    # Sanity check: make sure <NULL>, <START>, and <END> are consistent
    for field in ('question_token_to_idx', 'program_token_to_idx'):
        assert vocab[field]['<NULL>'] == 0
        assert vocab[field]['<START>'] == 1
        assert vocab[field]['<END>'] == 2
    return vocab
def load_scenes(scenes_json):
    """Parse a CLEVR scenes JSON file into per-scene object tables.

    Each scene becomes a list of dicts with keys id, position, color,
    material, shape, and size. When a '3d_coords' entry is present, the world
    coordinates are projected onto the scene's camera-aligned axes.
    """
    with open(scenes_json) as f:
        scenes_dict = json.load(f)['scenes']
    scenes = []
    for scene in scenes_dict:
        table = []
        for obj_idx, obj in enumerate(scene['objects']):
            item = {'id': '%d-%d' % (scene['image_index'], obj_idx)}
            if '3d_coords' in obj:
                # Project onto the 'right' and 'front' direction vectors; keep z.
                item['position'] = [np.dot(obj['3d_coords'], scene['directions']['right']),
                                    np.dot(obj['3d_coords'], scene['directions']['front']),
                                    obj['3d_coords'][2]]
            else:
                item['position'] = obj['position']
            for attr in ('color', 'material', 'shape', 'size'):
                item[attr] = obj[attr]
            table.append(item)
        scenes.append(table)
    return scenes
def load_embedding(path):
    """Load a .npy embedding matrix as a float32 torch tensor."""
    weights = np.load(path)
    return torch.Tensor(weights)
| 2,041
| 31.935484
| 85
|
py
|
CLOSURE
|
CLOSURE-master/vr/ns_vqa/encoder.py
|
import torch.nn as nn
from .base_rnn import BaseRNN
class Encoder(BaseRNN):
    """Question encoder RNN.

    Embeds token indices and runs them through an LSTM/GRU; optionally packs
    variable-length sequences and/or initializes the embedding table from a
    pretrained word2vec matrix (frozen when fix_embedding is set).
    """

    def __init__(self, vocab_size, max_len, word_vec_dim, hidden_size, n_layers,
                 input_dropout_p=0., dropout_p=0., bidirectional=False, rnn_cell='lstm',
                 variable_lengths=False, word2vec=None, fix_embedding=False):
        super(Encoder, self).__init__(vocab_size, max_len, hidden_size,
                                      input_dropout_p, dropout_p, n_layers, rnn_cell)
        self.variable_lengths = variable_lengths
        if word2vec is None:
            self.word_vec_dim = word_vec_dim
            self.embedding = nn.Embedding(vocab_size, word_vec_dim)
        else:
            # Seed the embedding table from the pretrained matrix; its width
            # overrides word_vec_dim.
            assert word2vec.size(0) == vocab_size
            self.word_vec_dim = word2vec.size(1)
            self.embedding = nn.Embedding(vocab_size, self.word_vec_dim)
            self.embedding.weight = nn.Parameter(word2vec)
        if fix_embedding:
            self.embedding.weight.requires_grad = False
        self.rnn = self.rnn_cell(self.word_vec_dim, hidden_size, n_layers,
                                 batch_first=True, bidirectional=bidirectional,
                                 dropout=dropout_p)

    def forward(self, input_var, input_lengths=None):
        """Embed, optionally pack, and encode a batch of token sequences.

        Returns (per-step outputs padded back to batch-first, final hidden).
        """
        embedded = self.input_dropout(self.embedding(input_var))
        if not self.variable_lengths:
            return self.rnn(embedded)
        packed = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths,
                                                   batch_first=True)
        output, hidden = self.rnn(packed)
        output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
        return output, hidden
| 1,726
| 44.447368
| 119
|
py
|
CLOSURE
|
CLOSURE-master/vr/ns_vqa/clevr_executor.py
|
import torch
import random
import json
# Attribute vocabularies for CLEVR scene annotations.
CLEVR_COLORS = ['blue', 'brown', 'cyan', 'gray', 'green', 'purple', 'red', 'yellow']
CLEVR_MATERIALS = ['rubber', 'metal']
CLEVR_SHAPES = ['cube', 'cylinder', 'sphere']
CLEVR_SIZES = ['large', 'small']
# For each terminal program module, the answers it can legally produce.
# Used by ClevrExecutor.run(guess=True) to pick a random valid answer when
# symbolic execution fails.
CLEVR_ANSWER_CANDIDATES = {
    'count': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10'],
    'equal_color': ['yes', 'no'],
    'equal_integer': ['yes', 'no'],
    'equal_material': ['yes', 'no'],
    'equal_shape': ['yes', 'no'],
    'equal_size': ['yes', 'no'],
    'exist': ['yes', 'no'],
    'greater_than': ['yes', 'no'],
    'less_than': ['yes', 'no'],
    'query_color': ['blue', 'brown', 'cyan', 'gray', 'green', 'purple', 'red', 'yellow'],
    'query_material': ['metal', 'rubber'],
    'query_size': ['small', 'large'],
    'query_shape': ['cube', 'cylinder', 'sphere'],
    'same_color': ['yes', 'no'],
    'same_material': ['yes', 'no'],
    'same_size': ['yes', 'no'],
    'same_shape': ['yes', 'no']
}
class ClevrExecutor:
    """Symbolic program executor for CLEVR.

    Executes an encoded program (indices into vocab['program_idx_to_token'])
    against a symbolic scene — a list of object dicts with 'color',
    'material', 'shape', 'size', 'position' and 'id' keys — and returns the
    answer as a string ('error' on failure).

    The ~30 filter/equal/query/relate/same modules were copy-pasted variants
    of five patterns; they now delegate to the private helpers below. All
    public method names and signatures are unchanged.
    """
    def __init__(self, vocab):
        # vocab must provide 'program_idx_to_token' and 'answer_token_to_idx'.
        self.vocab = vocab
        self.colors = CLEVR_COLORS
        self.materials = CLEVR_MATERIALS
        self.shapes = CLEVR_SHAPES
        self.sizes = CLEVR_SIZES
        self.answer_candidates = CLEVR_ANSWER_CANDIDATES
        self.modules = {}
        self._register_modules()
    def __call__(self, scenes, programs):
        """Run a batch of programs; return a LongTensor of answer indices (-1 if unknown)."""
        preds = []
        for i in range(programs.shape[0]):
            pred = self.run(programs[i].cpu().numpy(), scenes[i])
            preds.append(self.vocab['answer_token_to_idx'].get(pred, -1))
        return torch.LongTensor(preds)
    def run(self, x, scene, guess=False, debug=False):
        """Execute one encoded program `x` on `scene`.

        The program is executed back-to-front (up to the first '<END>'),
        threading the current result through `ans`; a second 'scene' token
        stashes `ans` into `temp` so binary modules get two operands. When
        `guess` is True, an 'error' outcome is replaced by a random answer
        valid for the program's final module.
        """
        assert self.modules, 'Must have scene annotations and define modules first'
        ans, temp = None, None
        # Find the length of the program sequence before the '<END>' token.
        # The scan runs from the back, so the final value of `length` is the
        # position (1-based) of the first '<END>' from the front.
        length = 0
        for k in range(len(x)):
            l = len(x) - k
            if self.vocab['program_idx_to_token'][x[l-1]] == '<END>':
                length = l
        if length == 0:
            return 'error'
        self.exe_trace = []
        for j in range(length):
            i = length - 1 - j
            token = self.vocab['program_idx_to_token'][x[i]]
            if token == 'scene':
                if temp is not None:
                    # Two pending operands already: malformed program.
                    ans = 'error'
                    break
                temp = ans
                ans = list(scene)
            elif token in self.modules:
                module = self.modules[token]
                if token.startswith('same') or token.startswith('relate'):
                    # These modules need the full scene, not the saved operand.
                    ans = module(ans, scene)
                else:
                    ans = module(ans, temp)
                if ans == 'error':
                    break
            self.exe_trace.append(ans)
            if debug:
                print(token)
                print('ans:')
                self._print_debug_message(ans)
                print('temp: ')
                self._print_debug_message(temp)
                print()
        ans = str(ans)
        if ans == 'error' and guess:
            final_module = self.vocab['program_idx_to_token'][x[0]]
            if final_module in self.answer_candidates:
                ans = random.choice(self.answer_candidates[final_module])
        return ans
    def _print_debug_message(self, x):
        # Pretty-print an intermediate result (object list, object, or scalar).
        if type(x) == list:
            for o in x:
                print(self._object_info(o))
        elif type(x) == dict:
            print(self._object_info(x))
        else:
            print(x)
    def _object_info(self, obj):
        return '%s %s %s %s at %s' % (obj['size'], obj['color'], obj['material'], obj['shape'], str(obj['position']))
    def _register_modules(self):
        # Maps program tokens to the bound methods that implement them.
        self.modules['count'] = self.count
        self.modules['equal_color'] = self.equal_color
        self.modules['equal_integer'] = self.equal_integer
        self.modules['equal_material'] = self.equal_material
        self.modules['equal_shape'] = self.equal_shape
        self.modules['equal_size'] = self.equal_size
        self.modules['exist'] = self.exist
        self.modules['filter_color[blue]'] = self.filter_blue
        self.modules['filter_color[brown]'] = self.filter_brown
        self.modules['filter_color[cyan]'] = self.filter_cyan
        self.modules['filter_color[gray]'] = self.filter_gray
        self.modules['filter_color[green]'] = self.filter_green
        self.modules['filter_color[purple]'] = self.filter_purple
        self.modules['filter_color[red]'] = self.filter_red
        self.modules['filter_color[yellow]'] = self.filter_yellow
        self.modules['filter_material[rubber]'] = self.filter_rubber
        self.modules['filter_material[metal]'] = self.filter_metal
        self.modules['filter_shape[cube]'] = self.filter_cube
        self.modules['filter_shape[cylinder]'] = self.filter_cylinder
        self.modules['filter_shape[sphere]'] = self.filter_sphere
        self.modules['filter_size[large]'] = self.filter_large
        self.modules['filter_size[small]'] = self.filter_small
        self.modules['greater_than'] = self.greater_than
        self.modules['less_than'] = self.less_than
        self.modules['intersect'] = self.intersect
        self.modules['query_color'] = self.query_color
        self.modules['query_material'] = self.query_material
        self.modules['query_shape'] = self.query_shape
        self.modules['query_size'] = self.query_size
        self.modules['relate[behind]'] = self.relate_behind
        self.modules['relate[front]'] = self.relate_front
        self.modules['relate[left]'] = self.relate_left
        self.modules['relate[right]'] = self.relate_right
        self.modules['same_color'] = self.same_color
        self.modules['same_material'] = self.same_material
        self.modules['same_shape'] = self.same_shape
        self.modules['same_size'] = self.same_size
        self.modules['union'] = self.union
        self.modules['unique'] = self.unique
    # ------------------------------------------------------------------
    # Shared implementations. Every public module below is a thin wrapper,
    # so the names registered in _register_modules() keep working.
    # ------------------------------------------------------------------
    def _filter(self, scene, attribute, value):
        """Keep objects whose `attribute` equals `value`; 'error' if scene is not a list."""
        if type(scene) == list:
            return [o for o in scene if o[attribute] == value]
        return 'error'
    def _equal(self, value1, value2, domain):
        """'yes'/'no' comparison of two attribute values drawn from `domain`."""
        if type(value1) == str and value1 in domain and type(value2) == str and value2 in domain:
            return 'yes' if value1 == value2 else 'no'
        return 'error'
    def _query(self, obj, attribute):
        """Return obj[attribute] for a single object dict; 'error' otherwise."""
        if type(obj) == dict and attribute in obj:
            return obj[attribute]
        return 'error'
    def _relate(self, obj, scene, axis, forward):
        """Objects whose position along `axis` is greater (forward=True) or
        smaller (forward=False) than obj's. Axis 0 is left/right, 1 is
        front/behind."""
        if type(obj) == dict and 'position' in obj and type(scene) == list:
            ref = obj['position'][axis]
            if forward:
                return [o for o in scene if o['position'][axis] > ref]
            return [o for o in scene if o['position'][axis] < ref]
        return 'error'
    def _same(self, obj, scene, attribute):
        """Other objects sharing obj's `attribute` (obj itself excluded by id)."""
        if type(obj) == dict and attribute in obj and type(scene) == list:
            return [o for o in scene
                    if o[attribute] == obj[attribute] and o['id'] != obj['id']]
        return 'error'
    # --- public modules -------------------------------------------------
    def count(self, scene, _):
        if type(scene) == list:
            return len(scene)
        return 'error'
    def equal_color(self, color1, color2):
        return self._equal(color1, color2, self.colors)
    def equal_integer(self, integer1, integer2):
        # type(x) == int deliberately excludes bools and floats.
        if type(integer1) == int and type(integer2) == int:
            return 'yes' if integer1 == integer2 else 'no'
        return 'error'
    def equal_material(self, material1, material2):
        return self._equal(material1, material2, self.materials)
    def equal_shape(self, shape1, shape2):
        return self._equal(shape1, shape2, self.shapes)
    def equal_size(self, size1, size2):
        return self._equal(size1, size2, self.sizes)
    def exist(self, scene, _):
        if type(scene) == list:
            return 'yes' if len(scene) != 0 else 'no'
        return 'error'
    def filter_blue(self, scene, _):
        return self._filter(scene, 'color', 'blue')
    def filter_brown(self, scene, _):
        return self._filter(scene, 'color', 'brown')
    def filter_cyan(self, scene, _):
        return self._filter(scene, 'color', 'cyan')
    def filter_gray(self, scene, _):
        return self._filter(scene, 'color', 'gray')
    def filter_green(self, scene, _):
        return self._filter(scene, 'color', 'green')
    def filter_purple(self, scene, _):
        return self._filter(scene, 'color', 'purple')
    def filter_red(self, scene, _):
        return self._filter(scene, 'color', 'red')
    def filter_yellow(self, scene, _):
        return self._filter(scene, 'color', 'yellow')
    def filter_rubber(self, scene, _):
        return self._filter(scene, 'material', 'rubber')
    def filter_metal(self, scene, _):
        return self._filter(scene, 'material', 'metal')
    def filter_cube(self, scene, _):
        return self._filter(scene, 'shape', 'cube')
    def filter_cylinder(self, scene, _):
        return self._filter(scene, 'shape', 'cylinder')
    def filter_sphere(self, scene, _):
        return self._filter(scene, 'shape', 'sphere')
    def filter_large(self, scene, _):
        return self._filter(scene, 'size', 'large')
    def filter_small(self, scene, _):
        return self._filter(scene, 'size', 'small')
    def greater_than(self, integer1, integer2):
        if type(integer1) == int and type(integer2) == int:
            return 'yes' if integer1 > integer2 else 'no'
        return 'error'
    def less_than(self, integer1, integer2):
        if type(integer1) == int and type(integer2) == int:
            return 'yes' if integer1 < integer2 else 'no'
        return 'error'
    def intersect(self, scene1, scene2):
        # Preserves scene1's order.
        if type(scene1) == list and type(scene2) == list:
            return [o for o in scene1 if o in scene2]
        return 'error'
    def query_color(self, obj, _):
        return self._query(obj, 'color')
    def query_material(self, obj, _):
        return self._query(obj, 'material')
    def query_shape(self, obj, _):
        return self._query(obj, 'shape')
    def query_size(self, obj, _):
        return self._query(obj, 'size')
    def relate_behind(self, obj, scene):
        return self._relate(obj, scene, axis=1, forward=False)
    def relate_front(self, obj, scene):
        return self._relate(obj, scene, axis=1, forward=True)
    def relate_left(self, obj, scene):
        return self._relate(obj, scene, axis=0, forward=False)
    def relate_right(self, obj, scene):
        return self._relate(obj, scene, axis=0, forward=True)
    def same_color(self, obj, scene):
        return self._same(obj, scene, 'color')
    def same_material(self, obj, scene):
        return self._same(obj, scene, 'material')
    def same_shape(self, obj, scene):
        return self._same(obj, scene, 'shape')
    def same_size(self, obj, scene):
        return self._same(obj, scene, 'size')
    def union(self, scene1, scene2):
        # scene2's objects first, then scene1's objects not already present.
        if type(scene1) == list and type(scene2) == list:
            output = list(scene2)
            for o in scene1:
                if o not in scene2:
                    output.append(o)
            return output
        return 'error'
    def unique(self, scene, _):
        if type(scene) == list and len(scene) > 0:
            return scene[0]
        return 'error'
| 15,787
| 32.378436
| 125
|
py
|
CLOSURE
|
CLOSURE-master/vr/ns_vqa/decoder.py
|
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from .base_rnn import BaseRNN
from .attention import Attention
def logical_or(x, y):
    """Element-wise OR for {0,1} mask tensors: add, then clamp in place to [0, 1]."""
    combined = x + y
    return combined.clamp_(0, 1)
def logical_not(x):
    # Element-wise NOT for {0,1} mask tensors: true exactly where x is zero.
    # Returns a new comparison-result tensor (no in-place update).
    return x == 0
class Decoder(BaseRNN):
    """Decoder RNN that emits output tokens, optionally attending over the
    encoder outputs.

    `start_id`/`end_id` delimit generated sequences. When the encoder is
    bidirectional, its two directions are concatenated (see _cat_directions),
    so the decoder's hidden size is doubled to match.
    """
    def __init__(self, vocab_size, max_len, word_vec_dim, hidden_size,
                 n_layers, start_id=1, end_id=2, rnn_cell='lstm',
                 bidirectional=False, input_dropout_p=0,
                 dropout_p=0, use_attention=False):
        super(Decoder, self).__init__(vocab_size, max_len, hidden_size,
                                      input_dropout_p, dropout_p, n_layers, rnn_cell)
        self.max_length = max_len
        self.output_size = vocab_size
        self.hidden_size = hidden_size
        self.word_vec_dim = word_vec_dim
        self.bidirectional_encoder = bidirectional
        if bidirectional:
            # Must match the forward/backward concatenation done in
            # _cat_directions() when bridging the encoder state.
            self.hidden_size *= 2
        self.use_attention = use_attention
        self.start_id = start_id
        self.end_id = end_id
        self.embedding = nn.Embedding(self.output_size, self.word_vec_dim)
        self.rnn = self.rnn_cell(self.word_vec_dim, self.hidden_size, n_layers, batch_first=True, dropout=dropout_p)
        self.out_linear = nn.Linear(self.hidden_size, self.output_size)
        if use_attention:
            self.attention = Attention(self.hidden_size)
    def forward_step(self, input_var, hidden, encoder_outputs):
        """One decoding step over a (batch, steps) block of input token ids.

        Returns (log-probabilities of shape (batch, steps, vocab), new hidden
        state, attention map or None).
        """
        batch_size = input_var.size(0)
        output_size = input_var.size(1)  # number of time steps decoded at once
        embedded = self.embedding(input_var)
        embedded = self.input_dropout(embedded)
        output, hidden = self.rnn(embedded, hidden)
        attn = None
        if self.use_attention:
            output, attn = self.attention(output, encoder_outputs)
        output = self.out_linear(output.contiguous().view(-1, self.hidden_size))
        predicted_softmax = F.log_softmax(output.view(batch_size, output_size, -1), 2)
        return predicted_softmax, hidden, attn
    def forward(self, y, encoder_outputs, encoder_hidden):
        """Teacher-forced decoding of the target sequence `y`.

        Note: the attention map returned by forward_step is discarded here.
        """
        decoder_hidden = self._init_state(encoder_hidden)
        decoder_outputs, decoder_hidden, attn = self.forward_step(y, decoder_hidden, encoder_outputs)
        return decoder_outputs, decoder_hidden
    def forward_sample(self, encoder_outputs, encoder_hidden, reinforce_sample=False):
        """Autoregressive decoding: greedy argmax, or categorical sampling
        when `reinforce_sample` is True.

        Returns (output_symbols, output_logprobs): per-step chosen-token
        tensors and their log-probabilities, with log-probs zeroed for steps
        after a sequence has emitted end_id.
        """
        if isinstance(encoder_hidden, tuple):
            batch_size = encoder_hidden[0].size(1)  # LSTM state is (h, c)
        else:
            batch_size = encoder_hidden.size(1)
        decoder_hidden = self._init_state(encoder_hidden)
        decoder_input = Variable(torch.LongTensor(batch_size, 1).fill_(self.start_id))
        decoder_input = decoder_input.to(encoder_hidden[0].device)
        output_symbols = [decoder_input.squeeze()]
        output_logprobs = [torch.zeros(batch_size).to(decoder_input.device)]
        # done[b] flips to 1 once sequence b has emitted end_id; updated
        # across steps via the `nonlocal` in decode().
        done = torch.ByteTensor(batch_size).fill_(0).to(decoder_input.device)
        def decode(i, output, reinforce_sample=reinforce_sample):
            nonlocal done
            if reinforce_sample:
                dist = torch.distributions.Categorical(probs=torch.exp(output.view(batch_size, -1))) # better initialize with logits
                symbols = dist.sample().unsqueeze(1)
            else:
                symbols = output.topk(1)[1].view(batch_size, -1)
            # Log-prob of the chosen symbol at this (single) time step.
            symbol_logprobs = output[:, 0, :][torch.arange(batch_size), symbols[:, 0]]
            not_done = logical_not(done)
            # Finished sequences contribute zero log-probability.
            output_logprobs.append(not_done.float() * symbol_logprobs)
            output_symbols.append(symbols.squeeze())
            done = logical_or(done, symbols[:, 0] == self.end_id)
            return symbols
        for i in range(self.max_length):
            decoder_output, decoder_hidden, step_attn = self.forward_step(decoder_input, decoder_hidden, encoder_outputs)
            decoder_input = decode(i, decoder_output)
        return output_symbols, output_logprobs
    def _init_state(self, encoder_hidden):
        # Bridge the encoder's final hidden state into the decoder's initial one.
        if encoder_hidden is None:
            return None
        if isinstance(encoder_hidden, tuple):
            encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])
        else:
            encoder_hidden = self._cat_directions(encoder_hidden)
        return encoder_hidden
    def _cat_directions(self, h):
        # For a bidirectional encoder: (layers*2, batch, dim) ->
        # (layers, batch, 2*dim) by concatenating direction pairs; no-op otherwise.
        if self.bidirectional_encoder:
            h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
        return h
| 4,566
| 39.061404
| 132
|
py
|
CLOSURE
|
CLOSURE-master/vr/ns_vqa/__init__.py
|
from .encoder import Encoder
from .decoder import Decoder
from .seq2seq import Seq2seq
from .train_options import TrainOptions
def create_seq2seq_net(input_vocab_size, output_vocab_size, hidden_size,
                       word_vec_dim, n_layers, bidirectional, variable_lengths,
                       use_attention, encoder_max_len, decoder_max_len, start_id,
                       end_id, word2vec_path=None, fix_embedding=False):
    """Build a Seq2seq model from an Encoder and a Decoder.

    When `word2vec_path` points to a .npy embedding matrix, the encoder's
    embedding table is initialized from it (and frozen if `fix_embedding`).
    """
    word2vec = None
    if word2vec_path is not None:
        # Bug fix: the original called utils.load_embedding(), but `utils`
        # is never imported in this module (NameError at runtime). Load the
        # .npy file directly instead.
        import numpy as np
        import torch
        word2vec = torch.Tensor(np.load(word2vec_path))
    encoder = Encoder(input_vocab_size, encoder_max_len,
                      word_vec_dim, hidden_size, n_layers,
                      bidirectional=bidirectional, variable_lengths=variable_lengths,
                      word2vec=word2vec, fix_embedding=fix_embedding)
    decoder = Decoder(output_vocab_size, decoder_max_len,
                      word_vec_dim, hidden_size, n_layers, start_id, end_id,
                      bidirectional=bidirectional, use_attention=use_attention)
    return Seq2seq(encoder, decoder)
| 1,071
| 45.608696
| 85
|
py
|
CLOSURE
|
CLOSURE-master/vr/ns_vqa/attention.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
    """Dot-product attention over encoder outputs (Luong-style scoring)."""
    def __init__(self, dim, use_weight=False, hidden_size=512):
        super(Attention, self).__init__()
        self.use_weight = use_weight
        self.hidden_size = hidden_size
        if use_weight:
            print('| using weighted attention layer')
            # Optional linear projection of decoder states before scoring.
            self.attn_weight = nn.Linear(hidden_size, hidden_size, bias=False)
        self.linear_out = nn.Linear(2*dim, dim)
    def forward(self, output, context):
        """
        - args
        output : Tensor
            decoder output, dim (batch_size, output_size, hidden_size)
        context : Tensor
            context vector from encoder, dim (batch_size, input_size, hidden_size)
        - returns
        output : Tensor
            attention layer output, dim (batch_size, output_size, hidden_size)
        attn : Tensor
            attention map, dim (batch_size, output_size, input_size)
        """
        batch_size = output.size(0)
        hidden_size = output.size(2)
        input_size = context.size(1)
        if self.use_weight:
            output = self.attn_weight(output.contiguous().view(-1, hidden_size)).view(batch_size, -1, hidden_size)
        # Scores: batched dot products between decoder states and encoder outputs.
        attn = torch.bmm(output, context.transpose(1, 2))
        attn = F.softmax(attn.view(-1, input_size), dim=1).view(batch_size, -1, input_size) # (batch_size, output_size, input_size)
        mix = torch.bmm(attn, context) # (batch_size, output_size, hidden_size)
        # NOTE(review): `output * 0` zeroes the decoder states, so the combined
        # vector is effectively [mix, 0] rather than the usual [mix, output]
        # Luong combine. This may be a deliberate ablation — confirm before
        # "fixing" it, as changing it alters trained-model behavior.
        comb = torch.cat((mix, output * 0), dim=2) # (batch_size, output_size, 2*hidden_size)
        output = torch.tanh(self.linear_out(comb.view(-1, 2*hidden_size)).view(batch_size, -1, hidden_size)) # (batch_size, output_size, hidden_size)
        return output, attn
| 1,804
| 38.23913
| 149
|
py
|
CLOSURE
|
CLOSURE-master/scripts/question_engine.py
|
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import json, os, math
import random
from collections import defaultdict
"""
Utilities for working with function program representations of questions.
Some of the metadata about what question node types are available etc are stored
in a JSON metadata file.
"""
# Module-level flag read by unique_handler(): when True, a 'unique' node is
# only valid on a one-object set. is_degenerate() temporarily toggles it.
_check_uniqueness = True
# Handlers for answering questions. Each handler receives the scene structure
# that was output from Blender, the node, and a list of values that were output
# from each of the node's inputs; the handler should return the computed output
# value from this node.
def scene_handler(scene_struct, inputs, side_inputs):
    """Return the whole scene as a list of object indices."""
    num_objects = len(scene_struct['objects'])
    return [idx for idx in range(num_objects)]
def make_filter_handler(attribute):
    """Build a handler that keeps only the input object indices whose
    `attribute` matches the side input (by equality for scalar attributes,
    by membership for list-valued ones)."""
    def filter_handler(scene_struct, inputs, side_inputs):
        assert len(inputs) == 1
        assert len(side_inputs) == 1
        target = side_inputs[0]
        objects = scene_struct['objects']
        def matches(attr_val):
            return target == attr_val or target in attr_val
        return [idx for idx in inputs[0] if matches(objects[idx][attribute])]
    return filter_handler
def unique_handler(scene_struct, inputs, side_inputs):
    """Resolve a one-object set to a single object index.

    With the module-level _check_uniqueness flag set, the input set must
    contain exactly one object; otherwise any non-empty set is accepted and
    a random member is returned.
    """
    assert len(inputs) == 1
    candidates = inputs[0]
    if _check_uniqueness:
        if len(candidates) != 1:
            return '__INVALID__'
    elif len(candidates) == 0:
        return '__INVALID__'
    return candidates[random.randrange(0, len(candidates))]
def vg_relate_handler(scene_struct, inputs, side_inputs):
    """Visual-Genome-style relate: sorted indices of objects related to the
    input object by the given predicate."""
    assert len(inputs) == 1
    assert len(side_inputs) == 1
    predicate = side_inputs[0]
    related = {rel['object_idx']
               for rel in scene_struct['relationships']
               if rel['predicate'] == predicate and rel['subject_idx'] == inputs[0]}
    return sorted(related)
def relate_handler(scene_struct, inputs, side_inputs):
    """CLEVR relate: look up the precomputed relationship table for one object."""
    assert len(inputs) == 1
    assert len(side_inputs) == 1
    table = scene_struct['relationships'][side_inputs[0]]
    return table[inputs[0]]
def union_handler(scene_struct, inputs, side_inputs):
    """Sorted set-union of two object-index lists."""
    assert len(inputs) == 2
    assert len(side_inputs) == 0
    return sorted(set(inputs[0]) | set(inputs[1]))
def intersect_handler(scene_struct, inputs, side_inputs):
    """Sorted set-intersection of two object-index lists."""
    assert len(inputs) == 2
    assert len(side_inputs) == 0
    return sorted(set(inputs[0]) & set(inputs[1]))
def count_handler(scene_struct, inputs, side_inputs):
    """Number of objects in the single input set."""
    assert len(inputs) == 1
    object_set = inputs[0]
    return len(object_set)
def make_same_attr_handler(attribute):
    """Build a handler mapping one object index to all *other* objects that
    share its `attribute`.

    The full index -> indices mapping is computed once per scene and memoized
    on scene_struct under a '_same_<attribute>' key.
    """
    cache_key = '_same_%s' % attribute
    def same_attr_handler(scene_struct, inputs, side_inputs):
        if cache_key not in scene_struct:
            objects = scene_struct['objects']
            scene_struct[cache_key] = {
                i: [j for j, other in enumerate(objects)
                    if i != j and obj[attribute] == other[attribute]]
                for i, obj in enumerate(objects)
            }
        assert len(inputs) == 1
        assert len(side_inputs) == 0
        return scene_struct[cache_key][inputs[0]]
    return same_attr_handler
def make_query_handler(attribute):
    """Build a handler returning the value of `attribute` for one object index."""
    def query_handler(scene_struct, inputs, side_inputs):
        assert len(inputs) == 1
        assert len(side_inputs) == 0
        obj = scene_struct['objects'][inputs[0]]
        assert attribute in obj
        val = obj[attribute]
        # List-valued attributes are rejected here. The original code also
        # contained list-handling branches *after* this assert, which made
        # them unreachable dead code; they have been removed (no behavior
        # change).
        assert not isinstance(val, list)
        return val
    return query_handler
def exist_handler(scene_struct, inputs, side_inputs):
    """True iff the input object set is non-empty."""
    assert len(inputs) == 1
    assert len(side_inputs) == 0
    return bool(inputs[0])
def equal_handler(scene_struct, inputs, side_inputs):
    """True iff the two input values compare equal."""
    assert len(inputs) == 2
    assert len(side_inputs) == 0
    first, second = inputs
    return first == second
def less_than_handler(scene_struct, inputs, side_inputs):
    """True iff the first input value is strictly less than the second."""
    assert len(inputs) == 2
    assert len(side_inputs) == 0
    first, second = inputs
    return first < second
def greater_than_handler(scene_struct, inputs, side_inputs):
    """True iff the first input value is strictly greater than the second."""
    assert len(inputs) == 2
    assert len(side_inputs) == 0
    first, second = inputs
    return first > second
# Register all of the answering handlers here: maps a program-node "type"
# string to the handler that computes its output value.
# TODO maybe this would be cleaner with a function decorator that takes
# care of registration? Not sure. Also what if we want to reuse the same engine
# for different sets of node types?
execute_handlers = {
    # Source node: all object indices in the scene.
    'scene': scene_handler,
    # Attribute filters: keep objects whose attribute matches the side input.
    'filter_color': make_filter_handler('color'),
    'filter_shape': make_filter_handler('shape'),
    'filter_material': make_filter_handler('material'),
    'filter_size': make_filter_handler('size'),
    'filter_objectcategory': make_filter_handler('objectcategory'),
    # Set -> single object.
    'unique': unique_handler,
    # Spatial relationships and set operations.
    'relate': relate_handler,
    'union': union_handler,
    'intersect': intersect_handler,
    'count': count_handler,
    # Attribute queries on a single object.
    'query_color': make_query_handler('color'),
    'query_shape': make_query_handler('shape'),
    'query_material': make_query_handler('material'),
    'query_size': make_query_handler('size'),
    # Boolean-valued comparisons.
    'exist': exist_handler,
    'equal_color': equal_handler,
    'equal_shape': equal_handler,
    'equal_integer': equal_handler,
    'equal_material': equal_handler,
    'equal_size': equal_handler,
    'equal_object': equal_handler,
    'less_than': less_than_handler,
    'greater_than': greater_than_handler,
    # Other objects sharing an attribute with the input object.
    'same_color': make_same_attr_handler('color'),
    'same_shape': make_same_attr_handler('shape'),
    'same_size': make_same_attr_handler('size'),
    'same_material': make_same_attr_handler('material'),
}
def answer_question(question, metadata, scene_struct, all_outputs=False,
                    cache_outputs=True):
    """
    Use structured scene information to answer a structured question. Most of
    the heavy lifting is done by the execute handlers defined above.

    When cache_outputs is True each node's output is memoized on the node
    itself (under '_output'), which gives a nontrivial speedup when many
    questions sharing nodes are answered on the SAME scene (e.g. during
    question-generation DFS). The cache is wrong if the same nodes are later
    executed on a different scene.

    Execution stops at the first node that produces '__INVALID__'. Returns
    the final node's output, or all node outputs when all_outputs is True.

    (The original version declared unused locals all_input_types /
    all_output_types; they have been removed.)
    """
    node_outputs = []
    for node in question['nodes']:
        if cache_outputs and '_output' in node:
            node_output = node['_output']
        else:
            node_type = node['type']
            msg = 'Could not find handler for "%s"' % node_type
            assert node_type in execute_handlers, msg
            handler = execute_handlers[node_type]
            node_inputs = [node_outputs[idx] for idx in node['inputs']]
            side_inputs = node.get('side_inputs', [])
            node_output = handler(scene_struct, node_inputs, side_inputs)
            if cache_outputs:
                node['_output'] = node_output
        node_outputs.append(node_output)
        if node_output == '__INVALID__':
            break
    if all_outputs:
        return node_outputs
    else:
        return node_outputs[-1]
def insert_scene_node(nodes, idx):
    """Replace nodes[idx] with a bare scene node and prune unreachable nodes.

    Returns a new node list with inputs re-indexed; `nodes` is not modified.
    """
    # Shallow-ish copy so the caller's node dicts stay untouched.
    copied = []
    for node in nodes:
        entry = {'type': node['type'], 'inputs': node['inputs']}
        if 'side_inputs' in node:
            entry['side_inputs'] = node['side_inputs']
        copied.append(entry)
    # Substitute the scene node at the requested position.
    copied[idx] = {'type': 'scene', 'inputs': []}
    # Mark every node reachable (backwards) from the final output node.
    used = [False] * len(copied)
    stack = [len(copied) - 1]
    while stack:
        cur = stack.pop()
        used[cur] = True
        stack.extend(copied[cur]['inputs'])
    # Keep only reachable nodes, recording where each old index lands.
    remap = {}
    trimmed = []
    for old_idx, node in enumerate(copied):
        if used[old_idx]:
            remap[old_idx] = len(trimmed)
            trimmed.append(node)
    # Rewrite each surviving node's inputs through the index map.
    for node in trimmed:
        node['inputs'] = [remap[i] for i in node['inputs']]
    return trimmed
def is_degenerate(question, metadata, scene_struct, answer=None, verbose=False,
                  check_uniqueness=True):
    """
    A question is degenerate if replacing any of its relate (or same_*) nodes
    with a scene node results in a question with the same answer.
    """
    # Passing an argument through a global variable (*sigh*)
    global _check_uniqueness
    _check_uniqueness = check_uniqueness
    # Bug fix: the original restored _check_uniqueness only on the
    # fall-through (False) path, so an early `return True` — or an exception
    # from answer_question — leaked check_uniqueness=False into all
    # subsequent calls. try/finally makes the restore unconditional.
    try:
        if answer is None:
            answer = answer_question(question, metadata, scene_struct)
        for idx, node in enumerate(question['nodes']):
            if node['type'] == 'relate' or node['type'].startswith('same'):
                new_question = {
                    'nodes': insert_scene_node(question['nodes'], idx)
                }
                new_answer = answer_question(new_question, metadata, scene_struct)
                if verbose:
                    print('here is truncated question:')
                    for i, n in enumerate(new_question['nodes']):
                        name = n['type']
                        if 'side_inputs' in n and n['side_inputs']:
                            name = '%s[%s]' % (name, n['side_inputs'][0])
                        print(i, name)#, n['_output'])
                    print('new answer is: ', new_answer)
                if new_answer == answer:
                    return True
        return False
    finally:
        # Restore the normal configuration for the module
        _check_uniqueness = True
| 9,688
| 30.767213
| 138
|
py
|
CLOSURE
|
CLOSURE-master/scripts/generate_templates.py
|
import json
import argparse
# Script: instantiate a meta-template (a JSON template with attribute
# placeholders) into concrete CLEVR question templates, one per valid
# (query attribute, reasoning attribute) pair.
# "query" is the attribute that is queried
QUERY_PH = '%query%'
Q_PH = '%Q%'
# "attr" is the attribute that is used for reasoning
ATTR_PH = '%attr%'
A_PH = '%A%'
# (attribute name, single-letter placeholder tag) pairs used for substitution.
VALUES = [('color', 'C'), ('shape', 'S'),
          ('material', 'M'), ('size', 'Z')]
parser = argparse.ArgumentParser('Generate templates from a meta-template')
parser.add_argument('meta_template')  # input meta-template file
parser.add_argument('templates')      # output path for the generated templates
# When 0, 'material' is excluded from the queried attributes (it can still
# be used as the reasoning attribute).
parser.add_argument('--query-material', type=int, default=1)
args = parser.parse_args()
with open(args.meta_template) as src:
    meta_template = src.read()
# One instantiation per (query, attr) pair with distinct attributes; the set
# de-duplicates results when a placeholder does not occur in the template.
str_templates = set()
for query in VALUES if args.query_material else [t for t in VALUES if t[0] != 'material']:
    for attr in VALUES:
        template = meta_template
        if query[0] == attr[0]:
            continue
        template = template.replace(QUERY_PH, query[0])
        template = template.replace(Q_PH, query[1])
        template = template.replace(ATTR_PH, attr[0])
        template = template.replace(A_PH, attr[1])
        str_templates.add(template)
# Each instantiated meta-template parses to a JSON list holding one template.
templates = []
for str_ in str_templates:
    templates.append(json.loads(str_)[0])
with open(args.templates, 'w') as dst:
    json.dump(templates, dst, indent=2)
| 1,221
| 28.804878
| 90
|
py
|
CLOSURE
|
CLOSURE-master/scripts/preprocess_questions.py
|
#!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import sys
import os
sys.path.insert(0, os.path.abspath('.'))
import argparse
import json
import os
import h5py
import numpy as np
import vr.programs
from vr.preprocess import tokenize, encode, build_vocab
"""
Preprocessing script for CLEVR question files.
"""
parser = argparse.ArgumentParser()
# Program serialization order: 'prefix' (root first), 'postfix', or 'chain'
# (chain mode only serializes chain-structured programs).
parser.add_argument('--mode', default='prefix',
                    choices=['chain', 'prefix', 'postfix'])
# May be given multiple times; pair each file with a --q_family_shift to
# offset its question_family_index values.
parser.add_argument('--input_questions_json', required=True, action='append')
parser.add_argument('--q_family_shift', type=int, action='append')
parser.add_argument('--input_vocab_json', default='')
parser.add_argument('--expand_vocab', default=0, type=int)  # 1: add new question words to the input vocab
parser.add_argument('--unk_threshold', default=1, type=int)  # min token count to keep a word
parser.add_argument('--encode_unk', default=0, type=int)  # 1: allow unknown tokens when encoding
parser.add_argument('--output_h5_file', required=True)
parser.add_argument('--output_vocab_json', default='')
def program_to_str(program, mode):
    """Serialize a program to a token string in the requested order.

    Returns None for an unknown mode, or for 'chain' mode when the program
    is not chain-structured.
    """
    converter = vr.programs.ProgramConverter()
    if mode == 'chain':
        if not converter.is_chain(program):
            return None
        return vr.programs.list_to_str(program)
    if mode == 'prefix':
        return vr.programs.list_to_str(converter.list_to_prefix(program))
    if mode == 'postfix':
        return vr.programs.list_to_str(converter.list_to_postfix(program))
    return None
def main(args):
    """Build (or extend) a vocab from CLEVR-style question JSON files and
    encode all questions -- and programs, when present -- into a padded
    HDF5 dataset.

    At least one of --input_vocab_json / --output_vocab_json must be given.
    """
    if (args.input_vocab_json == '') and (args.output_vocab_json == ''):
        print('Must give one of --input_vocab_json or --output_vocab_json')
        return
    print('Loading data from', args.input_questions_json)
    # Optional per-file shift added to every question_family_index, so several
    # question files can be merged without family-index collisions.
    if args.q_family_shift and len(args.q_family_shift):
        if len(args.q_family_shift) != len(args.input_questions_json):
            raise ValueError("shift must be provided for each question file")
        q_family_shifts = args.q_family_shift
    else:
        q_family_shifts = [0] * len(args.input_questions_json)
    questions = []
    for q_file, shift in zip(args.input_questions_json, q_family_shifts):
        print(q_file)
        with open(q_file, 'r') as f:
            more_questions = json.load(f)['questions']
            for q in more_questions:
                q['question_family_index'] += shift
            questions.extend(more_questions)
    # Either create the vocab or load it from disk
    if args.input_vocab_json == '' or args.expand_vocab == 1:
        print('Building vocab')
        # NOTE(review): if the first question lacks an 'answer' key,
        # answer_token_to_idx is never bound and the vocab dict below raises
        # NameError -- presumably answers are all-or-none across questions.
        if 'answer' in questions[0]:
            answer_token_to_idx = build_vocab(
                (q['answer'] for q in questions)
            )
        question_token_to_idx = build_vocab(
            (q['question'] for q in questions),
            min_token_count=args.unk_threshold,
            punct_to_keep=[';', ','], punct_to_remove=['?', '.']
        )
        all_program_strs = []
        for q in questions:
            if 'program' not in q:
                continue
            program_str = program_to_str(q['program'], args.mode)
            if program_str is not None:
                all_program_strs.append(program_str)
        program_token_to_idx = build_vocab(all_program_strs)
        vocab = {
            'question_token_to_idx': question_token_to_idx,
            'program_token_to_idx': program_token_to_idx,
            'answer_token_to_idx': answer_token_to_idx,
        }
        def arity(name):
            # Number of inputs each program function consumes.
            if name == 'scene':
                return 0
            if 'equal' in name or name in ['union', 'intersect', 'less_than', 'greater_than']:
                return 2
            return 1
        vocab['program_token_arity'] = {name: arity(name) for name in program_token_to_idx}
    if args.input_vocab_json != '':
        print('Loading vocab')
        if args.expand_vocab == 1:
            # Keep the freshly built vocab so unseen question words can be merged.
            new_vocab = vocab
        with open(args.input_vocab_json, 'r') as f:
            vocab = json.load(f)
        if args.expand_vocab == 1:
            num_new_words = 0
            for word in new_vocab['question_token_to_idx']:
                if word not in vocab['question_token_to_idx']:
                    print('Found new word %s' % word)
                    idx = len(vocab['question_token_to_idx'])
                    vocab['question_token_to_idx'][word] = idx
                    num_new_words += 1
            print('Found %d new words' % num_new_words)
    if args.output_vocab_json != '':
        with open(args.output_vocab_json, 'w') as f:
            json.dump(vocab, f)
    # Encode all questions and programs
    print('Encoding data')
    questions_encoded = []
    programs_encoded = []
    question_families = []
    orig_idxs = []
    image_idxs = []
    answers = []
    types = []  # last function of each program, used as a coarse question "type"
    for orig_idx, q in enumerate(questions):
        question = q['question']
        if 'program' in q:
            types += [q['program'][-1]['function']]
        orig_idxs.append(orig_idx)
        image_idxs.append(q['image_index'])
        if 'question_family_index' in q:
            question_families.append(q['question_family_index'])
        question_tokens = tokenize(question,
                                   punct_to_keep=[';', ','],
                                   punct_to_remove=['?', '.'])
        question_encoded = encode(question_tokens,
                                  vocab['question_token_to_idx'],
                                  allow_unk=args.encode_unk == 1)
        questions_encoded.append(question_encoded)
        if 'program' in q:
            program = q['program']
            program_str = program_to_str(program, args.mode)
            program_tokens = tokenize(program_str)
            program_encoded = encode(program_tokens, vocab['program_token_to_idx'])
            programs_encoded.append(program_encoded)
        if 'answer' in q:
            answers.append(vocab['answer_token_to_idx'][q['answer']])
    # Pad encoded questions and programs
    max_question_length = max(len(x) for x in questions_encoded)
    for qe in questions_encoded:
        while len(qe) < max_question_length:
            qe.append(vocab['question_token_to_idx']['<NULL>'])
    if len(programs_encoded) > 0:
        max_program_length = max(len(x) for x in programs_encoded)
        for pe in programs_encoded:
            while len(pe) < max_program_length:
                pe.append(vocab['program_token_to_idx']['<NULL>'])
    # Create h5 file
    print('Writing output')
    questions_encoded = np.asarray(questions_encoded, dtype=np.int32)
    programs_encoded = np.asarray(programs_encoded, dtype=np.int32)
    print(questions_encoded.shape)
    print(programs_encoded.shape)
    # Map each distinct type string to a dense integer id.
    # NOTE(review): iterating a set makes these ids non-deterministic across
    # runs -- verify nothing downstream relies on a stable type->id mapping.
    mapping = {}
    for i, t in enumerate(set(types)):
        mapping[t] = i
    print(mapping)
    types_coded = []
    for t in types:
        types_coded += [mapping[t]]
    with h5py.File(args.output_h5_file, 'w') as f:
        f.create_dataset('questions', data=questions_encoded)
        f.create_dataset('image_idxs', data=np.asarray(image_idxs))
        f.create_dataset('orig_idxs', data=np.asarray(orig_idxs))
        if len(programs_encoded) > 0:
            f.create_dataset('programs', data=programs_encoded)
        if len(question_families) > 0:
            f.create_dataset('question_families', data=np.asarray(question_families))
        if len(answers) > 0:
            f.create_dataset('answers', data=np.asarray(answers))
        if len(types) > 0:
            f.create_dataset('types', data=np.asarray(types_coded))
if __name__ == '__main__':
    # Parse the CLI flags declared above and run the preprocessing pipeline.
    main(parser.parse_args())
| 7,871
| 35.613953
| 138
|
py
|
CLOSURE
|
CLOSURE-master/scripts/run_pg.py
|
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import argparse
import json
import random
import shutil
from termcolor import colored
import time
from tqdm import tqdm
import sys
import os
sys.path.insert(0, os.path.abspath('.'))
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision
import numpy as np
import h5py
from scipy.misc import imread, imresize, imsave
import vr.utils as utils
import vr.programs
from vr.data import ClevrDataset, ClevrDataLoader
from vr.preprocess import tokenize, encode
from vr.models import *
# Command-line interface for evaluating a trained program generator.
parser = argparse.ArgumentParser()
parser.add_argument('--program_generator', default=None)
parser.add_argument('--execution_engine', default=None)
parser.add_argument('--debug_every', default=float('inf'), type=float)
parser.add_argument('--use_gpu', default=torch.cuda.is_available(), type=int)
# For running on a preprocessed dataset
parser.add_argument('--data_dir', default=None, type=str)
parser.add_argument('--part', default='val', type=str)
# This will override the vocab stored in the checkpoint;
# we need this to run CLEVR models on human data
parser.add_argument('--vocab_json', default=None)
parser.add_argument('--num_examples', default=None, type=int)
# If this is passed, then save all predictions to this file
parser.add_argument('--output_h5', default=None)
parser.add_argument('--output_preds', default=None)
# Populated by save_grad() hooks during interactive debug sessions.
grads = {}
programs = {} # NOTE: Useful for zero-shot program manipulation when in debug mode
def main(args):
    """Load a program-generator checkpoint and evaluate it on the chosen data part."""
    question_path = os.path.join(args.data_dir, '{}_questions.h5'.format(args.part))
    feature_path = os.path.join(args.data_dir, '{}_features.h5'.format(args.part))
    pg, _ = utils.load_program_generator(args.program_generator)
    # Evaluate on GPU tensors only when --use_gpu is set.
    dtype = torch.cuda.FloatTensor if args.use_gpu == 1 else torch.FloatTensor
    loader_kwargs = {
        'question_h5': question_path,
        'feature_h5': feature_path,
        'vocab': load_vocab(args),
        'batch_size': 128,
    }
    with ClevrDataLoader(**loader_kwargs) as loader:
        run_batch(args, pg, loader, dtype)
def run_batch(args, pg, loader, dtype):
    """Evaluate the program generator on every batch from `loader`.

    Prints exact-match program accuracy, writes a per-example boolean
    'correct' mask to an HDF5 file, and writes the decoded predicted
    programs (one per line) to a text file.  Output paths default to names
    derived from `args.part` and the checkpoint filename unless
    --output_h5 / --output_preds are given.
    """
    pg.type(dtype)
    pg.eval()
    all_correct = []
    all_preds = []
    num_samples = 0
    num_correct = 0
    for batch in tqdm(loader):
        questions, images, feats, answers, programs = batch
        # Some loaders yield the question tensor wrapped in a list.
        if isinstance(questions, list):
            questions_var = questions[0].type(dtype).long()
        else:
            questions_var = questions.type(dtype).long()
        feats_var = feats.type(dtype)
        programs = programs.to(feats_var.device)
        programs_pred, _ = pg.forward(questions_var, argmax=True)
        # A prediction counts as correct only when it matches the ground truth
        # token-for-token over the shorter of the two sequence lengths.
        min_length = min(programs.shape[1], programs_pred.shape[1])
        programs_pred = programs_pred[:, :min_length]
        programs = programs[:, :min_length]
        correct = (programs_pred == programs).int().sum(1) == min_length
        # FIX: accumulate as a plain Python int; the original kept a 0-dim
        # tensor here, relying on implicit tensor->int conversion later.
        num_correct += int(correct.sum())
        all_correct.append(correct)
        all_preds.append(programs_pred)
        num_samples += programs.size(0)
        if args.num_examples and num_samples >= args.num_examples:
            break
    acc = float(num_correct) / num_samples
    print('Got %d / %d = %.2f correct' % (num_correct, num_samples, 100 * acc))
    # [:-3] strips a 3-character extension -- assumes the checkpoint path ends
    # in '.pt'; TODO confirm for other naming schemes.
    output_path = ('output_' + args.part + "_" + args.program_generator[:-3] + ".h5"
                   if not args.output_h5
                   else args.output_h5)
    preds_path = ('programs_' + args.part + "_" + args.program_generator[:-3] + ".txt"
                  if not args.output_preds
                  else args.output_preds)
    print('Writing output to "%s"' % output_path)
    with h5py.File(output_path, 'w') as fout:
        fout.create_dataset('correct', data=torch.cat(all_correct, 0).cpu().numpy())
    vocab = load_vocab(args)
    all_preds = torch.cat(all_preds, 0).cpu().numpy()
    all_preds_strings = []
    for i in range(len(all_preds)):
        all_preds_strings.append(
            " ".join(vocab['program_idx_to_token'][w] for w in all_preds[i]))
    save_to_file(all_preds_strings, preds_path)
    if args.debug_every <= 1:
        # BUG FIX: pdb is never imported at module level in this script, so the
        # debug branch raised NameError; import it locally where it is needed.
        import pdb
        pdb.set_trace()
    return
def load_vocab(args):
    """Return the vocab dict stored inside the program-generator checkpoint."""
    checkpoint = utils.load_cpu(args.program_generator)
    return checkpoint['vocab']
def save_grad(name):
    """Return a backward hook that stashes the incoming gradient in the
    module-level `grads` dict under `name`."""
    def _record(grad_tensor):
        grads[name] = grad_tensor
    return _record
def save_to_file(text, filename):
    """Write each string in `text` to `filename`, one per line, UTF-8 encoded,
    with a trailing newline at the end of the file."""
    payload = '\n'.join(text) + '\n'
    with open(filename, mode='wt', encoding='utf-8') as handle:
        handle.write(payload)
def get_index(l, index, default=-1):
    """Return the position of `index` in `l`, or `default` when absent.

    A safe wrapper around list.index that never raises ValueError.
    """
    return l.index(index) if index in l else default
if __name__ == '__main__':
    # Script entry point: parse the CLI flags defined above and run evaluation.
    args = parser.parse_args()
    main(args)
| 5,036
| 29.713415
| 138
|
py
|
CLOSURE
|
CLOSURE-master/scripts/train_model.py
|
#!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import argparse
import json
import os
import pdb
import random
import shutil
import sys
import subprocess
import time
import logging
import itertools
import lru
import pickle
import h5py
import numpy as np
from termcolor import colored
import torch
torch.backends.cudnn.enabled = True
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel, DataParallel
import vr
import vr.utils
import vr.preprocess
from vr.data import (ClevrDataset,
ClevrDataLoader)
from vr.models import *
from vr.ns_vqa.parser import Seq2seqParser
from vr.ns_vqa.clevr_executor import ClevrExecutor
parser = argparse.ArgumentParser()
logger = logging.getLogger(__name__)
# Default device; main() reassigns this per-rank when running multi-GPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def is_multigpu():
    """True when the NPROC environment variable requests more than one process.

    Unset or empty NPROC counts as single-process.
    """
    value = os.environ.get('NPROC', '1')
    return bool(value) and int(value) > 1
def atomic_torch_save(object_, path):
    """Serialize `object_` with torch.save via a temporary sibling file, then
    move it into place so `path` is never observed half-written."""
    staging = '{}.tmp'.format(path)
    torch.save(object_, staging)
    shutil.move(staging, path)
def parse_int_list(input_):
    """Parse a comma-separated string into a list of ints; empty/None -> []."""
    return [int(token) for token in input_.split(',')] if input_ else []
def parse_float_list(input_):
    """Parse a comma-separated string into a list of floats; empty/None -> []."""
    if not input_:
        return []
    return [float(token) for token in input_.split(',')]
def one_or_list(parser):
    """Wrap a list-returning parser so a singleton list unwraps to its element.

    Used as an argparse `type=` adapter: '--x 1' yields a scalar, '--x 1,2' a list.
    """
    def parse_one_or_list(input_):
        parsed = parser(input_)
        return parsed[0] if len(parsed) == 1 else parsed
    return parse_one_or_list
def get_parameter_norm(model):
    """Global L2 norm over all parameters of `model` (returned as a tensor)."""
    squared_sum = sum((p ** 2).sum() for p in model.parameters())
    return squared_sum ** 0.5
def get_parameter_grad_norm(model):
    """Global L2 norm over gradients of `model`, skipping parameters whose
    .grad is None (e.g. before any backward pass)."""
    squared_sum = sum(
        (p.grad ** 2).sum() for p in model.parameters() if p.grad is not None)
    return squared_sum ** 0.5
# NOTE(review): no type= here, so a provided --seed arrives as a string --
# verify torch.manual_seed accepts what main() passes it.
parser.add_argument("--seed", default=None)
# for DDP launcher
parser.add_argument("--rank", type=int, default=0)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument("--world_size", type=int, default=1)
# Input data
parser.add_argument('--data_dir', required=True)
parser.add_argument('--val_part', default=[], action='append')
parser.add_argument('--feature_dim', default=[1024,14,14], type=parse_int_list)
parser.add_argument('--vocab_json', default='vocab.json')
parser.add_argument('--load_features', type=int, default=1)
parser.add_argument('--loader_num_workers', type=int, default=0)
parser.add_argument('--use_local_copies', default=0, type=int)
parser.add_argument('--cleanup_local_copies', default=1, type=int)
parser.add_argument('--family_split_file', default=None)
parser.add_argument('--num_train_samples', default=None, type=int)
parser.add_argument('--num_val_samples', default=None, type=int)
parser.add_argument('--shuffle_train_data', default=1, type=int)
parser.add_argument('--oversample', type=int)
parser.add_argument('--oversample_shift', type=int)
parser.add_argument('--percent_of_data_for_training', default=1., type=float)
parser.add_argument('--simple_encoder', default=0, type=int)
# What type of model to use and which parts to train
parser.add_argument('--model_type', default='PG',
                    choices=['RTfilm', 'Tfilm', 'FiLM',
                             'PG', 'EE', 'PG+EE', 'Control-EE',
                             'LSTM', 'CNN+LSTM', 'CNN+LSTM+SA',
                             'Hetero', 'MAC',
                             'SimpleNMN', 'RelNet', 'SHNMN',
                             'ConvLSTM'])
parser.add_argument('--train_program_generator', default=1, type=int)
parser.add_argument('--train_execution_engine', default=1, type=int)
parser.add_argument('--baseline_train_only_rnn', default=0, type=int)
# Start from an existing checkpoint
parser.add_argument('--program_generator_start_from', default=None)
parser.add_argument('--execution_engine_start_from', default=None)
parser.add_argument('--baseline_start_from', default=None)
# RNN options (for PG)
parser.add_argument('--rnn_wordvec_dim', default=300, type=int)
parser.add_argument('--rnn_hidden_dim', default=256, type=int)
parser.add_argument('--rnn_num_layers', default=2, type=int)
parser.add_argument('--rnn_dropout', default=0, type=float)
parser.add_argument('--rnn_attention', action='store_true')
parser.add_argument('--rnn_nonautoreg', action='store_true')
parser.add_argument('--ns_vqa', action='store_true')
# Symbolic EE
parser.add_argument('--symbolic_ee', action='store_true')
# Module net / FiLMedNet options
parser.add_argument('--module_stem_num_layers', default=2, type=int)
parser.add_argument('--module_stem_subsample_layers', default=[], type=parse_int_list)
parser.add_argument('--module_stem_batchnorm', default=0, type=int)
parser.add_argument('--module_dim', default=128, type=int)
parser.add_argument('--stem_dim', default=64, type=int)
parser.add_argument('--module_residual', default=1, type=int)
parser.add_argument('--module_batchnorm', default=0, type=int)
parser.add_argument('--module_intermediate_batchnorm', default=0, type=int)
parser.add_argument('--use_color', default=0, type=int)
parser.add_argument('--nmn_type', default='chain1', choices = ['chain1', 'chain2', 'chain3', 'tree'])
# FiLM only options
parser.add_argument('--set_execution_engine_eval', default=0, type=int)
parser.add_argument('--program_generator_parameter_efficient', default=1, type=int)
parser.add_argument('--rnn_output_batchnorm', default=0, type=int)
parser.add_argument('--bidirectional', default=0, type=int)
parser.add_argument('--encoder_type', default='gru', type=str,
                    choices=['linear', 'gru', 'lstm'])
parser.add_argument('--decoder_type', default='linear', type=str,
                    choices=['linear', 'gru', 'lstm'])
parser.add_argument('--gamma_option', default='linear',
                    choices=['linear', 'sigmoid', 'tanh', 'exp'])
parser.add_argument('--gamma_baseline', default=1, type=float)
parser.add_argument('--num_modules', default=4, type=int)
parser.add_argument('--module_stem_kernel_size', default=[3], type=parse_int_list)
parser.add_argument('--module_stem_stride', default=[1], type=parse_int_list)
parser.add_argument('--module_stem_padding', default=None, type=parse_int_list)
parser.add_argument('--module_num_layers', default=1, type=int) # Only mnl=1 currently implemented
parser.add_argument('--module_batchnorm_affine', default=0, type=int) # 1 overrides other factors
parser.add_argument('--module_dropout', default=5e-2, type=float)
parser.add_argument('--module_input_proj', default=1, type=int) # Inp conv kernel size (0 for None)
parser.add_argument('--module_kernel_size', default=3, type=int)
parser.add_argument('--condition_method', default='bn-film', type=str,
                    choices=['nothing', 'block-input-film', 'block-output-film', 'bn-film', 'concat', 'conv-film', 'relu-film'])
parser.add_argument('--condition_pattern', default=[], type=parse_int_list) # List of 0/1's (len = # FiLMs)
parser.add_argument('--use_gamma', default=1, type=int)
parser.add_argument('--use_beta', default=1, type=int)
parser.add_argument('--use_coords', default=1, type=int) # 0: none, 1: low usage, 2: high usage
parser.add_argument('--grad_clip', default=0, type=float) # <= 0 for no grad clipping
parser.add_argument('--debug_every', default=float('inf'), type=float) # inf for no pdb
parser.add_argument('--print_verbose_every', default=float('inf'), type=float) # inf for min print
parser.add_argument('--film_use_attention', default=0, type=int)
#MAC options
parser.add_argument('--mac_write_unit', default='original', type=str)
parser.add_argument('--mac_read_connect', default='last', type=str)
parser.add_argument('--mac_read_unit', default='original', type=str)
parser.add_argument('--mac_vib_start', default=0, type=float)
parser.add_argument('--mac_vib_coof', default=0., type=float)
parser.add_argument('--mac_use_self_attention', default=1, type=int)
parser.add_argument('--mac_use_memory_gate', default=1, type=int)
parser.add_argument('--mac_nonlinearity', default='ELU', type=str)
parser.add_argument('--mac_question2output', default=1, type=int)
parser.add_argument('--mac_train_just_control', action='store_true')
parser.add_argument('--mac_question_embedding_dropout', default=0.08, type=float)
parser.add_argument('--mac_stem_dropout', default=0.18, type=float)
parser.add_argument('--mac_memory_dropout', default=0.15, type=float)
parser.add_argument('--mac_read_dropout', default=0.15, type=float)
parser.add_argument('--mac_use_prior_control_in_control_unit', default=0, type=int)
parser.add_argument('--variational_embedding_dropout', default=0.15, type=float)
parser.add_argument('--mac_embedding_uniform_boundary', default=1., type=float)
parser.add_argument('--hard_code_control', action="store_true")
parser.add_argument('--exponential_moving_average_weight', default=1., type=float)
#NMNFilm2 options
parser.add_argument('--nmn_use_film', default=0, type=int)
parser.add_argument('--nmn_use_simple_block', default=0, type=int)
parser.add_argument('--nmn_module_pool', default='mean', type=str)
parser.add_argument('--nmn_use_gammas', default='identity', type=str)
parser.add_argument('--nmn_learn_control', default=0, type=int)
parser.add_argument('--entropy_coef', default=0.0, type=float)
# CNN options (for baselines)
parser.add_argument('--cnn_res_block_dim', default=128, type=int)
parser.add_argument('--cnn_num_res_blocks', default=0, type=int)
parser.add_argument('--cnn_proj_dim', default=512, type=int)
parser.add_argument('--cnn_pooling', default='maxpool2',
                    choices=['none', 'maxpool2'])
# Stacked-Attention options
parser.add_argument('--stacked_attn_dim', default=512, type=int)
parser.add_argument('--num_stacked_attn', default=2, type=int)
# Classifier options
parser.add_argument('--classifier_proj_dim', default=512, type=int)
parser.add_argument('--classifier_downsample', default='maxpool2',
                    choices=['maxpool2', 'maxpool3', 'maxpool4', 'maxpool5', 'maxpool7', 'maxpoolfull', 'none',
                             'avgpool2', 'avgpool3', 'avgpool4', 'avgpool5', 'avgpool7', 'avgpoolfull', 'aggressive',
                             'hybrid'])
parser.add_argument('--classifier_fc_dims', default=[1024], type=parse_int_list)
parser.add_argument('--classifier_batchnorm', default=0, type=int)
parser.add_argument('--classifier_dropout', default=0.0, type=one_or_list(parse_float_list))
# Discriminator options
parser.add_argument('--discriminator_proj_dim', default=512, type=int)
parser.add_argument('--discriminator_downsample', default='maxpool2',
                    choices=['maxpool2', 'maxpool3', 'maxpool4', 'maxpool5', 'maxpool7', 'maxpoolfull', 'none',
                             'avgpool2', 'avgpool3', 'avgpool4', 'avgpool5', 'avgpool7', 'avgpoolfull', 'aggressive',
                             'hybrid'])
parser.add_argument('--discriminator_fc_dims', default=[1024], type=parse_int_list)
parser.add_argument('--discriminator_dropout', default=0.0, type=one_or_list(parse_float_list))
# Optimization options
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--val_batch_size', default=512, type=int)
parser.add_argument('--num_iterations', default=100000, type=int)
parser.add_argument('--optimizer', default='Adam',
                    choices=['Adadelta', 'Adagrad', 'Adam', 'Adamax', 'ASGD', 'RMSprop', 'SGD'])
parser.add_argument('--learning_rate', default=5e-4, type=float)
parser.add_argument('--pg_learning_rate', default=None, type=float)
parser.add_argument('--beta1', default=0.9, type=float)
parser.add_argument('--beta2', default=0.999, type=float)
parser.add_argument('--eps', default=1e-8, type=float)
parser.add_argument('--reward_decay', default=0.9, type=float)
parser.add_argument('--same_reward', action="store_true", default=False)
parser.add_argument('--weight_decay', default=0, type=float)
parser.add_argument('--ewa_baseline', default=1, type=int)
parser.add_argument('--enforce_wellformed', default=False, action="store_true")
parser.add_argument('--temperature_increase', default=None, type=float)
# Output options
parser.add_argument('--checkpoint_path', default='{slurmid}.pt')
parser.add_argument('--allow_resume', action='store_true')
parser.add_argument('--load_ee_parameters', default=None, type=str)
parser.add_argument('--randomize_checkpoint_path', type=int, default=0)
parser.add_argument('--avoid_checkpoint_override', default=0, type=int)
parser.add_argument('--record_loss_every', default=1, type=int)
parser.add_argument('--checkpoint_every', default=400, type=int)
parser.add_argument('--validate_every', default=10000, type=int)
parser.add_argument('--time', default=0, type=int)
def main(args):
    """Top-level training entry point: set up distributed state, logging, the
    vocab, and the train/val data loaders, then hand off to train_loop().

    Raises ValueError when --validate_every is not a multiple of
    --checkpoint_every (validation results are only persisted at checkpoints).
    """
    if args.validate_every % args.checkpoint_every != 0:
        raise ValueError("must validate at iteration where checkpointing is also done")
    if is_multigpu():
        torch.distributed.init_process_group(backend='nccl')
    global device
    device = (torch.device('cuda:{}'.format(args.local_rank))
              if torch.cuda.is_available()
              else torch.device('cpu'))
    if args.seed is not None:
        # FIX: --seed is declared without type=, so it arrives as a string;
        # cast before handing it to torch.manual_seed.
        torch.manual_seed(int(args.seed))
    # Log the exact code state (commit + uncommitted diff) for reproducibility.
    nmn_iwp_code = list(vr.__path__)[0]
    try:
        last_commit = subprocess.check_output(
            'cd {}; git log -n1'.format(nmn_iwp_code), shell=True).decode('utf-8')
        logger.info('LAST COMMIT INFO:')
        logger.info(last_commit)
    except subprocess.CalledProcessError:
        logger.info('Could not figure out the last commit')
    try:
        diff = subprocess.check_output(
            'cd {}; git diff'.format(nmn_iwp_code), shell=True).decode('utf-8')
        if diff:
            logger.info('GIT DIFF:')
            logger.info(diff)
    except subprocess.CalledProcessError:
        # FIX: this branch previously logged the commit message by mistake.
        logger.info('Could not figure out the git diff')
    logger.info('Will save checkpoints to %s' % args.checkpoint_path)
    if not args.checkpoint_path:
        raise NotImplementedError('no default checkpoint path')
    # BUG FIX: the original joined data_dir onto vocab_json twice, producing
    # paths like data_dir/data_dir/vocab.json. Join exactly once.
    args.vocab_json = os.path.join(args.data_dir, args.vocab_json)
    vocab = vr.utils.load_vocab(args.vocab_json)
    logger.info(args)
    question_families = None
    if args.family_split_file is not None:
        with open(args.family_split_file, 'r') as f:
            question_families = json.load(f)
    # Symbolic execution works on scene graphs; neural models (except a pure
    # program generator) work on extracted image features.
    scenes_needed = args.symbolic_ee
    features_needed = args.model_type != 'PG' and not args.symbolic_ee
    train_question_h5 = os.path.join(args.data_dir, 'train_questions.h5')
    train_features_h5 = os.path.join(args.data_dir, 'train_features.h5')
    train_scenes = os.path.join(args.data_dir, 'train_scenes.json')
    train_loader_kwargs = {
        'question_h5': train_question_h5,
        'feature_h5': train_features_h5 if features_needed else None,
        'scene_path': train_scenes if scenes_needed else None,
        'load_features': args.load_features,
        'vocab': vocab,
        'batch_size': args.batch_size,
        'shuffle': args.shuffle_train_data == 1,
        'question_families': question_families,
        'max_samples': args.num_train_samples,
        'num_workers': args.loader_num_workers,
        'percent_of_data': args.percent_of_data_for_training,
        'oversample': args.oversample,
        'oversample_shift': args.oversample_shift
    }
    train_loader = ClevrDataLoader(**train_loader_kwargs)
    # One validation loader per --val_part occurrence.
    val_loaders = []
    for val_part in args.val_part:
        val_question_h5 = os.path.join(args.data_dir, '{}_questions.h5'.format(val_part))
        val_features_h5 = os.path.join(args.data_dir, '{}_features.h5'.format(val_part))
        val_scenes = os.path.join(args.data_dir, '{}_scenes.json'.format(val_part))
        val_loader_kwargs = {
            'question_h5': val_question_h5,
            'feature_h5': val_features_h5 if features_needed else None,
            'scene_path': val_scenes if scenes_needed else None,
            'load_features': args.load_features,
            'vocab': vocab,
            'batch_size': args.val_batch_size,
            'question_families': question_families,
            'max_samples': args.num_val_samples,
            'num_workers': args.loader_num_workers,
        }
        val_loaders.append(ClevrDataLoader(**val_loader_kwargs))
    try:
        train_loop(args, train_loader, val_loaders)
    finally:
        # Always release loader resources, even when training raises.
        for loader in [train_loader] + val_loaders:
            loader.close()
def train_loop(args, train_loader, val_loaders):
vocab = vr.utils.load_vocab(args.vocab_json)
program_generator, pg_kwargs, pg_optimizer = None, None, None
execution_engine, ee_kwargs, ee_optimizer = None, None, None
baseline_model, baseline_kwargs, baseline_optimizer = None, None, None
baseline_type = None
stats = {
'train_losses': [], 'train_rewards': [], 'train_losses_ts': [],
'train_accs': [], 'val_accs_ts': [], 'alphas' : [], 'grads' : [],
'model_t': 0, 'model_epoch': 0,
'entropy': [], 'prog_acc': [], 'compute_time': []
}
for val_part in args.val_part:
stats['best_' + val_part + '_acc'] = -1
stats[val_part + "_accs"] = []
models_that_need_pg = ['MAC', 'RTfilm', 'Tfilm', 'FiLM',
'PG', 'PG+EE', 'Control-EE', 'RelNet', 'ConvLSTM']
models_that_need_ee = ['MAC', 'RTfilm', 'Tfilm', 'FiLM', 'EE', 'PG+EE',
'Control-EE', 'Hetero', 'SimpleNMN', 'SHNMN', 'RelNet', 'ConvLSTM']
# Set up model
if args.allow_resume and os.path.exists(args.checkpoint_path):
# EITHER resume existing experiment
logger.info("Trying to resume")
if args.model_type in models_that_need_pg:
program_generator, pg_kwargs = vr.utils.load_program_generator(args.checkpoint_path)
program_generator.to(device)
if is_multigpu():
program_generator = DistributedDataParallel(program_generator, device_ids=[args.local_rank])
if args.model_type in models_that_need_ee:
if args.symbolic_ee:
execution_engine, ee_kwargs = get_execution_engine(args)
else:
execution_engine, ee_kwargs = vr.utils.load_execution_engine(args.checkpoint_path)
execution_engine.to(device)
if is_multigpu():
execution_engine = DistributedDataParallel(execution_engine, device_ids=[args.local_rank])
with open(args.checkpoint_path + '.json', 'r') as f:
checkpoint = json.load(f)
for key in list(stats.keys()):
if key in checkpoint:
stats[key] = checkpoint[key]
stats['model_epoch'] -= 1
best_pg_state = get_state(program_generator)
best_ee_state = get_state(execution_engine)
# no support for PG+EE her
best_baseline_state = None
else:
# OR start a new one
if args.model_type in models_that_need_pg:
program_generator, pg_kwargs = get_program_generator(args)
logger.info('Here is the conditioning network:')
logger.info(program_generator)
if args.model_type in models_that_need_ee:
execution_engine, ee_kwargs = get_execution_engine(args)
logger.info('Here is the conditioned network:')
logger.info(execution_engine)
if args.model_type in ['LSTM', 'CNN+LSTM', 'CNN+LSTM+SA']:
baseline_model, baseline_kwargs = get_baseline_model(args)
params = baseline_model.parameters()
if args.baseline_train_only_rnn == 1:
params = baseline_model.rnn.parameters()
logger.info('Here is the baseline model')
logger.info(baseline_model)
baseline_type = args.model_type
if args.load_ee_parameters:
state = vr.utils.load_cpu(args.load_ee_parameters)
execution_engine.load_state_dict(state['execution_engine_state'], strict=False)
optim_method = getattr(torch.optim, args.optimizer)
if program_generator:
pg_learning_rate = args.pg_learning_rate
if pg_learning_rate is None:
pg_learning_rate = args.learning_rate
pg_optimizer = optim_method(program_generator.parameters(),
lr=pg_learning_rate,
weight_decay=args.weight_decay,
eps=args.eps)
if execution_engine and not args.symbolic_ee:
if args.mac_train_just_control:
parameters = list(execution_engine.controlUnit.parameters())
for inpUnit in execution_engine.inputUnits:
parameters.extend(list(inpUnit.parameters()))
else:
parameters = execution_engine.parameters()
ee_optimizer = optim_method(parameters,
lr=args.learning_rate,
weight_decay=args.weight_decay,
eps=args.eps)
if baseline_model:
baseline_optimizer = optim_method(params,
lr=args.learning_rate,
weight_decay=args.weight_decay)
loss_fn = torch.nn.CrossEntropyLoss().to(device)
t, epoch, reward_moving_average = stats['model_t'], stats['model_epoch'], 0
set_mode('train', [program_generator, execution_engine, baseline_model])
logger.info('train_loader has {} samples'.format(len(train_loader.dataset)))
for val_part, val_loader in zip(args.val_part, val_loaders):
logger.info('{}_loader has {} samples'.format(val_part, len(val_loader.dataset)))
num_checkpoints = 0
epoch_start_time = 0.0
epoch_total_time = 0.0
train_pass_total_time = 0.0
val_pass_total_time = 0.0
valB_pass_total_time = 0.0
running_loss = 0.0
cache = [lru.LRU(10) for i in range(len(train_loader.dataset))]
while t < args.num_iterations:
if (epoch > 0) and (args.time == 1):
epoch_time = time.time() - epoch_start_time
epoch_total_time += epoch_time
logger.info('EPOCH PASS AVG TIME: ' + str(epoch_total_time / epoch), 'white')
logger.info('Epoch Pass Time : ' + str(epoch_time), 'white')
epoch_start_time = time.time()
fwd_pass_time = 0.
bwd_pass_time = 0.
epoch += 1
logger.info('Starting epoch %d' % epoch)
batch_start_time = time.time()
for batch in train_loader:
compute_start_time = time.time()
t += 1
acc = None
prog_acc = None
entropy = None
data_moving_start_time = time.time()
(questions, indices, feats, scenes, answers, programs) = batch
if isinstance(questions, list):
questions = questions[0]
questions = questions[:, :(questions.sum(0) > 0).sum()]
questions_var = Variable(questions.to(device))
feats_var = Variable(feats.to(device))
answers_var = Variable(answers.to(device))
if programs[0] is not None:
programs_var = Variable(programs.to(device))
data_moving_time = time.time() - compute_start_time
reward = None
if args.model_type == 'PG':
# Train program generator with ground-truth programs
pg_optimizer.zero_grad()
loss = program_generator.log_likelihood(questions_var, programs_var).mean()
loss.backward()
pg_optimizer.step()
elif args.model_type in ['EE', 'Hetero']:
# Train execution engine with ground-truth programs
ee_optimizer.zero_grad()
scores, _, _ = execution_engine(feats_var, programs_var, question=questions_var)
full_loss = loss = loss_fn(scores, answers_var)
acc = (scores.argmax(1) == answers_var).float().mean()
full_loss.backward()
ee_optimizer.step()
elif args.model_type in ['Control-EE']:
pg_optimizer.zero_grad()
ee_optimizer.zero_grad()
question_repr = program_generator(questions_var)
scores, _, _ = execution_engine(feats_var, programs_var, question=question_repr)
loss = loss_fn(scores, answers_var)
acc = (scores.argmax(1) == answers_var).float().mean()
loss = loss_fn(scores, answers_var)
loss.backward()
pg_optimizer.step()
ee_optimizer.step()
elif args.model_type in ['LSTM', 'CNN+LSTM', 'CNN+LSTM+SA']:
baseline_optimizer.zero_grad()
baseline_model.zero_grad()
scores = baseline_model(questions_var, feats_var)
loss = loss_fn(scores, answers_var)
loss.backward()
baseline_optimizer.step()
elif args.model_type == 'PG+EE':
programs_pred, token_logprobs = program_generator.forward(questions_var)
if args.symbolic_ee:
preds = execution_engine(scenes, programs_pred)
else:
with torch.set_grad_enabled(bool(args.train_execution_engine)):
scores, program_wellformed, _ = execution_engine(feats_var, programs_pred)
preds = scores.argmax(1).cpu()
if args.enforce_wellformed:
preds[~program_wellformed] = -1
loss = loss_fn(scores, answers_var)
raw_reward = (preds == answers).float()
acc = raw_reward.mean()
if args.symbolic_ee:
loss = -acc
reward_moving_average *= args.reward_decay
reward_moving_average += (1.0 - args.reward_decay) * raw_reward.mean()
centered_reward = raw_reward - (reward_moving_average if args.ewa_baseline else 0.5)
entropy = -token_logprobs.sum(1).mean()
min_length = min(programs_var.shape[1], programs_pred.shape[1])
programs_pred = programs_pred[:, :min_length]
programs_var = programs_var[:, :min_length]
correct = (programs_pred == programs_var).int().sum(1) == min_length
prog_acc = correct.float().mean()
if args.train_execution_engine == 1:
ee_optimizer.zero_grad()
loss.backward()
ee_optimizer.step()
if args.train_program_generator == 1:
pg_optimizer.zero_grad()
weights = centered_reward.to(device)[:, None]
if args.entropy_coef:
# maximizing entropy = using -logprobs as rewards
weights += args.entropy_coef * -token_logprobs.sum(1)[:, None].detach()
if args.same_reward:
weights = weights.mean()
surrogate_loss = (-token_logprobs * weights).sum(1).mean()
surrogate_loss.backward()
pg_optimizer.step()
elif args.model_type == 'FiLM' or args.model_type == 'MAC':
if args.set_execution_engine_eval == 1:
set_mode('eval', [execution_engine])
forward_start_time = time.time()
programs_pred = program_generator(questions_var)
scores = execution_engine(feats_var, programs_pred)
loss = loss_fn(scores, answers_var)
full_loss = loss.clone()
fwd_pass_time = time.time() - forward_start_time
backward_start_time = time.time()
profile_step = t % 66 == 0
with torch.autograd.profiler.profile(enabled=profile_step, use_cuda=True) as prof:
pg_optimizer.zero_grad()
ee_optimizer.zero_grad()
if args.debug_every <= -2:
pdb.set_trace()
full_loss.backward()
if args.debug_every < float('inf'):
check_grad_num_nans(execution_engine, 'FiLMedNet' if args.model_type == 'FiLM' else args.model_type)
check_grad_num_nans(program_generator, 'FiLMGen')
if profile_step:
with open(args.checkpoint_path + '.prof', 'wb') as dest:
pickle.dump(prof, dest)
print('profile dumped')
bwd_pass_time = time.time() - backward_start_time
if args.model_type == 'MAC':
if args.train_program_generator == 1 or args.train_execution_engine == 1:
if args.grad_clip > 0:
allMacParams = itertools.chain(program_generator.parameters(), execution_engine.parameters())
torch.nn.utils.clip_grad_norm_(allMacParams, args.grad_clip)
pg_optimizer.step()
ee_optimizer.step()
else:
if args.train_program_generator == 1:
if args.grad_clip > 0:
torch.nn.utils.clip_grad_norm(program_generator.parameters(), args.grad_clip)
pg_optimizer.step()
if args.train_execution_engine == 1:
if args.grad_clip > 0:
torch.nn.utils.clip_grad_norm(execution_engine.parameters(), args.grad_clip)
ee_optimizer.step()
elif args.model_type == 'Tfilm':
if args.set_execution_engine_eval == 1:
set_mode('eval', [execution_engine])
programs_pred = program_generator(questions_var)
scores = execution_engine(feats_var, programs_pred, programs_var)
loss = loss_fn(scores, answers_var)
pg_optimizer.zero_grad()
ee_optimizer.zero_grad()
if args.debug_every <= -2:
pdb.set_trace()
loss.backward()
if args.debug_every < float('inf'):
check_grad_num_nans(execution_engine, 'TFiLMedNet' if args.model_type == 'Tfilm' else 'NMNFiLMedNet')
check_grad_num_nans(program_generator, 'FiLMGen')
if args.train_program_generator == 1:
if args.grad_clip > 0:
torch.nn.utils.clip_grad_norm(program_generator.parameters(), args.grad_clip)
pg_optimizer.step()
if args.train_execution_engine == 1:
if args.grad_clip > 0:
torch.nn.utils.clip_grad_norm(execution_engine.parameters(), args.grad_clip)
ee_optimizer.step()
elif args.model_type == 'RTfilm':
if args.set_execution_engine_eval == 1:
set_mode('eval', [execution_engine])
programs_pred = program_generator(questions_var)
scores = execution_engine(feats_var, programs_pred)
loss = loss_fn(scores, answers_var)
pg_optimizer.zero_grad()
ee_optimizer.zero_grad()
if args.debug_every <= -2:
pdb.set_trace()
loss.backward()
if args.debug_every < float('inf'):
check_grad_num_nans(execution_engine, 'RTFiLMedNet')
check_grad_num_nans(program_generator, 'FiLMGen')
if args.train_program_generator == 1:
if args.grad_clip > 0:
torch.nn.utils.clip_grad_norm(program_generator.parameters(), args.grad_clip)
pg_optimizer.step()
if args.train_execution_engine == 1:
if args.grad_clip > 0:
torch.nn.utils.clip_grad_norm(execution_engine.parameters(), args.grad_clip)
ee_optimizer.step()
elif args.model_type in ['RelNet', 'ConvLSTM']:
question_rep = program_generator(questions_var)
scores = execution_engine(feats_var, question_rep)
loss = loss_fn(scores, answers_var)
pg_optimizer.zero_grad()
ee_optimizer.zero_grad()
loss.backward()
pg_optimizer.step()
ee_optimizer.step()
else:
raise ValueError()
if torch.isnan(loss).item():
print("NAN!")
sys.exit(1)
if t == args.num_iterations:
# Save the best model separately
break
if t % args.record_loss_every == 0:
if 'miss_mask' in locals():
print(miss_mask.sum())
running_loss += loss.item()
avg_loss = running_loss / args.record_loss_every
compute_time = time.time() - compute_start_time
batch_time = time.time() - batch_start_time
logger_format = "iter: {} t_b: {:.5f} t_c: {:.5f} t_m: {:.5f} t_fwd: {:.5f} t_bwd: {:.5f} loss: {:.5f}"
logger_data = (
t, batch_time, compute_time,
data_moving_time, fwd_pass_time, bwd_pass_time, avg_loss)
if acc is not None:
logger_format += " acc: {:.5f}"
logger_data += (acc.item(),)
if prog_acc is not None:
logger_format += " prog_acc: {:.5f}"
logger_data += (prog_acc.item(),)
if entropy is not None:
logger_format += " H: {:.9f}"
logger_data += (entropy.item(),)
logger.info(logger_format.format(*logger_data))
stats['train_losses'].append(avg_loss)
if prog_acc:
stats['prog_acc'].append(prog_acc.item())
if entropy:
stats['entropy'].append(entropy.item())
stats['train_losses_ts'].append(t)
if reward is not None:
stats['train_rewards'].append(reward.item())
stats['compute_time'].append(compute_time)
running_loss = 0.0
else:
running_loss += loss.item()
batch_start_time = time.time()
if args.local_rank > 0:
continue
if t == 1 or t % args.validate_every == 0:
logger.info('Checking training accuracy ... ')
start = time.time()
train_acc = check_accuracy(args, program_generator, execution_engine,
baseline_model, train_loader)
train_pass_time = (time.time() - start)
logger.info('train pass time: ' + str(train_pass_time))
logger.info('train accuracy is {}'.format(train_acc))
logger.info('Checking validation accuracy ...')
stats['train_accs'].append(train_acc)
first_val_acc = None
for val_part, val_loader in zip(args.val_part, val_loaders):
start = time.time()
val_acc = check_accuracy(args, program_generator, execution_engine,
baseline_model, val_loader)
if first_val_acc is None:
first_val_acc = val_acc
val_pass_time = (time.time() - start)
logger.info('{} pass time: {}'.format(val_part, val_pass_time))
logger.info('{} accuracy is {}'.format(val_part, val_acc))
stats['{}_accs'.format(val_part)].append(val_acc)
stats['val_accs_ts'].append(t)
if t == 1 or t % args.checkpoint_every == 0:
pg_state = get_state(program_generator)
ee_state = get_state(execution_engine)
baseline_state = get_state(baseline_model)
stats['model_t'] = t
stats['model_epoch'] = epoch
checkpoint = {
'args': args.__dict__,
'program_generator_kwargs': pg_kwargs,
'program_generator_state': pg_state,
'execution_engine_kwargs': ee_kwargs,
'execution_engine_state': ee_state,
'baseline_kwargs': baseline_kwargs,
'baseline_state': baseline_state,
'baseline_type': baseline_type,
'vocab': vocab
}
for k, v in stats.items():
checkpoint[k] = v
# Save current model
logger.info('Saving checkpoint to %s' % args.checkpoint_path)
atomic_torch_save(checkpoint, args.checkpoint_path)
# Save training status in a human-readable format
del checkpoint['program_generator_state']
del checkpoint['execution_engine_state']
del checkpoint['baseline_state']
with open(args.checkpoint_path + '.json', 'w') as f:
json.dump(checkpoint, f, indent=2, sort_keys=True)
# Save the best model separately
if t == 1 or t % args.validate_every == 0:
for val_part in args.val_part:
cur_acc = stats['{}_accs'.format(val_part)][-1]
best_acc_key = 'best_{}_acc'.format(val_part)
if cur_acc > stats.get(best_acc_key, -1):
best_path = '{}.{}.best'.format(args.checkpoint_path, val_part)
logger.info('Saving best so far checkpoint to ' + best_path)
stats[best_acc_key] = cur_acc
checkpoint['program_generator_state'] = pg_state
checkpoint['execution_engine_state'] = ee_state
checkpoint['baseline_state'] = baseline_state
atomic_torch_save(checkpoint, best_path)
def get_state(m):
    """Return a snapshot of a model's learnable state.

    ``None`` models yield ``None``; DistributedDataParallel wrappers are
    unwrapped and the wrapped module's state is returned; the symbolic
    ClevrExecutor has no learnable parameters, so an empty dict is returned.
    All tensors are cloned so the snapshot is decoupled from training.
    """
    if m is None:
        return None
    if isinstance(m, DistributedDataParallel):
        # Recurse into the wrapped module so checkpoints are wrapper-agnostic.
        return get_state(m.module)
    if isinstance(m, ClevrExecutor):
        return {}
    return {name: tensor.clone() for name, tensor in m.state_dict().items()}
def get_program_generator(args):
    """Build (or load) the program generator for the configured model type.

    Returns a ``(pg, kwargs)`` pair where ``kwargs`` records the constructor
    arguments (empty when loading a pretrained model or using NS-VQA).
    The model is moved to ``device``, put in train mode, and wrapped in
    DistributedDataParallel when running multi-GPU.
    """
    vocab = vr.utils.load_vocab(args.vocab_json)
    if args.program_generator_start_from is not None:
        logger.info('start from pretrained PG')
        pg, kwargs = vr.utils.load_program_generator(args.program_generator_start_from)
        if args.temperature_increase:
            # Dividing the output layer by T > 1 sharpens the decoder's
            # softmax (temperature scaling applied directly to the weights).
            pg.decoder_linear.weight.data /= args.temperature_increase
            pg.decoder_linear.bias.data /= args.temperature_increase
    elif args.ns_vqa:
        pg, kwargs = Seq2seqParser(vocab), {}
    else:
        kwargs = {
            'encoder_vocab_size': len(vocab['question_token_to_idx']),
            'decoder_vocab_size': len(vocab['program_token_to_idx']),
            'wordvec_dim': args.rnn_wordvec_dim,
            'hidden_dim': args.rnn_hidden_dim,
            'rnn_num_layers': args.rnn_num_layers,
            'rnn_dropout': args.rnn_dropout,
        }
        if args.model_type in ['FiLM', 'Tfilm', 'RTfilm', 'MAC', 'Control-EE']:
            kwargs['parameter_efficient'] = args.program_generator_parameter_efficient == 1
            kwargs['output_batchnorm'] = args.rnn_output_batchnorm == 1
            kwargs['bidirectional'] = args.bidirectional == 1
            kwargs['encoder_type'] = args.encoder_type
            kwargs['decoder_type'] = args.decoder_type
            kwargs['gamma_option'] = args.gamma_option
            kwargs['gamma_baseline'] = args.gamma_baseline
            kwargs['use_attention'] = args.film_use_attention == 1
            # num_modules depends on the architecture: fixed for FiLM/MAC,
            # derived from program arity/depth for Tfilm, and from the
            # generated tree for RTfilm.
            if args.model_type == 'FiLM' or args.model_type == 'MAC':
                kwargs['num_modules'] = args.num_modules
            elif args.model_type == 'Tfilm':
                kwargs['num_modules'] = args.max_program_module_arity * args.max_program_tree_depth + 1
            elif args.model_type == 'RTfilm':
                treeArities = TreeGenerator().gen(args.tree_type_for_RTfilm)
                kwargs['num_modules'] = len(treeArities)
            if args.model_type == 'MAC' or args.model_type == 'Control-EE':
                kwargs['taking_context'] = True
                # MAC uses its own control attention; disable FiLM-style attention.
                kwargs['use_attention'] = False
                kwargs['variational_embedding_dropout'] = args.variational_embedding_dropout
                kwargs['embedding_uniform_boundary'] = args.mac_embedding_uniform_boundary
                kwargs['module_num_layers'] = args.module_num_layers
                kwargs['module_dim'] = args.module_dim
                kwargs['debug_every'] = args.debug_every
            pg = FiLMGen(**kwargs)
        elif args.model_type in ['RelNet', 'ConvLSTM']:
            kwargs['bidirectional'] = args.bidirectional == 1
            kwargs['encoder_type'] = args.encoder_type
            kwargs['taking_context'] = True  # return the last hidden state of LSTM
            pg = FiLMGen(**kwargs)
        elif args.rnn_attention:
            kwargs['autoregressive'] = not args.rnn_nonautoreg
            pg = Seq2SeqAtt(**kwargs)
        else:
            pg = Seq2Seq(**kwargs)
    pg.to(device)
    pg.train()
    if is_multigpu():
        pg = DistributedDataParallel(pg, device_ids=[args.local_rank])
    return pg, kwargs
def get_execution_engine(args):
    """Build (or load) the execution engine for the configured model type.

    Returns an ``(ee, kwargs)`` pair where ``kwargs`` records the constructor
    arguments (empty for the symbolic executor or a pretrained load).  The
    model is moved to ``device``, put in train mode, and wrapped in
    DistributedDataParallel when running multi-GPU.

    Bug fix: in the SimpleNMN branch, ``kwargs['use_color']`` was assigned
    with a trailing comma, making it the 1-tuple ``(args.use_color,)`` —
    always truthy regardless of the flag.  The flag itself is assigned now.
    """
    vocab = vr.utils.load_vocab(args.vocab_json)
    if args.symbolic_ee:
        # Symbolic execution on ground-truth scenes: no learnable state.
        return ClevrExecutor(vocab), {}
    if args.execution_engine_start_from is not None:
        logger.info("start from pretrained EE")
        ee, kwargs = vr.utils.load_execution_engine(args.execution_engine_start_from)
    else:
        # Base kwargs shared by most architectures; branches below add or
        # override architecture-specific entries.
        kwargs = {
            'vocab': vocab,
            'feature_dim': args.feature_dim,
            'stem_batchnorm': args.module_stem_batchnorm == 1,
            'stem_num_layers': args.module_stem_num_layers,
            'stem_subsample_layers': args.module_stem_subsample_layers,
            'stem_kernel_size': args.module_stem_kernel_size,
            'stem_stride': args.module_stem_stride,
            'stem_padding': args.module_stem_padding,
            'stem_dim': args.stem_dim,
            'module_dim': args.module_dim,
            'module_kernel_size': args.module_kernel_size,
            'module_residual': args.module_residual == 1,
            'module_input_proj': args.module_input_proj,
            'module_batchnorm': args.module_batchnorm == 1,
            'classifier_proj_dim': args.classifier_proj_dim,
            'classifier_downsample': args.classifier_downsample,
            'classifier_fc_layers': args.classifier_fc_dims,
            'classifier_batchnorm': args.classifier_batchnorm == 1,
            'classifier_dropout': args.classifier_dropout,
        }
        if args.model_type == 'FiLM':
            kwargs['num_modules'] = args.num_modules
            kwargs['stem_kernel_size'] = args.module_stem_kernel_size
            kwargs['stem_stride'] = args.module_stem_stride
            kwargs['stem_padding'] = args.module_stem_padding
            kwargs['module_num_layers'] = args.module_num_layers
            kwargs['module_intermediate_batchnorm'] = args.module_intermediate_batchnorm == 1
            kwargs['module_batchnorm_affine'] = args.module_batchnorm_affine == 1
            kwargs['module_dropout'] = args.module_dropout
            kwargs['module_input_proj'] = args.module_input_proj
            kwargs['module_kernel_size'] = args.module_kernel_size
            kwargs['use_gamma'] = args.use_gamma == 1
            kwargs['use_beta'] = args.use_beta == 1
            kwargs['use_coords'] = args.use_coords
            kwargs['debug_every'] = args.debug_every
            kwargs['print_verbose_every'] = args.print_verbose_every
            kwargs['condition_method'] = args.condition_method
            kwargs['condition_pattern'] = args.condition_pattern
            ee = FiLMedNet(**kwargs)
        elif args.model_type == 'Tfilm':
            # One module per (arity, depth) slot plus a root module.
            kwargs['num_modules'] = args.max_program_module_arity * args.max_program_tree_depth + 1
            kwargs['max_program_module_arity'] = args.max_program_module_arity
            kwargs['max_program_tree_depth'] = args.max_program_tree_depth
            kwargs['stem_kernel_size'] = args.module_stem_kernel_size
            kwargs['stem_stride'] = args.module_stem_stride
            kwargs['stem_padding'] = args.module_stem_padding
            kwargs['module_num_layers'] = args.module_num_layers
            kwargs['module_intermediate_batchnorm'] = args.module_intermediate_batchnorm == 1
            kwargs['module_batchnorm_affine'] = args.module_batchnorm_affine == 1
            kwargs['module_dropout'] = args.module_dropout
            kwargs['module_input_proj'] = args.module_input_proj
            kwargs['module_kernel_size'] = args.module_kernel_size
            kwargs['use_gamma'] = args.use_gamma == 1
            kwargs['use_beta'] = args.use_beta == 1
            kwargs['use_coords'] = args.use_coords
            kwargs['debug_every'] = args.debug_every
            kwargs['print_verbose_every'] = args.print_verbose_every
            kwargs['condition_method'] = args.condition_method
            kwargs['condition_pattern'] = args.condition_pattern
            ee = TFiLMedNet(**kwargs)
        elif args.model_type == 'RTfilm':
            treeArities = TreeGenerator().gen(args.tree_type_for_RTfilm)
            kwargs['num_modules'] = len(treeArities)
            kwargs['treeArities'] = treeArities
            kwargs['tree_type_for_RTfilm'] = args.tree_type_for_RTfilm
            kwargs['share_module_weight_at_depth'] = args.share_module_weight_at_depth
            kwargs['stem_kernel_size'] = args.module_stem_kernel_size
            kwargs['stem_stride'] = args.module_stem_stride
            kwargs['stem_padding'] = args.module_stem_padding
            kwargs['module_num_layers'] = args.module_num_layers
            kwargs['module_intermediate_batchnorm'] = args.module_intermediate_batchnorm == 1
            kwargs['module_batchnorm_affine'] = args.module_batchnorm_affine == 1
            kwargs['module_dropout'] = args.module_dropout
            kwargs['module_input_proj'] = args.module_input_proj
            kwargs['module_kernel_size'] = args.module_kernel_size
            kwargs['use_gamma'] = args.use_gamma == 1
            kwargs['use_beta'] = args.use_beta == 1
            kwargs['use_coords'] = args.use_coords
            kwargs['debug_every'] = args.debug_every
            kwargs['print_verbose_every'] = args.print_verbose_every
            kwargs['condition_method'] = args.condition_method
            kwargs['condition_pattern'] = args.condition_pattern
            ee = RTFiLMedNet(**kwargs)
        elif args.model_type == 'MAC':
            # MAC takes a different constructor signature; rebuild kwargs
            # from scratch rather than extending the shared base dict.
            kwargs = {
                'vocab': vocab,
                'feature_dim': args.feature_dim,
                'stem_num_layers': args.module_stem_num_layers,
                'stem_batchnorm': args.module_stem_batchnorm == 1,
                'stem_kernel_size': args.module_stem_kernel_size,
                'stem_subsample_layers': args.module_stem_subsample_layers,
                'stem_stride': args.module_stem_stride,
                'stem_padding': args.module_stem_padding,
                'num_modules': args.num_modules,
                'module_dim': args.module_dim,
                'stem_dim': args.stem_dim,
                #'module_dropout': args.module_dropout,
                'question_embedding_dropout': args.mac_question_embedding_dropout,
                'stem_dropout': args.mac_stem_dropout,
                'memory_dropout': args.mac_memory_dropout,
                'read_dropout': args.mac_read_dropout,
                'write_unit': args.mac_write_unit,
                'read_connect': args.mac_read_connect,
                'read_unit': args.mac_read_unit,
                'question2output': args.mac_question2output,
                'noisy_controls': bool(args.mac_vib_coof),
                'use_prior_control_in_control_unit': args.mac_use_prior_control_in_control_unit == 1,
                'use_self_attention': args.mac_use_self_attention,
                'use_memory_gate': args.mac_use_memory_gate,
                'nonlinearity': args.mac_nonlinearity,
                'classifier_fc_layers': args.classifier_fc_dims,
                'classifier_batchnorm': args.classifier_batchnorm == 1,
                'classifier_dropout': args.classifier_dropout,
                'use_coords': args.use_coords,
                'debug_every': args.debug_every,
                'print_verbose_every': args.print_verbose_every,
                'hard_code_control' : args.hard_code_control
            }
            ee = MAC(**kwargs)
        elif args.model_type == 'Hetero':
            kwargs = {
                'vocab': vocab,
                'feature_dim': args.feature_dim,
                'stem_batchnorm': args.module_stem_batchnorm == 1,
                'stem_num_layers': args.module_stem_num_layers,
                'stem_kernel_size': args.module_stem_kernel_size,
                'stem_stride': args.module_stem_stride,
                'stem_padding': args.module_stem_padding,
                'module_dim': args.module_dim,
                'stem_dim': args.stem_dim,
                'module_batchnorm': args.module_batchnorm == 1,
            }
            ee = HeteroModuleNet(**kwargs)
        elif args.model_type == 'SimpleNMN':
            kwargs['use_film'] = args.nmn_use_film
            kwargs['forward_func'] = args.nmn_type
            # Fixed: was `args.use_color,` (a 1-tuple, hence always truthy).
            kwargs['use_color'] = args.use_color
            ee = SimpleModuleNet(**kwargs)
        elif args.model_type == 'SHNMN':
            kwargs = {
                'vocab' : vocab,
                'feature_dim' : args.feature_dim,
                'stem_dim' : args.stem_dim,
                'module_dim': args.module_dim,
                'module_kernel_size' : args.module_kernel_size,
                'stem_subsample_layers': args.module_stem_subsample_layers,
                'stem_num_layers': args.module_stem_num_layers,
                'stem_kernel_size': args.module_stem_kernel_size,
                'stem_padding': args.module_stem_padding,
                'stem_batchnorm': args.module_stem_batchnorm == 1,
                'classifier_fc_layers': args.classifier_fc_dims,
                'classifier_proj_dim': args.classifier_proj_dim,
                'classifier_downsample': args.classifier_downsample,
                'classifier_batchnorm': args.classifier_batchnorm == 1,
                'classifier_dropout' : args.classifier_dropout,
                'hard_code_alpha' : args.hard_code_alpha,
                'hard_code_tau' : args.hard_code_tau,
                'tau_init' : args.tau_init,
                'alpha_init' : args.alpha_init,
                'which_chain' : args.which_chain,
                'model_type' : args.shnmn_type,
                'model_bernoulli' : args.model_bernoulli,
                'num_modules' : 3,
                'use_module' : args.use_module
            }
            ee = SHNMN(**kwargs)
        elif args.model_type == 'RelNet':
            kwargs['module_num_layers'] = args.module_num_layers
            kwargs['rnn_hidden_dim'] = args.rnn_hidden_dim
            ee = RelationNet(**kwargs)
        elif args.model_type == 'ConvLSTM':
            kwargs['rnn_hidden_dim'] = args.rnn_hidden_dim
            ee = ConvLSTM(**kwargs)
        else:
            # Default: the (PG+)EE neural module network.
            kwargs['use_film'] = args.nmn_use_film
            kwargs['use_simple_block'] = args.nmn_use_simple_block
            kwargs['mod_id_loss'] = False
            kwargs['kl_loss'] = False
            kwargs['module_pool'] = args.nmn_module_pool
            kwargs['module_num_layers'] = args.module_num_layers
            kwargs['module_use_gammas'] = args.nmn_use_gammas
            kwargs['learn_control'] = args.nmn_learn_control
            kwargs['rnn_dim'] = args.rnn_hidden_dim
            kwargs['type_anonymizer'] = False
            kwargs['discriminator_proj_dim'] = args.discriminator_proj_dim
            kwargs['discriminator_downsample'] = args.discriminator_downsample
            kwargs['discriminator_fc_layers'] = args.discriminator_fc_dims
            kwargs['discriminator_dropout'] = args.discriminator_dropout
            ee = ModuleNet(**kwargs)
    ee.to(device)
    ee.train()
    if is_multigpu():
        ee = DistributedDataParallel(ee, device_ids=[args.local_rank])
    return ee, kwargs
def get_baseline_model(args):
    """Build (or load) a question-answering baseline (LSTM / CNN+LSTM / SA).

    Returns a ``(model, kwargs)`` pair.  When the checkpoint vocabulary
    differs from the current one, the model's vocabulary is expanded
    (the new vocab must be a superset of the old one).

    Raises:
        ValueError: if ``args.model_type`` is not a recognized baseline and
            no ``baseline_start_from`` checkpoint is given.  (Previously this
            fell through and crashed later with a confusing NameError.)
    """
    vocab = vr.utils.load_vocab(args.vocab_json)
    if args.baseline_start_from is not None:
        model, kwargs = vr.utils.load_baseline(args.baseline_start_from)
    elif args.model_type == 'LSTM':
        kwargs = {
            'vocab': vocab,
            'rnn_wordvec_dim': args.rnn_wordvec_dim,
            'rnn_dim': args.rnn_hidden_dim,
            'rnn_num_layers': args.rnn_num_layers,
            'rnn_dropout': args.rnn_dropout,
            'fc_dims': args.classifier_fc_dims,
            'fc_use_batchnorm': args.classifier_batchnorm == 1,
            'fc_dropout': args.classifier_dropout,
        }
        model = LstmModel(**kwargs)
    elif args.model_type == 'CNN+LSTM':
        kwargs = {
            'vocab': vocab,
            'rnn_wordvec_dim': args.rnn_wordvec_dim,
            'rnn_dim': args.rnn_hidden_dim,
            'rnn_num_layers': args.rnn_num_layers,
            'rnn_dropout': args.rnn_dropout,
            'cnn_feat_dim': args.feature_dim,
            'cnn_num_res_blocks': args.cnn_num_res_blocks,
            'cnn_res_block_dim': args.cnn_res_block_dim,
            'cnn_proj_dim': args.cnn_proj_dim,
            'cnn_pooling': args.cnn_pooling,
            'fc_dims': args.classifier_fc_dims,
            'fc_use_batchnorm': args.classifier_batchnorm == 1,
            'fc_dropout': args.classifier_dropout,
        }
        model = CnnLstmModel(**kwargs)
    elif args.model_type == 'CNN+LSTM+SA':
        kwargs = {
            'vocab': vocab,
            'rnn_wordvec_dim': args.rnn_wordvec_dim,
            'rnn_dim': args.rnn_hidden_dim,
            'rnn_num_layers': args.rnn_num_layers,
            'rnn_dropout': args.rnn_dropout,
            'cnn_feat_dim': args.feature_dim,
            'stacked_attn_dim': args.stacked_attn_dim,
            'num_stacked_attn': args.num_stacked_attn,
            'fc_dims': args.classifier_fc_dims,
            'fc_use_batchnorm': args.classifier_batchnorm == 1,
            'fc_dropout': args.classifier_dropout,
        }
        model = CnnLstmSaModel(**kwargs)
    else:
        # Fail fast with an actionable message instead of the implicit
        # NameError the fall-through used to produce.
        raise ValueError('Unrecognized baseline model type: %s' % args.model_type)
    if model.rnn.token_to_idx != vocab['question_token_to_idx']:
        # Make sure new vocab is superset of old
        for k, v in model.rnn.token_to_idx.items():
            assert k in vocab['question_token_to_idx']
            assert vocab['question_token_to_idx'][k] == v
        for token, idx in vocab['question_token_to_idx'].items():
            model.rnn.token_to_idx[token] = idx
        kwargs['vocab'] = vocab
        model.rnn.expand_vocab(vocab['question_token_to_idx'])
    model.to(device)
    model.train()
    return model, kwargs
def set_mode(mode, models):
    """Switch every trainable model in *models* to train or eval mode.

    ``None`` entries and symbolic ClevrExecutor instances are skipped
    (they have no train/eval distinction).
    """
    assert mode in ['train', 'eval']
    for model in models:
        if model is None or isinstance(model, ClevrExecutor):
            continue
        if mode == 'train':
            model.train()
        if mode == 'eval':
            model.eval()
def check_accuracy(args, program_generator, execution_engine, baseline_model, loader):
    """Evaluate answer accuracy (or program accuracy for 'PG') over *loader*.

    Switches all models to eval mode for the duration, dispatches the forward
    pass according to ``args.model_type``, and returns the fraction of correct
    predictions.  Evaluation stops early once ``args.num_val_samples`` samples
    have been scored (when that option is set).
    """
    set_mode('eval', [program_generator, execution_engine, baseline_model])
    num_correct, num_samples = 0, 0
    for batch in loader:
        (questions, _, feats, scenes, answers, programs) = batch
        if isinstance(questions, list):
            questions = questions[0]
        # Trim trailing all-zero (padding) columns from the question batch.
        questions = questions[:, :(questions.sum(0) > 0).sum()]
        questions_var = questions.to(device)
        feats_var = feats.to(device)
        if programs[0] is not None:
            programs_var = programs.to(device)
        def scope():
            # Inner function so that all forward-pass temporaries go out of
            # scope (and can be freed) as soon as it returns; counters are
            # accumulated via nonlocal.
            nonlocal num_samples
            nonlocal num_correct
            scores = None  # Use this for everything but PG
            if args.model_type == 'PG':
                #TODO(mnoukhov) change to scores for attention
                # 'PG' measures exact-match program accuracy, not answer accuracy.
                vocab = vr.utils.load_vocab(args.vocab_json)
                programs_pred, _ = program_generator.forward(questions_var)
                for i in range(questions.size(0)):
                    program_pred_str = vr.preprocess.decode(programs_pred[i].tolist(), vocab['program_idx_to_token'])
                    program_str = vr.preprocess.decode(programs[i].tolist(), vocab['program_idx_to_token'])
                    if program_pred_str == program_str:
                        num_correct += 1
                    num_samples += 1
                return
            elif args.model_type in ['EE', 'Hetero']:
                scores, _2, _3 = execution_engine(feats_var, programs_var)
            elif args.model_type == 'PG+EE':
                # Greedy (argmax) decoding at evaluation time.
                programs_pred, _ = program_generator.forward(questions_var, argmax=True)
                if isinstance(execution_engine, ClevrExecutor):
                    preds = execution_engine(scenes, programs_pred)
                else:
                    scores, _2, _3 = execution_engine(feats_var, programs_pred)
            elif args.model_type == 'Control-EE':
                questions_repr = program_generator(questions_var)
                scores, _2, _3 = execution_engine(feats_var, programs_var, question=questions_repr)
            elif args.model_type == 'FiLM' or args.model_type == 'RTfilm':
                programs_pred = program_generator(questions_var)
                scores = execution_engine(feats_var, programs_pred)
            elif args.model_type == 'Tfilm':
                programs_pred = program_generator(questions_var)
                scores = execution_engine(feats_var, programs_pred, programs_var)
            elif args.model_type == 'MAC':
                programs_pred = program_generator(questions_var)
                scores = execution_engine(feats_var, programs_pred, isTest=True)
            elif args.model_type in ['ConvLSTM', 'RelNet']:
                question_rep = program_generator(questions_var)
                scores = execution_engine(feats_var, question_rep)
            elif args.model_type in ['LSTM', 'CNN+LSTM', 'CNN+LSTM+SA']:
                scores = baseline_model(questions_var, feats_var)
            elif args.model_type in ['SimpleNMN', 'SHNMN']:
                scores = execution_engine(feats_var, questions_var)
            else:
                raise NotImplementedError('model ', args.model_type, ' check_accuracy not implemented')
            if scores is not None:
                _, preds = scores.data.cpu().max(1)
                num_correct += (preds == answers).sum().item()
                num_samples += preds.size(0)
        # dirty trick to make pytorch free memory earlier
        with torch.no_grad():
            scope()
        if args.num_val_samples is not None and num_samples >= args.num_val_samples:
            break
    set_mode('train', [program_generator, execution_engine, baseline_model])
    acc = float(num_correct) / num_samples
    print("num check samples", num_samples)
    return acc
def check_grad_num_nans(model, model_name='model'):
    """Abort when any parameter gradient of *model* contains NaNs.

    Counts NaN entries per gradient tensor; on detection it prints the
    per-tensor counts, drops into pdb for inspection, and raises so the
    training run halts instead of silently diverging.

    Args:
        model: module whose ``.parameters()`` gradients are inspected.
        model_name: label used in the diagnostic output.

    Raises:
        RuntimeError: if any gradient contains at least one NaN.
            (The original `raise(Exception)` raised a bare class with no
            message; RuntimeError is a subclass, so existing handlers
            catching Exception still work.)
    """
    grads = [p.grad for p in model.parameters() if p.grad is not None]
    num_nans = [np.sum(np.isnan(grad.data.cpu().numpy())) for grad in grads]
    if any(num_nan > 0 for num_nan in num_nans):
        print('Nans in ' + model_name + ' gradient!')
        print(num_nans)
        pdb.set_trace()
        raise RuntimeError('NaN gradients detected in ' + model_name)
if __name__ == '__main__':
    # Parse CLI options first so argparse errors surface before any logging
    # configuration happens.
    parsed_args = parser.parse_args()
    logging.basicConfig(
        format="%(name)s: %(asctime)s: %(message)s",
        level=logging.INFO)
    main(parsed_args)
| 60,588
| 45.642802
| 138
|
py
|
CLOSURE
|
CLOSURE-master/scripts/generate_questions.py
|
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
from __future__ import print_function
import argparse, json, os, itertools, random, shutil
import time
import re
import question_engine as qeng
"""
Generate synthetic questions and answers for CLEVR images. Input is a single
JSON file containing ground-truth scene information for all images, and output
is a single JSON file containing all generated questions, answers, and programs.
Questions are generated by expanding templates. Each template contains a single
program template and one or more text templates, both with the same set of typed
slots; by convention <Z> = Size, <C> = Color, <M> = Material, <S> = Shape.
Program templates may contain special nodes that expand into multiple functions
during instantiation; for example a "filter" node in a program template will
expand into a combination of "filter_size", "filter_color", "filter_material",
and "filter_shape" nodes after instantiation, and a "filter_unique" node in a
template will expand into some combination of filtering nodes followed by a
"unique" node.
Templates are instantiated using depth-first search; we are looking for template
instantiations where (1) each "unique" node actually refers to a single object,
(2) constraints in the template are satisfied, and (3) the answer to the question
passes our rejection sampling heuristics.
To efficiently handle (1) and (2), we keep track of partial evaluations of the
program during each step of template expansion. This together with the use of
composite nodes in program templates (filter_unique, relate_filter_unique) allow
us to efficiently prune the search space and terminate early when we know that
(1) or (2) will be violated.
"""
# Command-line interface for the question generator.
parser = argparse.ArgumentParser()
# Inputs
parser.add_argument('--input_scene_file', default='../output/CLEVR_scenes.json',
    help="JSON file containing ground-truth scene information for all images " +
         "from render_images.py")
parser.add_argument('--metadata_file', default='metadata.json',
    help="JSON file containing metadata about functions")
parser.add_argument('--synonyms_json', default='synonyms.json',
    help="JSON file defining synonyms for parameter values")
parser.add_argument('--templates', default='CLEVR_1.0_templates',
    help="Directory containing JSON templates for questions")
# Output
parser.add_argument('--output_questions_file',
    default='../output/CLEVR_questions.json',
    help="The output file to write containing generated questions")
# Control which and how many images to process
parser.add_argument('--scene_start_idx', default=0, type=int,
    help="The image at which to start generating questions; this allows " +
         "question generation to be split across many workers")
parser.add_argument('--num_scenes', default=0, type=int,
    help="The number of images for which to generate questions. Setting to 0 " +
         "generates questions for all scenes in the input file starting from " +
         "--scene_start_idx")
# Control the number of questions per image; we will attempt to generate
# templates_per_image * instances_per_template questions per image.
parser.add_argument('--templates_per_image', default=10, type=int,
    help="The number of different templates that should be instantiated " +
         "on each image")
parser.add_argument('--instances_per_template', default=1, type=int,
    help="The number of times each template should be instantiated on an image")
parser.add_argument('--questions_per_template', default=None, type=int,
    help="Limits the number of questions for each given template")
# Misc
parser.add_argument('--reset_counts_every', default=250, type=int,
    help="How often to reset template and answer counts. Higher values will " +
         "result in flatter distributions over templates and answers, but " +
         "will result in longer runtimes.")
parser.add_argument('--verbose', action='store_true',
    help="Print more verbose output")
parser.add_argument('--time_dfs', action='store_true',
    help="Time each depth-first search; must be given with --verbose")
parser.add_argument('--profile', action='store_true',
    help="If given then run inside cProfile")
parser.add_argument('--allow_degenerate', action='store_true')
# NOTE(review): parsing is deliberately deferred (the original call below is
# kept commented out) — presumably so this module can be imported; confirm
# where parse_args() is actually invoked.
# args = parser.parse_args()
def precompute_filter_options(scene_struct, metadata):
    """Precompute and cache the scene's filter map.

    Builds a dict mapping every partial attribute tuple
    (size, color, material, shape) — where any entry may be None, i.e. a
    wildcard — to the set of object indices matching it, and stores it under
    ``scene_struct['_filter_options']``.
    """
    attribute_map = {}
    if metadata['dataset'] == 'CLEVR-v1.0':
        attr_keys = ['size', 'color', 'material', 'shape']
    else:
        assert False, 'Unrecognized dataset'
    n_attrs = len(attr_keys)
    # Enumerate all 2^n attribute subsets as bit masks; bit j of i selects
    # attribute j.  Enumeration order matches the original (i ascending) so
    # the resulting dict insertion order is unchanged.
    masks = [[(i >> j) & 1 for j in range(n_attrs)]
             for i in range(2 ** n_attrs)]
    for object_idx, obj in enumerate(scene_struct['objects']):
        if metadata['dataset'] == 'CLEVR-v1.0':
            keys = [tuple(obj[k] for k in attr_keys)]
        for mask in masks:
            for key in keys:
                # Masked-out positions become None (wildcard).
                masked_key = tuple(value if bit == 1 else None
                                   for value, bit in zip(key, mask))
                attribute_map.setdefault(masked_key, set()).add(object_idx)
    scene_struct['_filter_options'] = attribute_map
def find_filter_options(object_idxs, scene_struct, metadata):
    """Restrict the scene's precomputed filter map to *object_idxs*.

    Returns a dict mapping each partial attribute tuple (entries may be None)
    to the sorted list of indices from *object_idxs* that match it.  The
    filter map is computed lazily on first use and cached on the scene.
    """
    if '_filter_options' not in scene_struct:
        precompute_filter_options(scene_struct, metadata)
    candidates = set(object_idxs)
    return {filter_key: sorted(candidates & matched)
            for filter_key, matched in scene_struct['_filter_options'].items()}
def add_empty_filter_options(attribute_map, metadata, num_to_add):
    """Add *num_to_add* random filter keys that match NO object.

    Mutates *attribute_map* in place, inserting randomly drawn attribute
    tuples (each entry an attribute value or None) mapped to empty lists.
    Note: the caller is responsible for ensuring enough unused combinations
    exist, otherwise the loop cannot terminate.

    Bug fix: the original built ``k`` as a generator expression, so every
    candidate was a distinct generator object — ``k not in attribute_map``
    was always True and the map accumulated useless generator keys.  The
    key must be a tuple, matching the real filter keys.
    """
    if metadata['dataset'] == 'CLEVR-v1.0':
        attr_keys = ['Size', 'Color', 'Material', 'Shape']
    else:
        assert False, 'Unrecognized dataset'
    # Each attribute can also be None (wildcard).
    attr_vals = [metadata['types'][t] + [None] for t in attr_keys]
    if '_filter_options' in metadata:
        attr_vals = metadata['_filter_options']
    target_size = len(attribute_map) + num_to_add
    while len(attribute_map) < target_size:
        k = tuple(random.choice(v) for v in attr_vals)
        if k not in attribute_map:
            attribute_map[k] = []
def find_relate_filter_options(object_idx, scene_struct, metadata,
        unique=False, include_zero=False, trivial_frac=0.1):
    """Find (relationship, filter) pairs applicable relative to *object_idx*.

    For every spatial relationship of the scene and every precomputed filter,
    intersects the set of objects related to *object_idx* with the filter's
    matches.  "Trivial" combinations — where the filter adds no information
    because the intersection equals the full filter output — are mostly
    dropped; only a random fraction (controlled by *trivial_frac*) is mixed
    back in.

    Args:
        object_idx: index of the anchor object.
        scene_struct: scene dict with 'relationships' and (possibly cached)
            '_filter_options'.
        metadata: dataset metadata, used to build the filter map if needed.
        unique: keep only combinations matching exactly one object.
        include_zero: also keep combinations matching no object.
        trivial_frac: target fraction of trivial combinations in the result.
    """
    options = {}
    if '_filter_options' not in scene_struct:
        precompute_filter_options(scene_struct, metadata)
    # TODO: Right now this is only looking for nontrivial combinations; in some
    # cases I may want to add trivial combinations, either where the intersection
    # is empty or where the intersection is equal to the filtering output.
    trivial_options = {}
    for relationship in scene_struct['relationships']:
        related = set(scene_struct['relationships'][relationship][object_idx])
        for filters, filtered in scene_struct['_filter_options'].items():
            intersection = related & filtered
            # Trivial: the relationship constraint did not remove anything.
            trivial = (intersection == filtered)
            if unique and len(intersection) != 1: continue
            if not include_zero and len(intersection) == 0: continue
            if trivial:
                trivial_options[(relationship, filters)] = sorted(list(intersection))
            else:
                options[(relationship, filters)] = sorted(list(intersection))
    # Sample trivial options so they make up ~trivial_frac of the result:
    # num_trivial / (N + num_trivial) ≈ f  =>  num_trivial = N * f / (1 - f).
    N, f = len(options), trivial_frac
    num_trivial = int(round(N * f / (1 - f)))
    trivial_options = list(trivial_options.items())
    random.shuffle(trivial_options)
    for k, v in trivial_options[:num_trivial]:
        options[k] = v
    return options
def node_shallow_copy(node):
  """Copy a program node, keeping only the fields the generator uses
  ('type', 'inputs' and, when present, 'side_inputs')."""
  copied = {'type': node['type'], 'inputs': node['inputs']}
  if 'side_inputs' in node:
    copied['side_inputs'] = node['side_inputs']
  return copied
def other_heuristic(text, param_vals):
  """
  Post-processing heuristic to handle the word "other"
  """
  # Nothing to do when the text has neither keyword.
  if ' other ' not in text and ' another ' not in text:
    return text
  target_keys = {
    '<Z>', '<C>', '<M>', '<S>',
    '<Z2>', '<C2>', '<M2>', '<S2>',
  }
  # Only questions with exactly the two full attribute sets are handled.
  if param_vals.keys() != target_keys:
    return text
  key_pairs = [
    ('<Z>', '<Z2>'),
    ('<C>', '<C2>'),
    ('<M>', '<M2>'),
    ('<S>', '<S2>'),
  ]
  remove_other = False
  for first_key, second_key in key_pairs:
    first_val = param_vals.get(first_key, None)
    second_val = param_vals.get(second_key, None)
    # A pair of non-empty, differing values means the two objects are
    # already distinguishable, so "other"/"another" must be dropped.
    if first_val != '' and second_val != '' and first_val != second_val:
      print('other has got to go! %s = %s but %s = %s'
            % (first_key, first_val, second_key, second_val))
      remove_other = True
      break
  if not remove_other:
    return text
  # str.replace on an absent substring is a no-op, so no guards needed.
  text = text.replace(' other ', ' ')
  text = text.replace(' another ', ' a ')
  return text
def instantiate_templates_dfs(scene_struct, template, metadata, answer_counts,
                              synonyms, max_instances=None, verbose=False,
                              allow_degenerate=False):
  """
  Depth-first search over all ways of instantiating `template` against
  `scene_struct`, expanding one template node at a time and rejecting
  partial states whose answer is invalid or whose constraints fail.

  answer_counts is mutated in place (used for answer balancing).
  Returns (text_questions, structured_questions, answers).
  """
  param_name_to_type = {p['name']: p['type'] for p in template['params']}
  # A search state is a partially-built program plus the parameter values
  # chosen so far and a mapping from template node index to program index.
  initial_state = {
    'nodes': [node_shallow_copy(template['nodes'][0])],
    'vals': {},
    'input_map': {0: 0},
    'next_template_node': 1,
  }
  states = [initial_state]
  final_states = []
  while states:
    state = states.pop()

    def pretty_print_program(program):
      # Render the program right-to-left with bracketed side inputs.
      return " ".join(
        ["{}[{}]".format(x['type'], x['side_inputs'][0]) if x.get('side_inputs') else x['type']
         for x in program][::-1])
    if verbose: print(pretty_print_program(state['nodes']))

    # Check to make sure the current state is valid
    q = {'nodes': state['nodes']}
    outputs = qeng.answer_question(q, metadata, scene_struct, all_outputs=True)
    answer = outputs[-1]
    if answer == '__INVALID__':
      # print('wrong answer')
      continue

    # Check to make sure constraints are satisfied for the current state
    skip_state = False
    for constraint in template['constraints']:
      if constraint['type'] == 'NEQ':
        p1, p2 = constraint['params']
        v1, v2 = state['vals'].get(p1), state['vals'].get(p2)
        # NOTE(review): this skips when the two values DIFFER, which looks
        # inverted for a "not equal" constraint -- confirm intended semantics.
        if v1 is not None and v2 is not None and v1 != v2:
          if verbose:
            print('skipping due to NEQ constraint')
            print(constraint)
            print(state['vals'])
          skip_state = True
          break
      elif constraint['type'] == 'NULL':
        # The constrained parameter must have been left unset ('' or 'thing').
        p = constraint['params'][0]
        p_type = param_name_to_type[p]
        v = state['vals'].get(p)
        if v is not None:
          skip = False
          if p_type == 'Shape' and v != 'thing': skip = True
          if p_type != 'Shape' and v != '': skip = True
          if skip:
            if verbose:
              print('skipping due to NULL constraint')
              print(constraint)
              print(state['vals'])
            skip_state = True
            break
      elif constraint['type'] == 'OUT_NEQ':
        # The outputs of two program nodes must differ.
        i, j = constraint['params']
        i = state['input_map'].get(i, None)
        j = state['input_map'].get(j, None)
        if i is not None and j is not None and outputs[i] == outputs[j]:
          if verbose:
            print('skipping due to OUT_NEQ constraint')
            print(outputs[i])
            print(outputs[j])
          skip_state = True
          break
      else:
        assert False, 'Unrecognized constraint type "%s"' % constraint['type']
    if skip_state:
      continue

    # We have already checked to make sure the answer is valid, so if we have
    # processed all the nodes in the template then the current state is a valid
    # question, so add it if it passes our rejection sampling tests.
    if state['next_template_node'] == len(template['nodes']):
      # Dima: hardcode that the answer should be from 1 to 3
      if isinstance(answer, int) and answer is not False and answer not in [1, 2, 3]:
        continue
      # Use our rejection sampling heuristics to decide whether we should
      # keep this template instantiation
      # Dima: even stricter heuristic
      cur_answer_count = answer_counts[answer]
      answer_counts_sorted = sorted(answer_counts.values())
      # Reject if this answer is already over-represented vs the rarest one.
      if cur_answer_count > 1.1 * answer_counts_sorted[0]:
        if verbose:
          print('skipping due to enforced balance')
          print(answer)
          print(answer_counts)
        continue
      is_exist = 'exist' in template['nodes'][-1]['type']
      if not is_exist and not allow_degenerate:
        # Reject questions answerable without actually looking at the scene.
        degen = qeng.is_degenerate(q, metadata, scene_struct, answer=answer,
                                   verbose=verbose, check_uniqueness=False)
        if degen:
          if verbose: print('skip degenerate question')
          continue
      answer_counts[answer] += 1
      state['answer'] = answer
      final_states.append(state)
      if max_instances is not None and len(final_states) == max_instances:
        break
      continue

    # Otherwise fetch the next node from the template
    # Make a shallow copy so cached _outputs don't leak ... this is very nasty
    next_node = template['nodes'][state['next_template_node']]
    next_node = node_shallow_copy(next_node)

    # Filter-like template nodes are expanded into concrete filter chains.
    special_nodes = {
      'filter_unique', 'filter_count', 'filter_exist', 'filter',
      'relate_filter', 'relate_filter_unique', 'relate_filter_count',
      'relate_filter_exist',
    }
    if next_node['type'] in special_nodes:
      if next_node['type'].startswith('relate_filter'):
        unique = (next_node['type'] == 'relate_filter_unique')
        include_zero = (next_node['type'] == 'relate_filter_count'
                        or next_node['type'] == 'relate_filter_exist')
        filter_options = find_relate_filter_options(answer, scene_struct, metadata,
                                                    unique=unique, include_zero=include_zero)
      else:
        filter_options = find_filter_options(answer, scene_struct, metadata)
        if next_node['type'] == 'filter':
          # Remove null filter
          filter_options.pop((None, None, None, None), None)
        if next_node['type'] == 'filter_unique':
          # Get rid of all filter options that don't result in a single object
          filter_options = {k: v for k, v in filter_options.items()
                            if len(v) == 1}
        else:
          # Add some filter options that do NOT correspond to the scene
          if next_node['type'] == 'filter_exist':
            # For filter_exist we want an equal number that do and don't
            num_to_add = len(filter_options)
          elif next_node['type'] == 'filter_count' or next_node['type'] == 'filter':
            # For filter_count add nulls equal to the number of singletons
            num_to_add = sum(1 for k, v in filter_options.items() if len(v) == 1)
          add_empty_filter_options(filter_options, metadata, num_to_add)

      filter_option_keys = list(filter_options.keys())
      random.shuffle(filter_option_keys)
      for k in filter_option_keys:
        new_nodes = []
        cur_next_vals = {k: v for k, v in state['vals'].items()}
        next_input = state['input_map'][next_node['inputs'][0]]
        filter_side_inputs = next_node['side_inputs']
        if next_node['type'].startswith('relate'):
          param_name = next_node['side_inputs'][0]  # First one should be relate
          filter_side_inputs = next_node['side_inputs'][1:]
          param_type = param_name_to_type[param_name]
          assert param_type == 'Relation'
          # For relate_* options the key is (relationship, filter-tuple).
          param_val = k[0]
          k = k[1]
          new_nodes.append({
            'type': 'relate',
            'inputs': [next_input],
            'side_inputs': [param_val],
          })
          cur_next_vals[param_name] = param_val
          next_input = len(state['nodes']) + len(new_nodes) - 1
        for param_name, param_val in zip(filter_side_inputs, k):
          param_type = param_name_to_type[param_name]
          filter_type = 'filter_%s' % param_type.lower()
          if param_val is not None:
            # Emit one concrete filter node per constrained attribute.
            new_nodes.append({
              'type': filter_type,
              'inputs': [next_input],
              'side_inputs': [param_val],
            })
            cur_next_vals[param_name] = param_val
            next_input = len(state['nodes']) + len(new_nodes) - 1
          elif param_val is None:
            # Unconstrained attribute: record the "null" text value only.
            if metadata['dataset'] == 'CLEVR-v1.0' and param_type == 'Shape':
              param_val = 'thing'
            else:
              param_val = ''
            cur_next_vals[param_name] = param_val
        input_map = {k: v for k, v in state['input_map'].items()}
        extra_type = None
        if next_node['type'].endswith('unique'):
          extra_type = 'unique'
        if next_node['type'].endswith('count'):
          extra_type = 'count'
        if next_node['type'].endswith('exist'):
          extra_type = 'exist'
        if extra_type is not None:
          new_nodes.append({
            'type': extra_type,
            'inputs': [input_map[next_node['inputs'][0]] + len(new_nodes)],
          })
        input_map[state['next_template_node']] = len(state['nodes']) + len(new_nodes) - 1
        states.append({
          'nodes': state['nodes'] + new_nodes,
          'vals': cur_next_vals,
          'input_map': input_map,
          'next_template_node': state['next_template_node'] + 1,
        })
    elif 'side_inputs' in next_node:
      # If the next node has template parameters, expand them out
      # TODO: Generalize this to work for nodes with more than one side input
      assert len(next_node['side_inputs']) == 1, 'NOT IMPLEMENTED'

      # Use metadata to figure out domain of valid values for this parameter.
      # Iterate over the values in a random order; then it is safe to bail
      # from the DFS as soon as we find the desired number of valid template
      # instantiations.
      param_name = next_node['side_inputs'][0]
      param_type = param_name_to_type[param_name]
      param_vals = metadata['types'][param_type][:]
      random.shuffle(param_vals)
      for val in param_vals:
        input_map = {k: v for k, v in state['input_map'].items()}
        input_map[state['next_template_node']] = len(state['nodes'])
        cur_next_node = {
          'type': next_node['type'],
          'inputs': [input_map[idx] for idx in next_node['inputs']],
          'side_inputs': [val],
        }
        cur_next_vals = {k: v for k, v in state['vals'].items()}
        cur_next_vals[param_name] = val
        states.append({
          'nodes': state['nodes'] + [cur_next_node],
          'vals': cur_next_vals,
          'input_map': input_map,
          'next_template_node': state['next_template_node'] + 1,
        })
    else:
      # Parameter-free node: append it with remapped inputs and move on.
      input_map = {k: v for k, v in state['input_map'].items()}
      input_map[state['next_template_node']] = len(state['nodes'])
      next_node = {
        'type': next_node['type'],
        'inputs': [input_map[idx] for idx in next_node['inputs']],
      }
      states.append({
        'nodes': state['nodes'] + [next_node],
        'vals': state['vals'],
        'input_map': input_map,
        'next_template_node': state['next_template_node'] + 1,
      })

  # Actually instantiate the template with the solutions we've found
  text_questions, structured_questions, answers = [], [], []
  for state in final_states:
    structured_questions.append(state['nodes'])
    answers.append(state['answer'])
    text = random.choice(template['text'])
    for name, val in state['vals'].items():
      if val in synonyms:
        val = random.choice(synonyms[val])
      text = text.replace(name, val)
    text = ' '.join(text.split())
    text = replace_optionals(text)
    text = ' '.join(text.split())
    text = other_heuristic(text, state['vals'])
    text_questions.append(text)

  return text_questions, structured_questions, answers
def replace_optionals(s):
  """
  Each substring of s that is surrounded in square brackets is treated as
  optional and is removed with probability 0.5. For example the string

  "A [aa] B [bb]"

  could become any of

  "A aa B bb"
  "A B bb"
  "A aa B "
  "A B "

  with probability 1/4.
  """
  pat = re.compile(r'\[([^\[]*)\]')
  while True:
    m = pat.search(s)
    if m is None:
      break
    lo, hi = m.start(), m.end()
    # Keep the bracketed text (sans brackets) half the time, drop it otherwise.
    kept = m.group(1) if random.random() > 0.5 else ''
    s = s[:lo] + kept + s[hi:]
  return s
def main(args):
  """
  Generate questions for every input scene: load metadata, templates,
  scenes and synonyms, instantiate templates per scene with answer
  balancing, then write the questions JSON to args.output_questions_file.
  """
  if args.questions_per_template and args.reset_counts_every:
    raise ValueError("you can only either --questions_per_template or --reset_counts_every, not both")
  with open(args.metadata_file, 'r') as f:
    metadata = json.load(f)
    dataset = metadata['dataset']
    if dataset != 'CLEVR-v1.0':
      raise ValueError('Unrecognized dataset "%s"' % dataset)

  functions_by_name = {}
  for f in metadata['functions']:
    functions_by_name[f['name']] = f
  metadata['_functions_by_name'] = functions_by_name

  # Load templates from disk
  # Key is (filename, file_idx)
  num_loaded_templates = 0
  templates = {}
  if os.path.isfile(args.templates):
    template_files = [args.templates]
  else:
    template_files = [os.path.join(args.templates, fn)
                      for fn in os.listdir(args.templates)
                      if fn.endswith('.json')]
  for fn in template_files:
    with open(fn, 'r') as f:
      base = os.path.splitext(fn)[0]
      for i, template in enumerate(json.load(f)):
        num_loaded_templates += 1
        key = (fn, i)
        templates[key] = template
  print('Read %d templates from disk' % num_loaded_templates)

  def reset_counts():
    # Maps a template (filename, index) to the number of questions we have
    # so far using that template
    template_counts = {}
    # Maps a template (filename, index) to a dict mapping the answer to the
    # number of questions so far of that template type with that answer
    template_answer_counts = {}
    node_type_to_dtype = {n['name']: n['output'] for n in metadata['functions']}
    for key, template in templates.items():
      template_counts[key[:2]] = 0
      final_node_type = template['nodes'][-1]['type']
      final_dtype = node_type_to_dtype[final_node_type]
      answers = metadata['types'][final_dtype]
      if final_dtype == 'Bool':
        answers = [True, False]
      if final_dtype == 'Integer':
        # Counting answers are capped to 1..3 in this (CLOSURE) variant.
        if metadata['dataset'] == 'CLEVR-v1.0':
          answers = [1, 2, 3]
      template_answer_counts[key[:2]] = {}
      for a in answers:
        template_answer_counts[key[:2]][a] = 0
    return template_counts, template_answer_counts

  template_counts, template_answer_counts = reset_counts()

  # Read file containing input scenes
  all_scenes = []
  with open(args.input_scene_file, 'r') as f:
    scene_data = json.load(f)
    all_scenes = scene_data['scenes']
    scene_info = scene_data['info']
  begin = args.scene_start_idx
  if args.num_scenes > 0:
    end = args.scene_start_idx + args.num_scenes
    all_scenes = all_scenes[begin:end]
  else:
    all_scenes = all_scenes[begin:]

  # Read synonyms file
  with open(args.synonyms_json, 'r') as f:
    synonyms = json.load(f)

  questions = []
  scene_count = 0
  for i, scene in enumerate(all_scenes):
    # Stop early once every template has reached its quota.
    if args.questions_per_template and min(template_counts.values()) == args.questions_per_template:
      break
    scene_fn = scene['image_filename']
    scene_struct = scene
    print('starting image %s (%d / %d)'
          % (scene_fn, i + 1, len(all_scenes)))

    if args.reset_counts_every and scene_count % args.reset_counts_every == 0:
      print('resetting counts')
      template_counts, template_answer_counts = reset_counts()
    scene_count += 1

    # Order templates by the number of questions we have so far for those
    # templates. This is a simple heuristic to give a flat distribution over
    # templates.
    templates_items = list(templates.items())
    templates_items = sorted(templates_items,
                             key=lambda x: template_counts[x[0][:2]])
    num_instantiated = 0
    for (fn, idx), template in templates_items:
      print(template_answer_counts[(fn, idx)])
      if (args.questions_per_template is not None
          and template_counts[(fn, idx)] == args.questions_per_template):
        continue
      if args.verbose:
        print('trying template ', fn, idx)
      if args.time_dfs and args.verbose:
        tic = time.time()
      ts, qs, ans = instantiate_templates_dfs(
        scene_struct,
        template,
        metadata,
        template_answer_counts[(fn, idx)],
        synonyms,
        max_instances=args.instances_per_template,
        verbose=args.verbose,
        allow_degenerate=args.allow_degenerate)
      if args.time_dfs and args.verbose:
        toc = time.time()
        print('that took ', toc - tic)
      # The image index is the numeric suffix of the image filename.
      image_index = int(os.path.splitext(scene_fn)[0].split('_')[-1])
      for t, q, a in zip(ts, qs, ans):
        questions.append({
          'split': scene_info['split'],
          'image_filename': scene_fn,
          'image_index': image_index,
          'image': os.path.splitext(scene_fn)[0],
          'question': t,
          'program': q,
          'answer': a,
          'template_filename': fn,
          'question_family_index': idx,
          'question_index': len(questions),
        })
      if len(ts) > 0:
        if args.verbose:
          print('got one!')
        num_instantiated += 1
        template_counts[(fn, idx)] += 1
      elif args.verbose:
        print('did not get any =(')
      if num_instantiated >= args.templates_per_image:
        break

  # Change "side_inputs" to "value_inputs" in all functions of all functional
  # programs. My original name for these was "side_inputs" but I decided to
  # change the name to "value_inputs" for the public CLEVR release. I should
  # probably go through all question generation code and templates and rename,
  # but that could be tricky and take a while, so instead I'll just do it here.
  # To further complicate things, originally functions without value inputs did
  # not have a "side_inputs" field at all, and I'm pretty sure this fact is used
  # in some of the code above; however in the public CLEVR release all functions
  # have a "value_inputs" field, and it's an empty list for functions that take
  # no value inputs. Again this should probably be refactored, but the quick and
  # dirty solution is to keep the code above as-is, but here make "value_inputs"
  # an empty list for those functions that do not have "side_inputs". Gross.
  for q in questions:
    for f in q['program']:
      f['function'] = f['type']
      del f['type']
      if 'side_inputs' in f:
        f['value_inputs'] = f['side_inputs']
        del f['side_inputs']
      else:
        f['value_inputs'] = []
    # Normalize answers to strings (bool first: bool is a subclass of int).
    if isinstance(q['answer'], bool):
      q['answer'] = 'yes' if q['answer'] else 'no'
    if isinstance(q['answer'], int):
      q['answer'] = str(q['answer'])

  with open(args.output_questions_file, 'w') as f:
    print('Writing output to %s' % args.output_questions_file)
    json.dump({
      'info': scene_info,
      'questions': questions,
    }, f)
if __name__ == '__main__':
  args = parser.parse_args()
  if args.profile:
    # cProfile.run evaluates the string 'main(args)' in this module's
    # namespace, so the local name `args` must keep exactly this name.
    import cProfile
    cProfile.run('main(args)')
  else:
    main(args)
| 27,902
| 37.170999
| 138
|
py
|
CLOSURE
|
CLOSURE-master/scripts/rewrite_programs.py
|
import h5py
import os
import shutil
import sys
import json
import copy
import numpy
import time
import timeit
from vr.data import ClevrDataset
from vr.utils import load_vocab
def needs_shortcut(token):
  """Return True for program tokens whose module should receive an explicit
  'scene' shortcut input.

  filter_*/query*/same* modules need it because their "pointer" outputs need
  not contain attribute information; relate* because the pointers need not
  contain position information.
  """
  return token.startswith(('filter_', 'query', 'same', 'relate'))
def add_shortcuts(program, vocab):
  """Recursively rewrite a prefix-serialized program, inserting an explicit
  'scene' token as the first input of every module that needs a shortcut
  (see needs_shortcut).

  Returns (consumed, rewritten): the number of tokens of `program` that
  encode this subtree, and the rewritten token list for it.
  """
  head = program[0]
  token = vocab['program_idx_to_token'][head]
  arity = vocab['program_token_arity'][token]
  if arity == 0:
    return 1, [head]
  if arity == 1:
    consumed, child = add_shortcuts(program[1:], vocab)
    prefix = [head]
    if needs_shortcut(token):
      prefix.append(vocab['program_token_to_idx']['scene'])
    return 1 + consumed, prefix + child
  if arity == 2:
    n_left, left = add_shortcuts(program[1:], vocab)
    n_right, right = add_shortcuts(program[1 + n_left:], vocab)
    return 1 + n_left + n_right, [head] + left + right
  raise ValueError()
def rewrite_programs(src_dir, dst_dir):
  """
  Rewrite the preprocessed question programs found in `src_dir` by inserting
  'scene' shortcut inputs (see add_shortcuts), and write the results plus an
  updated vocab (with bumped arities) to `dst_dir`.
  """
  if not os.path.exists(dst_dir):
    os.mkdir(dst_dir)
  vocab = load_vocab(os.path.join(src_dir, 'vocab.json'))
  # Keep the pre-modification vocab for rewriting: add_shortcuts must see
  # the ORIGINAL arities while we change `vocab` in place below.
  old_vocab = copy.deepcopy(vocab)
  arity = vocab['program_token_arity']
  # NOTE(review): program_vocab and question_vocab are never used below.
  program_vocab = vocab['program_idx_to_token']
  question_vocab = vocab['question_idx_to_token']
  # Step 1: change the arity of filters
  for func in arity.keys():
    if needs_shortcut(func):
      arity[func] = 2
  with open(os.path.join(dst_dir, 'vocab.json'), 'w') as dst:
    json.dump(vocab, dst)
  for part in ['train', 'val', 'test']:
    src_questions = "{}/{}_questions.h5".format(src_dir, part)
    dst_questions = "{}/{}_questions.h5".format(dst_dir, part)
    # NOTE(review): no explicit mode -- h5py's default ('r' in recent
    # versions) is relied on here; confirm against the pinned h5py version.
    with h5py.File(src_questions) as src_file:
      programs = src_file['programs']
      prog_wshortcuts = []
      for i in range(len(programs)):
        prog_wshortcuts.append(add_shortcuts(programs[i], old_vocab)[1])
    new_max_program_len = max(len(p) for p in prog_wshortcuts)
    # Copy the whole file, then replace only the 'programs' dataset
    # (rows shorter than the max are left zero-padded by create_dataset).
    shutil.copyfile(src_questions, dst_questions)
    with h5py.File(dst_questions, 'a') as dst_file:
      del dst_file['programs']
      program_dataset = dst_file.create_dataset(
        'programs', (len(prog_wshortcuts), new_max_program_len), dtype=numpy.int64)
      for i in range(len(prog_wshortcuts)):
        program_dataset[i, :len(prog_wshortcuts[i])] = prog_wshortcuts[i]
if __name__ == '__main__':
  # Usage: rewrite_programs.py <src_dir> <dst_dir>
  src_dir = sys.argv[1]
  dst_dir = sys.argv[2]
  rewrite_programs(src_dir, dst_dir)
| 2,901
| 35.734177
| 99
|
py
|
CLOSURE
|
CLOSURE-master/scripts/run_model.py
|
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import argparse
import json
import random
import shutil
from termcolor import colored
import time
from tqdm import tqdm
import sys
import os
sys.path.insert(0, os.path.abspath('.'))
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision
import numpy as np
import h5py
from scipy.misc import imread, imresize, imsave
import vr.utils as utils
import vr.programs
from vr.data import ClevrDataset, ClevrDataLoader
from vr.ns_vqa.clevr_executor import ClevrExecutor
from vr.ns_vqa.parser import Seq2seqParser
from vr.preprocess import tokenize, encode
from vr.models import *
# Command-line interface for evaluating a program generator / execution
# engine pair (or a symbolic executor) on preprocessed CLEVR-style data.
parser = argparse.ArgumentParser()
parser.add_argument('--program_generator', default=None)
parser.add_argument('--execution_engine', default=None)
parser.add_argument('--baseline_model', default=None)
parser.add_argument('--debug_every', default=float('inf'), type=float)
parser.add_argument('--use_gpu', default=torch.cuda.is_available(), type=int)

# For running on a preprocessed dataset
parser.add_argument('--data_dir', default=None, type=str)
parser.add_argument('--part', default='val', type=str)

# This will override the vocab stored in the checkpoint;
# we need this to run CLEVR models on human data
parser.add_argument('--vocab_json', default=None)

# For running on a single example
parser.add_argument('--question', default=None)
parser.add_argument('--image', default='img/CLEVR_val_000017.png')
parser.add_argument('--cnn_model', default='resnet101')
parser.add_argument('--cnn_model_stage', default=3, type=int)
parser.add_argument('--image_width', default=224, type=int)
parser.add_argument('--image_height', default=224, type=int)

parser.add_argument('--enforce_clevr_vocab', default=1, type=int)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--num_samples', default=None, type=int)
parser.add_argument('--num_last_words_shuffled', default=0, type=int)  # -1 for all shuffled
parser.add_argument('--q_family', type=int, action='append')
parser.add_argument('--sample_argmax', type=int, default=1)
parser.add_argument('--temperature', default=None, type=float)

# FiLM models only
parser.add_argument('--gamma_option', default='linear',
  choices=['linear', 'sigmoid', 'tanh', 'exp', 'relu', 'softplus'])
parser.add_argument('--gamma_scale', default=1, type=float)
parser.add_argument('--gamma_shift', default=0, type=float)
parser.add_argument('--gammas_from', default=None)  # Load gammas from file
parser.add_argument('--beta_option', default='linear',
  choices=['linear', 'sigmoid', 'tanh', 'exp', 'relu', 'softplus'])
parser.add_argument('--beta_scale', default=1, type=float)
parser.add_argument('--beta_shift', default=0, type=float)
parser.add_argument('--betas_from', default=None)  # Load betas from file

# If this is passed, then save all predictions to this file
parser.add_argument('--output_h5', default=None)
parser.add_argument('--dump_module_info', action='store_true')
parser.add_argument('--output_preds', default=None)
parser.add_argument('--output_viz_dir', default='img/')
parser.add_argument('--output_program_stats_dir', default=None)

# Mutable module-level state for debugging hooks.
grads = {}
programs = {}  # NOTE: Useful for zero-shot program manipulation when in debug mode
def main(args):
  """
  Load the program generator and execution engine selected by `args`, build
  a data loader over the chosen dataset part, and evaluate with run_batch.
  """
  if not args.program_generator:
    args.program_generator = args.execution_engine
  input_question_h5 = os.path.join(args.data_dir, '{}_questions.h5'.format(args.part))
  input_features_h5 = os.path.join(args.data_dir, '{}_features.h5'.format(args.part))
  input_scenes = os.path.join(args.data_dir, '{}_scenes.json'.format(args.part))
  vocab = load_vocab(args)
  pg, _ = utils.load_program_generator(args.program_generator)
  if pg:
    pg.save_activations = True
    if args.temperature:
      # Temperature-scale the decoder logits by rescaling its weights.
      pg.decoder_linear.weight.data /= args.temperature
      pg.decoder_linear.bias.data /= args.temperature
  if args.execution_engine:
    ee, _ = utils.load_execution_engine(
      args.execution_engine, verbose=False)
    ee.noise_enabled = False
  else:
    # No neural engine given: fall back to the symbolic executor.
    ee = ClevrExecutor(vocab)
  dtype = torch.FloatTensor
  if args.use_gpu == 1:
    dtype = torch.cuda.FloatTensor
  loader_kwargs = {
    'question_h5': input_question_h5,
    'feature_h5': input_features_h5,
    # The symbolic executor needs ground-truth scenes instead of features.
    'scene_path': input_scenes if isinstance(ee, ClevrExecutor) else None,
    'vocab': vocab,
    'batch_size': args.batch_size,
  }
  if args.num_samples is not None and args.num_samples > 0:
    loader_kwargs['max_samples'] = args.num_samples
  if args.q_family:
    loader_kwargs['question_families'] = args.q_family
  with ClevrDataLoader(**loader_kwargs) as loader:
    with torch.no_grad():
      run_batch(args, pg, ee, loader, dtype)
def run_batch(args, pg, ee, loader, dtype):
  """
  Run the program generator `pg` and execution engine `ee` over every batch
  from `loader`, print accuracy statistics (answer accuracy, and program
  accuracy / ground-truth program likelihood for seq2seq generators), and
  dump per-sample results to an HDF5 file.

  Parameters
  ----------
  args : argparse.Namespace from this script's parser
  pg : program generator module (FiLMGen / Seq2seqParser / Seq2SeqAtt) or None
  ee : execution engine (ModuleNet / FiLMedNet / MAC / ...) or a symbolic
       ClevrExecutor
  loader : iterable yielding (questions, images, feats, scenes, answers, programs)
  dtype : torch tensor type selecting CPU vs GPU
  """
  if pg:
    pg.type(dtype)
    pg.eval()
  if ee and not isinstance(ee, ClevrExecutor):
    ee.type(dtype)
    ee.eval()

  # Per-batch accumulators, concatenated after the loop.
  all_scores = []
  all_probs = []
  all_preds = []
  all_correct = []
  all_programs = []
  all_groundtruth_programs = []
  all_questions = []
  all_correct_programs = []
  all_seq2seq_attentions = []
  # BUG FIX: these two were referenced under --dump_module_info but never
  # initialized, raising NameError on the first batch.
  all_module_outputs = []
  all_mod_id_targets = []

  num_samples = 0
  total_nll = 0
  total_prob = 0
  start = time.time()
  for batch in tqdm(loader):
    assert(not pg or not pg.training)
    assert(isinstance(ee, ClevrExecutor) or not ee.training)

    questions, images, feats, scenes, answers, programs = batch
    questions_var = questions[0].type(dtype).long()
    # Trim trailing all-padding question columns.
    questions_var = questions_var[:, :(questions_var.sum(0) > 0).sum()]
    feats_var = feats.type(dtype)
    programs_var = programs.to(feats_var.device)

    question_repr = None
    programs_pred = None

    # PG: produce either a question representation (FiLM) or a predicted
    # program (seq2seq parsers).
    if isinstance(pg, FiLMGen):
      question_repr = pg(questions_var)
    if isinstance(pg, (Seq2seqParser, Seq2SeqAtt)):
      programs_pred, _ = pg(questions_var, argmax=True)
      # Pad programs/questions to fixed widths (30 / 50) so batches concatenate.
      all_groundtruth_programs.append(F.pad(programs_var, (0, 30 - programs_var.shape[1], 0, 0)))
      all_programs.append(F.pad(programs_pred, (0, 30 - programs_pred.shape[1], 0, 0)))
      all_questions.append(F.pad(questions_var, (0, 50 - questions_var.shape[1], 0, 0)))
      for _ in range(30 - len(pg._attn_weights)):
        pg._attn_weights.append(torch.zeros_like(pg._attn_weights[0]))
      attn_weights = [F.pad(a, (0, 50 - a.shape[2], 0, 0, 0, 0)) for a in pg._attn_weights]
      all_seq2seq_attentions.append(torch.cat(attn_weights, 1))
      nlls = pg.log_likelihood(questions_var, programs_var)
      total_nll += nlls.sum()
      total_prob += torch.exp(-nlls).sum()
    else:
      # No parser: execute the ground-truth programs.
      programs_pred = programs_var

    # EE: assemble positional args depending on the engine type.
    # arg 1: symbolic executor consumes scenes, neural engines consume features
    if isinstance(ee, ClevrExecutor):
      pos_args = [scenes]
    else:
      pos_args = [feats_var]
    # arg 2: program-based engines take programs, FiLM-style take question repr
    if isinstance(ee, (ModuleNet, ClevrExecutor)):
      pos_args.append(programs_pred)
    else:
      pos_args.append(question_repr)
    # kwargs
    kwargs = ({'save_activations': True}
              if isinstance(ee, (FiLMedNet, ModuleNet, MAC))
              else {})
    if isinstance(ee, ModuleNet) and ee.learn_control:
      kwargs['question'] = question_repr
    result = ee(*pos_args, **kwargs)

    # Unpack outputs: ModuleNet returns a tuple, the symbolic executor
    # returns answer indices directly, everything else returns scores.
    preds = scores = None
    if isinstance(ee, ModuleNet):
      scores, _2, mod_id_targets = result
    elif isinstance(ee, ClevrExecutor):
      preds = result
    else:
      scores = result

    # compute predictions
    if preds is None:
      probs = F.softmax(scores, dim=1)
      _, preds = scores.data.cpu().max(1)
      # BUG FIX: scores/probs were declared and written to the output file
      # below, but never accumulated, so those datasets were always dropped.
      all_scores.append(scores.data.cpu().clone())
      all_probs.append(probs.data.cpu().clone())
    all_preds.append(preds.cpu().clone())
    all_correct.append(preds == answers)
    if programs_pred is not None:
      # A program is "correct" iff it matches the ground truth over the
      # common (truncated) length.
      min_length = min(programs_var.shape[1], programs_pred.shape[1])
      programs_pred = programs_pred[:, :min_length]
      programs_var = programs_var[:, :min_length]
      correct_programs = (programs_pred == programs_var).int().sum(1) == min_length
      all_correct_programs.append(correct_programs.cpu().clone())
    if args.dump_module_info:
      all_module_outputs.append(ee.module_outputs.cpu().detach())
      all_mod_id_targets.append(mod_id_targets.cpu().detach())
    num_samples += preds.size(0)

  num_correct = torch.cat(all_correct, 0).sum().item()
  acc = float(num_correct) / num_samples
  print('Got %d / %d = %.2f correct' % (num_correct, num_samples, 100 * acc))
  if all_correct_programs:
    num_correct_programs = torch.cat(all_correct_programs, 0).sum().item()
    prog_acc = float(num_correct_programs) / num_samples
    print('Got %d / %d = %.2f programs correct' % (num_correct_programs, num_samples, 100 * prog_acc))
  if total_nll:
    print("GT program NLL: {}".format(total_nll / num_samples))
    print("Average probability of sampling a GT program: {}".format(total_prob / num_samples))
  # BUG FIX: elapsed time was computed as (start - time.time()), which is
  # always negative; the operands were swapped.
  print('%.2fs to evaluate' % (time.time() - start))

  model = args.execution_engine if args.execution_engine else args.program_generator
  output_path = ('output_' + args.part + "_" + model.split('.')[0].replace('/', '_') + ".h5"
                 if not args.output_h5
                 else args.output_h5)
  print('Writing output to "%s"' % output_path)
  with h5py.File(output_path, 'w') as fout:
    fout.create_dataset('correct', data=torch.cat(all_correct, 0).numpy())
    if all_scores:
      fout.create_dataset('scores', data=torch.cat(all_scores, 0).numpy())
      fout.create_dataset('probs', data=torch.cat(all_probs, 0).numpy())
    if all_correct_programs:
      fout.create_dataset('correct_programs', data=torch.cat(all_correct_programs, 0).numpy())
    if all_seq2seq_attentions:
      fout.create_dataset('seq2seq_attentions', data=torch.cat(all_seq2seq_attentions, 0).cpu().numpy())
    if all_programs:
      fout.create_dataset('programs', data=torch.cat(all_programs, 0).cpu().numpy())
    if all_groundtruth_programs:
      fout.create_dataset('groundtruth_programs', data=torch.cat(all_groundtruth_programs, 0).cpu().numpy())
    if all_questions:
      fout.create_dataset('questions', data=torch.cat(all_questions, 0).cpu().numpy())

  if args.output_preds is not None:
    # NOTE(review): `vocab` is not defined in this scope (it likely should be
    # passed in or loaded via load_vocab(args)), and all_preds holds per-batch
    # tensors rather than scalar indices -- confirm before relying on this.
    all_preds_strings = []
    for i in range(len(all_preds)):
      all_preds_strings.append(vocab['answer_idx_to_token'][all_preds[i]])
    save_to_file(all_preds_strings, args.output_preds)

  if args.debug_every <= 1:
    import pdb  # BUG FIX: pdb was never imported at module level
    pdb.set_trace()
  return
def load_vocab(args):
  """Load the vocab stored in the first available checkpoint, preferring
  baseline model, then program generator, then execution engine."""
  for candidate in (args.baseline_model, args.program_generator,
                    args.execution_engine):
    if candidate is not None:
      return utils.load_cpu(candidate)['vocab']
  # No checkpoint given at all: preserve the original behavior of passing
  # None through to the loader.
  return utils.load_cpu(None)['vocab']
def save_grad(name):
  """Return a backward hook that stashes the received gradient in the
  module-level `grads` dict under `name` (for debugging/inspection)."""
  def _store(grad):
    grads[name] = grad
  return _store
def save_to_file(text, filename):
  """Write the strings in `text` to `filename` (UTF-8), one per line,
  with a trailing newline at the end of the file."""
  payload = '\n'.join(text) + '\n'
  with open(filename, mode='wt', encoding='utf-8') as out:
    out.write(payload)
def get_index(l, index, default=-1):
  """Return the position of `index` in list `l`, or `default` when absent."""
  try:
    position = l.index(index)
  except ValueError:
    return default
  return position
if __name__ == '__main__':
  # Parse CLI arguments and run evaluation.
  args = parser.parse_args()
  main(args)
| 11,896
| 36.648734
| 138
|
py
|
sarpy
|
sarpy-master/setup.py
|
"""
Setup module for SarPy.
"""
import os
from setuptools import setup, find_packages
# Get the long description from the README file
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
with open(os.path.join(here, 'README.md'), 'r') as f:
long_description = f.read()
# Get the relevant setup parameters from the package
parameters = {}
with open(os.path.join(here, 'sarpy', '__about__.py'), 'r') as f:
exec(f.read(), parameters)
def my_package_data():
    """Collect the package_data glob patterns ('<rel_dir>/*.xsd') for every
    directory under the sarpy package that contains at least one .xsd
    schema file."""

    def find_dirs(init_dir, start, the_list):
        # Walk the subtree; record a glob for each directory holding schemas.
        for root, dirs, files in os.walk(os.path.join(init_dir, start)):
            if not any(os.path.splitext(fil)[1] == '.xsd' for fil in files):
                continue
            rel_dir = root.replace('\\', '/')[len(init_dir):]
            if rel_dir.startswith('/'):
                rel_dir = rel_dir[1:]
            if rel_dir.endswith('/'):
                rel_dir = rel_dir[:-1]
            the_list.append(rel_dir + '/*.xsd')

    package_list = []
    find_dirs('sarpy', 'io/complex/sicd_schema/', package_list)
    find_dirs('sarpy', 'io/phase_history/cphd_schema/', package_list)
    find_dirs('sarpy', 'io/phase_history/crsd_schema/', package_list)
    find_dirs('sarpy', 'io/product/sidd_schema/', package_list)
    find_dirs('sarpy', 'annotation/afrl_rde_schema/', package_list)
    return package_list
def my_test_suite():
import unittest
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tests', top_level_dir='.')
return test_suite
# Package metadata largely comes from sarpy/__about__.py (see `parameters`).
setup(name=parameters['__title__'],
      version=parameters['__version__'],
      description=parameters['__summary__'],
      long_description=long_description,
      long_description_content_type='text/markdown',
      packages=find_packages(exclude=('*tests*', )),
      include_package_data=True,
      # Ship the XSD schema files discovered on disk.
      package_data={'sarpy': my_package_data()},
      url=parameters['__url__'],
      author=parameters['__author__'],
      author_email=parameters['__email__'],  # The primary POC
      install_requires=['numpy>=1.11.0', 'scipy'],
      zip_safe=False,  # Use of __file__ and __path__ in some code makes it unusable from zip
      test_suite="setup.my_test_suite",
      tests_require=['pillow', 'lxml>=4.1.1', 'matplotlib', 'h5py', 'smart_open[http]', 'pytest>=3.3.2', 'networkx>=2.5', 'shapely>=1.6.4'],
      classifiers=[
          'Development Status :: 4 - Beta',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: 3.9',
          'Programming Language :: Python :: 3.10'
      ],
      platforms=['any'],
      license='MIT')
| 3,049
| 36.195122
| 140
|
py
|
sarpy
|
sarpy-master/examples/polygon_example.py
|
"""
Basic polygon example - this needs to be reworked
"""
import numpy
from matplotlib import pyplot
import time
from sarpy.geometry.geometry_elements import Polygon
def generate_random_polygon(segment_count=12):
    """
    Generate the coordinates for a random polygon going from (-1, 0) to (1, 0) and back.
    It will be contained in square [-1, 1] x [-1, 1].

    Parameters
    ----------
    segment_count : int

    Returns
    -------
    numpy.ndarray
        Closed ring of shape (2*segment_count + 1, 2); first and last vertex are (-1, 0).
    """

    out = numpy.zeros((2*segment_count + 1, 2), dtype=numpy.float64)
    # lower branch: x runs from -1 up to 1 in <segment_count> random steps
    lower_steps = numpy.random.rand(segment_count + 1)
    lower_steps[0] = 0
    lower_cumulative = numpy.cumsum(lower_steps)
    out[:segment_count + 1, 0] = -1 + 2*lower_cumulative/lower_cumulative[-1]
    # upper branch: x runs from 1 back down to -1 in <segment_count> random steps
    upper_cumulative = numpy.cumsum(numpy.random.rand(segment_count))
    out[segment_count + 1:, 0] = 1 - 2*upper_cumulative/upper_cumulative[-1]
    # lower branch gets negative y values, upper branch gets positive y values
    out[1:segment_count, 1] = -numpy.random.rand(segment_count - 1)
    out[segment_count + 1:2*segment_count, 1] = numpy.random.rand(segment_count - 1)
    return out
def basic_check():
    """
    Example for checks on a basic polygon.

    Returns
    -------
    None
    """

    # create our polygon object with coordinates bounded by square [0, 1]x[-1, 1]
    coords = generate_random_polygon()
    poly = Polygon(coordinates=[coords, ])

    #############################
    # perform random samples check
    samples = 10000
    # uniform samples over [-1.1, 1.1] x [-1.1, 1.1], slightly larger than the polygon bounds
    pts = 2.2*numpy.random.rand(samples, 2) - 1.1
    start = time.time()
    in_poly_condition = poly.contain_coordinates(pts[:, 0], pts[:, 1])
    lapsed = time.time() - start
    print('basic poly: lapsed = {}, lapsed/point = {}'.format(lapsed, lapsed/samples))

    ###########################
    # perform grid check
    grid_samples = 1001
    x_grid = numpy.linspace(-1.1, 1.1, grid_samples)
    y_grid = numpy.linspace(-1.1, 1.1, grid_samples)
    start = time.time()
    in_poly_condition2 = poly.grid_contained(x_grid[:-1], y_grid[:-1])
    lapsed = time.time() - start
    print('basic poly: lapsed = {}, lapsed/point = {}'.format(lapsed, lapsed/((grid_samples - 1)**2)))

    #############################
    # visualize results
    fig, axs = pyplot.subplots(nrows=2, ncols=1, sharex='col', sharey='col')
    fig.suptitle('Basic polygon example')
    # top panel: scatter samples, red = inside, blue = outside
    axs[0].scatter(pts[in_poly_condition, 0], pts[in_poly_condition, 1], color='r', marker='.', s=16)
    axs[0].scatter(pts[~in_poly_condition, 0], pts[~in_poly_condition, 1], color='b', marker='.', s=16)
    axs[0].plot(coords[:, 0], coords[:, 1], 'k-')
    # bottom panel: grid containment mask with the polygon boundary overlaid
    y2d, x2d = numpy.meshgrid(y_grid, x_grid, indexing='xy')
    axs[1].pcolormesh(x2d, y2d, in_poly_condition2, cmap='jet')
    axs[1].plot(coords[:, 0], coords[:, 1], 'k-', lw=2, zorder=99)
    pyplot.show()
def compound_poly_check():
    """
    Example for compound polygon with a hole in it.

    Returns
    -------
    None
    """

    # create our polygon object with coordinates bounded by square [0, 1]x[-1, 1];
    # the second coordinate ring defines the hole
    outer_coords = numpy.array([
        [-1, 0], [-0.5, -1], [0.5, -1], [1, 0], [0.5, 1], [-0.5, 1], [-1, 0], ], dtype='float64')
    inner_coords = 0.5*generate_random_polygon()
    poly = Polygon(coordinates=[outer_coords, inner_coords])

    #############################
    # perform random samples check
    samples = 10000
    pts = 2.2*numpy.random.rand(samples, 2) - 1.1
    start = time.time()
    in_poly_condition = poly.contain_coordinates(pts[:, 0], pts[:, 1])
    lapsed = time.time() - start
    print('compound poly: lapsed = {}, lapsed/point = {}'.format(lapsed, lapsed/samples))

    ###########################
    # perform grid check
    grid_samples = 1001
    x_grid = numpy.linspace(-1.1, 1.1, grid_samples)
    y_grid = numpy.linspace(-1.1, 1.1, grid_samples)
    start = time.time()
    in_poly_condition2 = poly.grid_contained(x_grid[:-1], y_grid[:-1])
    lapsed = time.time() - start
    print('compound poly: lapsed = {}, lapsed/point = {}'.format(lapsed, lapsed/((grid_samples - 1)**2)))

    #############################
    # visualize results
    fig, axs = pyplot.subplots(nrows=2, ncols=1, sharex='col', sharey='col')
    fig.suptitle('Compound polygon example')
    # top panel: scatter samples, red = inside, blue = outside (hole counts as outside)
    axs[0].scatter(pts[in_poly_condition, 0], pts[in_poly_condition, 1], color='r', marker='.', s=16)
    axs[0].scatter(pts[~in_poly_condition, 0], pts[~in_poly_condition, 1], color='b', marker='.', s=16)
    axs[0].plot(outer_coords[:, 0], outer_coords[:, 1], 'k-')
    axs[0].plot(inner_coords[:, 0], inner_coords[:, 1], 'k-')
    # bottom panel: grid containment mask with both boundaries overlaid
    y2d, x2d = numpy.meshgrid(y_grid, x_grid, indexing='xy')
    axs[1].pcolormesh(x2d, y2d, in_poly_condition2, cmap='jet')
    axs[1].plot(outer_coords[:, 0], outer_coords[:, 1], 'k-')
    axs[1].plot(inner_coords[:, 0], inner_coords[:, 1], 'k-')
    pyplot.show()
if __name__ == '__main__':
    # run both demonstrations back to back
    basic_check()
    compound_poly_check()
| 5,054
| 34.104167
| 106
|
py
|
sarpy
|
sarpy-master/examples/first_example.py
|
"""
Basic First Example
===================
This indicates the basic functionality for reading a complex format data set. It
is intended that this script will be read, and the relevant portions run
individually.
Learning Python
----------------
It seems somewhat common for some users of sarpy to be experts in SAR, but not
well versed with Python. If this describes you (and even if it doesn't), the
built-in `help()` function can often help clear up confusion found during an
interactive Python session.
Check out a given module, class, or function using its path
>>> help('sarpy')
or
>>> help('sarpy.io.complex.sicd.SICDReader')
or, check out a variable that has been defined somehow
>>> from sarpy.io.complex.converter import open_complex
>>> reader = open_complex('<path to file>')
>>> help(reader)
"""
"""
General file opening
--------------------
Open a **complex format file** using a general purpose opener from a specified
file name.
This general purpose opener iterates over the reader objects defined in
sarpy.io.complex, trying each and attempting to catch exceptions. It returns
the first one that works, or raises an exception if none of the readers work.
Note that this exception catching process, though we attempt to make everything
completely robust, can sometimes confusingly hide errors in the file identification,
parsing and/or interpretation.
It is important to note that this opener will not open any file that is not
**complex format** (like a WBID or detected image).
"""
# general purpose opener - returns the first reader implementation that
# successfully claims the file
from sarpy.io.complex.converter import open_complex

reader = open_complex('<path to file>')
# this will return an instance of one of the reader classes defined in the modules in sarpy.io.complex
print(type(reader))
"""
Direct file opening
-------------------
If you know the file type of your file, you may want to use the correct reader
class directly, especially if the general opener is not working.
"""
# open directly with the format-specific reader class
from sarpy.io.complex.sicd import SICDReader

reader = SICDReader('<path to file>')  # same class as referenced above
"""
SICD metadata structure
-----------------------
Access the SICD structure or tuple of structures associated with the reader.
Note that the sicd structure is defined using elements of sarpy.io.complex.sicd_elements
A sicd file will necessarily be composed of a single image, but other file formats
(like sentinel or radarsat) often contain multiple images combined into a single
package (i.e. multiple polarizations or other aggregate collections).
"""
# nebulous contents - this will be an instance of the sicd structure, or a tuple
# of sicd structures
nebulous_contents = reader.sicd_meta
# Unified access - this will always be a tuple of sicd structures,
# with one sicd structure per image
sicd_tuple = reader.get_sicds_as_tuple()
the_sicd = sicd_tuple[0]  # access the desired sicd structure
# provide a human readable, if long, contents to terminal
print(the_sicd)
# get xml string representation
xml_string = the_sicd.to_xml_string(tag='SICD')
# get json friendly dict representation
dict_representation = the_sicd.to_dict()
# access field values - nested attribute access mirrors the xml structure
print(the_sicd.CollectionInfo.CollectorName)
"""
Read complex pixel data
-----------------------
The recommended methodology uses slice notation. The basic syntax is as:
>>> data = reader[row_start:row_end:row_step, col_start:col_end:col_step, image_index=0]
"""
# in the event of a single image segment
all_data = reader[:]  # or reader[:, :] - reads all data
decimated_data = reader[::10, ::10]  # reads every 10th pixel
# in the event of multiple image segments, use another slice index like
all_data = reader[:, :, 0]
"""
Opening remote file
-------------------
The SICD reader (and also SIDD reader) have been implemented to accept binary file-like
objects, specifically intended to enable
**Speed/efficiency:** Files read using the file system (i.e. via file name
or local file-like object) are read efficiently via numpy memory map. Reading
across a network file system, commonly encountered as reading from a file-share
drive mounted to your local system, maintains the efficiency of numpy memory map
usage, but the speed will be impacted (perhaps significantly) by network latency.
It should be noted that the flexibility of reading using a file-like object comes
at a significant efficiency and speed cost, particularly for reading decimated or
down-selected data. A numpy memory map can not be utilized (at least as of May 2021)
for a non-file object, and reading/interpreting data becomes a fully manual
and non-optimized process. The entire continuous chunk of data containing the
desired segment of data will be read, then down-selected. This is to accommodate
for the overhead of the connection request for remote reading - simple bench marks
indicate that the bottleneck for performing a remote read is clearly the connection
request, and presents no good opportunity for clear optimization.
"""
# for the purposes of general purpose example, we reference a basic example sicd
# file hosted for the SIX project usage. It is recommended to use local files, as
# described below
# NOTE(review): smart_open is a third-party dependency, used here only for the
# remote-reading demonstration
import smart_open

file_object = smart_open.open(
    'https://six-library.s3.amazonaws.com/sicd_example_RMA_RGZERO_RE32F_IM32F_cropped_multiple_image_segments.nitf',
    mode='rb',  # must be opened in binary mode
    buffering=4*1024*1024)  # it has been observed that setting a manual buffer size may help
"""
Basic data plot
---------------
Show some basic plots of the data.
**Note:** the sarpy_apps project provides robust interactive tools.
"""
from matplotlib import pyplot
from sarpy.visualization.remap import Density

# remap the complex data into a display-friendly range (Density remap)
remap_function = Density()
fig, axs = pyplot.subplots(nrows=1, ncols=1, figsize=(5, 5))
axs.imshow(remap_function(all_data), cmap='gray')
pyplot.show()
"""
Convert to SICD format
----------------------
Convert a complex dataset, in any format handled by sarpy, to SICD
"""
from sarpy.io.complex.converter import conversion_utility

# convert the given complex file, writing SICD output into the directory
conversion_utility('<complex format file>', '<output_directory>')
| 6,047
| 33.758621
| 116
|
py
|
sarpy
|
sarpy-master/tests/test_class_string.py
|
from sarpy.utils.review_class import check_classification
import unittest
class TestClassString(unittest.TestCase):
    """Verify that every sarpy module declares a classification string."""

    def test_class_str(self):
        # the sentinel key collects modules lacking a classification definition
        results_dict = check_classification('sarpy')
        key = '__NO_CLASSIFICATION__'
        if key in results_dict:
            raise ValueError(
                'The following modules have no classification string defined {}'.format(results_dict[key]))
| 408
| 33.083333
| 106
|
py
|
sarpy
|
sarpy-master/tests/__init__.py
|
import os
import logging

# Root directory for test data files, configured via environment variable.
parent_path = os.environ.get('SARPY_TEST_PATH', None)
# the literal string 'NONE' is treated the same as an unset variable
if parent_path == 'NONE':
    parent_path = None
if parent_path is not None:
    parent_path = os.path.expanduser(parent_path)
# fail fast on a mis-configured environment
if parent_path is not None and not os.path.isdir(parent_path):
    raise IOError('SARPY_TEST_PATH is given as {}, but is not a directory'.format(parent_path))
def parse_file_entry(entry, default='absolute'):
    """
    Evaluate input as a path for a file used in a unit test.

    Parameters
    ----------
    entry : None|dict
        dict of the form {'path': <value>, 'path_type': 'relative' or 'absolute'}
    default : str
        The default value for 'path_type', if it is not provided.

    Returns
    -------
    None|str
        The absolute path if the evaluated path exists, or `None` if not.

    Raises
    ------
    ValueError
        If `entry` is neither `None` nor a dict, or 'path_type' is unrecognized.
    KeyError
        If the dict has no 'path' key.
    """

    if entry is None:
        return None
    if not isinstance(entry, dict):
        raise ValueError('Got unexpected input.')
    if 'path' not in entry:
        # fixed message: the required key is "path", not "entry"
        raise KeyError('Input must have key "path"')

    path_type = entry.get('path_type', default).lower()
    if path_type == 'absolute':
        the_file = os.path.expanduser(entry['path'])
    elif path_type == 'relative':
        if parent_path is None:
            logging.warning('Environment variable SARPY_TEST_PATH unset, but relative path identified in unit test')
            the_file = None
        else:
            the_file = os.path.join(parent_path, entry['path'])
    else:
        raise ValueError('value associated with "path_type" must be one of "absolute" or "relative"')

    if the_file is not None and os.path.exists(the_file):
        return the_file
    return None
| 1,717
| 28.118644
| 116
|
py
|
sarpy
|
sarpy-master/tests/io/__init__.py
|
__classification__ = 'UNCLASSIFIED'
| 37
| 11.666667
| 35
|
py
|
sarpy
|
sarpy-master/tests/io/received/test_crsd.py
|
import logging
import os
import json
import tempfile
import unittest
import shutil
import numpy.testing
from sarpy.io.received.crsd import CRSDReader, CRSDReader1, CRSDWriter1
from sarpy.io.received.converter import open_received
from sarpy.io.received.crsd_schema import get_schema_path
from tests import parse_file_entry
DEFAULT_SCHEMA = get_schema_path('1.0.0')

# mapping from file type name to the list of locatable test files of that type
crsd_file_types = {}

this_loc = os.path.abspath(__file__)
file_reference = os.path.join(os.path.split(this_loc)[0], 'crsd_file_types.json')  # specifies file locations
if os.path.isfile(file_reference):
    with open(file_reference, 'r') as fi:
        the_files = json.load(fi)
        for the_type in the_files:
            valid_entries = []
            for entry in the_files[the_type]:
                # resolve each entry to an absolute path, or None if unavailable
                the_file = parse_file_entry(entry)
                if the_file is not None:
                    valid_entries.append(the_file)
            crsd_file_types[the_type] = valid_entries
def generic_io_test(instance, test_file, reader_type_string, reader_type):
    """
    Run the generic battery of CRSD reader checks against a single file.

    Parameters
    ----------
    instance : unittest.TestCase
        The test case driving the checks (supplies subTest/assert machinery).
    test_file : str
        Path to the CRSD file under test.
    reader_type_string : str
        Human readable reader type name, used only in messages.
    reader_type : type
        Expected concrete reader class.
    """

    assert isinstance(instance, unittest.TestCase)

    reader = None
    with instance.subTest(msg='establish reader for type {} and file {}'.format(reader_type_string, test_file)):
        reader = open_received(test_file)
        instance.assertTrue(reader is not None, msg='Returned None, so opening failed.')

    if reader is None:
        return  # remaining tests make no sense

    assert isinstance(reader, CRSDReader)
    # fixed: the subTest message was missing its .format() call
    with instance.subTest(msg='Reader for type {} should be appropriate reader'.format(reader_type_string)):
        instance.assertTrue(isinstance(reader, reader_type), msg='Returned reader should be of type {}'.format(reader_type))

    if not isinstance(reader, reader_type):
        return  # remaining tests might be misleading

    with instance.subTest(msg='Verify reader_type for type {} and file {}'.format(reader_type_string, test_file)):
        instance.assertEqual(reader.reader_type, "CRSD", msg='reader.reader_type should be "CRSD"')

    with instance.subTest(msg='Validity of crsd in reader of '
                              'type {} for file {}'.format(reader_type_string, test_file)):
        if not reader.crsd_meta.is_valid(recursive=True, stack=False):
            logging.warning(
                'crsd in reader of type {} for file {} not valid'.format(reader_type_string, test_file))

    # fixed wording: these are channels of a crsd, not sidds
    with instance.subTest(msg='Fetch data_sizes and channels for type {} and file {}'.format(reader_type_string, test_file)):
        data_sizes = reader.get_data_size_as_tuple()
        if isinstance(reader, CRSDReader1):
            elements = reader.crsd_meta.Data.Channels
        else:
            raise TypeError('Got unhandled reader type {}'.format(type(reader)))

    for i, (data_size, element) in enumerate(zip(data_sizes, elements)):
        with instance.subTest(msg='Verify image size for crsd index {} in reader '
                                  'of type {} for file {}'.format(i, reader_type_string, test_file)):
            instance.assertEqual(data_size[0], element.NumVectors, msg='data_size[0] and NumVectors do not agree')
            instance.assertEqual(data_size[1], element.NumSamples, msg='data_size[1] and NumSamples do not agree')

        with instance.subTest(msg='Basic fetch test for crsd index {} in reader '
                                  'of type {} for file {}'.format(i, reader_type_string, test_file)):
            # sample a small patch from each corner of the image
            instance.assertEqual(reader[:2, :2, i].shape[:2], (2, 2), msg='upper left fetch')
            instance.assertEqual(reader[-2:, :2, i].shape[:2], (2, 2), msg='lower left fetch')
            instance.assertEqual(reader[-2:, -2:, i].shape[:2], (2, 2), msg='lower right fetch')
            instance.assertEqual(reader[:2, -2:, i].shape[:2], (2, 2), msg='upper right fetch')

        with instance.subTest(msg='Verify fetching complete row(s) have correct size '
                                  'for crsd index {} in reader of type {} and file {}'.format(i, reader_type_string, test_file)):
            test_data = reader[:, :2, i]
            instance.assertEqual(test_data.shape[:2], (data_size[0], 2), msg='Complete row fetch size mismatch')

        with instance.subTest(msg='Verify fetching complete columns(s) have correct size '
                                  'for crsd index {} in reader of type {} file {}'.format(i, reader_type_string, test_file)):
            test_data = reader[:2, :, i]
            instance.assertEqual(test_data.shape[:2], (2, data_size[1]), msg='Complete row fetch size mismatch')

        with instance.subTest(msg='Verify fetching entire pvp data has correct size for crsd '
                                  'index {} in reader of type {} file {}'.format(i, reader_type_string, test_file)):
            test_pvp = reader.read_pvp_variable('TxTime', i, the_range=None)
            instance.assertEqual(test_pvp.shape, (data_size[0], ), msg='Unexpected pvp total fetch size')

        with instance.subTest(msg='Verify fetching pvp data for slice has correct size for crsd '
                                  'index {} in reader of type {} file {}'.format(i, reader_type_string, test_file)):
            test_pvp = reader.read_pvp_variable('TxTime', i, the_range=(0, 10, 2))
            instance.assertEqual(test_pvp.shape, (5, ), msg='Unexpected pvp strided slice fetch size')

    # round-trip write test in a scratch directory, cleaned up afterwards
    temp_directory = tempfile.mkdtemp()
    if isinstance(reader, CRSDReader1):
        with instance.subTest(msg='crsd writer test'):
            generic_writer_test(reader, temp_directory)
    shutil.rmtree(temp_directory)
    del reader
def generic_writer_test(crsd_reader, the_directory):
    """Round-trip the reader's contents through CRSDWriter1 and verify the blocks are byte-identical."""
    out_file = os.path.join(the_directory, 'example_crsd.crsd')

    # pull all three blocks from the source reader
    support_block = crsd_reader.read_support_block()
    pvp_block = crsd_reader.read_pvp_block()
    signal_block = crsd_reader.read_signal_block()

    # write a new crsd file from the extracted blocks
    with CRSDWriter1(out_file, crsd_reader.crsd_meta, check_existence=False) as the_writer:
        the_writer.write_file(pvp_block, signal_block, support_block)

    # reread the freshly written file and compare each block to the original
    round_trip = CRSDReader(out_file)
    numpy.testing.assert_equal(support_block, round_trip.read_support_block())
    numpy.testing.assert_equal(pvp_block, round_trip.read_pvp_block())
    numpy.testing.assert_equal(signal_block, round_trip.read_signal_block())
class TestCRSD(unittest.TestCase):
    """Driver which runs the generic CRSD io test over each configured file."""

    @unittest.skipIf(len(crsd_file_types.get('CRSD', [])) == 0, 'No CRSD files specified or found')
    def test_crsd_io(self):
        # each configured file gets the full generic reader/writer battery
        for test_file in crsd_file_types['CRSD']:
            generic_io_test(self, test_file, 'CRSD', CRSDReader)
| 6,802
| 47.942446
| 129
|
py
|
sarpy
|
sarpy-master/tests/io/received/__init__.py
|
__classification__ = 'UNCLASSIFIED'
| 37
| 11.666667
| 35
|
py
|
sarpy
|
sarpy-master/tests/io/general/test_tre.py
|
from sarpy.io.general.nitf_elements.tres.registration import find_tre
from sarpy.io.general.nitf_elements.tres.unclass.ACFTA import ACFTA
import unittest
class TestTreRegistry(unittest.TestCase):
    """Basic check of the TRE registration lookup."""

    def test_find_tre(self):
        # the registry should resolve the tag name to the concrete TRE class
        the_tre = find_tre('ACFTA')
        self.assertEqual(the_tre, ACFTA)
| 304
| 29.5
| 69
|
py
|
sarpy
|
sarpy-master/tests/io/general/test_nitf_headers.py
|
import os
import time
import logging
import json
import unittest
from sarpy.io.general.nitf import NITFDetails
from sarpy.io.general.nitf_elements.image import ImageSegmentHeader
from sarpy.io.general.nitf_elements.text import TextSegmentHeader
from sarpy.io.general.nitf_elements.graphics import GraphicsSegmentHeader
from sarpy.io.general.nitf_elements.des import DataExtensionHeader
from tests import parse_file_entry
# Locate the nitf test files enumerated in nitf_headers.json, if present.
no_files = False
test_files = []

this_loc = os.path.abspath(__file__)
file_reference = os.path.join(os.path.split(this_loc)[0], 'nitf_headers.json')  # specifies file locations
if os.path.isfile(file_reference):
    with open(file_reference, 'r') as fi:
        for entry in json.load(fi):
            the_file = parse_file_entry(entry)
            if the_file is not None:
                test_files.append(the_file)
    if len(test_files) == 0:
        logging.warning('No files have been identified for the nitf header tests.')
        no_files = True
else:
    # fixed: the message previously referenced 'nitf_header.json' while the file
    # actually loaded above is 'nitf_headers.json'
    logging.error('Can not find the nitf_headers.json file identifying nitf header tests.')
    no_files = True
def generic_nitf_header_test(instance, test_file):
    """
    Parse the nitf header of `test_file` and verify that every header and
    subheader serializes back to exactly the bytes found in the file.

    Parameters
    ----------
    instance : unittest.TestCase
    test_file : str
    """
    assert isinstance(instance, unittest.TestCase)

    # can we parse it at all? how long does it take?
    with instance.subTest(msg="header parsing"):
        start = time.time()
        details = NITFDetails(test_file)
        # how long does it take?
        logging.info('unpacked nitf details in {}'.format(time.time() - start))
        # how does it look?
        logging.debug(details.nitf_header)

    # is the output as long as it should be?
    with instance.subTest(msg="header length match"):
        header_string = details.nitf_header.to_bytes()
        equality = (len(header_string) == details.nitf_header.HL)
        if not equality:
            logging.error(
                'len(produced header) = {}, nitf_header.HL = {}'.format(len(header_string),
                                                                        details.nitf_header.HL))
        instance.assertTrue(equality)

    # is the output what it should be?
    with instance.subTest(msg="header content match"):
        with open(test_file, 'rb') as fi:
            file_header = fi.read(details.nitf_header.HL)
        equality = (file_header == header_string)
        if not equality:
            # dump both byte streams in 80-byte chunks for side-by-side comparison
            chunk_size = 80
            start_chunk = 0
            while start_chunk < len(header_string):
                end_chunk = min(start_chunk + chunk_size, len(header_string))
                logging.error('real[{}:{}] = {}'.format(
                    start_chunk, end_chunk, file_header[start_chunk:end_chunk]))
                logging.error('prod[{}:{}] = {}'.format(
                    start_chunk, end_chunk, header_string[start_chunk:end_chunk]))
                start_chunk = end_chunk
        instance.assertTrue(equality)

    # is each image subheader working?
    if details.img_segment_offsets is not None:
        for i in range(details.img_segment_offsets.size):
            with instance.subTest('image subheader {} match'.format(i)):
                img_bytes = details.get_image_subheader_bytes(i)
                img_sub = ImageSegmentHeader.from_bytes(img_bytes, start=0)
                instance.assertEqual(
                    len(img_bytes), img_sub.get_bytes_length(), msg='image subheader as long as expected')
                instance.assertEqual(
                    img_bytes, img_sub.to_bytes(), msg='image subheader serializes and deserializes as expected')

    # is each text segment working?
    if details.text_segment_offsets is not None:
        for i in range(details.text_segment_offsets.size):
            with instance.subTest('text subheader {} match'.format(i)):
                txt_bytes = details.get_text_subheader_bytes(i)
                txt_sub = TextSegmentHeader.from_bytes(txt_bytes, start=0)
                instance.assertEqual(
                    len(txt_bytes), txt_sub.get_bytes_length(), msg='text subheader as long as expected')
                instance.assertEqual(
                    txt_bytes, txt_sub.to_bytes(), msg='text subheader serializes and deserializes as expected')

    # is each graphics segment working?
    if details.graphics_segment_offsets is not None:
        for i in range(details.graphics_segment_offsets.size):
            with instance.subTest('graphics subheader {} match'.format(i)):
                graphics_bytes = details.get_graphics_subheader_bytes(i)
                graphics_sub = GraphicsSegmentHeader.from_bytes(graphics_bytes, start=0)
                instance.assertEqual(
                    len(graphics_bytes), graphics_sub.get_bytes_length(), msg='graphics subheader as long as expected')
                instance.assertEqual(
                    graphics_bytes, graphics_sub.to_bytes(), msg='graphics subheader serializes and deserializes as expected')

    # is each data extension subheader working?
    if details.des_segment_offsets is not None:
        for i in range(details.des_segment_offsets.size):
            with instance.subTest('des subheader {} match'.format(i)):
                des_bytes = details.get_des_subheader_bytes(i)
                des_sub = DataExtensionHeader.from_bytes(des_bytes, start=0)
                instance.assertEqual(
                    len(des_bytes), des_sub.get_bytes_length(), msg='des subheader as long as expected')
                instance.assertEqual(
                    des_bytes, des_sub.to_bytes(), msg='des subheader serializes and deserializes as expected')
class TestNITFHeader(unittest.TestCase):
    """Driver which runs the generic nitf header test over each configured file."""

    @unittest.skipIf(no_files, 'No nitf files identified for testing')
    def test_nitf_header(self):
        for test_file in test_files:
            generic_nitf_header_test(self, test_file)
| 5,793
| 45.352
| 126
|
py
|
sarpy
|
sarpy-master/tests/io/general/test_data_segment.py
|
import unittest
import numpy
from sarpy.io.general.format_function import ComplexFormatFunction
from sarpy.io.general.data_segment import NumpyArraySegment, SubsetSegment, \
BandAggregateSegment, BlockAggregateSegment, FileReadDataSegment
from io import BytesIO
class TestNumpyArraySegment(unittest.TestCase):
    """Tests for read/write behavior of NumpyArraySegment with an I/Q format function."""

    def test_basic_read(self):
        # raw data: (5, 6, 2) int16, the band dimension holding I/Q pairs
        data = numpy.reshape(numpy.arange(60, dtype='int16'), (5, 6, 2))
        complex_data = numpy.empty((5, 6), dtype='complex64')
        complex_data.real = data[:, :, 0]
        complex_data.imag = data[:, :, 1]

        data_segment = NumpyArraySegment(
            data, formatted_dtype='complex64', formatted_shape=(5, 6),
            format_function=ComplexFormatFunction('int16', 'IQ', band_dimension=2),
            mode='r')

        with self.subTest(msg='read_raw full'):
            test_data = data_segment.read_raw(None)
            self.assertTrue(numpy.all(data == test_data))

        with self.subTest(msg='read_raw subscript'):
            subscript = (slice(0, 2, 1), slice(1, 3, 1))
            test_data = data_segment.read_raw(subscript)
            self.assertTrue(numpy.all(data[subscript] == test_data))

        with self.subTest(msg='read_raw index with squeeze'):
            test_data = data_segment.read_raw((0, 1, 1), squeeze=True)
            self.assertTrue(test_data.ndim == 0, msg='{}'.format(test_data))
            self.assertTrue(data[0, 1, 1] == test_data)

        with self.subTest(msg='read_raw index without squeeze'):
            test_data = data_segment.read_raw((0, 1, 1), squeeze=False)
            self.assertTrue(test_data.ndim == 3)
            self.assertTrue(data[0, 1, 1] == test_data)

        with self.subTest(msg='read full'):
            test_data = data_segment.read(None)
            self.assertTrue(numpy.all(complex_data == test_data))

        with self.subTest(msg='read subscript'):
            subscript = (slice(0, 2, 1), slice(1, 3, 1))
            test_data = data_segment.read(subscript)
            self.assertTrue(numpy.all(complex_data[subscript] == test_data))

        with self.subTest(msg='read subscript with ellipsis'):
            subscript = (..., slice(1, 3, 1))
            test_data = data_segment.read(subscript)
            self.assertTrue(numpy.all(complex_data[subscript] == test_data))

        with self.subTest(msg='read index with squeeze'):
            test_data = data_segment.read((0, 1), squeeze=True)
            self.assertTrue(test_data.ndim == 0)
            self.assertTrue(complex_data[0, 1] == test_data)

        with self.subTest(msg='read index without squeeze'):
            test_data = data_segment.read((0, 1), squeeze=False)
            self.assertTrue(test_data.ndim == 2)
            self.assertTrue(complex_data[0, 1] == test_data)

        with self.subTest(msg='read using __getitem__ subscript'):
            subscript = (slice(0, 2, 1), slice(1, 3, 1))
            test_data = data_segment[0:2, 1:3]
            self.assertTrue(numpy.all(complex_data[subscript] == test_data))

        with self.subTest(msg='read using __getitem__ and specifying raw'):
            subscript = (slice(0, 2, 1), slice(1, 3, 1))
            test_data = data_segment[0:2, 1:3, 'raw']
            self.assertTrue(numpy.all(data[subscript] == test_data))

        # sample a small patch from each corner of the formatted image
        with self.subTest(msg='corners :3,:3'):
            test_data = data_segment[:3, :3]
            self.assertTrue(test_data.shape == (3, 3))

        with self.subTest(msg='corners -3:,:3'):
            test_data = data_segment[-3:, :3]
            self.assertTrue(test_data.shape == (3, 3))

        with self.subTest(msg='corners :3,-3:'):
            test_data = data_segment[:3, -3:]
            self.assertTrue(test_data.shape == (3, 3))

        with self.subTest(msg='corners -3:,-3:'):
            test_data = data_segment[-3:, -3:]
            self.assertTrue(test_data.shape == (3, 3))

        # a read-only segment must refuse writes
        with self.assertRaises(ValueError, msg='write_raw attempt'):
            data_segment.write_raw(data, start_indices=0)

        with self.assertRaises(ValueError, msg='write attempt'):
            data_segment.write(complex_data, start_indices=0)

        with self.subTest(msg='close functionality test'):
            self.assertFalse(data_segment.closed)
            data_segment.close()
            self.assertTrue(data_segment.closed)

        with self.assertRaises(ValueError, msg='read access when closed'):
            _ = data_segment.read(None)

        with self.assertRaises(ValueError, msg='read_raw access when closed'):
            _ = data_segment.read_raw(None)

    def test_read_with_transpose_and_reverse(self):
        data = numpy.reshape(numpy.arange(60, dtype='int16'), (5, 6, 2))
        # exercise every combination of axis reversal before the transpose
        for axis in [None, (0, ), (1, ), (0, 1)]:
            complex_data = numpy.empty((5, 6), dtype='complex64')
            complex_data.real = data[:, :, 0]
            complex_data.imag = data[:, :, 1]
            # construct the expected output: flip (if requested), then transpose
            if axis is None:
                complex_data = numpy.transpose(complex_data)
            else:
                complex_data = numpy.transpose(numpy.flip(complex_data, axis=axis))

            data_segment = NumpyArraySegment(
                data, formatted_dtype='complex64', formatted_shape=(6, 5),
                reverse_axes=axis, transpose_axes=(1, 0, 2),
                format_function=ComplexFormatFunction('int16', 'IQ', band_dimension=2),
                mode='r')

            with self.subTest(msg='read full, axis {}'.format(axis)):
                test_data = data_segment.read(None)
                self.assertTrue(numpy.all(complex_data == test_data))

            with self.subTest(msg='read subscript, axis {}'.format(axis)):
                subscript = (slice(1, 3, 1), slice(0, 2, 1))
                test_data = data_segment.read(subscript)
                self.assertTrue(numpy.all(test_data == complex_data[subscript]))

            with self.subTest(msg='corners :3,:3, axis {}'.format(axis)):
                test_data = data_segment[:3, :3]
                self.assertTrue(test_data.shape == (3, 3))

            with self.subTest(msg='corners -3:,:3, axis {}'.format(axis)):
                test_data = data_segment[-3:, :3]
                self.assertTrue(test_data.shape == (3, 3))

            with self.subTest(msg='corners :3,-3:, axis {}'.format(axis)):
                test_data = data_segment[:3, -3:]
                self.assertTrue(test_data.shape == (3, 3))

            with self.subTest(msg='corners -3:,-3:, axis {}'.format(axis)):
                test_data = data_segment[-3:, -3:]
                self.assertTrue(test_data.shape == (3, 3))

    def test_basic_write(self):
        data = numpy.reshape(numpy.arange(24, dtype='int16'), (3, 4, 2))
        complex_data = numpy.empty((3, 4), dtype='complex64')
        complex_data.real = data[:, :, 0]
        complex_data.imag = data[:, :, 1]

        with self.subTest(msg='write_raw'):
            empty = numpy.empty((3, 4, 2), dtype='int16')
            data_segment = NumpyArraySegment(
                empty, formatted_dtype='complex64', formatted_shape=(3, 4),
                format_function=ComplexFormatFunction('int16', 'IQ', band_dimension=2),
                mode='w')
            data_segment.write_raw(data, start_indices=0)
            self.assertTrue(numpy.all(empty == data))

        with self.subTest(msg='write'):
            empty = numpy.empty((3, 4, 2), dtype='int16')
            data_segment = NumpyArraySegment(
                empty, formatted_dtype='complex64', formatted_shape=(3, 4),
                format_function=ComplexFormatFunction('int16', 'IQ', band_dimension=2),
                mode='w')
            data_segment.write(complex_data, start_indices=0)
            self.assertTrue(numpy.all(empty == data))

        # a write-only segment must refuse reads
        with self.assertRaises(ValueError, msg='read_raw attempt'):
            _ = data_segment.read_raw(0)

        with self.assertRaises(ValueError, msg='read attempt'):
            _ = data_segment.read(0)

        with self.subTest(msg='close'):
            empty = numpy.empty((3, 4, 2), dtype='int16')
            data_segment = NumpyArraySegment(
                empty, formatted_dtype='complex64', formatted_shape=(3, 4),
                format_function=ComplexFormatFunction('int16', 'IQ', band_dimension=2),
                mode='w')
            self.assertFalse(data_segment.closed)
            data_segment.close()
            self.assertTrue(data_segment.closed)

        with self.assertRaises(ValueError, msg='write access when closed'):
            data_segment.write(complex_data)

        with self.assertRaises(ValueError, msg='write_raw access when closed'):
            # NOTE(review): this calls write, not write_raw - possibly intended
            # to be data_segment.write_raw(data); confirm against the API
            data_segment.write(complex_data)
class TestSubsetSegment(unittest.TestCase):
    """Tests for SubsetSegment windowing over a parent NumpyArraySegment."""

    def test_read(self):
        data = numpy.reshape(numpy.arange(24, dtype='int16'), (6, 4))
        # the subset views rows 2-4 and columns 2-3 of the parent
        subset_def = (slice(2, 5, 1), slice(2, 4, 1))
        parent_segment = NumpyArraySegment(data, mode='r')
        data_segment = SubsetSegment(parent_segment, subset_def, 'raw')

        with self.subTest(msg='full read'):
            test_data = data_segment[:]
            self.assertTrue(numpy.all(data[subset_def] == test_data))

        with self.subTest(msg='partial read'):
            # rows 1:3 of the subset map to rows 3:5 of the parent
            test_data = data_segment[1:3]
            self.assertTrue(numpy.all(data[(slice(3, 5, 1), slice(2, 4, 1))] == test_data))

        with self.subTest(msg='close functionality test'):
            self.assertFalse(data_segment.closed)
            data_segment.close()
            self.assertTrue(data_segment.closed)

        with self.assertRaises(ValueError, msg='read_raw access when closed'):
            _ = data_segment.read_raw(None)

        with self.assertRaises(ValueError, msg='read access when closed'):
            _ = data_segment.read(None)

    def test_write(self):
        data = numpy.zeros((6, 4), dtype='int16')
        subset_def = (slice(2, 5, 1), slice(2, 4, 1))
        parent_segment = NumpyArraySegment(data, mode='w')
        data_segment = SubsetSegment(parent_segment, subset_def, 'raw')
        test_data = numpy.reshape(numpy.arange(6, dtype='int16'), (3, 2))

        with self.subTest(msg='subset write'):
            # writing through the subset should land in the parent's window
            data_segment.write(test_data, start_indices=0)
            self.assertTrue(numpy.all(data[subset_def] == test_data))

        with self.subTest(msg='close functionality test'):
            self.assertFalse(data_segment.closed)
            data_segment.close()
            self.assertTrue(data_segment.closed)

        with self.assertRaises(ValueError, msg='write access when closed'):
            data_segment.write(test_data)

        with self.assertRaises(ValueError, msg='write_raw access when closed'):
            # NOTE(review): this calls write, not write_raw - possibly intended
            # to be data_segment.write_raw(test_data); confirm against the API
            data_segment.write(test_data)
class TestBandAggregateSegment(unittest.TestCase):
    """Tests for BandAggregateSegment: stacking single-band segments along a band dimension."""

    def test_read(self):
        """Verify band-stacked reads agree with the underlying arrays, and closed-state errors."""
        data0 = numpy.reshape(numpy.arange(12, dtype='uint8'), (3, 4))
        data1 = numpy.reshape(numpy.arange(12, 24, dtype='uint8'), (3, 4))
        ds0 = NumpyArraySegment(data0, mode='r')
        ds1 = NumpyArraySegment(data1, mode='r')
        data_segment = BandAggregateSegment((ds0, ds1), 2)
        with self.subTest(msg='direct band comparison'):
            test_data = data_segment[:]
            self.assertTrue(numpy.all(test_data[..., 0] == data0))
            self.assertTrue(numpy.all(test_data[..., 1] == data1))
        with self.subTest(msg='reading band comparison'):
            self.assertTrue(numpy.all(data_segment[..., 0] == data0))
            self.assertTrue(numpy.all(data_segment[..., 1] == data1))
        with self.subTest(msg='section reading'):
            subset = (slice(2, 3, 1), slice(1, 3, 1))
            test_data = data_segment.read(subset)
            self.assertTrue(numpy.all(test_data[..., 0] == data0[subset]))
            self.assertTrue(numpy.all(test_data[..., 1] == data1[subset]))
        with self.subTest(msg='close functionality test'):
            self.assertFalse(data_segment.closed)
            data_segment.close()
            self.assertTrue(data_segment.closed)
            with self.assertRaises(ValueError, msg='read_raw access when closed'):
                _ = data_segment.read_raw(None)
            with self.assertRaises(ValueError, msg='read access when closed'):
                _ = data_segment.read(None)

    def test_write(self):
        """Verify band-stacked writes distribute to the child segments, and closed-state errors."""
        data0 = numpy.empty((3, 4), dtype='uint16')
        data1 = numpy.empty((3, 4), dtype='uint16')
        ds0 = NumpyArraySegment(data0, mode='w')
        ds1 = NumpyArraySegment(data1, mode='w')
        data_segment = BandAggregateSegment((ds0, ds1), 2)
        with self.subTest(msg='writing check'):
            test_data = numpy.reshape(numpy.arange(24, dtype='uint16'), (3, 4, 2))
            data_segment.write(test_data)
            self.assertTrue(numpy.all(test_data[..., 0] == data0))
            self.assertTrue(numpy.all(test_data[..., 1] == data1))
        with self.subTest(msg='close functionality test'):
            self.assertFalse(data_segment.closed)
            data_segment.close()
            self.assertTrue(data_segment.closed)
            with self.assertRaises(ValueError, msg='write access when closed'):
                data_segment.write(test_data)
            with self.assertRaises(ValueError, msg='write_raw access when closed'):
                # bug fix: this subtest previously called write() again; it is
                # intended to exercise write_raw on a closed segment
                data_segment.write_raw(test_data)
class TestBlockAggregateSegment(unittest.TestCase):
    """Tests for BlockAggregateSegment: tiling child segments into one larger array."""

    def test_read(self):
        """Verify tiled reads reproduce the child blocks, and closed-state errors."""
        data0 = numpy.reshape(numpy.arange(6, dtype='int16'), (3, 2))
        data1 = numpy.reshape(numpy.arange(6, 12, dtype='int16'), (3, 2))
        ds0 = NumpyArraySegment(data0, mode='r')
        ds1 = NumpyArraySegment(data1, mode='r')
        # two (3, 2) blocks arranged side-by-side into a (3, 4) whole
        data_segment = BlockAggregateSegment(
            (ds0, ds1), (
                (slice(0, 3, 1), slice(0, 2, 1)),
                (slice(0, 3, 1), slice(2, 4, 1))),
            'raw', 0, (3, 4), 'int16', (3, 4))
        with self.subTest(msg='read'):
            test_data = data_segment[:]
            self.assertTrue(numpy.all(data0 == test_data[:, :2]))
            self.assertTrue(numpy.all(data1 == test_data[:, 2:]))
        with self.subTest(msg='close functionality test'):
            self.assertFalse(data_segment.closed)
            data_segment.close()
            self.assertTrue(data_segment.closed)
            with self.assertRaises(ValueError, msg='read_raw access when closed'):
                _ = data_segment.read_raw(None)
            with self.assertRaises(ValueError, msg='read access when closed'):
                _ = data_segment.read(None)

    def test_write(self):
        """Verify tiled writes distribute to the child blocks, and closed-state errors."""
        data0 = numpy.empty((3, 2), dtype='int16')
        data1 = numpy.empty((3, 2), dtype='int16')
        ds0 = NumpyArraySegment(data0, mode='w')
        ds1 = NumpyArraySegment(data1, mode='w')
        data_segment = BlockAggregateSegment(
            (ds0, ds1), (
                (slice(0, 3, 1), slice(0, 2, 1)),
                (slice(0, 3, 1), slice(2, 4, 1))),
            'raw', 0, (3, 4), 'int16', (3, 4))
        test_data = numpy.reshape(numpy.arange(12, dtype='int16'), (3, 4))
        with self.subTest(msg='write'):
            data_segment.write(test_data, start_indices=0)
            self.assertTrue(numpy.all(test_data[:, :2] == data0))
            self.assertTrue(numpy.all(test_data[:, 2:] == data1))
        with self.subTest(msg='close functionality test'):
            self.assertFalse(data_segment.closed)
            data_segment.close()
            self.assertTrue(data_segment.closed)
            with self.assertRaises(ValueError, msg='write access when closed'):
                data_segment.write(test_data)
            with self.assertRaises(ValueError, msg='write_raw access when closed'):
                # bug fix: this subtest previously called write() again; it is
                # intended to exercise write_raw on a closed segment
                data_segment.write_raw(test_data)
class TestFileReadSegment(unittest.TestCase):
    """Tests for FileReadDataSegment: read-only formatted access backed by a file-like object."""
    def test_read(self):
        """Exercise raw/formatted reads, __getitem__ access, write rejection, and close behavior."""
        # raw data is int16 with I/Q in the trailing band dimension; formatted view is complex64
        data = numpy.reshape(numpy.arange(24, dtype='int16'), (3, 4, 2))
        complex_data = numpy.empty((3, 4), dtype='complex64')
        complex_data.real = data[:, :, 0]
        complex_data.imag = data[:, :, 1]
        file_object = BytesIO(data.tobytes())
        data_segment = FileReadDataSegment(
            file_object, 0, 'int16', (3, 4, 2), 'complex64', (3, 4),
            format_function=ComplexFormatFunction('int16', 'IQ', band_dimension=2))
        with self.subTest(msg='read_raw full'):
            test_data = data_segment.read_raw(None)
            self.assertTrue(numpy.all(data == test_data))
        with self.subTest(msg='read_raw subscript'):
            subscript = (slice(0, 2, 1), slice(1, 3, 1))
            test_data = data_segment.read_raw(subscript)
            self.assertTrue(numpy.all(data[subscript] == test_data))
        with self.subTest(msg='read_raw index with squeeze'):
            # integer index with squeeze=True collapses to a 0-d array
            test_data = data_segment.read_raw((0, 1, 1), squeeze=True)
            self.assertTrue(test_data.ndim == 0, msg='{}'.format(test_data))
            self.assertTrue(data[0, 1, 1] == test_data)
        with self.subTest(msg='read_raw index without squeeze'):
            # squeeze=False preserves the full raw rank (3 here)
            test_data = data_segment.read_raw((0, 1, 1), squeeze=False)
            self.assertTrue(test_data.ndim == 3)
            self.assertTrue(data[0, 1, 1] == test_data)
        with self.subTest(msg='read full'):
            test_data = data_segment.read(None)
            self.assertTrue(numpy.all(complex_data == test_data))
        with self.subTest(msg='read subscript'):
            subscript = (slice(0, 2, 1), slice(1, 3, 1))
            test_data = data_segment.read(subscript)
            self.assertTrue(numpy.all(complex_data[subscript] == test_data))
        with self.subTest(msg='read subscript with ellipsis'):
            subscript = (..., slice(1, 3, 1))
            test_data = data_segment.read(subscript)
            self.assertTrue(numpy.all(complex_data[subscript] == test_data))
        with self.subTest(msg='read index with squeeze'):
            test_data = data_segment.read((0, 1), squeeze=True)
            self.assertTrue(test_data.ndim == 0)
            self.assertTrue(complex_data[0, 1] == test_data)
        with self.subTest(msg='read index without squeeze'):
            test_data = data_segment.read((0, 1), squeeze=False)
            self.assertTrue(test_data.ndim == 2)
            self.assertTrue(complex_data[0, 1] == test_data)
        with self.subTest(msg='read using __getitem__ subscript'):
            subscript = (slice(0, 2, 1), slice(1, 3, 1))
            test_data = data_segment[0:2, 1:3]
            self.assertTrue(numpy.all(complex_data[subscript] == test_data))
        with self.subTest(msg='read using __getitem__ and specifying raw'):
            # trailing 'raw' entry in the subscript requests the unformatted data
            subscript = (slice(0, 2, 1), slice(1, 3, 1))
            test_data = data_segment[0:2, 1:3, 'raw']
            self.assertTrue(numpy.all(data[subscript] == test_data))
        # read-only segment: any write attempt should raise
        with self.assertRaises(ValueError, msg='write_raw attempt'):
            data_segment.write_raw(data, start_indices=0)
        with self.assertRaises(ValueError, msg='write attempt'):
            data_segment.write(complex_data, start_indices=0)
        with self.subTest(msg='close functionality test'):
            self.assertFalse(data_segment.closed)
            data_segment.close()
            self.assertTrue(data_segment.closed)
            with self.assertRaises(ValueError, msg='read access when closed'):
                _ = data_segment.read(None)
            with self.assertRaises(ValueError, msg='read_raw access when closed'):
                _ = data_segment.read_raw(None)
| 19,452
| 42.228889
| 91
|
py
|
sarpy
|
sarpy-master/tests/io/general/test_format_function.py
|
import unittest
import numpy
from sarpy.io.general.format_function import IdentityFunction, ComplexFormatFunction
class TestIdentityFunction(unittest.TestCase):
    """Tests for IdentityFunction covering axis reversal, transposition, and their combination."""

    def test_reverse(self):
        """Reversal alone should match numpy.flip along the same axes."""
        source = numpy.reshape(numpy.arange(6, dtype='int32'), (3, 2))
        full_subscript = (slice(0, 3, 1), slice(0, 2, 1))
        for axes in [(0, ), (1, ), (0, 1)]:
            with self.subTest(msg='axes[{}] reverse'.format(axes)):
                function = IdentityFunction(
                    raw_shape=(3, 2), formatted_shape=(3, 2), reverse_axes=axes,
                    transpose_axes=None)
                produced = function(source, full_subscript)
                expected = numpy.flip(source, axis=axes)
                self.assertTrue(numpy.all(expected == produced), msg='reverse failure')

    def test_transpose(self):
        """Transposition alone should match numpy.transpose with the same axis order."""
        source = numpy.reshape(numpy.arange(6, dtype='int32'), (3, 2))
        function = IdentityFunction(raw_shape=(3, 2), formatted_shape=(2, 3), transpose_axes=(1, 0))
        produced = function(source, (slice(0, 3, 1), slice(0, 2, 1)))
        expected = numpy.transpose(source, (1, 0))
        self.assertTrue(numpy.all(expected == produced), msg='transpose failure')

    def test_combined(self):
        """Reversal followed by transposition should match flip-then-transpose in numpy."""
        source = numpy.reshape(numpy.arange(6, dtype='int32'), (3, 2))
        full_subscript = (slice(0, 3, 1), slice(0, 2, 1))
        for axes in ((0, ), (1, ), (0, 1)):
            with self.subTest(msg='combined, reverse axes = `{}`'.format(axes)):
                function = IdentityFunction(
                    raw_shape=(3, 2), formatted_shape=(2, 3), reverse_axes=axes, transpose_axes=(1, 0))
                produced = function(source, full_subscript)
                expected = numpy.transpose(numpy.flip(source, axis=axes), (1, 0))
                self.assertTrue(numpy.all(expected == produced), msg='combined failure')
class TestComplexFunction(unittest.TestCase):
    """Tests for ComplexFormatFunction over all band orderings and raw dtypes."""
    def test_bad_typing(self):
        """Invalid dtype/order combinations should be rejected at construction."""
        # I/Q data must be signed or floating; unsigned integers are invalid
        for order in ['IQ', 'QI']:
            with self.assertRaises(ValueError, msg='{} typing'.format(order)):
                _ = ComplexFormatFunction('uint8', order)
            with self.assertRaises(ValueError, msg='{} typing'.format(order)):
                _ = ComplexFormatFunction('uint16', order)
            with self.assertRaises(ValueError, msg='{} typing'.format(order)):
                _ = ComplexFormatFunction('uint32', order)
        # magnitude/phase data must not be signed integer
        for order in ['MP', 'PM']:
            with self.assertRaises(ValueError, msg='{} typing'.format(order)):
                _ = ComplexFormatFunction('int8', order)
            with self.assertRaises(ValueError, msg='{} typing'.format(order)):
                _ = ComplexFormatFunction('int16', order)
            with self.assertRaises(ValueError, msg='{} typing'.format(order)):
                _ = ComplexFormatFunction('int32', order)
    def test_float(self):
        """Forward and inverse transforms for float32 raw data in all four orderings."""
        base_data = numpy.reshape(numpy.arange(2, 14, dtype='float32'), (2, 3, 2))
        with self.subTest(msg='IQ float'):
            func = ComplexFormatFunction(
                'float32', 'IQ', raw_shape=(2, 3, 2), formatted_shape=(2, 3), band_dimension=2)
            out_data = func(base_data, (slice(0, 2, 1), slice(0, 3, 1), slice(0, 2, 1)))
            test_data = numpy.empty((2, 3), dtype='complex64')
            test_data.real = base_data[:, :, 0]
            test_data.imag = base_data[:, :, 1]
            self.assertTrue(numpy.all(out_data == test_data), msg='IQ forward')
            inv_data = func.inverse(out_data, (slice(0, 2, 1), slice(0, 3, 1)))
            self.assertTrue(numpy.all(numpy.abs(base_data - inv_data) < 1e-10), msg='IQ inverse')
        with self.subTest(msg='QI float'):
            func = ComplexFormatFunction(
                'float32', 'QI', raw_shape=(2, 3, 2), formatted_shape=(2, 3), band_dimension=2)
            out_data = func(base_data, (slice(0, 2, 1), slice(0, 3, 1), slice(0, 2, 1)))
            test_data = numpy.empty((2, 3), dtype='complex64')
            test_data.real = base_data[:, :, 1]
            test_data.imag = base_data[:, :, 0]
            self.assertTrue(numpy.all(out_data == test_data), msg='QI forward')
            inv_data = func.inverse(out_data, (slice(0, 2, 1), slice(0, 3, 1)))
            self.assertTrue(numpy.all(numpy.abs(base_data - inv_data) < 1e-10), msg='QI inverse')
        with self.subTest(msg='MP float'):
            func = ComplexFormatFunction(
                'float32', 'MP', raw_shape=(2, 3, 2), formatted_shape=(2, 3), band_dimension=2)
            out_data = func(base_data, (slice(0, 2, 1), slice(0, 3, 1), slice(0, 2, 1)))
            magnitude = base_data[:, :, 0]
            theta = base_data[:, :, 1]
            test_data = numpy.empty((2, 3), dtype='complex64')
            test_data.real = magnitude * numpy.cos(theta)
            test_data.imag = magnitude * numpy.sin(theta)
            self.assertTrue(numpy.all(out_data == test_data), msg='MP forward')
            inv_data = func.inverse(out_data, (slice(0, 2, 1), slice(0, 3, 1)))
            inv_array_check = (base_data - inv_data)
            # phase is only recoverable modulo 2*pi, so compare through sin()
            self.assertTrue(numpy.all(numpy.abs(inv_array_check[:, :, 0]) < 1e-5), msg='M of MP inverse')
            self.assertTrue(numpy.all(numpy.abs(numpy.sin(inv_array_check[:, :, 1])) < 1e-5), msg='P of MP inverse')
        with self.subTest(msg='PM float'):
            func = ComplexFormatFunction(
                'float32', 'PM', raw_shape=(2, 3, 2), formatted_shape=(2, 3), band_dimension=2)
            out_data = func(base_data, (slice(0, 2, 1), slice(0, 3, 1), slice(0, 2, 1)))
            magnitude = base_data[:, :, 1]
            theta = base_data[:, :, 0]
            test_data = numpy.empty((2, 3), dtype='complex64')
            test_data.real = magnitude * numpy.cos(theta)
            test_data.imag = magnitude * numpy.sin(theta)
            self.assertTrue(numpy.all(out_data == test_data), msg='PM forward')
            inv_data = func.inverse(out_data, (slice(0, 2, 1), slice(0, 3, 1)))
            inv_array_check = (base_data - inv_data)
            # phase is only recoverable modulo 2*pi, so compare through sin()
            self.assertTrue(numpy.all(numpy.abs(inv_array_check[:, :, 1]) < 1e-5), msg='M of PM inverse')
            self.assertTrue(numpy.all(numpy.abs(numpy.sin(inv_array_check[:, :, 0])) < 1e-5), msg='P of PM inverse')
    def test_int(self):
        """Forward/inverse round trip for signed integer I/Q raw data."""
        for raw_type in ['int8', 'int16']:
            base_data = numpy.reshape(numpy.arange(2, 14, dtype=raw_type), (2, 3, 2))
            with self.subTest(msg='IQ {}'.format(raw_type)):
                func = ComplexFormatFunction(
                    'int16', 'IQ', raw_shape=(2, 3, 2), formatted_shape=(2, 3), band_dimension=2)
                out_data = func(base_data, (slice(0, 2, 1), slice(0, 3, 1), slice(0, 2, 1)))
                test_data = numpy.empty((2, 3), dtype='complex64')
                test_data.real = base_data[:, :, 0]
                test_data.imag = base_data[:, :, 1]
                self.assertTrue(numpy.all(out_data == test_data), msg='IQ {} forward'.format(raw_type))
                inv_data = func.inverse(out_data, (slice(0, 2, 1), slice(0, 3, 1)))
                self.assertTrue(numpy.all(base_data == inv_data), msg='IQ {} inverse'.format(raw_type))
            with self.subTest(msg='QI {}'.format(raw_type)):
                func = ComplexFormatFunction(
                    'int16', 'QI', raw_shape=(2, 3, 2), formatted_shape=(2, 3), band_dimension=2)
                out_data = func(base_data, (slice(0, 2, 1), slice(0, 3, 1), slice(0, 2, 1)))
                test_data = numpy.empty((2, 3), dtype='complex64')
                test_data.real = base_data[:, :, 1]
                test_data.imag = base_data[:, :, 0]
                self.assertTrue(numpy.all(out_data == test_data), msg='QI {} forward'.format(raw_type))
                inv_data = func.inverse(out_data, (slice(0, 2, 1), slice(0, 3, 1)))
                self.assertTrue(numpy.all(base_data == inv_data), msg='QI {} inverse'.format(raw_type))
    def test_uint(self):
        """Forward/inverse round trip for unsigned integer magnitude/phase raw data."""
        for bit_depth in [8, 16]:
            raw_type = 'uint{}'.format(bit_depth)
            base_data = numpy.reshape(numpy.arange(2, 14, dtype=raw_type), (2, 3, 2))
            with self.subTest(msg='MP {}'.format(raw_type)):
                func = ComplexFormatFunction(
                    raw_type, 'MP', raw_shape=(2, 3, 2), formatted_shape=(2, 3), band_dimension=2)
                out_data = func(base_data, (slice(0, 2, 1), slice(0, 3, 1), slice(0, 2, 1)))
                magnitude = base_data[:, :, 0]
                # raw phase maps the full unsigned range onto [0, 2*pi)
                theta = base_data[:, :, 1]*2*numpy.pi/(1 << bit_depth)
                test_data = numpy.empty((2, 3), dtype='complex64')
                test_data.real = magnitude * numpy.cos(theta)
                test_data.imag = magnitude * numpy.sin(theta)
                self.assertTrue(numpy.all(out_data == test_data), msg='MP {} forward'.format(raw_type))
                inv_data = func.inverse(out_data, (slice(0, 2, 1), slice(0, 3, 1)))
                self.assertTrue(numpy.all(base_data == inv_data), msg='MP {} inverse'.format(raw_type))
            with self.subTest(msg='PM {}'.format(raw_type)):
                func = ComplexFormatFunction(
                    raw_type, 'PM', raw_shape=(2, 3, 2), formatted_shape=(2, 3), band_dimension=2)
                out_data = func(base_data, (slice(0, 2, 1), slice(0, 3, 1), slice(0, 2, 1)))
                magnitude = base_data[:, :, 1]
                theta = base_data[:, :, 0]*2*numpy.pi/(1 << bit_depth)
                test_data = numpy.empty((2, 3), dtype='complex64')
                test_data.real = magnitude * numpy.cos(theta)
                test_data.imag = magnitude * numpy.sin(theta)
                self.assertTrue(numpy.all(out_data == test_data), msg='PM {} forward'.format(raw_type))
                inv_data = func.inverse(out_data, (slice(0, 2, 1), slice(0, 3, 1)))
                self.assertTrue(numpy.all(base_data == inv_data), msg='PM {} inverse'.format(raw_type))
| 9,911
| 55.965517
| 116
|
py
|
sarpy
|
sarpy-master/tests/io/general/__init__.py
|
__classification__ = 'UNCLASSIFIED'
| 37
| 11.666667
| 35
|
py
|
sarpy
|
sarpy-master/tests/io/general/test_base.py
|
import unittest
import numpy
from sarpy.io.general.format_function import ComplexFormatFunction
from sarpy.io.general.data_segment import NumpyArraySegment
from sarpy.io.general.base import BaseReader, BaseWriter
class TestBaseReader(unittest.TestCase):
    """Tests for BaseReader wrapping a single formatted data segment."""
    def test_read(self):
        """Exercise deferred initialization, read/read_raw/__getitem__, and close behavior."""
        data = numpy.reshape(numpy.arange(24, dtype='int16'), (3, 4, 2))
        complex_data = numpy.empty((3, 4), dtype='complex64')
        complex_data.real = data[:, :, 0]
        complex_data.imag = data[:, :, 1]
        data_segment = NumpyArraySegment(
            data, formatted_dtype='complex64', formatted_shape=(3, 4),
            format_function=ComplexFormatFunction('int16', 'IQ', band_dimension=2),
            mode='r')
        with self.subTest(msg='blank initialization'):
            # constructing with None defers segment assignment
            reader = BaseReader(None)
        with self.subTest(msg='reinitialization with segment'):
            BaseReader.__init__(reader, data_segment)
        with self.assertRaises(ValueError, msg='repeated initialization with segment'):
            # a segment may only be assigned once
            BaseReader.__init__(reader, data_segment)
        with self.subTest(msg='full read'):
            test_data = reader.read(index=0)
            self.assertTrue(numpy.all(complex_data == test_data))
        with self.subTest(msg='full read_raw'):
            test_data = reader.read_raw(index=0)
            self.assertTrue(numpy.all(data == test_data))
        with self.subTest(msg='full __getitem__ read'):
            test_data = reader[:]
            self.assertTrue(numpy.all(complex_data == test_data))
        with self.subTest(msg='full __getitem__ raw read'):
            # trailing 'raw' entry in the subscript requests unformatted data
            test_data = reader[:, 'raw']
            self.assertTrue(numpy.all(data == test_data))
        with self.subTest(msg='partial read'):
            subscript = (slice(1, 2, 1), slice(2, 4, 1))
            test_data = reader.read(*subscript)
            self.assertTrue(numpy.all(complex_data[subscript] == test_data))
        with self.subTest(msg='partial __getitem__ read'):
            subscript = (slice(1, 2, 1), slice(2, 4, 1))
            test_data = reader[1:2, 2:4]
            self.assertTrue(numpy.all(complex_data[subscript] == test_data))
        with self.subTest(msg='partial raw read'):
            subscript = (slice(1, 2, 1), slice(2, 4, 1))
            test_data = reader.read_raw(*subscript)
            self.assertTrue(numpy.all(data[subscript] == test_data))
        with self.subTest(msg='partial __getitem__ raw read'):
            subscript = (slice(1, 2, 1), slice(2, 4, 1))
            test_data = reader[1:2, 2:4, 'raw']
            self.assertTrue(numpy.all(data[subscript] == test_data))
        with self.subTest(msg='close functionality test'):
            self.assertFalse(reader.closed)
            the_segment = reader.data_segment  # NB: it will be flushed from the reader
            self.assertFalse(the_segment.closed)
            reader.close()
            self.assertTrue(reader.closed)
            self.assertTrue(reader.data_segment is None)
            # closing the reader also closes the underlying segment
            self.assertTrue(the_segment.closed)
            with self.assertRaises(ValueError, msg='read access when closed'):
                _ = reader.read()
            with self.assertRaises(ValueError, msg='read_raw access when closed'):
                _ = reader.read_raw()
    def test_read_with_symmetry(self):
        """Reads through a transposing segment should match the transposed reference data."""
        data = numpy.reshape(numpy.arange(24, dtype='int16'), (3, 4, 2))
        complex_data = numpy.empty((3, 4), dtype='complex64')
        complex_data.real = data[:, :, 0]
        complex_data.imag = data[:, :, 1]
        # reference is transposed to match the segment's transpose_axes below
        complex_data = numpy.transpose(complex_data)
        data_segment = NumpyArraySegment(
            data, formatted_dtype='complex64', formatted_shape=(4, 3),
            transpose_axes=(1, 0, 2),
            format_function=ComplexFormatFunction('int16', 'IQ', band_dimension=2),
            mode='r')
        reader = BaseReader(data_segment)
        with self.subTest(msg='read full'):
            test_data = reader.read()
            self.assertTrue(numpy.all(complex_data == test_data))
        with self.subTest(msg='read subscript'):
            subscript = (slice(1, 3, 1), slice(0, 2, 1))
            test_data = reader.read(*subscript)
            self.assertTrue(numpy.all(test_data == complex_data[subscript]))
class TestBaseWriter(unittest.TestCase):
    """Tests for BaseWriter wrapping a single writable data segment."""

    def test_write(self):
        """Exercise write/write_raw (also as context manager) and close behavior."""
        data = numpy.reshape(numpy.arange(24, dtype='int16'), (3, 4, 2))
        complex_data = numpy.empty((3, 4), dtype='complex64')
        complex_data.real = data[:, :, 0]
        complex_data.imag = data[:, :, 1]
        with self.subTest(msg='write_raw'):
            empty = numpy.empty((3, 4, 2), dtype='int16')
            data_segment = NumpyArraySegment(
                empty, formatted_dtype='complex64', formatted_shape=(3, 4),
                format_function=ComplexFormatFunction('int16', 'IQ', band_dimension=2),
                mode='w')
            with BaseWriter(data_segment) as writer:
                writer.write_raw(data, start_indices=0)
            self.assertTrue(numpy.all(empty == data))
        with self.subTest(msg='write'):
            empty = numpy.empty((3, 4, 2), dtype='int16')
            data_segment = NumpyArraySegment(
                empty, formatted_dtype='complex64', formatted_shape=(3, 4),
                format_function=ComplexFormatFunction('int16', 'IQ', band_dimension=2),
                mode='w')
            with BaseWriter(data_segment) as writer:
                # formatted (complex) write should land as raw I/Q in the backing array
                writer.write(complex_data, start_indices=0)
            self.assertTrue(numpy.all(empty == data))
        with self.subTest(msg='close'):
            empty = numpy.empty((3, 4, 2), dtype='int16')
            data_segment = NumpyArraySegment(
                empty, formatted_dtype='complex64', formatted_shape=(3, 4),
                format_function=ComplexFormatFunction('int16', 'IQ', band_dimension=2),
                mode='w')
            writer = BaseWriter(data_segment)
            self.assertFalse(writer.closed)
            self.assertFalse(writer.data_segment[0].closed)
            writer.close()
            self.assertTrue(writer.closed)
            self.assertTrue(writer.data_segment is None)
            # closing the writer also closes the underlying segment
            self.assertTrue(data_segment.closed)
            with self.assertRaises(ValueError, msg='write access when closed'):
                data_segment.write(complex_data)
            with self.assertRaises(ValueError, msg='write_raw access when closed'):
                # bug fix: this subtest previously called write() again; it is
                # intended to exercise write_raw on a closed segment
                data_segment.write_raw(data)
| 6,472
| 41.032468
| 87
|
py
|
sarpy
|
sarpy-master/tests/io/product/test_reader.py
|
import json
import logging
import lxml.etree
import os
import pathlib
import unittest
from sarpy.io.product.converter import open_product
from sarpy.io.product.sidd import SIDDReader
import sarpy.io.product.sidd
import sarpy.io.product.sidd3_elements.SIDD as sarpy_sidd3
from tests import parse_file_entry
product_file_types = {}  # maps file-type name (e.g. 'SIDD') to a list of locally available test files
this_loc = os.path.abspath(__file__)
file_reference = os.path.join(os.path.split(this_loc)[0], 'product_file_types.json')  # specifies file locations
if os.path.isfile(file_reference):
    # populate the listing from the (optional) local configuration file;
    # entries that do not resolve to an existing file are dropped
    with open(file_reference, 'r') as fi:
        the_files = json.load(fi)
        for the_type in the_files:
            valid_entries = []
            for entry in the_files[the_type]:
                the_file = parse_file_entry(entry)
                if the_file is not None:
                    valid_entries.append(the_file)
            product_file_types[the_type] = valid_entries
def generic_reader_test(instance, test_file, reader_type_string, reader_type):
    """
    Run the generic battery of product-reader checks on a single file.

    Parameters
    ----------
    instance : unittest.TestCase
        The test case instance, used for assertions and subTests.
    test_file : str
        Path to the product file to open.
    reader_type_string : str
        Human-readable reader type name, used only in messages.
    reader_type : type
        The expected concrete reader class.
    """
    assert isinstance(instance, unittest.TestCase)
    reader = None
    with instance.subTest(msg='establish reader for type {} and file {}'.format(reader_type_string, test_file)):
        reader = open_product(test_file)
        instance.assertTrue(reader is not None, msg='Returned None, so opening failed.')
    if reader is None:
        return  # remaining tests make no sense
    # NOTE(review): hard-coded to SIDDReader although reader_type is a parameter;
    # currently this helper is only invoked for SIDD files
    assert isinstance(reader, SIDDReader)
    # bug fix: the message previously contained an unformatted '{}' placeholder
    with instance.subTest(msg='Reader for type {} should be appropriate reader'.format(reader_type_string)):
        instance.assertTrue(isinstance(reader, reader_type), msg='Returned reader should be of type {}'.format(reader_type))
        if not isinstance(reader, reader_type):
            return  # remaining tests might be misleading
    with instance.subTest(msg='Verify reader_type for type {} and file {}'.format(reader_type_string, test_file)):
        instance.assertEqual(reader.reader_type, "SIDD", msg='reader.reader_type should be "SIDD"')
    with instance.subTest(msg='Fetch data_sizes and sidds for type {} and file {}'.format(reader_type_string, test_file)):
        data_sizes = reader.get_data_size_as_tuple()
        sidds = reader.sidd_meta
    for i, (data_size, sidd) in enumerate(zip(data_sizes, sidds)):
        with instance.subTest(msg='Verify image size for sidd index {} in reader '
                                  'of type {} for file {}'.format(i, reader_type_string, test_file)):
            instance.assertEqual(data_size[0], sidd.Measurement.PixelFootprint.Row, msg='data_size[0] and Row do not agree')
            instance.assertEqual(data_size[1], sidd.Measurement.PixelFootprint.Col, msg='data_size[1] and Col do not agree')
        with instance.subTest(msg='Basic fetch test for sidd index {} in reader '
                                  'of type {} for file {}'.format(i, reader_type_string, test_file)):
            # 2x2 fetches from each corner of the image
            instance.assertEqual(reader[:2, :2, i].shape[:2], (2, 2), msg='upper left fetch')
            instance.assertEqual(reader[-2:, :2, i].shape[:2], (2, 2), msg='lower left fetch')
            instance.assertEqual(reader[-2:, -2:, i].shape[:2], (2, 2), msg='lower right fetch')
            instance.assertEqual(reader[:2, -2:, i].shape[:2], (2, 2), msg='upper right fetch')
        with instance.subTest(msg='Verify fetching complete row(s) have correct size '
                                  'for sidd index {} in reader of type {} and file {}'.format(i, reader_type_string, test_file)):
            test_data = reader[:, :2, i]
            instance.assertEqual(test_data.shape[:2], (data_size[0], 2), msg='Complete row fetch size mismatch')
        with instance.subTest(msg='Verify fetching complete columns(s) have correct size '
                                  'for sidd index {} in reader of type {} file {}'.format(i, reader_type_string, test_file)):
            test_data = reader[:2, :, i]
            # bug fix: the message previously said 'row' in the column-fetch check
            instance.assertEqual(test_data.shape[:2], (2, data_size[1]), msg='Complete column fetch size mismatch')
        with instance.subTest(msg='Validity of sidd at index {} in reader of '
                                  'type {} for file {}'.format(i, reader_type_string, test_file)):
            if not sidd.is_valid(recursive=True, stack=False):
                logging.warning('sidd at index {} in reader of type {} for file {} not valid'.format(i, reader_type_string, test_file))
    del reader
# NB: I'm splitting these tests to ensure interpretable names - each reader has its own test.
class TestSIDD(unittest.TestCase):
    """Reader and parsing tests for SIDD product files."""
    @unittest.skipIf(len(product_file_types.get('SIDD', [])) == 0, 'No SIDD files specified or found')
    def test_sidd_reader(self):
        """Run the generic reader battery over every configured SIDD file."""
        for test_file in product_file_types['SIDD']:
            generic_reader_test(self, test_file, 'SIDD', SIDDReader)
    def test_from_xml(self):
        """A SIDD structure parsed from the example XML should be valid for writing."""
        sidd_meta = sarpy_sidd3.SIDDType.from_xml_file(pathlib.Path(__file__).parents[2] / 'data/example.sidd.xml')
        sarpy.io.product.sidd.validate_sidd_for_writing(sidd_meta)
    def test_from_xml_no_sfa(self):
        """Parsing should still succeed after the unused 'sfa' namespace is stripped."""
        sidd_etree = lxml.etree.parse(str(pathlib.Path(__file__).parents[2] / 'data/example.sidd.xml'))
        pre_nsmap = sidd_etree.getroot().nsmap
        # cleanup_namespaces drops namespace declarations unused in the document
        lxml.etree.cleanup_namespaces(sidd_etree)
        post_nsmap = sidd_etree.getroot().nsmap
        assert 'sfa' in set(pre_nsmap).difference(post_nsmap)
        sidd_meta = sarpy_sidd3.SIDDType.from_xml_string(lxml.etree.tostring(sidd_etree))
        sarpy.io.product.sidd.validate_sidd_for_writing(sidd_meta)
| 5,464
| 49.601852
| 135
|
py
|
sarpy
|
sarpy-master/tests/io/product/test_sidd_schema.py
|
#
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import re
import lxml.etree
import pytest
import sarpy.io.product.sidd_schema as sarpy_sidd
@pytest.mark.parametrize('sidd_version', sarpy_sidd.get_versions())
def test_validate_xml_ns(sidd_version):
    """validate_xml_ns should accept the namespace map taken directly from each SIDD schema."""
    xml_ns, ns_key = get_sidd_nsmap(sidd_version)
    assert sarpy_sidd.validate_xml_ns(xml_ns, ns_key)
def get_sidd_nsmap(sidd_version):
    """Return (nsmap, key) for the given SIDD schema, where key maps to its targetNamespace."""
    schema_root = lxml.etree.parse(sarpy_sidd.get_schema_path(sidd_version)).getroot()
    nsmap = schema_root.nsmap
    urn_to_prefix = dict(zip(nsmap.values(), nsmap.keys()))
    return nsmap, urn_to_prefix[schema_root.get('targetNamespace')]
@pytest.fixture
def sidd_nsmap():
    """Namespace map (and target-namespace key) of the most recent SIDD schema version."""
    return get_sidd_nsmap(sarpy_sidd.get_versions()[-1])
def test_validate_xml_ns_no_ns_key(sidd_nsmap):
    """A namespace map missing the key itself should raise ValueError."""
    nsmap, key = sidd_nsmap
    nsmap.pop(key)
    with pytest.raises(ValueError):
        sarpy_sidd.validate_xml_ns(nsmap, key)
def test_validate_xml_ns_unmapped(sidd_nsmap):
    """A key mapped to an unrecognized SIDD urn should fail validation."""
    xml_ns, _ = sidd_nsmap
    xml_ns['bad'] = 'urn:SIDD:9.9.9'
    assert not sarpy_sidd.validate_xml_ns(xml_ns, 'bad')
def test_validate_xml_change_keys(sidd_nsmap):
    """Renaming every prefix should not matter; validation keys off the urns."""
    xml_ns, ns_key = sidd_nsmap
    xml_ns_changed_keys = {f'{k}_changed': v for k, v in xml_ns.items()}
    assert sarpy_sidd.validate_xml_ns(xml_ns_changed_keys, f'{ns_key}_changed')
def test_validate_xml_mismatched_ns(sidd_nsmap, caplog):
    """A corrupted 'ism' urn should fail validation and log an explanatory message."""
    xml_ns, ns_key = sidd_nsmap
    xml_ns['ism'] += '_make_bad'
    assert not sarpy_sidd.validate_xml_ns(xml_ns, ns_key)
    assert re.search(r'namespace urn is expected to be.*but we got', caplog.text)
def test_validate_xml_missing_required_ns(sidd_nsmap, caplog):
    """Dropping the required 'ism' namespace should fail validation and log the omission."""
    xml_ns, ns_key = sidd_nsmap
    del xml_ns['ism']
    assert not sarpy_sidd.validate_xml_ns(xml_ns, ns_key)
    assert re.search(r'No.* namespace defined.', caplog.text)
def test_validate_xml_missing_optional_ns(sidd_nsmap):
    """Dropping the optional 'sfa' namespace should still validate."""
    xml_ns, ns_key = sidd_nsmap
    del xml_ns['sfa']
    assert sarpy_sidd.validate_xml_ns(xml_ns, ns_key)
| 2,029
| 28.852941
| 86
|
py
|
sarpy
|
sarpy-master/tests/io/product/test_sidd_writing.py
|
import os
import json
import tempfile
import shutil
import unittest
from sarpy.io.complex.sicd import SICDReader
from sarpy.io.product.sidd import SIDDReader
from sarpy.io.product.sidd_schema import get_schema_path
from sarpy.processing.sidd.sidd_product_creation import create_detected_image_sidd, create_dynamic_image_sidd, create_csi_sidd
from sarpy.processing.ortho_rectify import NearestNeighborMethod
import sarpy.geometry.geometry_elements as ge
from tests import parse_file_entry
try:
    from lxml import etree
except ImportError:
    # schema-validation checks are simply skipped when lxml is unavailable
    etree = None
product_file_types = {}  # maps file-type name (e.g. 'SICD') to a list of locally available test files
this_loc = os.path.abspath(__file__)
file_reference = os.path.join(os.path.split(this_loc)[0], 'product_file_types.json')  # specifies file locations
if os.path.isfile(file_reference):
    # populate the listing from the (optional) local configuration file;
    # entries that do not resolve to an existing file are dropped
    with open(file_reference, 'r') as fi:
        the_files = json.load(fi)
        for the_type in the_files:
            valid_entries = []
            for entry in the_files[the_type]:
                the_file = parse_file_entry(entry)
                if the_file is not None:
                    valid_entries.append(the_file)
            product_file_types[the_type] = valid_entries
sicd_files = product_file_types.get('SICD', [])
def check_versus_schema(input_nitf, the_schema):
    """Validate the first SIDD DES of the given NITF against the schema; return True if valid."""
    des_bytes = SIDDReader(input_nitf).nitf_details.get_des_bytes(0)
    document = etree.fromstring(des_bytes)
    validator = etree.XMLSchema(file=the_schema)
    return validator.validate(document)
class TestSIDDWriting(unittest.TestCase):
    """End-to-end creation of SIDD products (all versions) from each available SICD file."""
    @unittest.skipIf(len(sicd_files) == 0, 'No sicd files found')
    def test_sidd_creation(self):
        """Create detected-image, csi, and dynamic-image SIDDs (versions 1-3) and schema-check them."""
        for fil in sicd_files:
            reader = SICDReader(fil)
            ortho_helper = NearestNeighborMethod(reader)
            # create a temp directory
            temp_directory = tempfile.mkdtemp()
            sidd_files = []
            # create a basic sidd detected image
            with self.subTest(msg='Create version 1 detected image for file {}'.format(fil)):
                create_detected_image_sidd(
                    ortho_helper, temp_directory, output_file='di_1.nitf', version=1)
                sidd_files.append('di_1.nitf')
            with self.subTest(msg='Create version 2 detected image for file {}'.format(fil)):
                create_detected_image_sidd(
                    ortho_helper, temp_directory, output_file='di_2.nitf', version=2)
                sidd_files.append('di_2.nitf')
            with self.subTest(msg='Create version 3 detected image for file {}'.format(fil)):
                create_detected_image_sidd(
                    ortho_helper, temp_directory, output_file='di_3.nitf', version=3)
                sidd_files.append('di_3.nitf')
            # create a csi image
            with self.subTest(msg='Create version 1 csi for file {}'.format(fil)):
                create_csi_sidd(
                    ortho_helper, temp_directory, output_file='csi_1.nitf', version=1)
                sidd_files.append('csi_1.nitf')
            with self.subTest(msg='Create version 2 csi for file {}'.format(fil)):
                create_csi_sidd(
                    ortho_helper, temp_directory, output_file='csi_2.nitf', version=2)
                sidd_files.append('csi_2.nitf')
            with self.subTest(msg='Create version 3 csi for file {}'.format(fil)):
                create_csi_sidd(
                    ortho_helper, temp_directory, output_file='csi_3.nitf', version=3)
                sidd_files.append('csi_3.nitf')
            # create a dynamic image
            with self.subTest(msg='Create version 1 subaperture stack for file {}'.format(fil)):
                create_dynamic_image_sidd(
                    ortho_helper, temp_directory, output_file='sast_1.nitf', version=1, frame_count=3)
                sidd_files.append('sast_1.nitf')
            with self.subTest(msg='Create version 2 subaperture stack for file {}'.format(fil)):
                create_dynamic_image_sidd(
                    ortho_helper, temp_directory, output_file='sast_2.nitf', version=2, frame_count=3)
                sidd_files.append('sast_2.nitf')
            with self.subTest(msg='Create version 3 subaperture stack for file {}'.format(fil)):
                create_dynamic_image_sidd(
                    ortho_helper, temp_directory, output_file='sast_3.nitf', version=3, frame_count=3)
                sidd_files.append('sast_3.nitf')
            # check that each sidd structure serialized according to the schema
            # (only possible when lxml is installed; etree is None otherwise)
            if etree is not None:
                for vers in [1, 2, 3]:
                    schema = get_schema_path('urn:SIDD:{}.0.0'.format(vers))
                    the_fil = 'di_{}.nitf'.format(vers)
                    if the_fil in sidd_files:
                        self.assertTrue(
                            check_versus_schema(os.path.join(temp_directory, the_fil), schema),
                            'Detected image version {} structure not valid versus schema {}'.format(vers, schema))
                    the_fil = 'csi_{}.nitf'.format(vers)
                    if the_fil in sidd_files:
                        self.assertTrue(
                            check_versus_schema(os.path.join(temp_directory, the_fil), schema),
                            'csi version {} structure not valid versus schema {}'.format(vers, schema))
                    the_fil = 'sast_{}.nitf'.format(vers)
                    if the_fil in sidd_files:
                        self.assertTrue(
                            check_versus_schema(os.path.join(temp_directory, the_fil), schema),
                            'Dynamic image version {} structure not valid versus schema {}'.format(vers, schema))
            # clean up the temporary directory
            shutil.rmtree(temp_directory)
class TestSIDDOptionalFields(unittest.TestCase):
    """Tests for optional SIDD fields, validated against the SIDD 3.0.0 schema.

    ``setUp`` builds a detected-image SIDD product from the first available
    SICD test file; ``is_instance_valid`` checks serialized XML instances
    against the SIDD 3.0.0 schema.
    """

    def setUp(self):
        # No SICD test data available: leave the fixture unpopulated.
        # Test methods are expected to skip themselves in that case.
        if not sicd_files:
            return
        sicd_filename = sicd_files[0]
        self.temp_directory = tempfile.mkdtemp()
        reader = SICDReader(sicd_filename)
        ortho_helper = NearestNeighborMethod(reader)
        self.sidd_filename = 'di.nitf'
        create_detected_image_sidd(
            ortho_helper, self.temp_directory, output_file=self.sidd_filename, version=3)
        self.schema = get_schema_path('urn:SIDD:3.0.0')

    def is_instance_valid(self, instance_bytes):
        """Return True if `instance_bytes` parses and validates against the schema.

        Validation errors are printed to aid debugging of failing tests.
        """
        xml_doc = etree.fromstring(instance_bytes)
        xml_schema = etree.XMLSchema(file=self.schema)
        result = xml_schema.validate(xml_doc)
        if not result:
            print(xml_schema.error_log)
        return result

    def tearDown(self):
        # BUG FIX: the original applied @unittest.skipIf to tearDown, but
        # unittest only honors skip decorators on test methods and classes --
        # on tearDown it does not skip any tests and can surface SkipTest as
        # a teardown error instead. The explicit guard below is the correct
        # way to make cleanup a no-op when no SICD data was staged.
        if not sicd_files:
            return
        shutil.rmtree(self.temp_directory)
| 6,799
| 42.589744
| 126
|
py
|
sarpy
|
sarpy-master/tests/io/product/__init__.py
|
__classification__ = 'UNCLASSIFIED'
| 37
| 11.666667
| 35
|
py
|
sarpy
|
sarpy-master/tests/io/product/sidd3_elements/test_exploitationfeatures.py
|
#
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import datetime
import itertools
import logging
import pathlib
import numpy as np
import pytest
import sarpy.geometry.geocoords
import sarpy.io.complex.sicd_elements.SCPCOA
from sarpy.io.complex.sicd_elements.SICD import SICDType
import sarpy.io.product.sidd3_elements.ExploitationFeatures as ef3
def test_exploitation_calculator():
    """Check the derived geometry angles for a simple "shadows up" collection."""
    scene_center = sarpy.geometry.geocoords.geodetic_to_ecf([0, 0, 0])
    geometry = sarpy.io.complex.sicd_elements.SCPCOA.GeometryCalculator(
        SCP=scene_center,
        ARPPos=scene_center + [1000, 0, -1000],
        ARPVel=np.asarray([0, 100, 0]),
    )
    # Image axes chosen so shadows point "up": rows run south, columns east.
    calculator = ef3.ExploitationCalculator(
        geometry,
        row_vector=np.asarray([0, 0, -1]),  # South
        col_vector=np.asarray([0, 1, 0]),   # East
    )
    assert calculator.North == pytest.approx(270)
    assert calculator.Shadow.Angle == pytest.approx(270)
    assert calculator.Layover.Angle == pytest.approx(90)
    assert calculator.MultiPath == pytest.approx(90)
    assert calculator.GroundTrack == pytest.approx(0)
# Polarization codes fed to TxRcvPolarizationType below; the parametrized test
# exercises every (transmit, receive) combination of these values.
POLS = ('V', 'H', 'X', 'Y', 'S', 'E', 'RHC', 'LHC', 'OTHER', 'OTHER_CUSTOM', 'UNKNOWN')
@pytest.mark.parametrize(('txpol', 'rcvpol'), itertools.product(POLS, POLS))
def test_txrcvpolarization(txpol, rcvpol, caplog):
    """Constructing TxRcvPolarizationType from any code pair must not log."""
    with caplog.at_level(logging.INFO, 'sarpy.io.xml.descriptors'):
        ef3.TxRcvPolarizationType(TxPolarization=txpol, RcvPolarization=rcvpol)
    assert len(caplog.records) == 0
@pytest.mark.parametrize(('sicd_str', 'expected'),
                         [(None, ('UNKNOWN', 'UNKNOWN')),
                          ('OTHER', ('UNKNOWN', 'UNKNOWN')),
                          ('UNKNOWN', ('UNKNOWN', 'UNKNOWN')),
                          ('V:H', ('V', 'H'))])
def test_txrcvpolarization_from_sicd(sicd_str, expected):
    """from_sicd_value must split a SICD polarization string into Tx/Rcv parts."""
    polarization = ef3.TxRcvPolarizationType.from_sicd_value(sicd_str)
    expected_tx, expected_rcv = expected
    assert polarization.TxPolarization == expected_tx
    assert polarization.RcvPolarization == expected_rcv
def test_txrcvpolarization_from_sicd_error():
    """from_sicd_value rejects non-string, non-None input with TypeError."""
    bad_input = 0
    with pytest.raises(TypeError):
        ef3.TxRcvPolarizationType.from_sicd_value(bad_input)
def test_exploitation_features_collection_information(caplog):
    """Exercise the LocalDateTime setter with None, datetime, str, and bad input."""
    info = ef3.ExploitationFeaturesCollectionInformationType()

    # None is accepted and stored as-is.
    info.LocalDateTime = None
    assert info.LocalDateTime is None

    # A datetime value is converted to its ISO-format string.
    info.LocalDateTime = datetime.datetime(2023, 1, 1, 12, 34, 56, 789000)
    assert info.LocalDateTime == '2023-01-01T12:34:56.789000'

    # A string value is stored verbatim.
    info.LocalDateTime = '1985-10-26T01:20:00'
    assert info.LocalDateTime == '1985-10-26T01:20:00'

    # An unsupported type resets the field to None and emits a log record.
    with caplog.at_level(logging.INFO, 'sarpy.io.product.sidd3_elements.ExploitationFeatures'):
        info.LocalDateTime = 1234
    assert info.LocalDateTime is None
    assert caplog.records
def test_exploitation_features_collection_information_from_sicd_error():
    """from_sicd must raise TypeError when given something other than a SICD."""
    not_a_sicd = None
    with pytest.raises(TypeError):
        ef3.ExploitationFeaturesCollectionInformationType.from_sicd(not_a_sicd)
@pytest.fixture(scope='module')
def sicd():
    """Module-scoped SICD structure parsed from the bundled example XML file."""
    # Climb from .../tests/io/product/sidd3_elements up to the tests directory.
    tests_dir = pathlib.Path(__file__).parents[3]
    xml_file = tests_dir / 'data' / 'example.sicd.xml'
    return SICDType().from_xml_file(xml_file)
def test_from_sicd(sicd):
    """Geometry derived via from_sicd must agree with the SICD's SCPCOA angles."""
    plane = sicd.RadarCollection.Area.Plane
    features = ef3.ExploitationFeaturesType.from_sicd(
        sicd,
        plane.XDir.UVectECF.get_array(),
        plane.YDir.UVectECF.get_array())
    collection = features.Collections[0]
    assert collection.Geometry.Azimuth == pytest.approx(sicd.SCPCOA.AzimAng)
    # Layover is reported clockwise from the column direction.
    expected_layover = (sicd.SCPCOA.LayoverAng - sicd.SCPCOA.AzimAng - 90) % 360
    assert collection.Phenomenology.Layover.Angle == pytest.approx(expected_layover, abs=0.01)
| 3,799
| 36.254902
| 110
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.